python_code | repo_name | file_path
---|---|---|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run bandits example in multiprocess mode:
$ python3 examples/bandits/membership_inference.py --multiprocess
To run bandits example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/bandits/launcher.py \
examples/bandits/membership_inference.py
"""
import argparse
import logging
import os
import pickle
import examples.util
import torch
import visdom
from examples.multiprocess_launcher import MultiProcessLauncher
def compute_rewards(weights, dataset, epsilon=0.0):
"""
Perform inference using epsilon-greedy contextual bandit (without updates).
"""
context, rewards = dataset
context = context.type(torch.float32)
# compute scores:
scores = torch.matmul(weights, context.t()).squeeze()
explore = (torch.rand(scores.shape[1]) < epsilon).type(torch.float32)
rand_scores = torch.rand_like(scores)
scores.mul_(1 - explore).add_(rand_scores.mul(explore))
# select arm and observe reward:
selected_arms = scores.argmax(dim=0)
return rewards[range(rewards.shape[0]), selected_arms]
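# Added illustrative sketch (not part of the original example; toy shapes are
# assumptions): how the epsilon-greedy scoring above behaves.
#
#   weights = torch.randn(3, 5)                      # 3 arms, 5-dim contexts
#   context = torch.randn(10, 5)                     # 10 samples
#   rewards = torch.randint(0, 2, (10, 3)).float()   # reward of each arm per sample
#   picked = compute_rewards(weights, (context, rewards), epsilon=0.1)
#   # `picked` has shape (10,): with probability 1 - epsilon the arm with the
#   # highest score <w_a, x> wins; otherwise a uniformly random arm is chosen.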
def membership_accuracy(model, positive_set, negative_set, epsilon=0.0):
"""
Measure accuracy of membership inference attacks on model using the specified
positive and negative data sets.
"""
# compute weights for all arms:
weights = model["b"].unsqueeze(1).matmul(model["A_inv"]).squeeze(1)
weights = weights.type(torch.float32)
# compute rewards for both sets:
rewards = {
"positive": compute_rewards(weights, positive_set, epsilon=epsilon),
"negative": compute_rewards(weights, negative_set, epsilon=epsilon),
}
def p_reward(x):
return torch.sum(x).type(torch.float32) / x.numel()
p_reward_pos = p_reward(rewards["positive"])
p_reward_neg = p_reward(rewards["negative"])
advantage = (p_reward_pos - p_reward_neg).abs().item()
return advantage
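# Added note (illustrative numbers): the returned advantage is the absolute gap
# between the empirical reward rates on training ("member") and test ("non-member")
# data, e.g. p_reward_pos = 0.62 and p_reward_neg = 0.55 give advantage = 0.07;
# a value near 0 means the attacker cannot distinguish members from non-members.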
def parse_args():
"""
Parse input arguments.
"""
parser = argparse.ArgumentParser(description="Perform membership inference attacks")
parser.add_argument(
"--pca", default=20, type=int, help="Number of PCA dimensions (0 for raw data)"
)
parser.add_argument(
"--number_arms",
default=None,
type=int,
help="create arbitrary number of arms via k-means",
)
parser.add_argument(
"--bandwidth",
default=1.0,
type=float,
help="bandwidth of kernel used to assign rewards",
)
parser.add_argument(
"--checkpoint_folder",
default=None,
type=str,
help="folder from which to load checkpointed models",
)
parser.add_argument(
"--permfile", default=None, type=str, help="file with sampling permutation"
)
parser.add_argument(
"--epsilon",
default=0.01,
type=float,
help="exploration parameter (default = 0.01)",
)
parser.add_argument(
"--savefile", default=None, type=str, help="file to pickle advantages"
)
parser.add_argument(
"--visualize", action="store_true", help="visualize results with visdom"
)
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
return parser.parse_args()
def membership_inference(args, load_data_module, download_mnist):
# load clusters:
clusters = None
if args.number_arms is not None:
clusters_file = "clusters_K=%d_pca=%d.torch" % (args.number_arms, args.pca)
clusters_file = os.path.join(load_data_module.MEMOIZE_FOLDER, clusters_file)
logging.info("Loading clusters from file...")
clusters = torch.load(clusters_file)
# load dataset:
train_data, _ = load_data_module.load_data(
split="train", download_mnist_func=download_mnist
)
components = examples.util.pca(train_data, args.pca)
positive_set = load_data_module.load_data(
split="train",
pca=components,
clusters=clusters,
bandwidth=args.bandwidth,
download_mnist_func=download_mnist,
)
negative_set = load_data_module.load_data(
split="test",
pca=components,
clusters=clusters,
bandwidth=args.bandwidth,
download_mnist_func=download_mnist,
)
# get list of checkpoints:
model_files = [
os.path.join(args.checkpoint_folder, filename)
for filename in os.listdir(args.checkpoint_folder)
if filename.endswith(".torch")
]
model_files = sorted(model_files)
iterations = [int(os.path.splitext(f)[0].split("_")[-1]) for f in model_files]
# load permutation used in training:
perm = load_data_module.load_data_sampler(
permfile=args.permfile, download_mnist_func=download_mnist
)
def subset(dataset, iteration):
ids = perm[:iteration]
return tuple(d[ids, :] for d in dataset)
# measure accuracies of membership inference attacks:
advantage = [
membership_accuracy(
torch.load(model_file),
subset(positive_set, iteration),
negative_set,
epsilon=args.epsilon,
)
for model_file, iteration in zip(model_files, iterations)
]
# save advantages to file:
if args.savefile is not None:
with open(args.savefile, "wb") as fid:
pickle.dump(advantage, fid)
# plot advantages:
if args.visualize:
opts = {
"xlabel": "Number of iterations",
"ylabel": "Accuracy of inference attack",
}
visdom.line(iterations, advantage, opts=opts)
def _run_experiment(args):
import launcher
membership_inference(args, launcher, launcher.download_mnist)
def main(run_experiment):
# parse command-line arguments:
args = parse_args()
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/bandits/membership_inference.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run bandits example in multiprocess mode:
$ python3 examples/bandits/launcher.py --multiprocess
To run bandits example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/bandits/plain_contextual_bandits.py,\
examples/bandits/private_contextual_bandits.py \
examples/bandits/launcher.py
"""
import argparse
import logging
import os
import random
import examples.util
import torch
import visdom
from examples.multiprocess_launcher import MultiProcessLauncher
from examples.util import NoopContextManager, process_mnist_files
from torchvision.datasets.mnist import MNIST
def learning_curve(visualizer, idx, value, window=None, title=""):
"""
Appends new value to learning curve, creating new curve if none exists.
"""
opts = {"title": title, "xlabel": "Number of samples", "ylabel": "Reward value"}
window = visualizer.line(
value.view(value.nelement(), 1),
idx,
update=None if window is None else "append",
opts=opts,
win=window,
env="contextual_bandits",
)
return window
def download_mnist(split="train"):
"""
Loads split from the MNIST dataset and returns data.
"""
train = split == "train"
# If the MNIST dataset needs to be downloaded and uncompressed,
# a separate directory must be created for each process.
mnist_exists = os.path.exists(
os.path.join(
"/tmp/MNIST/processed", MNIST.training_file if train else MNIST.test_file
)
)
if mnist_exists:
mnist_root = "/tmp"
else:
rank = "0" if "RANK" not in os.environ else os.environ["RANK"]
mnist_root = os.path.join("tmp", "bandits", rank)
os.makedirs(mnist_root, exist_ok=True)
# download the MNIST dataset:
with NoopContextManager():
mnist = MNIST(mnist_root, download=not mnist_exists, train=train)
return mnist
def load_data(
split="train",
pca=None,
clusters=None,
bandwidth=1.0,
download_mnist_func=download_mnist,
):
"""
Loads split from the MNIST dataset and returns data.
"""
# download the MNIST dataset:
mnist = download_mnist_func(split)
# preprocess the MNIST dataset:
context = mnist.data.float().div_(255.0)
context = context.view(context.size(0), -1)
# apply PCA:
if pca is not None:
context -= torch.mean(context, dim=0, keepdim=True)
context = context.matmul(pca)
context /= torch.norm(context, dim=1, keepdim=True)
# compute rewards (based on clustering if clusters defined, 0-1 otherwise):
if clusters is not None:
assert clusters.size(1) == context.size(
1
), "cluster dimensionality does not match data dimensionality"
rewards = examples.util.kmeans_inference(
context, clusters, hard=False, bandwidth=bandwidth
)
else:
rewards = examples.util.onehot(mnist.targets.long())
# return data:
return context, rewards
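# Added shape note (illustrative, assuming the default MNIST train split with
# pca=None and clusters=None): `context` is (60000, 784) with pixels scaled to
# [0, 1] and `rewards` is a (60000, 10) one-hot matrix of digit labels, so pulling
# the arm that matches the digit yields reward 1 and every other arm yields 0.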
def load_data_sampler(
split="train",
pca=None,
clusters=None,
bandwidth=1.0,
permfile=None,
download_mnist_func=download_mnist,
):
"""
Loads split from the MNIST dataset and returns sampler.
"""
# load dataset:
context, rewards = load_data(
split=split,
pca=pca,
clusters=clusters,
bandwidth=bandwidth,
download_mnist_func=download_mnist_func,
)
if permfile is not None:
perm = torch.load(permfile)
assert perm.shape[0] == context.shape[0], "Incorrect perm size for context."
else:
perm = torch.randperm(context.size(0))
# define simple dataset sampler:
def sampler():
idx = 0
while idx < context.size(0):
yield {"context": context[perm[idx], :], "rewards": rewards[perm[idx], :]}
idx += 1
# return sampler:
return sampler
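# Added usage sketch (not part of the original file; argument values are
# assumptions): the returned object is a generator function that yields one
# permuted sample per step.
#
#   sampler = load_data_sampler(split="train")
#   for sample in sampler():
#       context, reward_vector = sample["context"], sample["rewards"]
#       # feed one (context, rewards) pair per iteration into the bandit learner
#       break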
def parse_args(hostname):
"""
Parse input arguments.
"""
parser = argparse.ArgumentParser(
description="Train contextual bandit model using encrypted learning signal"
)
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--plaintext", action="store_true", help="use a non-private algorithm"
)
parser.add_argument(
"--backend", default="mpc", type=str, help="crypten backend: mpc (default)"
)
parser.add_argument(
"--mnist-split",
default="train",
type=str,
help="The split from the MNIST dataset (default = train)",
)
parser.add_argument(
"--mnist-dir",
default=None,
type=str,
help="path to the dir of MNIST raw data files",
)
parser.add_argument(
"--learner",
default="epsilon_greedy",
type=str,
help="learning algorithm: epsilon_greedy or linucb",
)
parser.add_argument(
"--epsilon",
default=0.01,
type=float,
help="exploration parameter (default = 0.01)",
)
parser.add_argument(
"--visualize", action="store_true", help="visualize results with visdom"
)
parser.add_argument(
"--visdom",
default=hostname,
type=str,
help="visdom server to use (default = %s)" % hostname,
)
parser.add_argument(
"--pca", default=20, type=int, help="Number of PCA dimensions (0 for raw data)"
)
parser.add_argument(
"--precision",
default=20,
type=int,
help="Bits of precision for encoding floats.",
)
parser.add_argument(
"--nr_iters",
default=7,
type=int,
help="Newton-Rhapson iterations for mpc reciprocal",
)
parser.add_argument(
"--number_arms",
default=None,
type=int,
help="create arbitrary number of arms via k-means",
)
parser.add_argument(
"--bandwidth",
default=1.0,
type=float,
help="bandwidth of kernel used to assign rewards",
)
parser.add_argument(
"--memoize_folder",
default="/tmp/kmeans",
type=str,
help="folder to save k-means clusters",
)
parser.add_argument(
"--checkpoint_folder",
default=None,
type=str,
help="folder in which to checkpoint models",
)
parser.add_argument(
"--checkpoint_every",
default=1000,
type=int,
help="checkpoint every K iterations",
)
parser.add_argument(
"--permfile", default=None, type=str, help="file with sampling permutation"
)
parser.add_argument("--seed", default=None, type=int, help="Seed the torch rng")
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
return parser.parse_args()
def get_monitor_func(args, buffers, visualizer, window, title, progress_iter):
"""
Return closure that performs monitoring.
"""
def monitor_func(idx, reward, total_reward, iter_time, finished=False):
def mean(vals):
return torch.DoubleTensor(vals).mean().item()
# flush buffers:
if finished:
for key, val in buffers.items():
buffers[key] = [item for item in val if item is not None]
if finished or (idx > 0 and idx % progress_iter == 0):
logging.info(
"Sample %s; average reward = %2.5f, time %.3f (sec/iter) "
% (idx, mean(buffers["reward"]), mean(buffers["iter_time"]))
)
if args.visualize:
window[0] = learning_curve(
visualizer,
torch.tensor(buffers["idx"], dtype=torch.long),
torch.DoubleTensor(buffers["cumulative_reward"]),
window=window[0],
title=title,
)
for key in buffers.keys():
buffers[key] = [None] * progress_iter
# fill buffers:
if idx is not None:
cur_idx = idx % progress_iter
buffers["idx"][cur_idx] = idx
buffers["reward"][cur_idx] = reward
buffers["cumulative_reward"][cur_idx] = total_reward
buffers["iter_time"][cur_idx] = iter_time
return monitor_func
def get_checkpoint_func(args):
"""
Return closure that performs checkpointing.
"""
def checkpoint_func(idx, model):
if "RANK" not in os.environ or os.environ["RANK"] == 0:
if args.checkpoint_folder is not None:
checkpoint_file = os.path.join(
args.checkpoint_folder, "iter_%05d.torch" % idx
)
torch.save(model, checkpoint_file)
return checkpoint_func
def build_learner(args, bandits, download_mnist):
# set up loggers:
logger = logging.getLogger()
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logger.setLevel(level)
visualizer = visdom.Visdom(args.visdom) if args.visualize else None
# allow comparisons between plain and private algorithm:
if args.seed is not None:
torch.manual_seed(args.seed)
random.seed(args.seed)
if args.plaintext:
logging.info("Using plain text bandit")
kwargs = {"dtype": torch.double, "device": "cpu"}
else:
logging.info(f"Using encrypted bandit with {args.backend}")
kwargs = {
"backend": args.backend,
"precision": args.precision,
"nr_iters": args.nr_iters,
}
# set up variables for progress monitoring:
window = [None]
title = "Cumulative reward (encrypted %s, epsilon = %2.2f)" % (
args.learner,
args.epsilon,
)
progress_iter = 100
buffers = {
key: [None] * progress_iter
for key in ["idx", "reward", "cumulative_reward", "iter_time"]
}
# closures that perform progress monitoring and checkpointing:
monitor_func = get_monitor_func(
args, buffers, visualizer, window, title, progress_iter
)
checkpoint_func = get_checkpoint_func(args)
# compute pca:
context, _ = load_data(
split=args.mnist_split, pca=None, download_mnist_func=download_mnist
)
pca = examples.util.pca(context, args.pca)
# create or load clustering if custom number of arms is used:
clusters = None
if args.number_arms is not None:
clusters_file = "clusters_K=%d_pca=%d.torch" % (args.number_arms, args.pca)
clusters_file = os.path.join(args.memoize_folder, clusters_file)
# load precomputed clusters from file:
if os.path.exists(clusters_file):
logging.info("Loading clusters from file...")
clusters = torch.load(clusters_file)
else:
# load data and allocate clusters:
context, _ = load_data(
split=args.mnist_split, pca=pca, download_mnist_func=download_mnist
)
clusters = context.new((args.number_arms, context.size(1)))
# run clustering in process 0:
if (
not torch.distributed.is_initialized()
or torch.distributed.get_rank() == 0
):
logging.info("Performing clustering to get arms...")
clusters = examples.util.kmeans(context, args.number_arms)
torch.save(clusters, clusters_file)
# if run is distributed, synchronize clusters:
if torch.distributed.is_initialized():
torch.distributed.barrier()
torch.distributed.broadcast(clusters, 0)
# run contextual bandit algorithm on MNIST:
sampler = load_data_sampler(
split=args.mnist_split,
pca=pca,
clusters=clusters,
bandwidth=args.bandwidth,
permfile=args.permfile,
download_mnist_func=download_mnist,
)
assert hasattr(bandits, args.learner), "unknown learner: %s" % args.learner
def learner_func():
getattr(bandits, args.learner)(
sampler,
epsilon=args.epsilon,
monitor_func=monitor_func,
checkpoint_func=checkpoint_func,
checkpoint_every=args.checkpoint_every,
**kwargs,
)
return learner_func
def _run_experiment(args):
if args.plaintext:
import plain_contextual_bandits as bandits
else:
import private_contextual_bandits as bandits
learner_func = build_learner(args, bandits, download_mnist)
import crypten
crypten.init()
learner_func()
def main(run_experiment):
"""
Runs encrypted contextual bandits learning experiment on MNIST.
"""
# parse input arguments:
args = parse_args(os.environ.get("HOSTNAME", "localhost"))
if args.mnist_dir is not None:
process_mnist_files(args.mnist_dir, "/tmp/MNIST/processed")
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
# run all the things:
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/bandits/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import os
import random
import shutil
import tempfile
import time
import warnings
import crypten
import crypten.communicator as comm
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
from examples.meters import AverageMeter
from examples.util import NoopContextManager, process_mnist_files
from torchvision import datasets, transforms
def run_tfe_benchmarks(
network="B",
epochs=5,
start_epoch=0,
batch_size=256,
lr=0.01,
momentum=0.9,
weight_decay=1e-6,
print_freq=10,
resume="",
evaluate=True,
seed=None,
skip_plaintext=False,
save_checkpoint_dir="/tmp/tfe_benchmarks",
save_modelbest_dir="/tmp/tfe_benchmarks_best",
context_manager=None,
mnist_dir=None,
):
crypten.init()
if seed is not None:
random.seed(seed)
torch.manual_seed(seed)
# create model
model = create_benchmark_model(network)
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(
model.parameters(), lr, momentum=momentum, weight_decay=weight_decay
)
# optionally resume from a checkpoint
best_prec1 = 0
if resume:
if os.path.isfile(resume):
logging.info("=> loading checkpoint '{}'".format(resume))
checkpoint = torch.load(resume)
start_epoch = checkpoint["epoch"]
best_prec1 = checkpoint["best_prec1"]
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
logging.info(
"=> loaded checkpoint '{}' (epoch {})".format(
resume, checkpoint["epoch"]
)
)
else:
logging.info("=> no checkpoint found at '{}'".format(resume))
# Loading MNIST. Normalizing per pytorch/examples/blob/master/mnist/main.py
def preprocess_data(context_manager, data_dirname):
if mnist_dir is not None:
process_mnist_files(
mnist_dir, os.path.join(data_dirname, "MNIST", "processed")
)
download = False
else:
download = True
with context_manager:
if not evaluate:
mnist_train = datasets.MNIST(
data_dirname,
download=download,
train=True,
transform=transforms.Compose(
[
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,)),
]
),
)
mnist_test = datasets.MNIST(
data_dirname,
download=download,
train=False,
transform=transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.1307,), (0.3081,))]
),
)
train_loader = (
torch.utils.data.DataLoader(
mnist_train, batch_size=batch_size, shuffle=True
)
if not evaluate
else None
)
test_loader = torch.utils.data.DataLoader(
mnist_test, batch_size=batch_size, shuffle=False
)
return train_loader, test_loader
if context_manager is None:
context_manager = NoopContextManager()
warnings.filterwarnings("ignore")
data_dir = tempfile.TemporaryDirectory()
train_loader, val_loader = preprocess_data(context_manager, data_dir.name)
flatten = False
if network == "A":
flatten = True
if evaluate:
if not skip_plaintext:
logging.info("===== Evaluating plaintext benchmark network =====")
validate(val_loader, model, criterion, print_freq, flatten=flatten)
private_model = create_private_benchmark_model(model, flatten=flatten)
logging.info("===== Evaluating Private benchmark network =====")
validate(val_loader, private_model, criterion, print_freq, flatten=flatten)
# validate_side_by_side(val_loader, model, private_model, flatten=flatten)
return
os.makedirs(save_checkpoint_dir, exist_ok=True)
os.makedirs(save_modelbest_dir, exist_ok=True)
for epoch in range(start_epoch, epochs):
adjust_learning_rate(optimizer, epoch, lr)
# train for one epoch
train(
train_loader,
model,
criterion,
optimizer,
epoch,
print_freq,
flatten=flatten,
)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, print_freq, flatten=flatten)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
checkpoint_file = "checkpoint_bn" + network + ".pth.tar"
model_best_file = "model_best_bn" + network + ".pth.tar"
save_checkpoint(
{
"epoch": epoch + 1,
"arch": "Benchmark" + network,
"state_dict": model.state_dict(),
"best_prec1": best_prec1,
"optimizer": optimizer.state_dict(),
},
is_best,
filename=os.path.join(save_checkpoint_dir, checkpoint_file),
model_best=os.path.join(save_modelbest_dir, model_best_file),
)
data_dir.cleanup()
shutil.rmtree(save_checkpoint_dir)
def train(
train_loader, model, criterion, optimizer, epoch, print_freq=10, flatten=False
):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# compute output
if flatten:
input = input.view(input.size(0), -1)
output = model(input)
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if i % print_freq == 0:
logging.info(
"Epoch: [{}][{}/{}]\t"
"Time {:.3f} ({:.3f})\t"
"Loss {:.4f} ({:.4f})\t"
"Prec@1 {:.3f} ({:.3f})\t"
"Prec@5 {:.3f} ({:.3f})".format(
epoch,
i,
len(train_loader),
current_batch_time,
batch_time.value(),
loss.item(),
losses.value(),
prec1[0],
top1.value(),
prec5[0],
top5.value(),
)
)
def validate_side_by_side(val_loader, plaintext_model, private_model, flatten=False):
# switch to evaluate mode
plaintext_model.eval()
private_model.eval()
with torch.no_grad():
for i, (input, target) in enumerate(val_loader):
# compute output
if flatten:
input = input.view(input.size(0), -1)
output0 = plaintext_model(input)
encr_input = crypten.cryptensor(input)
output1 = private_model(encr_input)
logging.info("==============================")
logging.info("Example %d\t target = %d" % (i, target))
logging.info("Plaintext:\n%s" % output0)
logging.info("Encrypted:\n%s\n" % output1.get_plain_text())
if i > 1000:
break
def validate(val_loader, model, criterion, print_freq=10, flatten=False):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
with torch.no_grad():
end = time.time()
for i, (input, target) in enumerate(val_loader):
# compute output
if flatten:
input = input.view(input.size(0), -1)
if isinstance(model, crypten.nn.Module) and not crypten.is_encrypted_tensor(
input
):
input = crypten.cryptensor(input)
output = model(input)
if crypten.is_encrypted_tensor(output):
output = output.get_plain_text()
loss = criterion(output, target)
# measure accuracy and record loss
prec1, prec5 = accuracy(output, target, topk=(1, 5))
losses.add(loss.item(), input.size(0))
top1.add(prec1[0], input.size(0))
top5.add(prec5[0], input.size(0))
# measure elapsed time
current_batch_time = time.time() - end
batch_time.add(current_batch_time)
end = time.time()
if (i + 1) % print_freq == 0:
logging.info(
"\nTest: [{}/{}]\t"
"Time {:.3f} ({:.3f})\t"
"Loss {:.4f} ({:.4f})\t"
"Prec@1 {:.3f} ({:.3f}) \t"
"Prec@5 {:.3f} ({:.3f})".format(
i + 1,
len(val_loader),
current_batch_time,
batch_time.value(),
loss.item(),
losses.value(),
prec1[0],
top1.value(),
prec5[0],
top5.value(),
)
)
if i > 100:
break
logging.info(
" * Prec@1 {:.3f} Prec@5 {:.3f}".format(top1.value(), top5.value())
)
return top1.value()
def save_checkpoint(
state, is_best, filename="checkpoint.pth.tar", model_best="model_best.pth.tar"
):
# TODO: use crypten.save_from_party() in future.
rank = comm.get().get_rank()
# only save for process rank = 0
if rank == 0:
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, model_best)
def adjust_learning_rate(optimizer, epoch, lr=0.01):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
new_lr = lr * (0.1 ** (epoch // 5))
for param_group in optimizer.param_groups:
param_group["lr"] = new_lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].flatten().float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
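# Added usage sketch (toy values are assumptions): calling the precision@k helper
# above on a batch of logits.
#
#   logits = torch.randn(8, 10)                 # batch of 8 samples, 10 classes
#   targets = torch.randint(0, 10, (8,))
#   prec1, prec5 = accuracy(logits, targets, topk=(1, 5))
#   # prec1 and prec5 are one-element tensors holding percentages in [0, 100].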
def create_benchmark_model(benchmark):
if benchmark == "A":
return NetworkA()
elif benchmark == "B":
return NetworkB()
elif benchmark == "C":
return NetworkC()
else:
raise RuntimeError("Invalid benchmark network")
def create_private_benchmark_model(model, flatten=False):
dummy_input = torch.empty((1, 1, 28, 28))
if flatten:
dummy_input = torch.empty((1, 28 * 28))
private_model = crypten.nn.from_pytorch(model, dummy_input)
private_model.encrypt()
return private_model
class NetworkA(nn.Module):
def __init__(self):
super(NetworkA, self).__init__()
self.fc1 = nn.Linear(784, 128)
self.fc2 = nn.Linear(128, 128)
self.fc3 = nn.Linear(128, 10)
self.batchnorm1 = nn.BatchNorm1d(128)
self.batchnorm2 = nn.BatchNorm1d(128)
def forward(self, x):
out = self.fc1(x)
out = self.batchnorm1(out)
out = F.relu(out)
out = self.fc2(out)
out = self.batchnorm2(out)
out = F.relu(out)
out = self.fc3(out)
return out
class NetworkB(nn.Module):
def __init__(self):
super(NetworkB, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=0)
self.conv2 = nn.Conv2d(16, 16, kernel_size=5, padding=0)
self.fc1 = nn.Linear(16 * 4 * 4, 100)
self.fc2 = nn.Linear(100, 10)
self.batchnorm1 = nn.BatchNorm2d(16)
self.batchnorm2 = nn.BatchNorm2d(16)
self.batchnorm3 = nn.BatchNorm1d(100)
def forward(self, x):
out = self.conv1(x)
out = self.batchnorm1(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
out = self.conv2(out)
out = self.batchnorm2(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
out = out.view(-1, 16 * 4 * 4)
out = self.fc1(out)
out = self.batchnorm3(out)
out = F.relu(out)
out = self.fc2(out)
return out
class NetworkC(nn.Module):
def __init__(self):
super(NetworkC, self).__init__()
self.conv1 = nn.Conv2d(1, 20, kernel_size=5, padding=0)
self.conv2 = nn.Conv2d(20, 50, kernel_size=5, padding=0)
self.fc1 = nn.Linear(50 * 4 * 4, 500)
self.fc2 = nn.Linear(500, 10)
self.batchnorm1 = nn.BatchNorm2d(20)
self.batchnorm2 = nn.BatchNorm2d(50)
self.batchnorm3 = nn.BatchNorm1d(500)
def forward(self, x):
out = self.conv1(x)
out = self.batchnorm1(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
out = self.conv2(out)
out = self.batchnorm2(out)
out = F.relu(out)
out = F.avg_pool2d(out, 2)
out = out.view(-1, 50 * 4 * 4)
out = self.fc1(out)
out = self.batchnorm3(out)
out = F.relu(out)
out = self.fc2(out)
return out
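# Added shape note (illustrative): NetworkA expects flattened (N, 784) inputs,
# while NetworkB and NetworkC expect (N, 1, 28, 28) MNIST images, e.g.
#
#   model = create_benchmark_model("C")
#   out = model(torch.rand(4, 1, 28, 28))   # out has shape (4, 10)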
| CrypTen-main | examples/tfe_benchmarks/tfe_benchmarks.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run tfe_benchmarks example in multiprocess mode:
$ python3 examples/tfe_benchmarks/launcher.py --multiprocess
To run tfe_benchmarks example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/tfe_benchmarks/tfe_benchmarks.py \
examples/tfe_benchmarks/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
parser = argparse.ArgumentParser(description="CrypTen TFEncrypted Benchmarks")
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--network",
default="B",
type=str,
help="choose from networks A, B and C (default: B)",
)
parser.add_argument(
"--epochs", default=5, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--start-epoch",
default=0,
type=int,
metavar="N",
help="manual epoch number (useful on restarts)",
)
parser.add_argument(
"-b",
"--batch-size",
default=256,
type=int,
metavar="N",
help="mini-batch size (default: 256)",
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.01,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument("--momentum", default=0.9, type=float, metavar="M", help="momentum")
parser.add_argument(
"--weight-decay",
"--wd",
default=1e-6,
type=float,
metavar="W",
help="weight decay (default: 1e-4)",
)
parser.add_argument(
"--print-freq",
"-p",
default=10,
type=int,
metavar="N",
help="print frequency (default: 10)",
)
parser.add_argument(
"--resume",
default="",
type=str,
metavar="PATH",
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--save-checkpoint-dir",
default="/tmp/tfe_benchmarks",
type=str,
metavar="SAVE",
help="path to the dir to save checkpoint (default: /tmp/tfe_benchmarks)",
)
parser.add_argument(
"--save-modelbest-dir",
default="/tmp/tfe_benchmarks_best",
type=str,
metavar="SAVE",
help="path to the dir to save the best model (default: /tmp/tfe_benchmarks_best)",
)
parser.add_argument(
"-e",
"--evaluate",
dest="evaluate",
action="store_true",
help="evaluate model on validation set",
)
parser.add_argument(
"--seed", default=None, type=int, help="seed for initializing training. "
)
parser.add_argument("--lr-decay", default=0.1, type=float, help="lr decay factor")
parser.add_argument(
"--skip-plaintext",
default=False,
action="store_true",
help="Skip validation for plaintext network",
)
parser.add_argument(
"--mnist-dir",
default=None,
type=str,
metavar="MNIST",
help="path to the dir of MNIST raw data files",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
def _run_experiment(args):
# only import here to initialize crypten within the subprocesses
from tfe_benchmarks import run_tfe_benchmarks
# Only Rank 0 will display logs.
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logging.getLogger().setLevel(level)
run_tfe_benchmarks(
args.network,
args.epochs,
args.start_epoch,
args.batch_size,
args.lr,
args.momentum,
args.weight_decay,
args.print_freq,
args.resume,
args.evaluate,
args.seed,
args.skip_plaintext,
os.path.join(args.save_checkpoint_dir, os.environ.get("RANK", "")),
os.path.join(args.save_modelbest_dir, os.environ.get("RANK", "")),
mnist_dir=args.mnist_dir,
)
def main(run_experiment):
args = parser.parse_args()
os.makedirs(args.save_checkpoint_dir, exist_ok=True)
os.makedirs(args.save_modelbest_dir, exist_ok=True)
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/tfe_benchmarks/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import tempfile
import crypten
import torch
import torchvision.datasets as datasets
import torchvision.models as models
import torchvision.transforms as transforms
from examples.meters import AccuracyMeter
from examples.util import NoopContextManager
try:
from crypten.nn.tensorboard import SummaryWriter
except ImportError: # tensorboard not installed
SummaryWriter = None
def run_experiment(
model_name,
imagenet_folder=None,
tensorboard_folder="/tmp",
num_samples=None,
context_manager=None,
):
"""Runs inference using specified vision model on specified dataset."""
crypten.init()
# check inputs:
assert hasattr(models, model_name), (
"torchvision does not provide %s model" % model_name
)
if imagenet_folder is None:
imagenet_folder = tempfile.gettempdir()
download = True
else:
download = False
if context_manager is None:
context_manager = NoopContextManager()
# load dataset and model:
with context_manager:
model = getattr(models, model_name)(pretrained=True)
model.eval()
dataset = datasets.ImageNet(imagenet_folder, split="val", download=download)
# define appropriate transforms:
transform = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
]
)
to_tensor_transform = transforms.ToTensor()
# encrypt model:
dummy_input = to_tensor_transform(dataset[0][0])
dummy_input.unsqueeze_(0)
encrypted_model = crypten.nn.from_pytorch(model, dummy_input=dummy_input)
encrypted_model.encrypt()
# show encrypted model in tensorboard:
if SummaryWriter is not None:
writer = SummaryWriter(log_dir=tensorboard_folder)
writer.add_graph(encrypted_model)
writer.close()
# loop over dataset:
meter = AccuracyMeter()
for idx, sample in enumerate(dataset):
# preprocess sample:
image, target = sample
image = transform(image)
image.unsqueeze_(0)
target = torch.tensor([target], dtype=torch.long)
# perform inference using encrypted model on encrypted sample:
encrypted_image = crypten.cryptensor(image)
encrypted_output = encrypted_model(encrypted_image)
# measure accuracy of prediction
output = encrypted_output.get_plain_text()
meter.add(output, target)
# progress:
logging.info(
"[sample %d of %d] Accuracy: %f" % (idx + 1, len(dataset), meter.value()[1])
)
if num_samples is not None and idx == num_samples - 1:
break
# print final accuracy:
logging.info("Accuracy on all %d samples: %f" % (len(dataset), meter.value()[1]))
| CrypTen-main | examples/mpc_imagenet/mpc_imagenet.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run mpc_imagenet example in multiprocess mode:
$ python3 examples/mpc_imagenet/launcher.py --multiprocess
To run mpc_imagenet example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/mpc_imagenet/mpc_imagenet.py \
examples/mpc_imagenet/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
from mpc_imagenet import run_experiment
# input arguments:
parser = argparse.ArgumentParser(description="Encrypted inference of vision models")
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--model",
default="resnet18",
type=str,
help="torchvision model to use for inference (default: resnet18)",
)
parser.add_argument(
"--imagenet_folder",
default=None,
type=str,
help="folder containing the ImageNet dataset",
)
parser.add_argument(
"--tensorboard_folder",
default="/tmp",
type=str,
help="folder in which tensorboard performs logging (default: /tmp)",
)
parser.add_argument(
"--num_samples",
default=None,
type=int,
help="number of samples to test on (default: all)",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
def _run_experiment(args):
# only worker with rank 0 will display logging information:
level = logging.INFO
rank = "0"
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
rank = os.environ["RANK"]
logging.getLogger().setLevel(level)
tensorboard_folder = "/tmp/mpc_imagenet/" + rank
os.makedirs(tensorboard_folder, exist_ok=True)
run_experiment(
args.model,
imagenet_folder=args.imagenet_folder,
tensorboard_folder=tensorboard_folder,
num_samples=args.num_samples,
)
def main():
args = parser.parse_args()
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, _run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
_run_experiment(args)
if __name__ == "__main__":
main()
| CrypTen-main | examples/mpc_imagenet/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run mpc_linear_svm example in multiprocess mode:
$ python3 examples/mpc_linear_svm/launcher.py --multiprocess
To run mpc_linear_svm example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/mpc_linear_svm/mpc_linear_svm.py \
examples/mpc_linear_svm/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
parser = argparse.ArgumentParser(description="CrypTen Linear SVM Training")
parser.add_argument(
"--world_size",
type=int,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--epochs", default=50, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--examples", default=50, type=int, metavar="N", help="number of examples per epoch"
)
parser.add_argument(
"--features",
default=100,
type=int,
metavar="N",
help="number of features per example",
)
parser.add_argument(
"--lr", "--learning-rate", default=0.5, type=float, help="initial learning rate"
)
parser.add_argument(
"--skip_plaintext",
default=False,
action="store_true",
help="skip evaluation for plaintext svm",
)
parser.add_argument(
"--multiprocess",
default=False,
action="store_true",
help="Run example in multiprocess mode",
)
def _run_experiment(args):
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logging.getLogger().setLevel(level)
logging.basicConfig(
level=level,
format="%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s",
)
from mpc_linear_svm import run_mpc_linear_svm
run_mpc_linear_svm(
args.epochs, args.examples, args.features, args.lr, args.skip_plaintext
)
def main(run_experiment):
args = parser.parse_args()
if args.multiprocess:
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
else:
run_experiment(args)
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/mpc_linear_svm/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
import time
import crypten
import torch
from examples.meters import AverageMeter
def train_linear_svm(features, labels, epochs=50, lr=0.5, print_time=False):
# Initialize random weights
w = features.new(torch.randn(1, features.size(0)))
b = features.new(torch.randn(1))
if print_time:
pt_time = AverageMeter()
end = time.time()
for epoch in range(epochs):
# Forward
label_predictions = w.matmul(features).add(b).sign()
# Compute accuracy
correct = label_predictions.mul(labels)
accuracy = correct.add(1).div(2).mean()
if crypten.is_encrypted_tensor(accuracy):
accuracy = accuracy.get_plain_text()
# Print Accuracy once
if crypten.communicator.get().get_rank() == 0:
print(
f"Epoch {epoch} --- Training Accuracy %.2f%%" % (accuracy.item() * 100)
)
# Backward
loss_grad = -labels * (1 - correct) * 0.5 # Hinge loss
b_grad = loss_grad.mean()
w_grad = loss_grad.matmul(features.t()).div(loss_grad.size(1))
# Update
w -= w_grad * lr
b -= b_grad * lr
if print_time:
iter_time = time.time() - end
pt_time.add(iter_time)
logging.info(" Time %.6f (%.6f)" % (iter_time, pt_time.value()))
end = time.time()
return w, b
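# Added commentary (not original code): for labels in {-1, +1}, `correct` above is
# +1 for correctly classified points and -1 otherwise, so (1 - correct) * 0.5 acts
# as a misclassification indicator and loss_grad = -y on misclassified samples is
# the (sub)gradient of a perceptron-style hinge loss with respect to the score;
# w_grad and b_grad simply average it over the batch.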
def evaluate_linear_svm(features, labels, w, b):
"""Compute accuracy on a test set"""
predictions = w.matmul(features).add(b).sign()
correct = predictions.mul(labels)
accuracy = correct.add(1).div(2).mean().get_plain_text()
if crypten.communicator.get().get_rank() == 0:
print("Test accuracy %.2f%%" % (accuracy.item() * 100))
def run_mpc_linear_svm(
epochs=50, examples=50, features=100, lr=0.5, skip_plaintext=False
):
crypten.init()
# Set random seed for reproducibility
torch.manual_seed(1)
# Initialize x, y, w, b
x = torch.randn(features, examples)
w_true = torch.randn(1, features)
b_true = torch.randn(1)
y = w_true.matmul(x) + b_true
y = y.sign()
if not skip_plaintext:
logging.info("==================")
logging.info("PyTorch Training")
logging.info("==================")
w_torch, b_torch = train_linear_svm(x, y, lr=lr, print_time=True)
# Encrypt features / labels
x = crypten.cryptensor(x)
y = crypten.cryptensor(y)
logging.info("==================")
logging.info("CrypTen Training")
logging.info("==================")
w, b = train_linear_svm(x, y, lr=lr, print_time=True)
if not skip_plaintext:
logging.info("PyTorch Weights :")
logging.info(w_torch)
logging.info("CrypTen Weights:")
logging.info(w.get_plain_text())
if not skip_plaintext:
logging.info("PyTorch Bias :")
logging.info(b_torch)
logging.info("CrypTen Bias:")
logging.info(b.get_plain_text())
| CrypTen-main | examples/mpc_linear_svm/mpc_linear_svm.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import tempfile
import crypten
import crypten.communicator as comm
import torch
import torch.nn as nn
import torch.nn.functional as F
from examples.util import NoopContextManager
from torchvision import datasets, transforms
def run_mpc_autograd_cnn(
context_manager=None,
num_epochs=3,
learning_rate=0.001,
batch_size=5,
print_freq=5,
num_samples=100,
):
"""
Args:
context_manager: used for setting proxy settings during download.
"""
crypten.init()
data_alice, data_bob, train_labels = preprocess_mnist(context_manager)
rank = comm.get().get_rank()
# assumes at least two parties exist
# broadcast dummy data with same shape to remaining parties
if rank == 0:
x_alice = data_alice
else:
x_alice = torch.empty(data_alice.size())
if rank == 1:
x_bob = data_bob
else:
x_bob = torch.empty(data_bob.size())
# encrypt
x_alice_enc = crypten.cryptensor(x_alice, src=0)
x_bob_enc = crypten.cryptensor(x_bob, src=1)
# combine feature sets
x_combined_enc = crypten.cat([x_alice_enc, x_bob_enc], dim=2)
x_combined_enc = x_combined_enc.unsqueeze(1)
# reduce training set to num_samples
x_reduced = x_combined_enc[:num_samples]
y_reduced = train_labels[:num_samples]
# encrypt plaintext model
model_plaintext = CNN()
dummy_input = torch.empty((1, 1, 28, 28))
model = crypten.nn.from_pytorch(model_plaintext, dummy_input)
model.train()
model.encrypt()
# encrypted training
train_encrypted(
x_reduced, y_reduced, model, num_epochs, learning_rate, batch_size, print_freq
)
def train_encrypted(
x_encrypted,
y_encrypted,
encrypted_model,
num_epochs,
learning_rate,
batch_size,
print_freq,
):
rank = comm.get().get_rank()
loss = crypten.nn.MSELoss()
num_samples = x_encrypted.size(0)
label_eye = torch.eye(2)
for epoch in range(num_epochs):
last_progress_logged = 0
# only print from rank 0 to avoid duplicates for readability
if rank == 0:
print(f"Epoch {epoch} in progress:")
for j in range(0, num_samples, batch_size):
# define the start and end of the training mini-batch
start, end = j, min(j + batch_size, num_samples)
# switch on autograd for training examples
x_train = x_encrypted[start:end]
x_train.requires_grad = True
y_one_hot = label_eye[y_encrypted[start:end]]
y_train = crypten.cryptensor(y_one_hot, requires_grad=True)
# perform forward pass:
output = encrypted_model(x_train)
loss_value = loss(output, y_train)
# backprop
encrypted_model.zero_grad()
loss_value.backward()
encrypted_model.update_parameters(learning_rate)
# log progress
if j + batch_size - last_progress_logged >= print_freq:
last_progress_logged += print_freq
print(f"Loss {loss_value.get_plain_text().item():.4f}")
# compute accuracy every epoch
pred = output.get_plain_text().argmax(1)
correct = pred.eq(y_encrypted[start:end])
correct_count = correct.sum(0, keepdim=True).float()
accuracy = correct_count.mul_(100.0 / output.size(0))
loss_plaintext = loss_value.get_plain_text().item()
print(
f"Epoch {epoch} completed: "
f"Loss {loss_plaintext:.4f} Accuracy {accuracy.item():.2f}"
)
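# Added note (toy values are assumptions): `label_eye[y]` above is a compact way
# to one-hot encode integer labels before encrypting them, e.g.
#
#   label_eye = torch.eye(2)
#   y = torch.tensor([0, 1, 1])
#   label_eye[y]   # tensor([[1., 0.], [0., 1.], [0., 1.]])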
def preprocess_mnist(context_manager):
if context_manager is None:
context_manager = NoopContextManager()
with context_manager:
# each party gets a unique temp directory
with tempfile.TemporaryDirectory() as data_dir:
mnist_train = datasets.MNIST(data_dir, download=True, train=True)
mnist_test = datasets.MNIST(data_dir, download=True, train=False)
# modify labels so all non-zero digits have class label 1
mnist_train.targets[mnist_train.targets != 0] = 1
mnist_test.targets[mnist_test.targets != 0] = 1
mnist_train.targets[mnist_train.targets == 0] = 0
mnist_test.targets[mnist_test.targets == 0] = 0
# compute normalization factors
data_all = torch.cat([mnist_train.data, mnist_test.data]).float()
data_mean, data_std = data_all.mean(), data_all.std()
tensor_mean, tensor_std = data_mean.unsqueeze(0), data_std.unsqueeze(0)
# normalize data
data_train_norm = transforms.functional.normalize(
mnist_train.data.float(), tensor_mean, tensor_std
)
# partition features between Alice and Bob
data_alice = data_train_norm[:, :, :20]
data_bob = data_train_norm[:, :, 20:]
train_labels = mnist_train.targets
return data_alice, data_bob, train_labels
class CNN(nn.Module):
def __init__(self):
super(CNN, self).__init__()
self.conv1 = nn.Conv2d(1, 16, kernel_size=5, padding=0)
self.fc1 = nn.Linear(16 * 12 * 12, 100)
self.fc2 = nn.Linear(100, 2)
def forward(self, x):
out = self.conv1(x)
out = F.relu(out)
out = F.max_pool2d(out, 2)
out = out.view(-1, 16 * 12 * 12)
out = self.fc1(out)
out = F.relu(out)
out = self.fc2(out)
return out
| CrypTen-main | examples/mpc_autograd_cnn/mpc_autograd_cnn.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
To run mpc_autograd_cnn example:
$ python examples/mpc_autograd_cnn/launcher.py
To run mpc_autograd_cnn example on AWS EC2 instances:
$ python3 scripts/aws_launcher.py \
--ssh_key_file=$HOME/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=examples/mpc_autograd_cnn/mpc_autograd_cnn.py \
examples/mpc_autograd_cnn/launcher.py
"""
import argparse
import logging
import os
from examples.multiprocess_launcher import MultiProcessLauncher
parser = argparse.ArgumentParser(description="CrypTen Autograd CNN Training")
def validate_world_size(world_size):
world_size = int(world_size)
if world_size < 2:
raise argparse.ArgumentTypeError(f"world_size {world_size} must be > 1")
return world_size
parser.add_argument(
"--world_size",
type=validate_world_size,
default=2,
help="The number of parties to launch. Each party acts as its own process",
)
parser.add_argument(
"--epochs", default=3, type=int, metavar="N", help="number of total epochs to run"
)
parser.add_argument(
"--lr",
"--learning-rate",
default=0.01,
type=float,
metavar="LR",
help="initial learning rate",
)
parser.add_argument(
"-b",
"--batch-size",
default=5,
type=int,
metavar="N",
help="mini-batch size (default: 5)",
)
parser.add_argument(
"--print-freq",
"-p",
default=5,
type=int,
metavar="PF",
help="print frequency (default: 5)",
)
parser.add_argument(
"--num-samples",
"-n",
default=100,
type=int,
metavar="N",
help="num of samples used for training (default: 100)",
)
def _run_experiment(args):
level = logging.INFO
if "RANK" in os.environ and os.environ["RANK"] != "0":
level = logging.CRITICAL
logging.getLogger().setLevel(level)
logging.basicConfig(
level=level,
format="%(asctime)s - %(process)d - %(name)s - %(levelname)s - %(message)s",
)
from mpc_autograd_cnn import run_mpc_autograd_cnn
run_mpc_autograd_cnn(
num_epochs=args.epochs,
learning_rate=args.lr,
batch_size=args.batch_size,
print_freq=args.print_freq,
num_samples=args.num_samples,
)
def main(run_experiment):
args = parser.parse_args()
# run multiprocess by default
launcher = MultiProcessLauncher(args.world_size, run_experiment, args)
launcher.start()
launcher.join()
launcher.terminate()
if __name__ == "__main__":
main(_run_experiment)
| CrypTen-main | examples/mpc_autograd_cnn/launcher.py |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Generate function and model benchmarks
To Run:
$ python benchmark.py
# Only function benchmarks
$ python benchmark.py --only-functions
$ python benchmark.py --only-functions --world-size 2
# Benchmark functions and all models
$ python benchmark.py --advanced-models
# Run benchmarks on GPU
$ python benchmark.py --device cuda
$ python benchmark.py --device cuda --world-size 2
# Run benchmarks on different GPUs for each party
$ python benchmark.py --world-size=2 --multi-gpu
# Save benchmarks to csv
$ python benchmark.py -p ~/Downloads/
"""
import argparse
import functools
import os
import timeit
from collections import namedtuple
import crypten
import crypten.communicator as comm
import numpy as np
import pandas as pd
import torch
from examples import multiprocess_launcher
try:
from . import data, models
except ImportError:
# direct import if relative fails
import data
import models
Runtime = namedtuple("Runtime", "mid q1 q3")
def time_me(func=None, n_loops=10):
"""Decorator returning average runtime in seconds over n_loops
Args:
func (function): invoked with given args / kwargs
n_loops (int): number of times to invoke function for timing
Returns: tuple of (time in seconds, inner quartile range, function return value).
"""
if func is None:
return functools.partial(time_me, n_loops=n_loops)
@functools.wraps(func)
def timing_wrapper(*args, **kwargs):
return_val = func(*args, **kwargs)
times = []
for _ in range(n_loops):
start = timeit.default_timer()
func(*args, **kwargs)
times.append(timeit.default_timer() - start)
mid_runtime = np.quantile(times, 0.5)
q1_runtime = np.quantile(times, 0.25)
q3_runtime = np.quantile(times, 0.75)
runtime = Runtime(mid_runtime, q1_runtime, q3_runtime)
return runtime, return_val
return timing_wrapper
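# Added usage sketch (hedged; names below are assumptions, not part of the file):
#
#   @time_me(n_loops=5)
#   def matmul_once(a, b):
#       return a @ b
#
#   runtime, product = matmul_once(torch.rand(64, 64), torch.rand(64, 64))
#   # `runtime` is a Runtime(mid, q1, q3) namedtuple of seconds; `product` is the
#   # value returned by the initial untimed invocation.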
class FuncBenchmarks:
"""Benchmarks runtime and error of crypten functions against PyTorch
Args:
tensor_size (int or tuple): size of tensor for benchmarking runtimes
"""
BINARY = ["add", "sub", "mul", "matmul", "gt", "lt", "eq"]
UNARY = [
"sigmoid",
"relu",
"tanh",
"exp",
"log",
"reciprocal",
"cos",
"sin",
"sum",
"mean",
"neg",
]
LAYERS = ["conv2d"]
DOMAIN = torch.arange(start=0.01, end=100, step=0.01)
# for exponential, sin, and cos
TRUNCATED_DOMAIN = torch.arange(start=0.001, end=10, step=0.001)
def __init__(self, tensor_size=(100, 100), device="cpu"):
self.device = torch.device(device)
self.tensor_size = tensor_size
# dataframe for benchmarks
self.df = None
def __repr__(self):
if self.df is not None:
return self.df.to_string(index=False, justify="left")
return "No Function Benchmarks"
@staticmethod
@time_me
def time_func(x, func, y=None):
"""Invokes func as a method of x"""
if y is None:
return getattr(x, func)()
if func in {"conv1d", "conv2d"}:
if torch.is_tensor(x):
return getattr(torch.nn.functional, func)(x, y)
return getattr(x, func)(y)
return getattr(x, func)(y)
def get_runtimes(self):
"""Returns plain text and crypten runtimes"""
x, y = (
torch.rand(self.tensor_size, device=self.device),
torch.rand(self.tensor_size, device=self.device),
)
x_enc, y_enc = crypten.cryptensor(x), crypten.cryptensor(y)
runtimes, runtimes_enc = [], []
for func in FuncBenchmarks.UNARY + FuncBenchmarks.BINARY:
second_operand, second_operand_enc = None, None
if func in FuncBenchmarks.BINARY:
second_operand, second_operand_enc = y, y_enc
runtime, _ = FuncBenchmarks.time_func(x, func, y=second_operand)
runtimes.append(runtime)
runtime_enc, _ = FuncBenchmarks.time_func(x_enc, func, y=second_operand_enc)
runtimes_enc.append(runtime_enc)
# add layer runtimes
runtime_layers, runtime_layers_enc = self.get_layer_runtimes()
runtimes.extend(runtime_layers)
runtimes_enc.extend(runtime_layers_enc)
return runtimes, runtimes_enc
def get_layer_runtimes(self):
"""Returns runtimes for layers"""
runtime_layers, runtime_layers_enc = [], []
for layer in FuncBenchmarks.LAYERS:
if layer == "conv1d":
x, x_enc, y, y_enc = self.random_conv1d_inputs()
elif layer == "conv2d":
x, x_enc, y, y_enc = self.random_conv2d_inputs()
else:
raise ValueError(f"{layer} not supported")
runtime, _ = FuncBenchmarks.time_func(x, layer, y=y)
runtime_enc, _ = FuncBenchmarks.time_func(x_enc, layer, y=y_enc)
runtime_layers.append(runtime)
runtime_layers_enc.append(runtime_enc)
return runtime_layers, runtime_layers_enc
def random_conv2d_inputs(self):
"""Returns random input and weight tensors for 2d convolutions"""
filter_size = [size // 10 for size in self.tensor_size]
x_conv2d = torch.rand(1, 1, *self.tensor_size, device=self.device)
weight2d = torch.rand(1, 1, *filter_size, device=self.device)
x_conv2d_enc = crypten.cryptensor(x_conv2d)
weight2d_enc = crypten.cryptensor(weight2d)
return x_conv2d, x_conv2d_enc, weight2d, weight2d_enc
def random_conv1d_inputs(self):
"""Returns random input and weight tensors for 1d convolutions"""
size = self.tensor_size[0]
filter_size = size // 10
x_conv1d = torch.rand(1, 1, size, device=self.device)
weight1d = torch.rand(1, 1, filter_size, device=self.device)
x_conv1d_enc = crypten.cryptensor(x_conv1d)
weight1d_enc = crypten.cryptensor(weight1d)
return x_conv1d, x_conv1d_enc, weight1d, weight1d_enc
@staticmethod
def calc_abs_error(ref, out):
"""Computes total absolute error"""
ref, out = ref.cpu(), out.cpu()
if ref.dtype == torch.bool:
errors = (out != ref).numpy().sum()
return errors
errors = torch.abs(out - ref).numpy()
return errors.sum()
@staticmethod
def calc_relative_error(ref, out):
"""Computes average relative error"""
ref, out = ref.cpu(), out.cpu()
if ref.dtype == torch.bool:
errors = (out != ref).numpy().sum() // ref.nelement()
return errors
errors = torch.abs((out - ref) / ref)
# remove inf due to division by tiny numbers
errors = errors[errors != float("inf")].numpy()
return errors.mean()
def call_function_on_domain(self, func):
"""Call plain text and CrypTen function on given function
Uses DOMAIN, TRUNCATED_DOMAIN, or appropriate layer inputs
Returns: tuple of (plain text result, encrypted result)
"""
DOMAIN, TRUNCATED_DOMAIN = (
FuncBenchmarks.DOMAIN,
FuncBenchmarks.TRUNCATED_DOMAIN,
)
if hasattr(DOMAIN, "to") and hasattr(TRUNCATED_DOMAIN, "to"):
DOMAIN, TRUNCATED_DOMAIN = (
DOMAIN.to(device=self.device),
TRUNCATED_DOMAIN.to(device=self.device),
)
y = torch.rand(DOMAIN.shape, device=self.device)
DOMAIN_enc, y_enc = crypten.cryptensor(DOMAIN), crypten.cryptensor(y)
TRUNCATED_DOMAIN_enc = crypten.cryptensor(TRUNCATED_DOMAIN)
if func in ["exp", "cos", "sin"]:
ref, out_enc = (
getattr(TRUNCATED_DOMAIN, func)(),
getattr(TRUNCATED_DOMAIN_enc, func)(),
)
elif func in FuncBenchmarks.UNARY:
ref, out_enc = getattr(DOMAIN, func)(), getattr(DOMAIN_enc, func)()
elif func in FuncBenchmarks.LAYERS:
ref, out_enc = self._call_layer(func)
elif func in FuncBenchmarks.BINARY:
ref, out_enc = (getattr(DOMAIN, func)(y), getattr(DOMAIN_enc, func)(y_enc))
else:
raise ValueError(f"{func} not supported")
return ref, out_enc
def get_errors(self):
"""Computes the total error of approximations"""
abs_errors, relative_errors = [], []
functions = FuncBenchmarks.UNARY + FuncBenchmarks.BINARY
functions += FuncBenchmarks.LAYERS
for func in functions:
ref, out_enc = self.call_function_on_domain(func)
out = out_enc.get_plain_text()
abs_error = FuncBenchmarks.calc_abs_error(ref, out)
abs_errors.append(abs_error)
relative_error = FuncBenchmarks.calc_relative_error(ref, out)
relative_errors.append(relative_error)
return abs_errors, relative_errors
def _call_layer(self, layer):
"""Call supported layers"""
if layer == "conv1d":
x, x_enc, y, y_enc = self.random_conv1d_inputs()
elif layer == "conv2d":
x, x_enc, y, y_enc = self.random_conv2d_inputs()
else:
raise ValueError(f"{layer} not supported")
ref = getattr(torch.nn.functional, layer)(x, y)
out_enc = getattr(x_enc, layer)(y_enc)
return ref, out_enc
def save(self, path):
if self.device.type == "cuda":
csv_path = os.path.join(path, "func_benchmarks_cuda.csv")
else:
csv_path = os.path.join(path, "func_benchmarks.csv")
self.df.to_csv(csv_path, index=False)
def get_results(self):
return self.df
def run(self):
"""Runs and stores benchmarks in self.df"""
runtimes, runtimes_enc = self.get_runtimes()
abs_errors, relative_errors = self.get_errors()
self.df = pd.DataFrame.from_dict(
{
"function": FuncBenchmarks.UNARY
+ FuncBenchmarks.BINARY
+ FuncBenchmarks.LAYERS,
"runtime": [r.mid for r in runtimes],
"runtime Q1": [r.q1 for r in runtimes],
"runtime Q3": [r.q3 for r in runtimes],
"runtime crypten": [r.mid for r in runtimes_enc],
"runtime crypten Q1": [r.q1 for r in runtimes_enc],
"runtime crypten Q3": [r.q3 for r in runtimes_enc],
"total abs error": abs_errors,
"average relative error": relative_errors,
}
)
class ModelBenchmarks:
"""Benchmarks runtime and accuracy of crypten models
Models are benchmarked on synthetically generated
Gaussian clusters for binary classification; ResNet models are
benchmarked on image data.
Args:
device (str): device to run the benchmarks on ("cpu" or "cuda")
advanced_models (bool): if True, also benchmark the advanced models
(e.g. ResNet variants); otherwise they are removed from the model list
"""
def __init__(self, device="cpu", advanced_models=False):
self.device = torch.device(device)
self.df = None
self.models = models.MODELS
if not advanced_models:
self.remove_advanced_models()
def __repr__(self):
if self.df is not None:
return self.df.to_string(index=False, justify="left")
return "No Model Benchmarks"
def remove_advanced_models(self):
"""Removes advanced models from instance"""
self.models = list(filter(lambda x: not x.advanced, self.models))
@time_me(n_loops=3)
def train(self, model, x, y, epochs, lr, loss):
"""Trains PyTorch model
Args:
model (PyTorch model): model to be trained
x (torch.tensor): inputs
y (torch.tensor): targets
epochs (int): number of training epochs
lr (float): learning rate
loss (str): type of loss to use for training
Returns:
model with updated weights
"""
assert isinstance(model, torch.nn.Module), "must be a PyTorch model"
criterion = getattr(torch.nn, loss)()
optimizer = torch.optim.SGD(model.parameters(), lr=lr)
for _ in range(epochs):
model.zero_grad()
output = model(x)
loss = criterion(output, y)
loss.backward()
optimizer.step()
return model
@time_me(n_loops=3)
def train_crypten(self, model, x, y, epochs, lr, loss):
"""Trains crypten encrypted model
Args:
model (CrypTen model): model to be trained
x (crypten.tensor): inputs
y (crypten.tensor): targets
epochs (int): number of training epochs
lr (float): learning rate
loss (str): type of loss to use for training
Returns:
model with updated weights
"""
assert isinstance(model, crypten.nn.Module), "must be a CrypTen model"
criterion = getattr(crypten.nn, loss)()
for _ in range(epochs):
model.zero_grad()
output = model(x)
loss = criterion(output, y)
loss.backward()
model.update_parameters(lr)
return model
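# Note: train, train_crypten, and predict are wrapped by @time_me(n_loops=3); as used
# in the timing methods below, each call returns a (runtime, result) pair, where
# runtime exposes mid / q1 / q3 quantiles over the timed loops and result is the
# wrapped function's return value.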
def time_training(self):
"""Returns training time per epoch for plain text and CrypTen"""
runtimes = []
runtimes_enc = []
for model in self.models:
x, y = model.data.x, model.data.y
x, y = x.to(device=self.device), y.to(device=self.device)
model_plain = model.plain
if hasattr(model_plain, "to"):
model_plain = model_plain.to(self.device)
runtime, _ = self.train(model_plain, x, y, 1, model.lr, model.loss)
runtimes.append(runtime)
if model.advanced:
y = model.data.y_onehot.to(self.device)
x_enc = crypten.cryptensor(x)
y_enc = crypten.cryptensor(y)
model_crypten = model.crypten
if hasattr(model_crypten, "to"):
model_crypten = model_crypten.to(self.device)
model_enc = model_crypten.encrypt()
runtime_enc, _ = self.train_crypten(
model_enc, x_enc, y_enc, 1, model.lr, model.loss
)
runtimes_enc.append(runtime_enc)
return runtimes, runtimes_enc
@time_me(n_loops=3)
def predict(self, model, x):
y = model(x)
return y
def time_inference(self):
"""Returns inference time for plain text and CrypTen"""
runtimes = []
runtimes_enc = []
for model in self.models:
x = model.data.x.to(self.device)
model_plain = model.plain
if hasattr(model_plain, "to"):
model_plain = model_plain.to(self.device)
runtime, _ = self.predict(model_plain, x)
runtimes.append(runtime)
model_crypten = model.crypten
if hasattr(model_crypten, "to"):
model_crypten = model_crypten.to(self.device)
model_enc = model_crypten.encrypt()
x_enc = crypten.cryptensor(x)
runtime_enc, _ = self.predict(model_enc, x_enc)
runtimes_enc.append(runtime_enc)
return runtimes, runtimes_enc
@staticmethod
def calc_accuracy(output, y, threshold=0.5):
"""Computes percent accuracy
Args:
output (torch.tensor): model output
y (torch.tensor): true label
threshold (float): classification threshold
Returns (float): percent accuracy
"""
output, y = output.cpu(), y.cpu()
predicted = (output > threshold).float()
correct = (predicted == y).sum().float()
accuracy = float((correct / y.shape[0]).cpu().numpy())
return accuracy
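# Worked example (hypothetical values):
#
#     output = torch.tensor([[0.9], [0.2], [0.7]])
#     y = torch.tensor([[1.0], [0.0], [0.0]])
#     ModelBenchmarks.calc_accuracy(output, y)  # predictions [1, 0, 1] -> 2/3 correct ~ 0.667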
def evaluate(self):
"""Evaluates accuracy of crypten versus plain text models"""
accuracies, accuracies_crypten = [], []
for model in self.models:
model_plain = model.plain
if hasattr(model_plain, "to"):
model_plain = model_plain.to(self.device)
x, y = model.data.x, model.data.y
x, y = x.to(device=self.device), y.to(device=self.device)
_, model_plain = self.train(
model_plain, x, y, model.epochs, model.lr, model.loss
)
x_test = model.data.x_test.to(device=self.device)
y_test = model.data.y_test.to(device=self.device)
accuracy = ModelBenchmarks.calc_accuracy(model_plain(x_test), y_test)
accuracies.append(accuracy)
model_crypten = model.crypten
if hasattr(model_crypten, "to"):
model_crypten = model_crypten.to(self.device)
model_crypten = model_crypten.encrypt()
if model.advanced:
y = model.data.y_onehot.to(self.device)
x_enc = crypten.cryptensor(x)
y_enc = crypten.cryptensor(y)
_, model_crypten = self.train_crypten(
model_crypten, x_enc, y_enc, model.epochs, model.lr, model.loss
)
x_test_enc = crypten.cryptensor(x_test)
output = model_crypten(x_test_enc).get_plain_text()
accuracy = ModelBenchmarks.calc_accuracy(output, y_test)
accuracies_crypten.append(accuracy)
return accuracies, accuracies_crypten
def save(self, path):
if self.device.type == "cuda":
csv_path = os.path.join(path, "model_benchmarks_cuda.csv")
else:
csv_path = os.path.join(path, "model_benchmarks.csv")
self.df.to_csv(csv_path, index=False)
def get_results(self):
return self.df
def run(self):
"""Runs and stores benchmarks in self.df"""
training_runtimes, training_runtimes_enc = self.time_training()
inference_runtimes, inference_runtimes_enc = self.time_inference()
accuracies, accuracies_crypten = self.evaluate()
model_names = [model.name for model in self.models]
training_times_both = training_runtimes + training_runtimes_enc
inference_times_both = inference_runtimes + inference_runtimes_enc
half_n_rows = len(training_runtimes)
self.df = pd.DataFrame.from_dict(
{
"model": model_names + model_names,
"seconds per epoch": [t.mid for t in training_times_both],
"seconds per epoch q1": [t.q1 for t in training_times_both],
"seconds per epoch q3": [t.q3 for t in training_times_both],
"inference time": [t.mid for t in inference_times_both],
"inference time q1": [t.q1 for t in inference_times_both],
"inference time q3": [t.q3 for t in inference_times_both],
"is plain text": [True] * half_n_rows + [False] * half_n_rows,
"accuracy": accuracies + accuracies_crypten,
}
)
self.df = self.df.sort_values(by="model")
def get_args():
"""Parses command line arguments"""
parser = argparse.ArgumentParser(description="Benchmark Functions")
parser.add_argument(
"--path",
"-p",
type=str,
required=False,
default=None,
help="path to save function benchmarks",
)
parser.add_argument(
"--only-functions",
"-f",
required=False,
default=False,
action="store_true",
help="run only function benchmarks",
)
parser.add_argument(
"--world-size",
"-w",
type=int,
required=False,
default=1,
help="world size for number of parties",
)
parser.add_argument(
"--device",
"-d",
required=False,
default="cpu",
help="the device to run the benchmarks",
)
parser.add_argument(
"--multi-gpu",
"-mg",
required=False,
default=False,
action="store_true",
help="use different gpu for each party. Will override --device if selected",
)
parser.add_argument(
"--ttp",
"-ttp",
required=False,
default=False,
action="store_true",
help="initialize a trusted third party (TTP) as beaver triples' provider, world_size should be greater than 2",
)
parser.add_argument(
"--advanced-models",
required=False,
default=False,
action="store_true",
help="run advanced model (resnet, transformer, etc.) benchmarks",
)
args = parser.parse_args()
return args
def multiprocess_caller(args):
"""Runs multiparty benchmarks and prints/saves from source 0"""
rank = comm.get().get_rank()
if args.multi_gpu:
assert (
args.world_size <= torch.cuda.device_count()
), f"Got {args.world_size} parties, but only {torch.cuda.device_count()} GPUs found"
device = torch.device(f"cuda:{rank}")
else:
device = torch.device(args.device)
benchmarks = [
FuncBenchmarks(device=device),
ModelBenchmarks(device=device, advanced_models=args.advanced_models),
]
if args.only_functions:
benchmarks = [FuncBenchmarks(device=device)]
for benchmark in benchmarks:
benchmark.run()
rank = comm.get().get_rank()
if rank == 0:
pd.set_option("display.precision", 3)
print(benchmark)
if args.path:
benchmark.save(args.path)
def main():
"""Runs benchmarks and saves if path is provided"""
crypten.init()
args = get_args()
device = torch.device(args.device)
if not hasattr(crypten.nn.Module, "to") or not hasattr(crypten.mpc.MPCTensor, "to"):
if device.type == "cuda":
print(
"GPU computation is not supported for this version of CrypTen, benchmark will be skipped"
)
return
benchmarks = [
FuncBenchmarks(device=device),
ModelBenchmarks(device=device, advanced_models=args.advanced_models),
]
if args.only_functions:
benchmarks = [FuncBenchmarks(device=device)]
if args.world_size > 1:
if args.ttp:
crypten.mpc.set_default_provider(crypten.mpc.provider.TrustedThirdParty)
launcher = multiprocess_launcher.MultiProcessLauncher(
args.world_size, multiprocess_caller, fn_args=args
)
launcher.start()
launcher.join()
launcher.terminate()
else:
pd.set_option("display.precision", 3)
for benchmark in benchmarks:
benchmark.run()
print(benchmark)
if args.path:
benchmark.save(args.path)
if __name__ == "__main__":
main()
|
CrypTen-main
|
benchmarks/benchmark.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Contains models used for benchmarking
"""
from dataclasses import dataclass
from typing import Any
import crypten
import torch
import torch.nn as nn
from torchvision import models
try:
from . import data
except ImportError:
# direct import if relative fails
import data
N_FEATURES = 20
@dataclass
class Model:
name: str
plain: torch.nn.Module
crypten: crypten.nn.Module
# must contain x, y, x_test, y_test attributes
data: Any
epochs: int
lr: float
loss: str
advanced: bool
class LogisticRegression(torch.nn.Module):
def __init__(self, n_features=N_FEATURES):
super().__init__()
self.linear = torch.nn.Linear(n_features, 1)
def forward(self, x):
return torch.sigmoid(self.linear(x))
class LogisticRegressionCrypTen(crypten.nn.Module):
def __init__(self, n_features=N_FEATURES):
super().__init__()
self.linear = crypten.nn.Linear(n_features, 1)
def forward(self, x):
return self.linear(x).sigmoid()
class FeedForward(torch.nn.Module):
def __init__(self, n_features=N_FEATURES):
super().__init__()
self.linear1 = torch.nn.Linear(n_features, n_features // 2)
self.linear2 = torch.nn.Linear(n_features // 2, n_features // 4)
self.linear3 = torch.nn.Linear(n_features // 4, 1)
def forward(self, x):
out = torch.relu(self.linear1(x))
out = torch.relu(self.linear2(out))
out = torch.sigmoid(self.linear3(out))
return out
class FeedForwardCrypTen(crypten.nn.Module):
def __init__(self, n_features=N_FEATURES):
super().__init__()
self.linear1 = crypten.nn.Linear(n_features, n_features // 2)
self.linear2 = crypten.nn.Linear(n_features // 2, n_features // 4)
self.linear3 = crypten.nn.Linear(n_features // 4, 1)
def forward(self, x):
out = (self.linear1(x)).relu()
out = (self.linear2(out)).relu()
out = (self.linear3(out)).sigmoid()
return out
class ResNet(nn.Module):
def __init__(self, n_layers=18):
super().__init__()
assert n_layers in [18, 34, 50]
self.model = getattr(models, "resnet{}".format(n_layers))(pretrained=True)
def forward(self, x):
return self.model(x)
class ResNetCrypTen(crypten.nn.Module):
def __init__(self, n_layers=18):
super().__init__()
assert n_layers in [18, 34, 50]
model = getattr(models, "resnet{}".format(n_layers))(pretrained=True)
dummy_input = torch.rand([1, 3, 224, 224])
self.model = crypten.nn.from_pytorch(model, dummy_input)
def forward(self, x):
return self.model(x)
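# Note: crypten.nn.from_pytorch converts the torchvision ResNet by tracing it with the
# dummy (1, 3, 224, 224) input above, so encrypted inference is expected to use inputs
# of the same shape (matching the batch produced by data.Images.preprocess_image).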
MODELS = [
Model(
name="logistic regression",
plain=LogisticRegression(),
crypten=LogisticRegressionCrypTen(),
data=data.GaussianClusters(),
epochs=50,
lr=0.1,
loss="BCELoss",
advanced=False,
),
Model(
name="feedforward neural network",
plain=FeedForward(),
crypten=FeedForwardCrypTen(),
data=data.GaussianClusters(),
epochs=50,
lr=0.1,
loss="BCELoss",
advanced=False,
),
Model(
name="resnet18",
plain=ResNet(n_layers=18),
crypten=ResNetCrypTen(n_layers=18),
data=data.Images(),
epochs=2,
lr=0.1,
loss="CrossEntropyLoss",
advanced=True,
),
Model(
name="resnet34",
plain=ResNet(n_layers=34),
crypten=ResNetCrypTen(n_layers=34),
data=data.Images(),
epochs=2,
lr=0.1,
loss="CrossEntropyLoss",
advanced=True,
),
Model(
name="resnet50",
plain=ResNet(n_layers=50),
crypten=ResNetCrypTen(n_layers=50),
data=data.Images(),
epochs=2,
lr=0.1,
loss="CrossEntropyLoss",
advanced=True,
),
]
|
CrypTen-main
|
benchmarks/models.py
|
CrypTen-main
|
benchmarks/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
A script to run historical benchmarks.
- writes monthly data to `dash_app/data/`
- example: 'dash_app/data/2019-10-26/func_benchmarks.csv'
- example: 'dash_app/data/2019-10-26/model_benchmarks.csv'
- overwrite option
- script requires ability to 'git clone'
To run:
python run_historical_benchmarks.py
# overwrite existing data directories
python run_historical_benchmarks.py --overwrite
"""
import argparse
import datetime
import os
import shutil
import subprocess
from dateutil.relativedelta import relativedelta
def parse_args():
"""Parses command line arguments"""
parser = argparse.ArgumentParser(description="Run Historical Benchmarks")
parser.add_argument(
"--overwrite",
required=False,
default=False,
action="store_true",
help="overwrite existing data directories",
)
parser.add_argument(
"--cuda-toolkit-version",
required=False,
default="10.1",
help="build pytorch with the corresponding version of cuda-toolkit",
)
args = parser.parse_args()
return args
def get_dates(day=26):
"""Generate dates to run benchmarks
Returns: list of strings in year-month-day format.
Example: ["2020-01-26", "2019-12-26"]
"""
dates = []
today = datetime.date.today()
end = datetime.date(2019, 10, day)
one_month = relativedelta(months=+1)
if today.day >= day:
start = datetime.date(today.year, today.month, day)
else:
start = datetime.date(today.year, today.month, day) - one_month
while start >= end:
dates.append(start.strftime("%Y-%m-%d"))
start -= one_month
return dates
args = parse_args()
overwrite = args.overwrite
cuda_version = "".join(args.cuda_toolkit_version.split("."))
dates = get_dates()
PATH = os.getcwd()
# clone
subprocess.call(
"cd /tmp && git clone https://github.com/facebookresearch/CrypTen.git", shell=True
)
# create venv
subprocess.call("cd /tmp && python3 -m venv .venv", shell=True)
venv = "cd /tmp && . .venv/bin/activate && "
# install PyTorch
subprocess.call(
f"{venv} pip3 install onnx==1.6.0 tensorboard pandas sklearn", shell=True
)
stable_url = "https://download.pytorch.org/whl/torch_stable.html"
pip_torch = f"pip install torch==1.5.1+cu{cuda_version} torchvision==0.6.1+cu{cuda_version} -f https://download.pytorch.org/whl/torch_stable.html"
subprocess.call(f"{venv} {pip_torch} -f {stable_url}", shell=True)
modes = {"1pc": "", "2pc": "--world-size=2"}
for date in dates:
path_exists = os.path.exists(f"dash_app/data/{date}/func_benchmarks.csv")
if not overwrite and path_exists:
continue
# checkout closest version before date
subprocess.call(
f"cd /tmp/CrypTen && "
+ f"git checkout `git rev-list -n 1 --before='{date} 01:01' master`",
shell=True,
)
for mode, arg in modes.items():
subprocess.call(venv + "pip3 install CrypTen/.", shell=True)
subprocess.call(f"echo Generating {date} Benchmarks for {mode}", shell=True)
path = os.path.join(PATH, f"dash_app/data/{date}", mode)
subprocess.call(f"mkdir -p {path}", shell=True)
subprocess.call(
venv + f"cd {PATH} && python3 benchmark.py -p '{path}' {arg}", shell=True
)
subprocess.call(
venv + f"cd {PATH} && python3 benchmark.py -p '{path}' -d 'cuda' {arg}",
shell=True,
)
# clean up
shutil.rmtree("/tmp/.venv", ignore_errors=True)
shutil.rmtree("/tmp/CrypTen", ignore_errors=True)
|
CrypTen-main
|
benchmarks/run_historical_benchmarks.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Profiler with snakeviz for probing inference / training call stack
Run via Jupyter
"""
import crypten
from benchmark import ModelBenchmarks
# get_ipython().run_line_magic("load_ext", "snakeviz")
crypten.init()
model_benchmarks = ModelBenchmarks()
# for logistic regression select 0; index 1 is the feedforward network
model = model_benchmarks.models[1]
print(model.name)
model_crypten = model.crypten.encrypt()
# encrypted inputs and targets for the selected model
x_enc = crypten.cryptensor(model.data.x)
y_enc = crypten.cryptensor(model.data.y)
# profile training
# get_ipython().run_cell_magic(
#     "snakeviz",
#     "",
#     "\nmodel_benchmarks.train_crypten(model_crypten, x_enc, y_enc, 1, model.lr, model.loss)",
# )
# profile inference
model_crypten.train = False
# get_ipython().run_cell_magic("snakeviz", "", "\n\nmodel_crypten(x_enc)")
|
CrypTen-main
|
benchmarks/profiler.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Contains data used for training / testing model benchmarks
"""
import os
from pathlib import Path
import crypten
import PIL
import torch
import torch.nn.functional as F
from sklearn.datasets import make_classification
from sklearn.model_selection import train_test_split
from torchvision import transforms
class GaussianClusters:
"""Generates Glussian clusters for binary classes"""
def __init__(self, n_samples=5000, n_features=20):
self.n_samples = n_samples
self.n_features = n_features
x, x_test, y, y_test = GaussianClusters.generate_data(n_samples, n_features)
self.x, self.y = x, y
self.x_test, self.y_test = x_test, y_test
@staticmethod
def generate_data(n_samples, n_features):
"""Generates Glussian clusters for binary classes
Args:
n_samples (int): number of samples
n_features (int): number of features
Returns: torch tensors with inputs and labels
"""
x, y = make_classification(
n_samples=n_samples,
n_features=n_features,
# by default, 2 features are redundant
n_informative=n_features - 2,
n_classes=2,
)
x = torch.tensor(x).float()
y = torch.tensor(y).float().unsqueeze(-1)
return train_test_split(x, y)
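# Usage sketch (hypothetical sizes): generate_data returns sklearn's train_test_split
# output, i.e. (x_train, x_test, y_train, y_test), which __init__ above unpacks:
#
#     x, x_test, y, y_test = GaussianClusters.generate_data(1000, 20)
#     # with sklearn's default 75/25 split: x is (750, 20), y is (750, 1) with labels in {0, 1}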
class Images:
def __init__(self):
self.x = self.preprocess_image()
# image net 1k classes
class_id = 463
self.y = torch.tensor([class_id]).long()
self.y_onehot = F.one_hot(self.y, 1000)
self.x_test, self.y_test = self.x, self.y
def preprocess_image(self):
"""Preprocesses sample image"""
path = os.path.dirname(os.path.realpath(__file__))
filename = "dog.jpg"
input_image = PIL.Image.open(Path(os.path.join(path, filename)))
preprocess = transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]
),
]
)
input_tensor = preprocess(input_image)
input_batch = input_tensor.unsqueeze(0)
return input_batch
|
CrypTen-main
|
benchmarks/data.py
|
CrypTen-main
|
benchmarks/dash_app/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pathlib
import dash
import dash_core_components as dcc
import dash_html_components as html
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.graph_objects as go
from dash.dependencies import Input, Output
from load_data import get_aggregated_data, get_available_dates
from plotly.subplots import make_subplots
external_stylesheets = ["https://codepen.io/chriddyp/pen/bWLwgP.css"]
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
server = app.server
# load data using relative data folder
PATH = pathlib.Path(__file__).parent
DATA_PATH = PATH.joinpath("data").resolve()
available_dates = get_available_dates(DATA_PATH)
subdirs = ["1pc", "2pc"]
func_df, model_df = get_aggregated_data(DATA_PATH, subdirs)
colors_discrete = px.colors.qualitative.Set2
template = "simple_white"
# Since we're adding callbacks to elements that don't exist in the app.layout,
# Dash will raise an exception to warn us that we might be
# doing something wrong.
# In this case, we're adding the elements through a callback, so we can ignore
# the exception.
app.config.suppress_callback_exceptions = True
app.layout = html.Div(
[dcc.Location(id="url", refresh=False), html.Div(id="page-content")]
)
index_page = html.Div(
children=[
html.Div(
[
html.Div(
[
html.Img(
src=app.get_asset_url("crypten-icon.png"),
id="plotly-image",
style={
"height": "60px",
"width": "auto",
"margin-bottom": "25px",
},
)
],
className="one-third column",
),
html.Div(
[
html.Div(
[
html.H2("CrypTen", style={"margin-bottom": "0px"}),
html.H4("Benchmarks", style={"margin-top": "0px"}),
]
)
],
className="one-half column",
id="title",
),
html.Div(
[
html.A(
html.Button("Compare Dates", id="learn-more-button"),
href="/compare",
)
],
className="one-third column",
id="button",
),
],
id="header",
className="row flex-display",
style={"margin-bottom": "25px"},
),
dcc.Tabs(
[
dcc.Tab(label="1 party", value="1pc"),
dcc.Tab(label="2 party", value="2pc"),
],
id="benchmark-tabs",
value="1pc",
),
html.Div(
[
dcc.Dropdown(
id="select_date",
options=[
{"label": date, "value": date}
for date in sorted(available_dates)
],
value=sorted(available_dates)[-1],
),
html.Div(
[
html.H3("Functions"),
dcc.Markdown(
"""To reproduce or view assumptions see
[benchmarks](
https://github.com/facebookresearch/CrypTen/blob/master/benchmarks/benchmark.py#L68)
"""
),
html.H5("Runtimes"),
dcc.Markdown(
"""
* function runtimes are averaged over 10 runs using a random tensor of size (100, 100).
* `max` and `argmax` are excluded as they take considerably longer.
* As of 02/25/2020, `max` / `argmax` take 3min 13s ± 4.73s
""",
className="bullets",
),
]
),
html.Div(
[
html.Div(
[dcc.Graph(id="func-runtime-crypten")],
className="six columns",
),
html.Div(
[dcc.Graph(id="func-runtime-crypten-v-plain")],
className="six columns",
),
],
className="row",
),
html.H5("Errors"),
dcc.Markdown(
"""
* function errors are over the domain (0, 100] with step size 0.01
* exp, sin, and cos are over the domain (0, 10) with step size 0.001
""",
className="bullets",
),
html.Div(
[
html.Div(
[dcc.Graph(id="func-abs-error")], className="six columns"
),
html.Div(
[dcc.Graph(id="func-relative-error")],
className="six columns",
),
],
className="row",
),
html.Div(
[
html.H3("Models"),
dcc.Markdown(
"""
For model details or to reproduce see
[models](https://github.com/facebookresearch/CrypTen/blob/master/benchmarks/models.py)
and
[training details](
https://github.com/facebookresearch/CrypTen/blob/master/benchmarks/benchmark.py#L293).
* trained on Gaussian clusters for binary classification
* uses SGD with 5k samples, 20 features, over 20 epochs, and 0.1 learning rate
* feedforward has three hidden layers with intermediary RELU and
final sigmoid activations
* note benchmarks run with world size 1 using CPython
""",
className="bullets",
),
dcc.Dropdown(
id="select_comparison",
options=[
{"label": comp, "value": comp}
for comp in [
"CPU vs GPU",
"CPU vs Plaintext",
"GPU vs Plaintext",
]
],
value="CPU vs GPU",
),
html.Div(
[
html.Div(
[dcc.Graph(id="model-training-time")],
className="six columns",
),
html.Div(
[dcc.Graph(id="model-inference-time")],
className="six columns",
),
html.Div(
[dcc.Graph(id="model-accuracy")],
className="six columns",
),
],
className="row",
),
]
),
]
),
],
id="mainContainer",
style={"display": "flex", "flex-direction": "column"},
)
comparison_layout = html.Div(
[
html.Div(id="compare"),
html.Div(
[
html.Div(
[
html.Img(
src=app.get_asset_url("crypten-icon.png"),
id="plotly-image",
style={
"height": "60px",
"width": "auto",
"margin-bottom": "25px",
},
)
],
className="one-third column",
),
html.Div(
[
html.Div(
[
html.H2("CrypTen", style={"margin-bottom": "0px"}),
html.H4("Benchmarks", style={"margin-top": "0px"}),
]
)
],
className="one-half column",
id="title",
),
html.Div(
[
html.A(
html.Button("Benchmarks", id="learn-more-button"), href="/"
)
],
className="one-third column",
id="button",
),
],
id="header",
className="row flex-display",
style={"margin-bottom": "25px"},
),
html.Div(
[
html.H6("Previous Date"),
dcc.Dropdown(
id="start_date",
options=[
{"label": date, "value": date} for date in available_dates
],
value=sorted(available_dates)[0],
),
html.H6("Current Date"),
dcc.Dropdown(
id="end_date",
options=[
{"label": date, "value": date} for date in available_dates
],
value=sorted(available_dates)[-1],
),
html.Div(
[
html.H3("Functions"),
dcc.Dropdown(
options=[
{"label": func, "value": func}
for func in func_df["function"].unique()
],
multi=True,
value="sigmoid",
id="funcs",
),
dcc.Markdown(
"""
* function runtimes are averaged over 10 runs using a random tensor of size (100, 100).
* `max` and `argmax` are excluded as they take considerably longer.
* As of 02/25/2020, `max` / `argmax` take 3min 13s ± 4.73s
""",
className="bullets",
),
]
),
html.Div(
[
html.Div(
[dcc.Graph(id="runtime-diff")], className="six columns"
),
html.Div([dcc.Graph(id="error-diff")], className="six columns"),
],
className="row",
),
html.Div(
[
html.Br(),
html.Br(),
html.Br(),
html.H4("Historical"),
html.Div(
[dcc.Graph(id="runtime-timeseries")],
className="six columns",
),
html.Div(
[dcc.Graph(id="error-timeseries")], className="six columns"
),
],
className="row",
),
]
),
]
)
@app.callback(
Output("func-runtime-crypten", "figure"),
[Input("select_date", "value"), Input("benchmark-tabs", "value")],
)
def update_runtime_crypten(selected_date, mode):
try:
filter_df = func_df[func_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
filter_df["runtime in seconds"] = filter_df["runtime crypten"]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="runtime in seconds",
y="function",
color="device",
orientation="h",
error_x="runtime crypten error plus",
error_x_minus="runtime crypten error minus",
color_discrete_sequence=colors_discrete,
template=template,
title="Crypten",
barmode="group",
)
fig.update_layout(height=500)
return fig
@app.callback(
Output("func-runtime-crypten-v-plain", "figure"),
[Input("select_date", "value"), Input("benchmark-tabs", "value")],
)
def update_runtime_crypten_v_plain(selected_date, mode):
try:
filter_df = func_df[func_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="runtime gap",
y="function",
color="device",
orientation="h",
error_x="runtime gap error plus",
error_x_minus="runtime gap error minus",
color_discrete_sequence=colors_discrete,
template=template,
title="Crypten vs. Plaintext",
barmode="group",
)
fig.update_layout(height=500)
return fig
@app.callback(
Output("func-abs-error", "figure"),
[Input("select_date", "value"), Input("benchmark-tabs", "value")],
)
def update_abs_error(selected_date, mode):
try:
filter_df = func_df[func_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="total abs error",
text="total abs error",
color="device",
log_x=True,
y="function",
orientation="h",
color_discrete_sequence=colors_discrete,
template=template,
title="Crypten Absolute Error",
barmode="group",
)
fig.update_traces(texttemplate="%{text:.1f}", textposition="outside")
fig.update_layout(height=500)
return fig
@app.callback(
Output("func-relative-error", "figure"),
[Input("select_date", "value"), Input("benchmark-tabs", "value")],
)
def update_relative_error(selected_date, mode):
try:
filter_df = func_df[func_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="average relative error",
text="average relative error",
y="function",
color="device",
orientation="h",
color_discrete_sequence=colors_discrete,
template=template,
title="Crypten Relative Error",
barmode="group",
)
fig.update_traces(texttemplate="%{text:%}", textposition="outside")
fig.update_layout(height=500)
return fig
def process_comparison_options(filter_df, option):
color = "type"
if option == "CPU vs Plaintext":
filter_df = filter_df[filter_df["device"] == "cpu"]
filter_df["type"] = np.where(
filter_df["is plain text"], "Plain Text", "CrypTen"
)
elif option == "GPU vs Plaintext":
filter_df = filter_df[filter_df["device"] == "gpu"]
if not filter_df.empty:
filter_df["type"] = np.where(
filter_df["is plain text"], "Plain Text", "CrypTen"
)
elif option == "CPU vs GPU":
filter_df = filter_df[filter_df["is plain text"] is False]
color = "device"
return filter_df, color
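# Example of the filtering above: with option="CPU vs GPU", plain text rows are dropped
# and bars are colored by "device"; the other two options keep a single device and
# color by a derived "type" column (Plain Text vs CrypTen).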
def render_emtpy_figure():
return {
"layout": {
"xaxis": {"visible": False},
"yaxis": {"visible": False},
"annotations": [
{
"text": "No matching data found",
"xref": "paper",
"yref": "paper",
"showarrow": False,
"font": {"size": 28},
}
],
}
}
@app.callback(
Output("model-training-time", "figure"),
[
Input("select_date", "value"),
Input("benchmark-tabs", "value"),
Input("select_comparison", "value"),
],
)
def update_training_time(selected_date, mode, comp_opt):
try:
filter_df = model_df[model_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
filter_df, color = process_comparison_options(filter_df, comp_opt)
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="seconds per epoch",
text="seconds per epoch",
y="model",
color=color,
orientation="h",
barmode="group",
color_discrete_sequence=colors_discrete,
template=template,
title="Model Training Time",
)
fig.update_layout(xaxis={"range": [0, filter_df["seconds per epoch"].max() * 1.1]})
fig.update_traces(texttemplate="%{text:.2f}", textposition="outside")
return fig
@app.callback(
Output("model-inference-time", "figure"),
[
Input("select_date", "value"),
Input("benchmark-tabs", "value"),
Input("select_comparison", "value"),
],
)
def update_inference_time(selected_date, mode, comp_opt):
try:
filter_df = model_df[model_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
filter_df, color = process_comparison_options(filter_df, comp_opt)
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="inference time",
text="inference time",
y="model",
color=color,
orientation="h",
barmode="group",
color_discrete_sequence=colors_discrete,
template=template,
title="Model Inference Time",
)
fig.update_layout(
xaxis={"range": [0, filter_df["inference time"].max() * 1.1]},
xaxis_title="inference time in seconds",
)
fig.update_traces(texttemplate="%{text:.2f}", textposition="outside")
return fig
@app.callback(
Output("model-accuracy", "figure"),
[
Input("select_date", "value"),
Input("benchmark-tabs", "value"),
Input("select_comparison", "value"),
],
)
def update_model_accuracy(selected_date, mode, comp_opt):
try:
filter_df = model_df[model_df["mode"] == mode]
filter_df = filter_df[filter_df["date"] == selected_date]
filter_df, color = process_comparison_options(filter_df, comp_opt)
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = px.bar(
filter_df,
x="accuracy",
text="accuracy",
y="model",
color=color,
orientation="h",
barmode="group",
color_discrete_sequence=colors_discrete,
template=template,
title="Model Accuracy",
)
fig.update_layout(xaxis={"range": [0, 1.0]})
fig.update_traces(texttemplate="%{text:%}", textposition="outside")
return fig
@app.callback(
Output("runtime-diff", "figure"),
[Input("start_date", "value"), Input("end_date", "value"), Input("funcs", "value")],
)
def update_runtime_diff(start_date, end_date, funcs):
if type(funcs) is str:
funcs = [funcs]
try:
filter_df = func_df[func_df["mode"] == "1pc"]
func_df_cpu = filter_df[filter_df["device"] == "cpu"]
start_df = func_df_cpu[func_df_cpu["date"] == start_date]
end_df = func_df_cpu[func_df_cpu["date"] == end_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = make_subplots(
rows=len(funcs), cols=1, specs=[[{"type": "domain"}] for _ in range(len(funcs))]
)
for i, func in enumerate(funcs):
runtime = end_df[end_df["function"] == func]["runtime crypten"]
runtime_prev = start_df[start_df["function"] == func]["runtime crypten"]
func_text = func.capitalize()
fig.add_trace(
go.Indicator(
mode="number+delta",
value=float(runtime),
title={
"text": f"{func_text}<br><span style='font-size:0.8em;color:gray'>"
+ "runtime in seconds</span><br>"
},
delta={
"reference": float(runtime_prev),
"relative": True,
"increasing": {"color": "#ff4236"},
"decreasing": {"color": "#008000"},
},
),
row=i + 1,
col=1,
)
fig.update_layout(height=300 * len(funcs))
return fig
@app.callback(
Output("error-diff", "figure"),
[Input("start_date", "value"), Input("end_date", "value"), Input("funcs", "value")],
)
def update_error_diff(start_date, end_date, funcs):
if type(funcs) is str:
funcs = [funcs]
try:
filter_df = func_df[func_df["mode"] == "1pc"]
func_df_cpu = filter_df[filter_df["device"] == "cpu"]
start_df = func_df_cpu[func_df_cpu["date"] == start_date]
end_df = func_df_cpu[func_df_cpu["date"] == end_date]
except KeyError:
filter_df = pd.DataFrame()
if filter_df.empty:
return render_emtpy_figure()
fig = make_subplots(
rows=len(funcs), cols=1, specs=[[{"type": "domain"}] for _ in range(len(funcs))]
)
for i, func in enumerate(funcs):
error = end_df[end_df["function"] == func]["total abs error"]
error_prev = start_df[start_df["function"] == func]["total abs error"]
func_text = func.capitalize()
fig.add_trace(
go.Indicator(
mode="number+delta",
value=float(error),
title={
"text": f"{func_text}<br><span style='font-size:0.8em;color:gray'>"
+ "total abs error</span><br>"
},
delta={
"reference": float(error_prev),
"relative": True,
"increasing": {"color": "#ff4236"},
"decreasing": {"color": "#008000"},
},
),
row=i + 1,
col=1,
)
fig.update_layout(height=300 * len(funcs))
return fig
@app.callback(Output("runtime-timeseries", "figure"), [Input("funcs", "value")])
def update_runtime_timeseries(funcs):
if type(funcs) is str:
funcs = [funcs]
try:
filtered_df = func_df[func_df["function"].isin(funcs)]
filtered_df.sort_values("date", inplace=True)
except KeyError:
return render_emtpy_figure()
fig = px.line(
filtered_df, x="date", y="runtime crypten", template=template, color="function"
)
return fig
@app.callback(Output("error-timeseries", "figure"), [Input("funcs", "value")])
def update_error_timeseries(funcs):
if type(funcs) is str:
funcs = [funcs]
try:
filtered_df = func_df[func_df["function"].isin(funcs)]
filtered_df.sort_values("date", inplace=True)
except KeyError:
return render_emtpy_figure()
fig = px.line(
filtered_df, x="date", y="total abs error", template=template, color="function"
)
return fig
@app.callback(
dash.dependencies.Output("page-content", "children"),
[dash.dependencies.Input("url", "pathname")],
)
def display_page(pathname):
"""Routes to page based on URL"""
if pathname == "/compare":
return comparison_layout
else:
return index_page
if __name__ == "__main__":
app.run_server(debug=True)
|
CrypTen-main
|
benchmarks/dash_app/app.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import pandas as pd
def get_aggregated_data(base_dir, subdirs):
"""Aggregate dataframe for model and func benchmarks assumining directory is structured as
DATA_PATH
|_2020-02-20
|_subdir1
|_func_benchmarks.csv
|_model_benchmarks.csv
|_func_benchmarks_cuda.csv (optional)
|_model_benchmarks_cuda.csv (optional)
|_subdir2
...
Args:
base_dir (pathlib.path): path containing month subdirectories
subdirs (list): a list of all subdirectories to aggregate dataframes from
Returns: tuple of pd.DataFrames containing func and model benchmarks with dates
"""
available_dates = get_available_dates(base_dir)
func_df, model_df = pd.DataFrame(), pd.DataFrame()
for subdir in subdirs:
func_df_cpu, model_df_cpu = read_subdir(base_dir, available_dates, subdir)
func_df_gpu, model_df_gpu = read_subdir(
base_dir, available_dates, subdir, cuda=True
)
tmp_func_df = pd.concat([func_df_cpu, func_df_gpu])
tmp_model_df = pd.concat([model_df_cpu, model_df_gpu])
tmp_func_df["mode"] = subdir
tmp_model_df["mode"] = subdir
func_df = pd.concat([func_df, tmp_func_df])
model_df = pd.concat([model_df, tmp_model_df])
return func_df, model_df
def load_df(path, cuda=False):
"""Load dataframe for model and func benchmarks assumining directory is structured as
path
|_func_benchmarks.csv
|_model_benchmarks.csv
|_func_benchmarks_cuda.csv (optional)
|_model_benchmarks_cuda.csv (optional)
Args:
path (str): path containing model and func benchmarks
cuda (bool) : if set to true, read the corresponding func and model benchmarks for cuda
Returns: tuple of pd.DataFrames containing func and model benchmarks with dates
"""
postfix = "_cuda" if cuda else ""
func_path = os.path.join(path, f"func_benchmarks{postfix}.csv")
model_path = os.path.join(path, f"model_benchmarks{postfix}.csv")
func_df, model_df = pd.DataFrame(), pd.DataFrame()
if os.path.exists(func_path):
func_df = pd.read_csv(func_path)
if os.path.exists(model_path):
model_df = pd.read_csv(model_path)
return func_df, model_df
def read_subdir(base_dir, dates, subdir="", cuda=False):
"""Builds dataframe for model and func benchmarks assuming directory is structured as
DATA_PATH
|_2020-02-20
|_subdir
|_func_benchmarks.csv
|_model_benchmarks.csv
|_func_benchmarks_cuda.csv (optional)
|_model_benchmarks_cuda.csv (optional)
Args:
base_dir (pathlib.path): path containing month subdirectories
dates (list of str): containing dates / subdirectories available
subdir (str): string indicating the name of the subdirectory to read benchmarks from
cuda (bool) : if set to true, read the corresponding func and model benchmarks for cuda
Returns: tuple of pd.DataFrames containing func and model benchmarks with dates
"""
func_df, model_df = pd.DataFrame(), pd.DataFrame()
device = "gpu" if cuda else "cpu"
for date in dates:
path = os.path.join(base_dir, date, subdir)
tmp_func_df, tmp_model_df = load_df(path, cuda=cuda)
set_metadata(tmp_func_df, date, device)
set_metadata(tmp_model_df, date, device)
func_df = pd.concat([func_df, tmp_func_df])
model_df = pd.concat([model_df, tmp_model_df])
if not func_df.empty:
func_df = compute_runtime_gap(func_df)
func_df = add_error_bars(func_df)
return func_df, model_df
def get_available_dates(data_dir):
"""Returns list of available dates in DATA_PATH directory"""
available_dates = []
for sub_dir in os.listdir(data_dir):
if os.path.isdir(os.path.join(data_dir, sub_dir)):
available_dates.append(sub_dir)
return available_dates
def set_metadata(df, date, device):
"""Set the device and date attribute for the dataframe"""
df["date"] = date
df["device"] = device
def compute_runtime_gap(func_df):
"""Computes runtime gap between CrypTen and Plain Text"""
func_df["runtime gap"] = func_df["runtime crypten"] / func_df["runtime"]
func_df["runtime gap Q1"] = func_df["runtime crypten Q1"] / func_df["runtime"]
func_df["runtime gap Q3"] = func_df["runtime crypten Q3"] / func_df["runtime"]
return func_df
def add_error_bars(func_df):
"""Adds error bars for plotting based on Q1 and Q3"""
columns = ["runtime crypten", "runtime gap"]
for col in columns:
func_df = calc_error_bar(func_df, col)
return func_df
def calc_error_bar(df, column_name):
"""Adds error plus and minus for plotting"""
error_plus = df[column_name + " Q3"] - df[column_name]
error_minus = df[column_name] - df[column_name + " Q1"]
df[column_name + " error plus"] = error_plus
df[column_name + " error minus"] = error_minus
return df
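# Worked example (hypothetical numbers): for a row with
#     "runtime crypten Q1" = 0.8, "runtime crypten" = 1.0, "runtime crypten Q3" = 1.3
# calc_error_bar adds
#     "runtime crypten error plus"  = 1.3 - 1.0 = 0.3
#     "runtime crypten error minus" = 1.0 - 0.8 = 0.2
# which app.py feeds to plotly's error_x / error_x_minus arguments.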
|
CrypTen-main
|
benchmarks/dash_app/load_data.py
|
CrypTen-main
|
configs/__init__.py
|
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
import uuid
from argparse import ArgumentParser, REMAINDER
"""
Wrapper to launch MPC scripts as multiple processes.
"""
def main():
args = parse_args()
# set PyTorch distributed related environmental variables
current_env = os.environ.copy()
current_env["WORLD_SIZE"] = str(args.world_size)
processes = []
# Use random file so multiple jobs can be run simultaneously
INIT_METHOD = "file:///tmp/crypten-rendezvous-{}".format(uuid.uuid1())
for rank in range(0, args.world_size):
# each process's rank
current_env["RANK"] = str(rank)
current_env["RENDEZVOUS"] = INIT_METHOD
# spawn the processes
cmd = [args.training_script] + args.training_script_args
process = subprocess.Popen(cmd, env=current_env)
processes.append(process)
for process in processes:
process.wait()
if process.returncode != 0:
raise subprocess.CalledProcessError(
returncode=process.returncode, cmd=process.args
)
def parse_args():
"""
Helper function parsing the command line options
"""
parser = ArgumentParser(
description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"parties for MPC scripts"
)
# Optional arguments for the launch helper
parser.add_argument(
"--world_size",
type=int,
default=1,
help="The number of parties to launch." "Each party acts as its own process",
)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single GPU training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script",
)
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args()
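# Example invocation (script path and flags are illustrative): launch a 2-party run
# locally; each spawned process sees WORLD_SIZE, RANK, and a file:// RENDEZVOUS address:
#
#     python scripts/distributed_launcher.py --world_size 2 \
#         examples/mpc_linear_svm/launcher.py --features 50 --examples 100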
if __name__ == "__main__":
main()
|
CrypTen-main
|
scripts/distributed_launcher.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This file is a tool to run MPC distributed training over AWS.
To run distributed training, first multiple AWS instances needs to be created
with a public AMI "Deep Learning AMI (Ubuntu) Version 24.0":
$ aws ec2 run-instances \
--image-id ami-0ddba16a97b1dcda5 \
--count 2 \
--instance-type t2.micro \
--key-name fair-$USER \
--tag-specifications "ResourceType=instance,Tags=[{Key=fair-user,Value=$USER}]"
Two EC2 instances will be created by the command line shown above. Assume
the ids of the two instances created are i-068681e808235a851 and
i-0d7ebacfe1e3f28eb. Next, pytorch and crypten must be properly installed
on every instance.
Then the following command lines can run the mpc_linear_svm example on the two
EC2 instances created above:
$ python3 crypten/scripts/aws_launcher.py \
--ssh_key_file=/home/$USER/.aws/fair-$USER.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=crypten/examples/mpc_linear_svm/mpc_linear_svm.py \
crypten/examples/mpc_linear_svm/launcher.py \
--features 50 \
--examples 100 \
--epochs 50 \
--lr 0.5 \
--skip_plaintext
If you want to train with AWS instances located at multiple regions, then you would need
to provide ssh_key_file for each instance:
$ python3 crypten/scripts/aws_launcher.py \
--regions=us-east-1,us-west-1 \
--ssh_key_file=/home/$USER/.aws/east.pem,/home/$USER/.aws/west.pem \
--instances=i-038dd14b9383b9d79,i-08f057b9c03d4a916 \
--aux_files=crypten/examples/mpc_linear_svm/mpc_linear_svm.py \
crypten/examples/mpc_linear_svm/launcher.py \
--features 50 \
--examples 100 \
--epochs 50 \
--lr 0.5 \
--skip_plaintext
"""
import concurrent.futures
import configparser
import os
import sys
import time
import uuid
import warnings
from argparse import ArgumentParser, REMAINDER
from pathlib import Path
import boto3
import paramiko
def get_instances(ec2, instance_ids):
instances = list(
ec2.instances.filter(Filters=[{"Name": "instance-id", "Values": instance_ids}])
)
return instances
def connect_to_instance(instance, keypath, username, http_proxy=None):
print(f"Connecting to {instance.id}...")
ip_address = instance.public_ip_address
if http_proxy:
# paramiko.ProxyCommand does not do string substitution for %h %p,
# so 'nc --proxy-type http --proxy fwdproxy:8080 %h %p' would not work!
proxy = paramiko.ProxyCommand(
f"nc --proxy-type http --proxy {http_proxy} {ip_address} {22}"
)
proxy.settimeout(300)
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
retries = 20
while retries > 0:
try:
client.connect(
ip_address,
username=username,
key_filename=keypath,
timeout=10,
sock=proxy if http_proxy else None,
)
print(f"Connected to {instance.id}")
break
except Exception as e:
print(f"Exception: {e} Retrying...")
retries -= 1
time.sleep(10)
return client
def add_prefix_each_line(prefix, str):
lines = [f"{prefix}{line}" for line in str.split("\n")]
return "\n".join(lines)
def run_command(instance, client, cmd, environment=None, inputs=None):
stdin, stdout, stderr = client.exec_command(
cmd, get_pty=True, environment=environment
)
if inputs:
for inp in inputs:
stdin.write(inp)
def read_lines(fin, fout, line_head):
line = ""
while not fin.channel.exit_status_ready():
line += fin.read(1).decode("utf8")
if line.endswith("\n"):
print(f"{line_head}{line[:-1]}", file=fout)
line = ""
if line:
# print what remains in line buffer, in case fout does not
# end with '\n'
print(f"{line_head}{line[:-1]}", file=fout)
with concurrent.futures.ThreadPoolExecutor(max_workers=2) as printer:
printer.submit(read_lines, stdout, sys.stdout, f"[{instance} STDOUT] ")
printer.submit(read_lines, stderr, sys.stderr, f"[{instance} STDERR] ")
def upload_file(instance_id, client, localpath, remotepath):
ftp_client = client.open_sftp()
print(f"Uploading `{localpath}` to {instance_id}...")
ftp_client.put(localpath, remotepath)
ftp_client.close()
print(f"`{localpath}` uploaded to {instance_id}.")
def main():
args = parse_args()
cf = configparser.ConfigParser()
cf.read(args.credentials)
warnings.filterwarnings(
"ignore", category=ResourceWarning, message="unclosed.*<ssl.SSLSocket.*>"
)
regions = args.regions.split(",")
instance_ids = args.instances.split(",")
ssh_key_files = args.ssh_key_file.split(",")
instances = []
if len(regions) > 1:
print("Multiple regions detected")
assert len(instance_ids) == len(
ssh_key_files
), "{} instance ids are provided, but {} SSH keys found.".format(
len(instance_ids), len(ssh_key_files)
)
assert len(instance_ids) == len(
regions
), "{} instance ids are provided, but {} regions found.".format(
len(instance_ids), len(regions)
)
for i, region in enumerate(regions):
session = boto3.session.Session(
aws_access_key_id=cf["default"]["aws_access_key_id"],
aws_secret_access_key=cf["default"]["aws_secret_access_key"],
region_name=region,
)
ec2 = session.resource("ec2")
instance = get_instances(ec2, [instance_ids[i]])
instances += instance
else:
session = boto3.session.Session(
aws_access_key_id=cf["default"]["aws_access_key_id"],
aws_secret_access_key=cf["default"]["aws_secret_access_key"],
region_name=regions[0],
)
ec2 = session.resource("ec2")
instances = get_instances(ec2, instance_ids)
assert (
len(ssh_key_files) == 1
), "1 region is detected, but {} SSH keys found.".format(len(ssh_key_files))
ssh_key_files = [ssh_key_files[0] for _ in range(len(instances))]
assert len(instance_ids) == len(
instances
), "{} instance ids are provided, but {} found.".format(
len(instance_ids), len(instances)
)
# Only print the public IP addresses of the instances.
# Then do nothing else and return.
if args.only_show_instance_ips:
for instance in instances:
print(instance.public_ip_address)
return
world_size = len(instances)
print(f"Running world size {world_size} with instances: {instances}")
master_instance = instances[0]
# Key: instance id; value: paramiko.SSHClient object.
client_dict = {}
for i, instance in enumerate(instances):
client = connect_to_instance(
instance, ssh_key_files[i], args.ssh_user, args.http_proxy
)
client_dict[instance.id] = client
assert os.path.exists(
args.training_script
), f"File `{args.training_script}` does not exist"
file_paths = args.aux_files.split(",") if args.aux_files else []
for local_path in file_paths:
assert os.path.exists(local_path), f"File `{local_path}` does not exist"
remote_dir = f"aws-launcher-tmp-{uuid.uuid1()}"
script_basename = os.path.basename(args.training_script)
remote_script = os.path.join(remote_dir, script_basename)
# Upload files to all instances concurrently.
with concurrent.futures.ThreadPoolExecutor(max_workers=8) as uploaders:
for instance_id, client in client_dict.items():
run_command(instance_id, client, f"mkdir -p {remote_dir}")
uploaders.submit(
upload_file, instance_id, client, args.training_script, remote_script
)
for local_path in file_paths:
uploaders.submit(
upload_file,
instance_id,
client,
local_path,
os.path.join(remote_dir, os.path.basename(local_path)),
)
for instance_id, client in client_dict.items():
run_command(instance_id, client, f"chmod +x {remote_script}")
run_command(instance_id, client, f"ls -al {remote_dir}")
environment = {
"WORLD_SIZE": str(world_size),
"RENDEZVOUS": "env://",
"MASTER_ADDR": master_instance.private_ip_address,
"MASTER_PORT": str(args.master_port),
}
with concurrent.futures.ThreadPoolExecutor(max_workers=world_size) as executor:
rank = 0
for instance_id, client in client_dict.items():
environment["RANK"] = str(rank)
# TODO: Although paramiko.SSHClient.exec_command() can accept
# an argument `environment`, it seems not to take effect in
# practice. It might because "Servers may silently reject
# some environment variables" according to paramiko document.
# As a workaround, here all environment variables are explicitly
# exported.
environment_cmd = "; ".join(
[f"export {key}={value}" for (key, value) in environment.items()]
)
prepare_cmd = f"{args.prepare_cmd}; " if args.prepare_cmd else ""
cmd = "{}; {} {} {} {}".format(
environment_cmd,
f"cd {remote_dir} ;",
prepare_cmd,
f"./{script_basename}",
" ".join(args.training_script_args),
)
print(f"Run command: {cmd}")
executor.submit(run_command, instance_id, client, cmd, environment)
rank += 1
# Cleanup temp dir.
for instance_id, client in client_dict.items():
run_command(instance_id, client, f"rm -rf {remote_dir}")
client.close()
def parse_args():
"""
Helper function parsing the command line options
"""
parser = ArgumentParser(
description="PyTorch distributed training launch "
"helper utilty that will spawn up "
"parties for MPC scripts on AWS"
)
parser.add_argument(
"--credentials",
type=str,
default=f"{Path.home()}/.aws/credentials",
help="Credentials used to access AWS",
)
parser.add_argument(
"--only_show_instance_ips",
action="store_true",
default=False,
help="Only show public IPs of the given instances."
"No other actions will be done",
)
parser.add_argument("--regions", type=str, default="us-west-2", help="AWS Region")
parser.add_argument(
"--instances",
type=str,
required=True,
help="The comma-separated ids of AWS instances",
)
parser.add_argument(
"--master_port",
type=int,
default=29500,
help="The port used by master instance " "for distributed training",
)
parser.add_argument(
"--ssh_key_file",
type=str,
required=True,
help="Path to the RSA private key file " "used for instance authentication",
)
parser.add_argument(
"--ssh_user",
type=str,
default="ubuntu",
help="The username to ssh to AWS instance",
)
parser.add_argument(
"--http_proxy",
type=str,
default=None,
help="If not none, use the http proxy specified "
"(e.g., fwdproxy:8080) to ssh to AWS instance",
)
parser.add_argument(
"--aux_files",
type=str,
default=None,
help="The comma-separated paths of additional files "
" that need to be transferred to AWS instances. "
"If more than one file needs to be transferred, "
"the basename of any two files can not be the "
"same.",
)
parser.add_argument(
"--prepare_cmd",
type=str,
default="",
help="The command to run before running distribute "
"training for prepare purpose, e.g., setup "
"environment, extract data files, etc.",
)
# positional
parser.add_argument(
"training_script",
type=str,
help="The full path to the single machine training "
"program/script to be launched in parallel, "
"followed by all the arguments for the "
"training script",
)
# rest from the training program
parser.add_argument("training_script_args", nargs=REMAINDER)
return parser.parse_args()
if __name__ == "__main__":
main()
|
CrypTen-main
|
scripts/aws_launcher.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import os
import torch
from torchvision import datasets, transforms
def _get_norm_mnist(dir, reduced=None, binary=False):
"""Downloads and normalizes mnist"""
mnist_train = datasets.MNIST(dir, download=True, train=True)
mnist_test = datasets.MNIST(dir, download=True, train=False)
# compute normalization factors
data_all = torch.cat([mnist_train.data, mnist_test.data]).float()
data_mean, data_std = data_all.mean(), data_all.std()
tensor_mean, tensor_std = data_mean.unsqueeze(0), data_std.unsqueeze(0)
# normalize
mnist_train_norm = transforms.functional.normalize(
mnist_train.data.float(), tensor_mean, tensor_std
)
mnist_test_norm = transforms.functional.normalize(
mnist_test.data.float(), tensor_mean, tensor_std
)
# change all nonzero labels to 1 if binary classification required
if binary:
mnist_train.targets[mnist_train.targets != 0] = 1
mnist_test.targets[mnist_test.targets != 0] = 1
# create a reduced dataset if required
if reduced is not None:
mnist_norm = (mnist_train_norm[:reduced], mnist_test_norm[:reduced])
mnist_labels = (mnist_train.targets[:reduced], mnist_test.targets[:reduced])
else:
mnist_norm = (mnist_train_norm, mnist_test_norm)
mnist_labels = (mnist_train.targets, mnist_test.targets)
return mnist_norm, mnist_labels
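# Note: the tensors returned above keep the raw MNIST image shape, e.g. the train split
# is (60000, 28, 28) (or (reduced, 28, 28) when `reduced` is set). The split_* helpers
# below slice either the last dimension (feature/column splits) or the first dimension
# (observation splits).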
def split_features(
split=0.5, dir="/tmp", party1="alice", party2="bob", reduced=None, binary=False
):
"""Splits features between Party 1 and Party 2"""
mnist_norm, mnist_labels = _get_norm_mnist(dir, reduced, binary)
mnist_train_norm, mnist_test_norm = mnist_norm
mnist_train_labels, mnist_test_labels = mnist_labels
num_features = mnist_train_norm.shape[1]
split_point = int(split * num_features)
party1_train = mnist_train_norm[:, :, :split_point]
party2_train = mnist_train_norm[:, :, split_point:]
party1_test = mnist_test_norm[:, :, :split_point]
party2_test = mnist_test_norm[:, :, split_point:]
torch.save(party1_train, os.path.join(dir, party1 + "_train.pth"))
torch.save(party2_train, os.path.join(dir, party2 + "_train.pth"))
torch.save(party1_test, os.path.join(dir, party1 + "_test.pth"))
torch.save(party2_test, os.path.join(dir, party2 + "_test.pth"))
torch.save(mnist_train_labels, os.path.join(dir, "train_labels.pth"))
torch.save(mnist_test_labels, os.path.join(dir, "test_labels.pth"))
def split_observations(
split=0.5, dir="/tmp", party1="alice", party2="bob", reduced=None, binary=False
):
"""Splits observations between Party 1 and Party 2"""
mnist_norm, mnist_labels = _get_norm_mnist(dir, reduced, binary)
mnist_train_norm, mnist_test_norm = mnist_norm
mnist_train_labels, mnist_test_labels = mnist_labels
num_train_obs = mnist_train_norm.shape[0]
obs_train_split = int(split * num_train_obs)
num_test_obs = mnist_test_norm.shape[0]
obs_test_split = int(split * num_test_obs)
party1_train = mnist_train_norm[:obs_train_split, :, :]
party2_train = mnist_train_norm[obs_train_split:, :, :]
party1_test = mnist_test_norm[:obs_test_split, :, :]
party2_test = mnist_test_norm[obs_test_split:, :, :]
torch.save(party1_train, os.path.join(dir, party1 + "_train.pth"))
torch.save(party2_train, os.path.join(dir, party2 + "_train.pth"))
torch.save(party1_test, os.path.join(dir, party1 + "_test.pth"))
torch.save(party2_test, os.path.join(dir, party2 + "_test.pth"))
party1_train_labels = mnist_train_labels[:obs_train_split]
party1_test_labels = mnist_test_labels[:obs_test_split]
party2_train_labels = mnist_train_labels[obs_train_split:]
party2_test_labels = mnist_test_labels[obs_test_split:]
torch.save(party1_train_labels, os.path.join(dir, party1 + "_train_labels.pth"))
torch.save(party1_test_labels, os.path.join(dir, party1 + "_test_labels.pth"))
torch.save(party2_train_labels, os.path.join(dir, party2 + "_train_labels.pth"))
torch.save(party2_test_labels, os.path.join(dir, party2 + "_test_labels.pth"))
def split_features_v_labels(
dir="/tmp", party1="alice", party2="bob", reduced=None, binary=False
):
"""Gives Party 1 features and Party 2 labels"""
mnist_norm, mnist_labels = _get_norm_mnist(dir, reduced, binary)
mnist_train_norm, mnist_test_norm = mnist_norm
mnist_train_labels, mnist_test_labels = mnist_labels
torch.save(mnist_train_norm, os.path.join(dir, party1 + "_train.pth"))
torch.save(mnist_test_norm, os.path.join(dir, party1 + "_test.pth"))
torch.save(mnist_train_labels, os.path.join(dir, party2 + "_train_labels.pth"))
torch.save(mnist_test_labels, os.path.join(dir, party2 + "_test_labels.pth"))
def split_train_v_test(
dir="/tmp", party1="alice", party2="bob", reduced=None, binary=False
):
"""Gives Party 1 training data and Party 2 the test data"""
mnist_norm, mnist_labels = _get_norm_mnist(dir, reduced, binary)
mnist_train_norm, mnist_test_norm = mnist_norm
mnist_train_labels, mnist_test_labels = mnist_labels
torch.save(mnist_train_norm, os.path.join(dir, party1 + "_train.pth"))
torch.save(mnist_test_norm, os.path.join(dir, party2 + "_test.pth"))
torch.save(mnist_train_labels, os.path.join(dir, party1 + "_train_labels.pth"))
torch.save(mnist_test_labels, os.path.join(dir, party2 + "_test_labels.pth"))
def main():
parser = argparse.ArgumentParser("Split data for use in Tutorials")
parser.add_argument(
"--option",
type=str,
choices={"features", "data", "features_v_labels", "train_v_test"},
)
parser.add_argument("--ratio", type=float, default=0.72)
parser.add_argument("--name_party1", type=str, default="alice")
parser.add_argument("--name_party2", type=str, default="bob")
parser.add_argument("--dest", type=str, default="/tmp")
parser.add_argument("--reduced", type=int, default=None)
parser.add_argument("--binary", action="store_true")
args = parser.parse_args()
if args.option == "features":
split_features(
split=args.ratio,
dir=args.dest,
party1=args.name_party1,
party2=args.name_party2,
reduced=args.reduced,
binary=args.binary,
)
elif args.option == "data":
split_observations(
split=args.ratio,
dir=args.dest,
party1=args.name_party1,
party2=args.name_party2,
reduced=args.reduced,
binary=args.binary,
)
elif args.option == "features_v_labels":
split_features_v_labels(
dir=args.dest,
party1=args.name_party1,
party2=args.name_party2,
reduced=args.reduced,
binary=args.binary,
)
elif args.option == "train_v_test":
split_train_v_test(
dir=args.dest,
party1=args.name_party1,
party2=args.name_party2,
reduced=args.reduced,
binary=args.binary,
)
else:
raise ValueError("Invalid split option")
if __name__ == "__main__":
main()
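# --- Hedged usage sketch (not part of the original tutorial script) ----------
# The split_* helpers above write party-specific tensors with torch.save; the
# loader below simply mirrors those file names so a split can be reloaded for
# a two-party tutorial. The helper name and the example call are ours.
def _load_party_split(dir="/tmp", party="alice"):
    """Reload the train/test tensors written by split_features for one party."""
    train = torch.load(os.path.join(dir, party + "_train.pth"))
    test = torch.load(os.path.join(dir, party + "_test.pth"))
    return train, test
# Example (kept commented out so the CLI behaviour above is unchanged):
#   split_features(split=0.5, dir="/tmp", party1="alice", party2="bob", reduced=100)
#   alice_train, alice_test = _load_party_split("/tmp", "alice")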
|
CrypTen-main
|
tutorials/mnist_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import sys
import numpy as np
from utils import (
initialize_render, merge_meshes,
load_motion
)
import torch
from PIL import Image
from model import JOHMRLite
import os
import glob
import json
from pathlib import Path
import argparse
import re
import matplotlib.pyplot as plt
global model, index, alpha
index = 0
alpha = 0.5
def isfloat(x):
try:
a = float(x)
except (TypeError, ValueError):
return False
else:
return True
def isint(x):
try:
a = float(x)
b = int(a)
except (TypeError, ValueError):
return False
else:
return a == b
def find_files(folder, extension):
return sorted([Path(os.path.join(folder, f)) for f in os.listdir(folder) if f.endswith(extension)])
def read_data():
"""
Load all annotated data for visualization
"""
# load gt part motion values (degree or cm)
gt_partmotion = []
    with open(os.path.join(args.data_folder, 'jointstate.txt')) as fp:
        for line in fp:
            line = line.strip('\n')
            if isfloat(line) or isint(line):
                gt_partmotion.append(float(line))
gt_partmotion = np.asarray(gt_partmotion)
with open(os.path.join(args.data_folder, '3d_info.txt')) as myfile:
gt_data = [next(myfile).strip('\n') for x in range(14)]
# GT global object rotation
gt_pitch = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[3])[0])
gt_yaw = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[4])[0])
gt_roll = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[5])[0])
# GT global object translation (cm)
gt_x = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[6])[0])
gt_y = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[7])[0])
gt_z = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[8])[0])
# GT object dimension (cm)
gt_xdim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[0])
gt_ydim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[1])
gt_zdim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[2])
gt_cad = re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[10])[0]
gt_part = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[11])[0])
gt_focalX = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[-2])[0])
gt_focalY = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[-1])[0])
assert gt_focalX == gt_focalY
data = {'part_motion': gt_partmotion,
'pitch': gt_pitch,
'yaw': gt_yaw,
'roll': gt_roll,
'x_offset': gt_x,
'y_offset': gt_y,
'z_offset': gt_z,
'obj_size': [gt_xdim, gt_ydim, gt_zdim],
'cad': gt_cad,
'part': gt_part,
'focal': gt_focalX}
return data
def create_model(gt_data):
"""
create initial models
"""
global model, index, alpha
x_offset = gt_data['x_offset']
y_offset = gt_data['y_offset']
z_offset = gt_data['z_offset']
yaw = gt_data['yaw']
pitch = gt_data['pitch']
roll = gt_data['roll']
part_motion = gt_data['part_motion']
obj_size = gt_data['obj_size'] # length, height, width (x, y, z), cm
focal_x = gt_data['focal']
focal_y = gt_data['focal']
device = torch.device("cuda:0")
obj_path = os.path.join(args.cad_folder, gt_data['cad'])
verts, faces, vertexSegs, faceSegs = merge_meshes(obj_path, device)
verts[:,1:] *= -1 # pytorch3d -> world coordinate
obj_verts = verts.to(device)
obj_faces = faces.to(device)
# load motion json file
with open(os.path.join(args.cad_folder, gt_data['cad'], 'motion.json')) as json_file:
motions = json.load(json_file)
assert len(motions) + 2 == len(vertexSegs)
rot_o, rot_axis, rot_type, limit_a, limit_b, contact_list = load_motion(motions, device)
frames = find_files(os.path.join(args.data_folder, 'frames'), '.jpg')
image_bg = np.array(Image.open(frames[index]))/255.0
img_h = image_bg.shape[0]
img_w = image_bg.shape[1]
img_square = max(img_h, img_w)
img_small = 256
# render
_, phong_renderer = initialize_render(device, focal_x, focal_y, img_square, img_small)
    # build the lightweight JOHMR model used for rendering
model = JOHMRLite(x_offset, y_offset, z_offset, yaw, pitch, roll, part_motion, obj_size, \
obj_verts, obj_faces, phong_renderer, gt_data['part'], rot_o, rot_axis, \
vertexSegs, rot_type)
return len(frames)
def display_img():
global model, index, alpha
frames = find_files(os.path.join(args.data_folder, 'frames'), '.jpg')
image_bg = np.array(Image.open(frames[index]))/255.0
img_h = image_bg.shape[0]
img_w = image_bg.shape[1]
img_square = max(img_h, img_w)
img_small = 256
with torch.no_grad():
image = model(index)
rgb_mask = image_bg.astype(np.float32) #cv2.addWeighted(objmask.astype(np.float32), 0.5, image_bg.astype(np.float32), 0.5, 0.0)
frame_img = np.zeros((img_square, img_square,3))
start = int((max(img_h, img_w) - min(img_h, img_w))/2) - 1
end = start + min(img_h, img_w)
if img_h > img_w:
frame_img[:, start:end, :] = rgb_mask
else:
frame_img[start:end, :, :] = rgb_mask
rgb_mask = frame_img
alpha = min(1.0, max(0.0,alpha))
img_blend = cv2.addWeighted(image.astype(np.float32), alpha, rgb_mask.astype(np.float32), 1-alpha, 0.0)
img_blend = cv2.resize(img_blend, dsize=(800, 800), interpolation=cv2.INTER_NEAREST)
return img_blend
parser = argparse.ArgumentParser()
parser.add_argument("--data_folder", type=str, help="annotation data folder")
parser.add_argument("--cad_folder", type=str, help="cad data folder")
args = parser.parse_args()
gt_data = read_data()
num_frames = create_model(gt_data)
for index in range(num_frames):
img_blend = display_img()
plt.imshow(img_blend)
plt.show()
|
d3d-hoi-main
|
visualization/visualize_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
import torch
import numpy as np
from pytorch3d.renderer import (
look_at_view_transform, TexturesVertex
)
from pytorch3d.structures import Meshes
from utils import rotation_matrix
from pytorch3d.io import save_obj
from pytorch3d.transforms import (
RotateAxisAngle, matrix_to_euler_angles
)
from pytorch3d.transforms.rotation_conversions import (
rotation_6d_to_matrix, matrix_to_rotation_6d
)
import os
from pytorch3d.transforms import (
euler_angles_to_matrix
)
from utils import (
rotation_matrix
)
class JOHMRLite(nn.Module):
def __init__(self, x_offset, y_offset, z_offset, yaw, pitch, roll, part_motion, obj_size, \
obj_verts, obj_faces, vis_render, part_idx, rot_o, axis, vertexSegs, rot_type):
super().__init__()
self.device = obj_verts.device
self.vis_render = vis_render
self.obj_verts = obj_verts.detach()
self.obj_faces = obj_faces.detach()
self.rot_type = rot_type
self.x_offset = x_offset
self.y_offset = y_offset
self.z_offset = z_offset
self.part_motion = part_motion
# camera is almost at the center (distance can't be zero for diff render)
self.R, self.T = look_at_view_transform(0.1, 0.0, 0.0,device=self.device)
self.T[0,2] = 0.0 # manually set to zero
x_diff = torch.max(obj_verts[:,0]) - torch.min(obj_verts[:,0])
self.x_ratio = float(obj_size[0]) / x_diff
y_diff = torch.max(obj_verts[:,1]) - torch.min(obj_verts[:,1])
self.y_ratio = float(obj_size[1]) / y_diff
z_diff = torch.max(obj_verts[:,2]) - torch.min(obj_verts[:,2])
self.z_ratio = float(obj_size[2]) / z_diff
# predefined object CAD part and axis
self.vertexStart = vertexSegs[part_idx]
self.vertexEnd = vertexSegs[part_idx+1]
self.rot_o = rot_o[part_idx]
self.axis = axis[part_idx]
# pytorch3d -> world coordinate
self.rot_o[1:] *= -1
self.axis[1:] *= -1
# rescale object
self.obj_verts[:, 0] *= self.x_ratio
self.obj_verts[:, 1] *= self.y_ratio
self.obj_verts[:, 2] *= self.z_ratio
self.rot_o[0] *= self.x_ratio
self.rot_o[1] *= self.y_ratio
self.rot_o[2] *= self.z_ratio
euler_angle = torch.tensor([pitch, yaw, roll]).reshape(1,3)
self.objR = euler_angles_to_matrix(euler_angle, ["X","Y","Z"]).to(self.device)[0]
return
def forward(self, index):
partmotion = self.part_motion[index]
obj_verts = self.obj_verts.clone()
# part motion
if self.rot_type[0] == 'prismatic':
part_state = torch.tensor(partmotion).to(self.device)
obj_verts_t1 = obj_verts[self.vertexStart:self.vertexEnd, :] - self.rot_o
obj_verts_t2 = obj_verts_t1 + self.axis * part_state #/float(annotation['obj_dim'][2]) * z_ratio
obj_verts[self.vertexStart:self.vertexEnd, :] = obj_verts_t2 + self.rot_o
else:
            part_state = torch.tensor(partmotion*0.0174533)  # degrees -> radians
part_rot_mat = rotation_matrix(self.axis, part_state)
obj_verts_t1 = obj_verts[self.vertexStart:self.vertexEnd, :] - self.rot_o
obj_verts_t2 = torch.mm(part_rot_mat.to(self.device), obj_verts_t1.permute(1,0)).permute(1,0)
obj_verts[self.vertexStart:self.vertexEnd, :] = obj_verts_t2 + self.rot_o
# step 3: object orientation
obj_verts = torch.mm(self.objR, obj_verts.permute(1,0)).permute(1,0)
# step 4: object offset
obj_verts[:, 0] += 100.0*self.x_offset
obj_verts[:, 1] += 100.0*self.y_offset
obj_verts[:, 2] += 100.0*self.z_offset
obj_verts[:,1:] *= -1
# create object mesh for diff render and visualization
tex = torch.ones_like(obj_verts).unsqueeze(0)
tex[:, :, 0] = 0
tex[:, :, 1] = 1
tex[:, :, 2] = 0
textures = TexturesVertex(verts_features=tex).to(self.device)
self.obj_mesh = Meshes(verts=[obj_verts],faces=[self.obj_faces],textures=textures)
vis_image = self.vis_render(meshes_world=self.obj_mesh, R=self.R, T=self.T)
silhouette = vis_image[0,:,:,:3]
return silhouette.detach().cpu().numpy()
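# --- Hedged illustration (not part of the original model file) ---------------
# forward() above moves only the articulated part: shift its vertices so the
# rotation origin sits at zero, apply the 3x3 part rotation, then shift back.
# The helper below reproduces that pattern on plain torch tensors; the function
# name and the worked example are ours, not from the repo.
def _rotate_about_origin(verts, rot_mat, origin):
    """Rotate an (N, 3) vertex tensor about an arbitrary 3D origin."""
    shifted = verts - origin                                   # move origin to 0
    rotated = torch.mm(rot_mat, shifted.permute(1, 0)).permute(1, 0)
    return rotated + origin                                    # move back
# Worked example: a 90-degree rotation about the z-axis through (1, 0, 0)
# maps the point (2, 0, 0) to (1, 1, 0):
#   rz = torch.tensor([[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]])
#   _rotate_about_origin(torch.tensor([[2.0, 0.0, 0.0]]), rz,
#                        torch.tensor([1.0, 0.0, 0.0]))   # -> [[1., 1., 0.]]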
|
d3d-hoi-main
|
visualization/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import natsort
import glob
import open3d as o3d
# rendering components
from pytorch3d.renderer import (
FoVPerspectiveCameras,RasterizationSettings,
MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights, DirectionalLights,
PerspectiveCameras
)
from pytorch3d.io import save_obj
import math
import cv2
import matplotlib.pyplot as plt
import os
import imageio
from decimal import Decimal
import pdb
import json
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import axes3d
from scipy.ndimage.filters import gaussian_filter1d
from numpy.linalg import svd
from multiprocessing import Pool, Manager, cpu_count
from pytorch3d.transforms import Rotate, Translate
from matplotlib.image import imsave
from pathlib import Path
def planeFit(points):
"""
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
    fit a d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
    points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trailing dimensions
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:,np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, svd(M)[0][:,-1]
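# --- Hedged self-check for planeFit (illustrative only, not from the repo) ---
# planeFit expects coordinates along axis 0, i.e. a (d, N) array. Fitting an
# exactly planar point set should recover the plane: for points on z = 5 the
# centroid lies on that plane and the normal is (up to sign) the z axis.
def _plane_fit_example():
    pts = np.stack([np.random.rand(100),   # x coordinates
                    np.random.rand(100),   # y coordinates
                    np.full(100, 5.0)])    # z = 5 for every point
    ctr, normal = planeFit(pts)
    assert abs(ctr[2] - 5.0) < 1e-6 and abs(abs(normal[2]) - 1.0) < 1e-6
    return ctr, normal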
def initialize_render(device, focal_x, focal_y, img_square_size, img_small_size):
""" initialize camera, rasterizer, and shader. """
# Initialize an OpenGL perspective camera.
#cameras = FoVPerspectiveCameras(znear=1.0, zfar=9000.0, fov=20, device=device)
#cameras = FoVPerspectiveCameras(device=device)
#cam_proj_mat = cameras.get_projection_transform()
img_square_center = int(img_square_size/2)
shrink_ratio = int(img_square_size/img_small_size)
focal_x_small = int(focal_x/shrink_ratio)
focal_y_small = int(focal_y/shrink_ratio)
img_small_center = int(img_small_size/2)
camera_sfm = PerspectiveCameras(
focal_length=((focal_x, focal_y),),
principal_point=((img_square_center, img_square_center),),
image_size = ((img_square_size, img_square_size),),
device=device)
camera_sfm_small = PerspectiveCameras(
focal_length=((focal_x_small, focal_y_small),),
principal_point=((img_small_center, img_small_center),),
image_size = ((img_small_size, img_small_size),),
device=device)
# To blend the 100 faces we set a few parameters which control the opacity and the sharpness of
# edges. Refer to blending.py for more details.
blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    # Define the settings for rasterization and shading. Here the output image size is
    # img_small_size and 100 faces are blended per pixel. bin_size and max_faces_per_bin are left at their
    # defaults so that the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
# explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
# the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
image_size=img_small_size,
blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma,
faces_per_pixel=100,
)
# Create a silhouette mesh renderer by composing a rasterizer and a shader.
silhouette_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm_small,
raster_settings=raster_settings
),
shader=SoftSilhouetteShader(blend_params=blend_params)
)
# We will also create a phong renderer. This is simpler and only needs to render one face per pixel.
raster_settings = RasterizationSettings(
image_size=img_square_size,
blur_radius=0.0,
faces_per_pixel=1,
)
# We can add a point light in front of the object.
lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
#lights = DirectionalLights(device=device, direction=((0, 0, 1),))
phong_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm,
raster_settings=raster_settings
),
shader=HardPhongShader(device=device, cameras=camera_sfm, lights=lights)
)
return silhouette_renderer, phong_renderer
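# --- Hedged usage sketch (not part of the original repo) ---------------------
# initialize_render returns a low-resolution soft-silhouette renderer (useful
# for differentiable mask losses) and a full-resolution Phong renderer (for
# visualization). The sketch below renders one textured mesh with both; it
# assumes a (verts, faces) pair such as the output of merge_meshes and a
# working PyTorch3D installation, and the helper name is ours.
def _render_example(verts, faces, device, focal=1000.0, img_square=1920, img_small=256):
    from pytorch3d.renderer import TexturesVertex, look_at_view_transform
    from pytorch3d.structures import Meshes
    silhouette_renderer, phong_renderer = initialize_render(
        device, focal, focal, img_square, img_small)
    tex = TexturesVertex(verts_features=torch.ones_like(verts).unsqueeze(0)).to(device)
    mesh = Meshes(verts=[verts], faces=[faces], textures=tex)
    R, T = look_at_view_transform(dist=2.0, elev=0.0, azim=0.0, device=device)
    sil = silhouette_renderer(meshes_world=mesh, R=R, T=T)   # soft silhouette (alpha channel)
    rgb = phong_renderer(meshes_world=mesh, R=R, T=T)        # Phong-shaded rendering
    return sil, rgb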
def merge_meshes(obj_path, device):
""" helper function for loading and merging meshes. """
verts_list = torch.empty(0,3)
faces_list = torch.empty(0,3).long()
num_vtx = [0]
num_faces = [0]
# merge meshes, load in ascending order
meshes = natsort.natsorted(glob.glob(obj_path+'/final/*_rescaled_sapien.obj'))
for part_mesh in meshes:
#print('loading %s' %part_mesh)
mesh = o3d.io.read_triangle_mesh(part_mesh)
verts = torch.from_numpy(np.asarray(mesh.vertices)).float()
faces = torch.from_numpy(np.asarray(mesh.triangles)).long()
faces = faces + verts_list.shape[0]
verts_list = torch.cat([verts_list, verts])
faces_list = torch.cat([faces_list, faces])
num_vtx.append(verts_list.shape[0])
num_faces.append(faces_list.shape[0])
verts_list = verts_list.to(device)
faces_list = faces_list.to(device)
return verts_list, faces_list, num_vtx, num_faces
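# --- Hedged note on the face-index offset above (illustrative only) ----------
# When parts are concatenated, each part's face indices must be shifted by the
# number of vertices merged so far, which is what `faces + verts_list.shape[0]`
# does. A minimal numeric illustration with two fake 3-vertex parts (ours, not
# data from the dataset):
def _merge_offset_example():
    verts_a = torch.zeros(3, 3)                             # part A: vertices 0..2
    faces_a = torch.tensor([[0, 1, 2]])
    verts_b = torch.ones(3, 3)                              # part B: local vertices 0..2
    faces_b = torch.tensor([[0, 1, 2]]) + verts_a.shape[0]  # becomes [[3, 4, 5]]
    merged_verts = torch.cat([verts_a, verts_b])            # 6 vertices in total
    merged_faces = torch.cat([faces_a, faces_b])            # faces index the merged buffer
    return merged_verts, merged_faces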
def load_motion(motions, device):
""" load rotation axis, origin, and limit. """
rot_origin = []
rot_axis = []
rot_type = []
limit_a = []
limit_b = []
contact_list = []
# load all meta data
for idx, key in enumerate(motions.keys()):
jointData = motions[key]
# if contains movable parts
if jointData is not None:
origin = torch.FloatTensor(jointData['axis']['origin']).to(device)
axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
mobility_type = jointData['type']
if 'contact' in jointData:
contact_list.append(jointData['contact'])
# convert to radians if necessary
if mobility_type == 'revolute':
mobility_a = math.pi*jointData['limit']['a'] / 180.0
mobility_b = math.pi*jointData['limit']['b'] / 180.0
else:
assert mobility_type == 'prismatic'
mobility_a = jointData['limit']['a']
mobility_b = jointData['limit']['b']
rot_origin.append(origin)
rot_axis.append(axis)
rot_type.append(mobility_type)
limit_a.append(mobility_a)
limit_b.append(mobility_b)
return rot_origin, rot_axis, rot_type, limit_a, limit_b, contact_list
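# --- Hedged illustration of the `motions` structure load_motion expects ------
# Only the keys read above matter: per-joint entries with 'type', 'axis'
# ('origin' and 'direction'), 'limit' ('a' and 'b'), an optional 'contact',
# and None for static parts. The toy dictionary below is our own minimal
# example, not a motion.json shipped with the dataset.
def _load_motion_example(device=torch.device("cpu")):
    motions = {
        "door": {
            "type": "revolute",
            "axis": {"origin": [0.0, 0.0, 0.0], "direction": [0.0, 1.0, 0.0]},
            "limit": {"a": 0.0, "b": 90.0},  # degrees; converted to radians above
        },
        "base": None,  # static part: skipped by load_motion
    }
    return load_motion(motions, device)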
def visualize(mask, image, alpha):
#mask = np.repeat(mask[:,:,np.newaxis], 3, axis=2)
mask_img_blend = cv2.addWeighted(mask, alpha, image.astype(np.float32), 1.0-alpha, 0)
mask_img_blend = mask_img_blend*mask + image*(1-mask)
return mask_img_blend
def visualize_curve(data, output, save_folder, title):
mask_model = output['obj_mask']
spin_points = output['spin_points']
# plot curve
obj_curve = output['obj_curve']
spin_curve = output['spin_curve']
x_offset = spin_curve[0,0] - obj_curve[0,0]
y_offset = spin_curve[0,1] - obj_curve[0,1]
z_offset = spin_curve[0,2] - obj_curve[0,2]
obj_curve[:,0] += x_offset
obj_curve[:,1] += y_offset
obj_curve[:,2] += z_offset
fig = plt.figure()
ax = plt.axes(projection='3d')
#obj_curves = obj_curve_norm
ax.scatter(spin_curve[0,0], spin_curve[0,1], spin_curve[0,2], color='red')
ax.scatter(obj_curve[0,0], obj_curve[0,1], obj_curve[0,2], color='red')
ax.plot(obj_curve[:,0], obj_curve[:,1], obj_curve[:,2], label='object curve')
ax.plot(spin_curve[:,0], spin_curve[:,1], spin_curve[:,2], label='hand curve')
ax.legend()
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.show()
def save_mesh(id):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
verts1 = obj_verts_dict[str(id+1)]
verts2 = human_verts_dict[str(id+1)]
faces1 = obj_faces_dict[str(id+1)]
faces2 = human_faces_dict[str(id+1)]
verts = np.concatenate((verts1, verts2), axis=0)
faces = np.concatenate((faces1, faces2 + verts1.shape[0]), axis=0)
path = os.path.join(save_path_mesh, str(id+1)+'_object.obj')
save_obj(path, torch.from_numpy(verts1), torch.from_numpy(faces1))
path = os.path.join(save_path_mesh, str(id+1)+'_person.obj')
save_obj(path, torch.from_numpy(verts2), torch.from_numpy(faces2))
path = os.path.join(save_path_mesh, str(id+1)+'_joint.obj')
save_obj(path, torch.from_numpy(verts), torch.from_numpy(faces))
def save_meshes(meshes, save_folder, video_name, title):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
save_path_mesh = os.path.join(save_folder, title)
if not os.path.exists(save_path_mesh):
os.makedirs(save_path_mesh)
obj_meshes = meshes['obj_mesh']
spin_meshes = meshes['spin_mesh']
# merge object + SPIN meshes
obj_verts = {}
obj_faces = {}
human_verts = {}
human_faces = {}
for idx in range(len(obj_meshes)):
obj_verts[str(idx+1)] = obj_meshes[idx].verts_list()[0].detach().cpu().numpy()
obj_faces[str(idx+1)] = obj_meshes[idx].faces_list()[0].detach().cpu().numpy()
human_verts[str(idx+1)] = spin_meshes[idx].verts_list()[0].detach().cpu().numpy()
human_faces[str(idx+1)] = spin_meshes[idx].faces_list()[0].detach().cpu().numpy()
manager = Manager()
obj_verts_dict = manager.dict(obj_verts)
obj_faces_dict = manager.dict(obj_faces)
human_verts_dict = manager.dict(human_verts)
human_faces_dict = manager.dict(human_faces)
ids = [item for item in range(len(obj_meshes))]
pool = Pool(processes=12)
pool.map(save_mesh, ids)
'''
eft_cmd = 'python -m demo.demo_bodymocap --render wire --bg rgb --videoname '+video_name+' --vPath '+save_folder
os.chdir('/home/xuxiangx/research/eft')
os.system(eft_cmd)
save_path = os.path.join(save_folder, 'eft', 'front')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/frontview.mp4'
os.system(ffmpeg_cmd)
save_path = os.path.join(save_folder, 'eft', 'side')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/sideview.mp4'
os.system(ffmpeg_cmd)
'''
return
def save_parameters(model, save_folder, title):
save_path = os.path.join(save_folder, title)
if not os.path.exists(save_path):
os.makedirs(save_path)
obj_offset = 1000.0*model.obj_offset.detach().cpu().numpy() #(3,)
smpl_offset = 1000.0*model.smpl_offset.detach().cpu().numpy() #(bs,3)
obj_scale = 3000.0*model.obj_scale
smpl_scale = 3000.0
focal_len = model.focal
part_rot_angle = model.part_rot_params.detach().cpu().numpy() #(bs,)
obj_rot_mat = model.obj_rot_angle_mat[0].detach().cpu().numpy() #(3,3)
part_rot_mat = model.part_rot_mat.detach().cpu().numpy() #(bs,3,3)
K_mat = model.K.detach().cpu().numpy() #(3,3)
rot_o = model.rot_o.detach().cpu().numpy() #(3,)
rot_axis = model.axis.detach().cpu().numpy() #(3,)
parameters = {}
parameters['obj_offset'] = obj_offset
parameters['smpl_offset'] = smpl_offset
parameters['obj_scale'] = obj_scale
parameters['smpl_scale'] = smpl_scale
parameters['focal_length'] = focal_len
parameters['part_rot_angle'] = part_rot_angle
parameters['obj_rot_matrix'] = obj_rot_mat
parameters['part_rot_matrix'] = part_rot_mat
parameters['K_matrix'] = K_mat
parameters['rot_origin'] = rot_o
parameters['rot_axis'] = rot_axis
np.save(os.path.join(save_path, 'parameters.npy'), parameters)
return
def save_img(idx):
global shared_dict1
global shared_dict2
global save_path
roi_image = shared_dict1['image'].permute(0,2,3,1)[idx].numpy()
silhouette = shared_dict1['objmask'].permute(0,2,3,1)[idx]
mask_model = shared_dict2['obj_mask']
gt_points = shared_dict1['smplv2d']
spin_points = shared_dict2['spin_points']
silhouette_init = mask_model.detach().cpu().squeeze()[idx].numpy()
mask_img_blend = visualize(silhouette_init, roi_image, 0.8)
# save image
#plt.subplots_adjust(hspace = 0.2, left=0.01, right=0.99, top=0.95, bottom=0.05)
imsave(os.path.join(save_path, str(idx)+'.png'), mask_img_blend)
return
def save_imgs(data, output, save_folder):
global shared_dict1
global shared_dict2
global save_path
save_path = save_folder
manager = Manager()
shared_dict1 = manager.dict(data)
shared_dict1 = data
shared_dict2 = manager.dict(output)
shared_dict2 = output
ids = [item for item in range(data['image'].shape[0])]
pool = Pool(processes=12)
pool.map(save_img, ids)
#sceneflow = shared_dict2['sceneflow']
#objSurfaceFlow = shared_dict2['objSurfaceFlow']
#synFlow = shared_dict2['synFlow']
#sceneflowMaskSquareShrink = shared_dict2['sceneflowMaskSquareShrink']
#part_diff_images = shared_dict2['part_diff_images']
    # save object part surface raft flow visualization
#save_path = os.path.join(save_folder, title, 'objSurfaceFlow')
#if not os.path.exists(save_path):
#os.makedirs(save_path)
#for idx in range(objSurfaceFlow.shape[0]):
#imsave(os.path.join(save_path, str(idx)+'.png'), objSurfaceFlow[idx])
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/objSurfaceFlow.mp4'
#os.system(ffmpeg_cmd)
# save synthetic rendering flow visualization
#save_path = os.path.join(save_folder, title, 'synFlow')
#if not os.path.exists(save_path):
#os.makedirs(save_path)
#for idx in range(synFlow.shape[0]):
#imsave(os.path.join(save_path, str(idx)+'.png'), synFlow[idx])
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/synFlow.mp4'
#os.system(ffmpeg_cmd)
# save visualize images
#for idx in range(data['image'].shape[0]):
#save_img(idx, shared_dict1, shared_dict2)
#save_path = os.path.join(save_folder, title, 'render')
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/render.mp4'
#os.system(ffmpeg_cmd)
#vid1 = os.path.join(save_folder, 'objSurfaceFlow.mp4')
#vid2 = os.path.join(save_folder, 'synFlow.mp4')
#vid3 = os.path.join(save_folder, 'visual.mp4')
#ffmpeg_cmd = 'ffmpeg -i '+vid1+' -i '+vid2+' -filter_complex hstack=inputs=2 '+vid3
#os.system(ffmpeg_cmd)
return
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b, c, d = -axis * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(3,3)
rot_mat[0,0] = aa + bb - cc - dd
rot_mat[0,1] = 2 * (bc + ad)
rot_mat[0,2] = 2 * (bd - ac)
rot_mat[1,0] = 2 * (bc - ad)
rot_mat[1,1] = aa + cc - bb - dd
rot_mat[1,2] = 2 * (cd + ab)
rot_mat[2,0] = 2 * (bd + ac)
rot_mat[2,1] = 2 * (cd - ab)
rot_mat[2,2] = aa + dd - bb - cc
return rot_mat
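# --- Hedged self-check for rotation_matrix (illustrative, not from the repo) --
# A quarter turn about the z axis should send the x axis to the y axis; this
# exercises only the function above with plain torch.
def _rotation_matrix_example():
    axis = torch.tensor([0.0, 0.0, 1.0])
    theta = torch.tensor(math.pi / 2.0)
    R = rotation_matrix(axis, theta)
    return torch.mv(R, torch.tensor([1.0, 0.0, 0.0]))  # approximately (0, 1, 0)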
def rotation_matrix_batch(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b = -axis[0] * torch.sin(theta / 2.0)
c = -axis[1] * torch.sin(theta / 2.0)
d = -axis[2] * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(aa.shape[0],3,3)
rot_mat[:,0,0] = aa + bb - cc - dd
rot_mat[:,0,1] = 2 * (bc + ad)
rot_mat[:,0,2] = 2 * (bd - ac)
rot_mat[:,1,0] = 2 * (bc - ad)
rot_mat[:,1,1] = aa + cc - bb - dd
rot_mat[:,1,2] = 2 * (cd + ab)
rot_mat[:,2,0] = 2 * (bd + ac)
rot_mat[:,2,1] = 2 * (cd - ab)
rot_mat[:,2,2] = aa + dd - bb - cc
return rot_mat
def flow_confidence(threshold, forwardFlow, backwardFlow, img_w, img_h):
    # I_t -> I_(t+1), warp with forward flow
It1 = forwardFlow.clone()
It1[:,:,0] += torch.arange(img_w)
It1[:,:,1] += torch.arange(img_h).unsqueeze(1) # (u, v) coordinate
It1 = torch.round(It1)
withinFrameMask = (It1[:,:,0] < img_w) * (It1[:,:,0] > 0) *\
(It1[:,:,1] < img_h) * (It1[:,:,1] > 0)
    withinFrameCoord = torch.nonzero(withinFrameMask==1) # (x, y) coordinate of within frame flow
    nextCoord = It1[withinFrameCoord[:, 0], withinFrameCoord[:,1]].long() # u, v order
    # I_(t+1) -> I_t, warp back with backward flow
nextCoordBackwardFlow = backwardFlow[nextCoord[:,1], nextCoord[:,0],:]
nextbackCoord = nextCoord + nextCoordBackwardFlow # u, v coord
nextbackCoord[:,[1,0]] = nextbackCoord[:,[0,1]] # swap to x,y coord
# filter out noisy flow
stableFlowMask = np.sum(np.abs(nextbackCoord - withinFrameCoord), 1) < threshold
stableFlowCoord = withinFrameCoord[stableFlowMask] # (x,y) coord
return stableFlowCoord
def make_colorwheel():
"""
Generates a color wheel for optical flow visualization as presented in:
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
Code follows the original C++ source code of Daniel Scharstein.
    Code follows the Matlab source code of Deqing Sun.
Returns:
np.ndarray: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros((ncols, 3))
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
col = col+RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
colorwheel[col:col+YG, 1] = 255
col = col+YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
col = col+GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
colorwheel[col:col+CB, 2] = 255
col = col+CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
col = col+BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
colorwheel[col:col+MR, 0] = 255
return colorwheel
def flow_uv_to_colors(u, v, convert_to_bgr=False):
"""
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
Args:
u (np.ndarray): Input horizontal flow of shape [H,W]
v (np.ndarray): Input vertical flow of shape [H,W]
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
colorwheel = make_colorwheel() # shape [55x3]
ncols = colorwheel.shape[0]
rad = np.sqrt(np.square(u) + np.square(v))
a = np.arctan2(-v, -u)/np.pi
fk = (a+1) / 2*(ncols-1)
k0 = np.floor(fk).astype(np.int32)
k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
for i in range(colorwheel.shape[1]):
tmp = colorwheel[:,i]
col0 = tmp[k0] / 255.0
col1 = tmp[k1] / 255.0
col = (1-f)*col0 + f*col1
idx = (rad <= 1)
col[idx] = 1 - rad[idx] * (1-col[idx])
col[~idx] = col[~idx] * 0.75 # out of range
# Note the 2-i => BGR instead of RGB
ch_idx = 2-i if convert_to_bgr else i
flow_image[:,:,ch_idx] = np.floor(255 * col)
return flow_image
def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
"""
    Expects a two-dimensional flow image of shape [H,W,2].
Args:
flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
if clip_flow is not None:
flow_uv = np.clip(flow_uv, 0, clip_flow)
u = flow_uv[:,:,0]
v = flow_uv[:,:,1]
rad = np.sqrt(np.square(u) + np.square(v))
    rad_max = 40.0  # fixed normalization constant for the flow magnitude
epsilon = 1e-5
u = u / (rad_max + epsilon)
v = v / (rad_max + epsilon)
return flow_uv_to_colors(u, v, convert_to_bgr)
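# --- Hedged usage sketch for the flow visualization helpers ------------------
# A constant rightward flow field mapped to its color-wheel image; this demo
# function is ours and only exercises flow_to_image as defined above.
def _flow_vis_example(h=64, w=64):
    flow = np.zeros((h, w, 2), dtype=np.float32)
    flow[..., 0] = 10.0         # u: 10 px of horizontal motion everywhere
    flow[..., 1] = 0.0          # v: no vertical motion
    return flow_to_image(flow)  # (h, w, 3) uint8 color-coded visualization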
|
d3d-hoi-main
|
visualization/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import cv2
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
import numpy as np
from utils import (
initialize_render, merge_meshes,
load_motion
)
import torch
from PIL import Image
from natsort import natsorted
from model import JOHMRLite
import os
import json
import pdb
import scipy.misc
import matplotlib.pyplot as plt
import open3d as o3d
import torch.optim as optim
from pytorch3d.io import save_obj
# load cad model
device = torch.device("cuda:0")
obj_path = 'processed_cads/storagefurniture/45132'
verts, faces, part_segs, _ = merge_meshes(obj_path, device)
verts[:,1:] *= -1 # pytorch3d -> world coordinate
obj_verts = verts.to(device)
obj_faces = faces.to(device)
obj_size = np.asarray([74.0, 77.5, 68.5]) # length, height, width (x, y, z), cm
# fridge (home) np.asarray([600, 1450, 620])
# dishwasher (yang) obj_size = np.asarray([600, 800, 600])
# laptop (large) obj_size = np.asarray([415, 15, 280])
# fridge (small) obj_size = np.asarray([450, 825, 472])
# trashcan (outside) obj_size = np.asarray([650, 1050, 635])
# 677 x 1100 x 665
# load image
img_path = 'storagefurniture/b008-0109/frames/images-0001.jpg'
focal_x = 983#1505 #1680
focal_y = 983#1505 #1680
img_square = 1920
img_small = 256
# render
_, phong_renderer = initialize_render(device, focal_x, focal_y, img_square, img_small)
global x_offset, y_offset, z_offset, yaw, pitch, roll, rot_alpha, rot_beta, rot_gamma
x_offset = 0.0
y_offset = 0.0
z_offset = 2.0
yaw = 0.0
pitch = 0.0
roll = 0.0
valstep = 0.01
# initialize model
image_bg = np.array(Image.open(img_path))/255.0
img_h = image_bg.shape[0]
img_w = image_bg.shape[1]
model = JOHMRLite(obj_verts, obj_faces, phong_renderer, img_h, img_w)
def display_img(model, alpha):
image_bg = np.array(Image.open(img_path))/255.0
img_h = image_bg.shape[0]
img_w = image_bg.shape[1]
global x_offset, y_offset, z_offset, yaw, pitch, roll, rot_alpha, rot_beta, rot_gamma
vis_image, rot_alpha, rot_beta, rot_gamma = model(obj_size, x_offset, y_offset, z_offset, yaw, pitch, roll)
image = vis_image.detach().cpu().numpy().squeeze()
#objmask = np.array(Image.open(mask_path))#/255.0 # object mask
#objmask = np.repeat(objmask[:,:,np.newaxis], 3, axis=2)
#objmask[:,:,0] *= 0
#objmask[:,:,1] *= 0
rgb_mask = image_bg.astype(np.float32) #cv2.addWeighted(objmask.astype(np.float32), 0.5, image_bg.astype(np.float32), 0.5, 0.0)
frame_img = np.zeros((img_square, img_square,3))
start = int((max(img_h, img_w) - min(img_h, img_w))/2) - 1
end = start + min(img_h, img_w)
if img_h > img_w:
frame_img[:, start:end, :] = rgb_mask
else:
frame_img[start:end, :, :] = rgb_mask
rgb_mask = frame_img
alpha = min(1.0, max(0.0,alpha))
img_blend = cv2.addWeighted(image.astype(np.float32), alpha, rgb_mask.astype(np.float32), 1-alpha, 0.0)
img_blend = cv2.resize(img_blend, dsize=(800, 800), interpolation=cv2.INTER_NEAREST)
return img_blend
img_blend = display_img(model, 0.5)
h,w,_ = img_blend.shape
img_blend = np.uint8(img_blend*255)
qimage = QtGui.QImage(img_blend.data, h, w, 3*h, QtGui.QImage.Format_RGB888)
class Annotate(QtWidgets.QWidget):
def __init__(self):
super(Annotate, self).__init__()
self.initUI()
def initUI(self):
QtWidgets.QToolTip.setFont(QtGui.QFont('Test', 10))
# Show image
self.pic = QtWidgets.QLabel(self)
self.pic.setGeometry(10, 10, 800, 800)
self.pic.setPixmap(QtGui.QPixmap(qimage))
self.alpha = 0.5
# Show button
btn1 = QtWidgets.QPushButton('Offset Z-', self)
btn1.resize(btn1.sizeHint())
btn1.clicked.connect(lambda: self.fun('dec_oz'))
btn1.move(900, 10)
btn2 = QtWidgets.QPushButton('Offset Z+', self)
btn2.resize(btn2.sizeHint())
btn2.clicked.connect(lambda: self.fun('inc_oz'))
btn2.move(1000, 10)
self.textbox1 = QtWidgets.QLineEdit(self)
self.textbox1.move(1150, 10)
self.textbox1.resize(100,25)
self.textbox1.setText(str(z_offset))
btn7 = QtWidgets.QPushButton('Offset X-', self)
btn7.resize(btn7.sizeHint())
btn7.clicked.connect(lambda: self.fun('dec_ox'))
btn7.move(900, 150)
btn8 = QtWidgets.QPushButton('Offset X+', self)
btn8.resize(btn8.sizeHint())
btn8.clicked.connect(lambda: self.fun('inc_ox'))
btn8.move(1000, 150)
self.textbox4 = QtWidgets.QLineEdit(self)
self.textbox4.move(1150, 150)
self.textbox4.resize(100,25)
self.textbox4.setText(str(x_offset))
btn9 = QtWidgets.QPushButton('Offset Y-', self)
btn9.resize(btn9.sizeHint())
btn9.clicked.connect(lambda: self.fun('dec_oy'))
btn9.move(900, 190)
btn10 = QtWidgets.QPushButton('Offset Y+', self)
btn10.resize(btn10.sizeHint())
btn10.clicked.connect(lambda: self.fun('inc_oy'))
btn10.move(1000, 190)
self.textbox5 = QtWidgets.QLineEdit(self)
self.textbox5.move(1150, 190)
self.textbox5.resize(100,25)
self.textbox5.setText(str(y_offset))
btn11 = QtWidgets.QPushButton('Yaw-', self)
btn11.resize(btn11.sizeHint())
btn11.clicked.connect(lambda: self.fun('dec_yaw'))
btn11.move(900, 250)
btn12 = QtWidgets.QPushButton('Yaw+', self)
btn12.resize(btn12.sizeHint())
btn12.clicked.connect(lambda: self.fun('inc_yaw'))
btn12.move(1000, 250)
self.textbox6 = QtWidgets.QLineEdit(self)
self.textbox6.move(1150, 250)
self.textbox6.resize(100,25)
self.textbox6.setText(str(yaw))
btn13 = QtWidgets.QPushButton('Pitch-', self)
btn13.resize(btn13.sizeHint())
btn13.clicked.connect(lambda: self.fun('dec_pitch'))
btn13.move(900, 290)
btn14 = QtWidgets.QPushButton('Pitch+', self)
btn14.resize(btn14.sizeHint())
btn14.clicked.connect(lambda: self.fun('inc_pitch'))
btn14.move(1000, 290)
self.textbox7 = QtWidgets.QLineEdit(self)
self.textbox7.move(1150, 290)
self.textbox7.resize(100,25)
self.textbox7.setText(str(pitch))
btn15 = QtWidgets.QPushButton('Roll-', self)
btn15.resize(btn15.sizeHint())
btn15.clicked.connect(lambda: self.fun('dec_roll'))
btn15.move(900, 330)
btn16 = QtWidgets.QPushButton('Roll+', self)
btn16.resize(btn16.sizeHint())
btn16.clicked.connect(lambda: self.fun('inc_roll'))
btn16.move(1000, 330)
self.textbox8 = QtWidgets.QLineEdit(self)
self.textbox8.move(1150, 330)
self.textbox8.resize(100,25)
self.textbox8.setText(str(roll))
btn22 = QtWidgets.QPushButton('Vis-', self)
btn22.resize(btn22.sizeHint())
btn22.clicked.connect(lambda: self.fun('dec_vis'))
btn22.move(900, 550)
btn23 = QtWidgets.QPushButton('Vis+', self)
btn23.resize(btn23.sizeHint())
btn23.clicked.connect(lambda: self.fun('inc_vis'))
btn23.move(1000, 550)
btn21 = QtWidgets.QPushButton('Save', self)
btn21.resize(btn21.sizeHint())
btn21.clicked.connect(lambda: self.fun('save'))
btn21.move(1000, 500)
self.setGeometry(300, 300, 2000, 1500)
self.setWindowTitle('JOHMR Annotation Tool -- Sam Xu')
self.show()
# Connect button to image updating
def fun(self, arguments):
global x_offset, y_offset, z_offset, yaw, pitch, roll, rot_alpha, rot_beta, rot_gamma
if arguments == 'dec_oz':
z_offset -= valstep
elif arguments == 'inc_oz':
z_offset += valstep
elif arguments == 'dec_ox':
x_offset -= valstep
elif arguments == 'inc_ox':
x_offset += valstep
elif arguments == 'dec_oy':
y_offset -= valstep
elif arguments == 'inc_oy':
y_offset += valstep
elif arguments == 'dec_yaw':
yaw -= valstep
elif arguments == 'inc_yaw':
yaw += valstep
elif arguments == 'dec_pitch':
pitch -= valstep
elif arguments == 'inc_pitch':
pitch += valstep
elif arguments == 'dec_roll':
roll -= valstep
elif arguments == 'inc_roll':
roll += valstep
elif arguments == 'save':
# save obj orientation
text_file = './3d_info.txt'
with open(text_file, "w") as myfile:
myfile.write('yaw: '+str(round(yaw,3))+'\n')
myfile.write('pitch: '+str(round(pitch,3))+'\n')
myfile.write('roll: '+str(round(roll,3))+'\n')
myfile.write('rot_alpha: '+str(round(rot_alpha,3))+'\n')
myfile.write('rot_beta: '+str(round(rot_beta,3))+'\n')
myfile.write('rot_gamma: '+str(round(rot_gamma,3))+'\n')
myfile.write('x_offset: '+str(round(x_offset,3))+'\n')
myfile.write('y_offset: '+str(round(y_offset,3))+'\n')
myfile.write('z_offset: '+str(round(z_offset,3))+'\n')
myfile.write('obj_size: '+str(obj_size[0])+','+str(obj_size[1])+','+str(obj_size[2])+'\n')
myfile.write('\n')
# save human & obj meshes
#save_hverts = model.smpl_verts_output.detach().cpu().numpy()
#save_hfaces = hfaces.detach().cpu().numpy()
save_objverts = model.obj_verts_output.detach().cpu().numpy()
save_objfaces = obj_faces.detach().cpu().numpy()
#verts = np.concatenate((save_hverts, save_objverts), axis=0)
#faces = np.concatenate((save_hfaces, save_objfaces + save_hverts.shape[0]), axis=0)
save_obj('./object.obj', torch.from_numpy(save_objverts), torch.from_numpy(save_objfaces))
#save_obj('annotate/person.obj', torch.from_numpy(save_hverts), torch.from_numpy(save_hfaces))
#save_obj('annotate/joint.obj', torch.from_numpy(verts), torch.from_numpy(faces))
elif arguments == 'dec_vis':
self.alpha -= 0.1
elif arguments == 'inc_vis':
self.alpha += 0.1
else:
print('not implemented')
self.textbox4.setText(str(round(x_offset,3)))
self.textbox5.setText(str(round(y_offset,3)))
self.textbox6.setText(str(round(yaw,3)))
self.textbox7.setText(str(round(pitch,3)))
self.textbox8.setText(str(round(roll,3)))
self.textbox1.setText(str(round(z_offset,3)))
img = display_img(model, self.alpha)
img = np.uint8(img*255)
h,w,_ = img.shape
qimage = QtGui.QImage(img.data, h, w, 3*h, QtGui.QImage.Format_RGB888)
self.pic.setPixmap(QtGui.QPixmap(qimage))
def main():
app = QtWidgets.QApplication(sys.argv)
ex = Annotate()
sys.exit(app.exec_())
if __name__ == '__main__':
main()
|
d3d-hoi-main
|
visualization/annotation/qt.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
import torch
import pdb
import numpy as np
from pytorch3d.renderer import (
look_at_view_transform, TexturesVertex
)
import math
from pytorch3d.structures import Meshes
import cv2
import matplotlib.pyplot as plt
from utils import rotation_matrix
from scipy.ndimage.filters import gaussian_filter1d
from pytorch3d.io import save_obj
from pytorch3d.transforms import (
RotateAxisAngle, matrix_to_euler_angles
)
from pytorch3d.transforms.rotation_conversions import (
rotation_6d_to_matrix, matrix_to_rotation_6d
)
from utils import (
flow_to_image, flow_confidence
)
import time
from matplotlib.image import imsave
import os
from torch.autograd import Variable
from pytorch3d.transforms import (
euler_angles_to_matrix
)
from utils import (
rotation_matrix
)
class JOHMRLite(nn.Module):
def __init__(self, obj_verts, obj_faces, vis_render, img_h, img_w):
super().__init__()
self.device = obj_verts.device
self.vis_render = vis_render
self.obj_verts = obj_verts.detach()
self.obj_faces = obj_faces.detach()
self.img_h = img_h
self.img_w = img_w
# camera is almost at the center (distance can't be zero for diff render)
self.R, self.T = look_at_view_transform(0.1, 0.0, 0.0,device=self.device)
self.T[0,2] = 0.0 # manually set to zero
return
def forward(self, obj_size, x_offset, y_offset, z_offset, yaw, pitch, roll):
obj_verts = self.obj_verts.clone()
# step 1: rescale object
x_diff = torch.max(obj_verts[:,0]) - torch.min(obj_verts[:,0])
x_ratio = float(obj_size[0]) / x_diff
y_diff = torch.max(obj_verts[:,1]) - torch.min(obj_verts[:,1])
y_ratio = float(obj_size[1]) / y_diff
z_diff = torch.max(obj_verts[:,2]) - torch.min(obj_verts[:,2])
z_ratio = float(obj_size[2]) / z_diff
obj_verts[:, 0] *= x_ratio
obj_verts[:, 1] *= y_ratio
obj_verts[:, 2] *= z_ratio
# step 2: part motion
#part_state = torch.tensor(90 * (math.pi/180)).cuda()
#axis = torch.tensor([0, -0.9999999999999999, -0]).cuda().float()
#rot_o = torch.tensor([0.37487859368179954*x_ratio, -0.859491*y_ratio, -0.24141621508844158*z_ratio]).cuda()
#assert(part_state>=0) # non negative value
#start = 380
#end = 380+198
#partrot_mat = rotation_matrix(axis, part_state).cuda() # part rotation matrix
#obj_verts_part = obj_verts[start:end, :] - rot_o
#obj_verts_part2 = torch.mm(partrot_mat, obj_verts_part.permute(1,0)).permute(1,0)
#obj_verts[start:end, :] = obj_verts_part2 + rot_o
# step 3: object orientation
euler_angle = torch.tensor([pitch, yaw, roll]).reshape(1,3)
objrot_mat = euler_angles_to_matrix(euler_angle, ["X","Y","Z"]).to(self.device)
rot_alpha, rot_beta, rot_gamma = matrix_to_euler_angles(objrot_mat, ["X","Y","Z"])[0]
rot_alpha = float(rot_alpha)
rot_beta = float(rot_beta)
rot_gamma = float(rot_gamma)
objrot_mat = objrot_mat[0]
obj_verts = torch.mm(objrot_mat, obj_verts.permute(1,0)).permute(1,0)
# step 4: object offset
obj_verts[:, 0] += 100.0*x_offset
obj_verts[:, 1] += 100.0*y_offset
obj_verts[:, 2] += 100.0*z_offset
self.obj_verts_output = obj_verts.clone()
obj_verts[:,1:] *= -1
# create object mesh for diff render and visualization
tex = torch.ones_like(obj_verts).unsqueeze(0)
textures = TexturesVertex(verts_features=tex).to(self.device)
self.obj_mesh = Meshes(verts=[obj_verts],faces=[self.obj_faces],textures=textures)
vis_image = self.vis_render(meshes_world=self.obj_mesh, R=self.R, T=self.T)
return vis_image[...,:3], rot_alpha, rot_beta, rot_gamma
|
d3d-hoi-main
|
visualization/annotation/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import natsort
import glob
import open3d as o3d
# rendering components
from pytorch3d.renderer import (
FoVPerspectiveCameras,RasterizationSettings,
MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights, DirectionalLights,
PerspectiveCameras
)
from pytorch3d.io import save_obj
import math
import cv2
import matplotlib.pyplot as plt
import os
import imageio
from decimal import Decimal
import pdb
import json
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import axes3d
from scipy.ndimage.filters import gaussian_filter1d
from numpy.linalg import svd
from multiprocessing import Pool, Manager, cpu_count
from pytorch3d.transforms import Rotate, Translate
from matplotlib.image import imsave
from pathlib import Path
def planeFit(points):
"""
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
    fit a d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
    points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trailing dimensions
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:,np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, svd(M)[0][:,-1]
def initialize_render(device, focal_x, focal_y, img_square_size, img_small_size):
""" initialize camera, rasterizer, and shader. """
# Initialize an OpenGL perspective camera.
#cameras = FoVPerspectiveCameras(znear=1.0, zfar=9000.0, fov=20, device=device)
#cameras = FoVPerspectiveCameras(device=device)
#cam_proj_mat = cameras.get_projection_transform()
img_square_center = int(img_square_size/2)
shrink_ratio = int(img_square_size/img_small_size)
focal_x_small = int(focal_x/shrink_ratio)
focal_y_small = int(focal_y/shrink_ratio)
img_small_center = int(img_small_size/2)
camera_sfm = PerspectiveCameras(
focal_length=((focal_x, focal_y),),
principal_point=((img_square_center, img_square_center),),
image_size = ((img_square_size, img_square_size),),
device=device)
camera_sfm_small = PerspectiveCameras(
focal_length=((focal_x_small, focal_y_small),),
principal_point=((img_small_center, img_small_center),),
image_size = ((img_small_size, img_small_size),),
device=device)
# To blend the 100 faces we set a few parameters which control the opacity and the sharpness of
# edges. Refer to blending.py for more details.
blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    # Define the settings for rasterization and shading. Here the output image size is
    # img_small_size and 100 faces are blended per pixel. bin_size and max_faces_per_bin are left at their
    # defaults so that the faster coarse-to-fine rasterization method is used. Refer to rasterize_meshes.py for
# explanations of these parameters. Refer to docs/notes/renderer.md for an explanation of
# the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
image_size=img_small_size,
blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma,
faces_per_pixel=100,
)
# Create a silhouette mesh renderer by composing a rasterizer and a shader.
silhouette_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm_small,
raster_settings=raster_settings
),
shader=SoftSilhouetteShader(blend_params=blend_params)
)
# We will also create a phong renderer. This is simpler and only needs to render one face per pixel.
raster_settings = RasterizationSettings(
image_size=img_square_size,
blur_radius=0.0,
faces_per_pixel=1,
)
# We can add a point light in front of the object.
lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
#lights = DirectionalLights(device=device, direction=((0, 0, 1),))
phong_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm,
raster_settings=raster_settings
),
shader=HardPhongShader(device=device, cameras=camera_sfm, lights=lights)
)
return silhouette_renderer, phong_renderer
def merge_meshes(obj_path, device):
""" helper function for loading and merging meshes. """
verts_list = torch.empty(0,3)
faces_list = torch.empty(0,3).long()
num_vtx = [0]
num_faces = [0]
# merge meshes, load in ascending order
meshes = natsort.natsorted(glob.glob(obj_path+'/final/*_rescaled_sapien.obj'))
for part_mesh in meshes:
#print('loading %s' %part_mesh)
mesh = o3d.io.read_triangle_mesh(part_mesh)
verts = torch.from_numpy(np.asarray(mesh.vertices)).float()
faces = torch.from_numpy(np.asarray(mesh.triangles)).long()
faces = faces + verts_list.shape[0]
verts_list = torch.cat([verts_list, verts])
faces_list = torch.cat([faces_list, faces])
num_vtx.append(verts_list.shape[0])
num_faces.append(faces_list.shape[0])
verts_list = verts_list.to(device)
faces_list = faces_list.to(device)
return verts_list, faces_list, num_vtx, num_faces
def load_motion(motions, device):
""" load rotation axis, origin, and limit. """
rot_origin = []
rot_axis = []
rot_type = []
limit_a = []
limit_b = []
contact_list = []
# load all meta data
for idx, key in enumerate(motions.keys()):
jointData = motions[key]
# if contains movable parts
if jointData is not None:
origin = torch.FloatTensor(jointData['axis']['origin']).to(device)
axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
mobility_type = jointData['type']
if 'contact' in jointData:
contact_list.append(jointData['contact'])
# convert to radians if necessary
if mobility_type == 'revolute':
mobility_a = math.pi*jointData['limit']['a'] / 180.0
mobility_b = math.pi*jointData['limit']['b'] / 180.0
else:
assert mobility_type == 'prismatic'
mobility_a = jointData['limit']['a']
mobility_b = jointData['limit']['b']
rot_origin.append(origin)
rot_axis.append(axis)
rot_type.append(mobility_type)
limit_a.append(mobility_a)
limit_b.append(mobility_b)
return rot_origin, rot_axis, rot_type, limit_a, limit_b, contact_list
def visualize(mask, image, alpha):
#mask = np.repeat(mask[:,:,np.newaxis], 3, axis=2)
mask_img_blend = cv2.addWeighted(mask, alpha, image.astype(np.float32), 1.0-alpha, 0)
mask_img_blend = mask_img_blend*mask + image*(1-mask)
return mask_img_blend
def visualize_curve(data, output, save_folder, title):
mask_model = output['obj_mask']
spin_points = output['spin_points']
# plot curve
obj_curve = output['obj_curve']
spin_curve = output['spin_curve']
x_offset = spin_curve[0,0] - obj_curve[0,0]
y_offset = spin_curve[0,1] - obj_curve[0,1]
z_offset = spin_curve[0,2] - obj_curve[0,2]
obj_curve[:,0] += x_offset
obj_curve[:,1] += y_offset
obj_curve[:,2] += z_offset
fig = plt.figure()
ax = plt.axes(projection='3d')
#obj_curves = obj_curve_norm
ax.scatter(spin_curve[0,0], spin_curve[0,1], spin_curve[0,2], color='red')
ax.scatter(obj_curve[0,0], obj_curve[0,1], obj_curve[0,2], color='red')
ax.plot(obj_curve[:,0], obj_curve[:,1], obj_curve[:,2], label='object curve')
ax.plot(spin_curve[:,0], spin_curve[:,1], spin_curve[:,2], label='hand curve')
ax.legend()
ax.set_xlabel("X")
ax.set_ylabel("Y")
ax.set_zlabel("Z")
plt.show()
def save_mesh(id):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
verts1 = obj_verts_dict[str(id+1)]
verts2 = human_verts_dict[str(id+1)]
faces1 = obj_faces_dict[str(id+1)]
faces2 = human_faces_dict[str(id+1)]
verts = np.concatenate((verts1, verts2), axis=0)
faces = np.concatenate((faces1, faces2 + verts1.shape[0]), axis=0)
path = os.path.join(save_path_mesh, str(id+1)+'_object.obj')
save_obj(path, torch.from_numpy(verts1), torch.from_numpy(faces1))
path = os.path.join(save_path_mesh, str(id+1)+'_person.obj')
save_obj(path, torch.from_numpy(verts2), torch.from_numpy(faces2))
path = os.path.join(save_path_mesh, str(id+1)+'_joint.obj')
save_obj(path, torch.from_numpy(verts), torch.from_numpy(faces))
def save_meshes(meshes, save_folder, video_name, title):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
save_path_mesh = os.path.join(save_folder, title)
if not os.path.exists(save_path_mesh):
os.makedirs(save_path_mesh)
obj_meshes = meshes['obj_mesh']
spin_meshes = meshes['spin_mesh']
# merge object + SPIN meshes
obj_verts = {}
obj_faces = {}
human_verts = {}
human_faces = {}
for idx in range(len(obj_meshes)):
obj_verts[str(idx+1)] = obj_meshes[idx].verts_list()[0].detach().cpu().numpy()
obj_faces[str(idx+1)] = obj_meshes[idx].faces_list()[0].detach().cpu().numpy()
human_verts[str(idx+1)] = spin_meshes[idx].verts_list()[0].detach().cpu().numpy()
human_faces[str(idx+1)] = spin_meshes[idx].faces_list()[0].detach().cpu().numpy()
manager = Manager()
obj_verts_dict = manager.dict(obj_verts)
obj_faces_dict = manager.dict(obj_faces)
human_verts_dict = manager.dict(human_verts)
human_faces_dict = manager.dict(human_faces)
ids = [item for item in range(len(obj_meshes))]
pool = Pool(processes=12)
pool.map(save_mesh, ids)
'''
eft_cmd = 'python -m demo.demo_bodymocap --render wire --bg rgb --videoname '+video_name+' --vPath '+save_folder
os.chdir('/home/xuxiangx/research/eft')
os.system(eft_cmd)
save_path = os.path.join(save_folder, 'eft', 'front')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/frontview.mp4'
os.system(ffmpeg_cmd)
save_path = os.path.join(save_folder, 'eft', 'side')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/sideview.mp4'
os.system(ffmpeg_cmd)
'''
return
def save_parameters(model, save_folder, title):
save_path = os.path.join(save_folder, title)
if not os.path.exists(save_path):
os.makedirs(save_path)
obj_offset = 1000.0*model.obj_offset.detach().cpu().numpy() #(3,)
smpl_offset = 1000.0*model.smpl_offset.detach().cpu().numpy() #(bs,3)
obj_scale = 3000.0*model.obj_scale
smpl_scale = 3000.0
focal_len = model.focal
part_rot_angle = model.part_rot_params.detach().cpu().numpy() #(bs,)
obj_rot_mat = model.obj_rot_angle_mat[0].detach().cpu().numpy() #(3,3)
part_rot_mat = model.part_rot_mat.detach().cpu().numpy() #(bs,3,3)
K_mat = model.K.detach().cpu().numpy() #(3,3)
rot_o = model.rot_o.detach().cpu().numpy() #(3,)
rot_axis = model.axis.detach().cpu().numpy() #(3,)
parameters = {}
parameters['obj_offset'] = obj_offset
parameters['smpl_offset'] = smpl_offset
parameters['obj_scale'] = obj_scale
parameters['smpl_scale'] = smpl_scale
parameters['focal_length'] = focal_len
parameters['part_rot_angle'] = part_rot_angle
parameters['obj_rot_matrix'] = obj_rot_mat
parameters['part_rot_matrix'] = part_rot_mat
parameters['K_matrix'] = K_mat
parameters['rot_origin'] = rot_o
parameters['rot_axis'] = rot_axis
np.save(os.path.join(save_path, 'parameters.npy'), parameters)
return
def save_img(idx):
global shared_dict1
global shared_dict2
global save_path
roi_image = shared_dict1['image'].permute(0,2,3,1)[idx].numpy()
silhouette = shared_dict1['objmask'].permute(0,2,3,1)[idx]
mask_model = shared_dict2['obj_mask']
gt_points = shared_dict1['smplv2d']
spin_points = shared_dict2['spin_points']
silhouette_init = mask_model.detach().cpu().squeeze()[idx].numpy()
mask_img_blend = visualize(silhouette_init, roi_image, 0.8)
# save image
#plt.subplots_adjust(hspace = 0.2, left=0.01, right=0.99, top=0.95, bottom=0.05)
imsave(os.path.join(save_path, str(idx)+'.png'), mask_img_blend)
return
def save_imgs(data, output, save_folder):
global shared_dict1
global shared_dict2
global save_path
save_path = save_folder
manager = Manager()
shared_dict1 = manager.dict(data)
shared_dict1 = data
shared_dict2 = manager.dict(output)
shared_dict2 = output
ids = [item for item in range(data['image'].shape[0])]
pool = Pool(processes=12)
pool.map(save_img, ids)
#sceneflow = shared_dict2['sceneflow']
#objSurfaceFlow = shared_dict2['objSurfaceFlow']
#synFlow = shared_dict2['synFlow']
#sceneflowMaskSquareShrink = shared_dict2['sceneflowMaskSquareShrink']
#part_diff_images = shared_dict2['part_diff_images']
    # save object part surface raft flow visualization
#save_path = os.path.join(save_folder, title, 'objSurfaceFlow')
#if not os.path.exists(save_path):
#os.makedirs(save_path)
#for idx in range(objSurfaceFlow.shape[0]):
#imsave(os.path.join(save_path, str(idx)+'.png'), objSurfaceFlow[idx])
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/objSurfaceFlow.mp4'
#os.system(ffmpeg_cmd)
# save synthetic rendering flow visualization
#save_path = os.path.join(save_folder, title, 'synFlow')
#if not os.path.exists(save_path):
#os.makedirs(save_path)
#for idx in range(synFlow.shape[0]):
#imsave(os.path.join(save_path, str(idx)+'.png'), synFlow[idx])
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/synFlow.mp4'
#os.system(ffmpeg_cmd)
# save visualize images
#for idx in range(data['image'].shape[0]):
#save_img(idx, shared_dict1, shared_dict2)
#save_path = os.path.join(save_folder, title, 'render')
#ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/%d.png '+save_folder+'/render.mp4'
#os.system(ffmpeg_cmd)
#vid1 = os.path.join(save_folder, 'objSurfaceFlow.mp4')
#vid2 = os.path.join(save_folder, 'synFlow.mp4')
#vid3 = os.path.join(save_folder, 'visual.mp4')
#ffmpeg_cmd = 'ffmpeg -i '+vid1+' -i '+vid2+' -filter_complex hstack=inputs=2 '+vid3
#os.system(ffmpeg_cmd)
return
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b, c, d = -axis * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(3,3)
rot_mat[0,0] = aa + bb - cc - dd
rot_mat[0,1] = 2 * (bc + ad)
rot_mat[0,2] = 2 * (bd - ac)
rot_mat[1,0] = 2 * (bc - ad)
rot_mat[1,1] = aa + cc - bb - dd
rot_mat[1,2] = 2 * (cd + ab)
rot_mat[2,0] = 2 * (bd + ac)
rot_mat[2,1] = 2 * (cd - ab)
rot_mat[2,2] = aa + dd - bb - cc
return rot_mat
def rotation_matrix_batch(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b = -axis[0] * torch.sin(theta / 2.0)
c = -axis[1] * torch.sin(theta / 2.0)
d = -axis[2] * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(aa.shape[0],3,3)
rot_mat[:,0,0] = aa + bb - cc - dd
rot_mat[:,0,1] = 2 * (bc + ad)
rot_mat[:,0,2] = 2 * (bd - ac)
rot_mat[:,1,0] = 2 * (bc - ad)
rot_mat[:,1,1] = aa + cc - bb - dd
rot_mat[:,1,2] = 2 * (cd + ab)
rot_mat[:,2,0] = 2 * (bd + ac)
rot_mat[:,2,1] = 2 * (cd - ab)
rot_mat[:,2,2] = aa + dd - bb - cc
return rot_mat
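# Illustrative, hedged usage sketch (an assumption added for documentation, not
# part of the original pipeline): a quick sanity check of rotation_matrix_batch.
# Rotating the x-axis by 90 degrees around the z-axis should yield the y-axis.
def _example_rotation_matrix_batch():
    axis = torch.tensor([0.0, 0.0, 1.0])
    theta = torch.tensor([0.0, np.pi / 2.0])
    R = rotation_matrix_batch(axis, theta)            # (2, 3, 3)
    x_axis = torch.tensor([1.0, 0.0, 0.0])
    rotated = torch.matmul(R, x_axis)                 # (2, 3)
    assert torch.allclose(rotated[0], x_axis, atol=1e-6)                       # theta = 0 is the identity
    assert torch.allclose(rotated[1], torch.tensor([0.0, 1.0, 0.0]), atol=1e-6)  # 90 deg about z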
def flow_confidence(threshold, forwardFlow, backwardFlow, img_w, img_h):
    # I_t -> I_(t+1), warp with forward flow
It1 = forwardFlow.clone()
It1[:,:,0] += torch.arange(img_w)
It1[:,:,1] += torch.arange(img_h).unsqueeze(1) # (u, v) coordinate
It1 = torch.round(It1)
withinFrameMask = (It1[:,:,0] < img_w) * (It1[:,:,0] > 0) *\
(It1[:,:,1] < img_h) * (It1[:,:,1] > 0)
    withinFrameCoord = torch.nonzero(withinFrameMask==1) # (row, col) coordinates of valid flow
    nextCoord = It1[withinFrameCoord[:, 0], withinFrameCoord[:,1]].long() # (u, v) order
    # I_(t+1) -> I_t, warp back with backward flow
    nextCoordBackwardFlow = backwardFlow[nextCoord[:,1], nextCoord[:,0],:]
    nextbackCoord = nextCoord + nextCoordBackwardFlow # (u, v) coord
    nextbackCoord[:,[1,0]] = nextbackCoord[:,[0,1]] # swap to (row, col) to match withinFrameCoord
    # filter out noisy flow via forward-backward consistency
    stableFlowMask = torch.sum(torch.abs(nextbackCoord - withinFrameCoord), 1) < threshold
    stableFlowCoord = withinFrameCoord[stableFlowMask] # (row, col) coord
return stableFlowCoord
def make_colorwheel():
"""
Generates a color wheel for optical flow visualization as presented in:
Baker et al. "A Database and Evaluation Methodology for Optical Flow" (ICCV, 2007)
URL: http://vision.middlebury.edu/flow/flowEval-iccv07.pdf
Code follows the original C++ source code of Daniel Scharstein.
    Code follows the Matlab source code of Deqing Sun.
Returns:
np.ndarray: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros((ncols, 3))
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.floor(255*np.arange(0,RY)/RY)
col = col+RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.floor(255*np.arange(0,YG)/YG)
colorwheel[col:col+YG, 1] = 255
col = col+YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.floor(255*np.arange(0,GC)/GC)
col = col+GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.floor(255*np.arange(CB)/CB)
colorwheel[col:col+CB, 2] = 255
col = col+CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.floor(255*np.arange(0,BM)/BM)
col = col+BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.floor(255*np.arange(MR)/MR)
colorwheel[col:col+MR, 0] = 255
return colorwheel
def flow_uv_to_colors(u, v, convert_to_bgr=False):
"""
Applies the flow color wheel to (possibly clipped) flow components u and v.
According to the C++ source code of Daniel Scharstein
According to the Matlab source code of Deqing Sun
Args:
u (np.ndarray): Input horizontal flow of shape [H,W]
v (np.ndarray): Input vertical flow of shape [H,W]
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
flow_image = np.zeros((u.shape[0], u.shape[1], 3), np.uint8)
colorwheel = make_colorwheel() # shape [55x3]
ncols = colorwheel.shape[0]
rad = np.sqrt(np.square(u) + np.square(v))
a = np.arctan2(-v, -u)/np.pi
fk = (a+1) / 2*(ncols-1)
k0 = np.floor(fk).astype(np.int32)
k1 = k0 + 1
k1[k1 == ncols] = 0
f = fk - k0
for i in range(colorwheel.shape[1]):
tmp = colorwheel[:,i]
col0 = tmp[k0] / 255.0
col1 = tmp[k1] / 255.0
col = (1-f)*col0 + f*col1
idx = (rad <= 1)
col[idx] = 1 - rad[idx] * (1-col[idx])
col[~idx] = col[~idx] * 0.75 # out of range
# Note the 2-i => BGR instead of RGB
ch_idx = 2-i if convert_to_bgr else i
flow_image[:,:,ch_idx] = np.floor(255 * col)
return flow_image
def flow_to_image(flow_uv, clip_flow=None, convert_to_bgr=False):
"""
    Expects a flow UV field of shape [H,W,2].
Args:
flow_uv (np.ndarray): Flow UV image of shape [H,W,2]
clip_flow (float, optional): Clip maximum of flow values. Defaults to None.
convert_to_bgr (bool, optional): Convert output image to BGR. Defaults to False.
Returns:
np.ndarray: Flow visualization image of shape [H,W,3]
"""
assert flow_uv.ndim == 3, 'input flow must have three dimensions'
assert flow_uv.shape[2] == 2, 'input flow must have shape [H,W,2]'
if clip_flow is not None:
flow_uv = np.clip(flow_uv, 0, clip_flow)
u = flow_uv[:,:,0]
v = flow_uv[:,:,1]
rad = np.sqrt(np.square(u) + np.square(v))
    rad_max = 40.0  # fixed maximum flow magnitude used for normalization
epsilon = 1e-5
u = u / (rad_max + epsilon)
v = v / (rad_max + epsilon)
return flow_uv_to_colors(u, v, convert_to_bgr)
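# Illustrative, hedged usage sketch (an assumption added for documentation, not
# part of the original pipeline): converting a small synthetic flow field into
# an RGB visualization and checking the output shape and dtype.
def _example_flow_to_image():
    flow = np.zeros((64, 64, 2), dtype=np.float32)
    flow[:, :32, 0] = 10.0    # horizontal motion on the left half
    flow[:, 32:, 1] = -10.0   # vertical motion on the right half
    vis = flow_to_image(flow)
    assert vis.shape == (64, 64, 3) and vis.dtype == np.uint8
    return vis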
|
d3d-hoi-main
|
visualization/annotation/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import os
from model import JOHMRModel
from utils import (
initialize_render, merge_meshes,
load_motion,
save_meshes, save_parameters
)
import json
import tqdm
from matplotlib.image import imsave
import matplotlib.pyplot as plt
import cv2
import re
import numpy as np
from PIL import Image
import glob
from dataloader import MyOwnDataset
import torch.nn as nn
import torch.optim as optim
import argparse
import itertools
def isfloat(x):
try:
a = float(x)
except (TypeError, ValueError):
return False
else:
return True
def isint(x):
try:
a = float(x)
b = int(a)
except (TypeError, ValueError):
return False
else:
return a == b
#################
# training code #
#################
def run_exp(inputvideo):
# Initialize gpu device
assert torch.cuda.is_available()
device = torch.device("cuda:"+str(args.device))
global available_category
vidpath = inputvideo[1]
video_name = inputvideo[0]
if(video_name[:4]=='b001'):
video_category = 'dishwasher'
elif(video_name[:4]=='b003'):
video_category = 'laptop'
elif(video_name[:4]=='b004'):
video_category = 'microwave'
elif(video_name[:4]=='b005'):
video_category = 'refrigerator'
elif(video_name[:4]=='b006'):
video_category = 'trashcan'
elif(video_name[:4]=='b007'):
video_category = 'washingmachine'
elif(video_name[:4]=='b008'):
video_category = 'storage_revolute'
elif(video_name[:4]=='b108'):
video_category = 'storage_prismatic'
elif(video_name[:4]=='b009'):
video_category = 'oven'
    else:
        print('category not available for '+video_name+', skipping')
        return False
    print('processing '+video_name+' for category '+video_category)
# load gt annotation, find the correct object size, cad model, part id, and focal len
with open(os.path.join(vidpath, '3d_info.txt')) as myfile:
gt_data = [next(myfile).strip('\n') for x in range(14)]
# Initialize object scale (x, y, z)
obj_sizeX = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[0])
obj_sizeY = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[1])
obj_sizeZ = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[2])
obj_dimension = [obj_sizeX, obj_sizeY, obj_sizeZ] # in cm
# initialize object cad model and part id
if args.use_gt_objmodel:
if args.use_gt_objpart:
cad_object = os.path.join(args.cadpath, video_category, re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[10])[0])
cad_part = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[11])[0])
cad_models = [(cad_object, cad_part)]
else:
cad_models = []
cad_object = os.path.join(args.cadpath, video_category, re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[10])[0])
with open(os.path.join(cad_object, 'motion.json')) as json_file:
cad_parts = len(json.load(json_file))
for cad_part in range(cad_parts):
cad_models.append((cad_object,cad_part))
else:
# iter through all cad models in that category
cad_models = []
cad_objects = [f.path for f in os.scandir(os.path.join(args.cadpath, video_category))]
for cad_object in cad_objects:
with open(os.path.join(cad_object, 'motion.json')) as json_file:
cad_parts = len(json.load(json_file))
for cad_part in range(cad_parts):
cad_models.append((cad_object,cad_part))
# initialize focal len
focal_len = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[12])[0])
assert(focal_len == float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[13])[0])) # in pixel (for 1280x720 only)
    # initialize data loader (use all frames at once)
dataset = MyOwnDataset(inputvideo[1])
img_square, img_small = dataset.correct_image_size(200,300)
if img_small <= 0:
        print('cannot find a suitable small image size')
return False
trainloader = torch.utils.data.DataLoader(dataset, batch_size=len(dataset), pin_memory=True, shuffle=False, num_workers=4)
# initialize render
silhouette_renderer, phong_renderer = initialize_render(device, focal_len, focal_len, img_square, img_small)
# load all data per video
for idx, data in enumerate(trainloader):
imgs = data['image'].permute(0,2,3,1).to(device)
batch_size = imgs.shape[0]
img_h = imgs.shape[1]
img_w = imgs.shape[2]
points = data['smplv2d'].to(device).float()
smpl_verts = data['ver'].to(device).float()
smpl_faces = data['f'].to(device).float()
joints = data['joint3d'].to(device).float()
normal = data['normal'].to(device).float()
normal2 = data['normal2'].to(device).float()
objmask = data['objmask'].permute(0,2,3,1).to(device).float()
print('data loaded...')
# load gt part motion
gt_partmotion = []
fp = open(os.path.join(vidpath, 'jointstate.txt'))
for i, line in enumerate(fp):
line = line.strip('\n')
if isfloat(line) or isint(line):
gt_partmotion.append(float(line))
gt_partmotion = np.asarray(gt_partmotion)
# Infer HOI snippet from GT part motions
diff = gt_partmotion[:-1] - gt_partmotion[1:] # (i - i+1)
if video_category == 'storage_prismatic':
large_diff = np.where(abs(diff)>0.5)[0]
else:
large_diff = np.where(abs(diff)>2)[0]
care_idx = np.union1d(large_diff, large_diff+1)
care_idx = np.clip(care_idx, 0, len(gt_partmotion)-1)
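        # Worked example (illustrative): for gt_partmotion = [0, 0, 10, 30, 30]
        # the frame-to-frame diff is [0, -10, -20, 0], so large_diff = [1, 2] and
        # care_idx = union([1, 2], [2, 3]) = [1, 2, 3], i.e. the frames where the
        # part is actually moving.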
# compute object mask center
obj_x_center = 0
obj_y_center = 0
count = 1e-5
for mask in objmask:
if torch.sum(mask) > 0:
count += 1
small_img = mask.squeeze().detach().cpu().numpy()
large_img = cv2.resize(small_img, dsize=(img_square, img_square), interpolation=cv2.INTER_NEAREST)
x, y, w, h = cv2.boundingRect(np.uint8(large_img))
obj_x_center += int(x+0.5*w)
obj_y_center += int(y+0.5*h)
obj_x_center /= count
obj_y_center /= count
###############################################
# optimize different cad model configurations #
###############################################
final_losses = []
folders = []
for (obj_path, part_idx) in cad_models:
cad_name = re.findall(r'\d+', obj_path)[-1]
# load object mesh
verts, faces, vertexSegs, faceSegs = merge_meshes(obj_path)
verts[:,1:] *= -1 # pytorch3d -> world coordinate
if args.use_gt_objscale:
# compute object rescale value if using gt dimension (cm)
x_diff = torch.max(verts[:,0]) - torch.min(verts[:,0])
x_ratio = obj_dimension[0] / x_diff
y_diff = torch.max(verts[:,1]) - torch.min(verts[:,1])
y_ratio = obj_dimension[1] / y_diff
z_diff = torch.max(verts[:,2]) - torch.min(verts[:,2])
z_ratio = obj_dimension[2] / z_diff
else:
if video_category == 'laptop':
initial_dim = 5.0
elif cad_name == '10797':
initial_dim = 20.0 # small fridge
elif video_category == 'refrigerator':
initial_dim = 100.0 # large fridge
else:
initial_dim = 50.0
x_diff = torch.max(verts[:,0]) - torch.min(verts[:,0])
x_ratio = x_diff * initial_dim
y_diff = torch.max(verts[:,1]) - torch.min(verts[:,1])
y_ratio = y_diff * initial_dim
z_diff = torch.max(verts[:,2]) - torch.min(verts[:,2])
z_ratio = z_diff * initial_dim
obj_verts = verts.to(device)
obj_faces = faces.to(device)
# load motion json file
with open(os.path.join(obj_path, 'motion.json')) as json_file:
motions = json.load(json_file)
assert len(motions) + 2 == len(vertexSegs)
rot_origin, rot_axis, rot_type, limit_a, limit_b, contact_list = load_motion(motions, device)
# Hand, object contact vertex id
handcontact = [2005, 5466] # left, right hand from SMPL
objcontact = contact_list[part_idx]
# Optimize for all possible settings
for handcontact_v in handcontact:
for objcontact_v in objcontact:
meta_info = str(part_idx)+'_'+str(objcontact_v)+'_'+str(handcontact_v)
                    # initialize model
model = JOHMRModel(imgs.detach(), obj_verts.detach(), obj_faces.detach(),
smpl_verts.detach(), smpl_faces.detach(), points.detach(),
silhouette_renderer, phong_renderer, normal.detach(), normal2.detach(), objmask.detach(),
rot_origin, rot_axis, rot_type, vertexSegs, faceSegs, limit_a, limit_b,
img_small ,focal_len, joints.detach())
# initialize optimizer
optimizer = optim.Adam(model.parameters(), lr=0.05) # 0.05
scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=int(0.75*args.iter), gamma=0.1)
# Start optimizing
for iteration in range(args.iter):
loss, loss_meta = model(iteration, args, cad_name, care_idx, part_idx,
handcontact_v, objcontact_v, obj_x_center, obj_y_center,
x_ratio, y_ratio, z_ratio)
if loss_meta is not None:
print('Iteration %d lr %.4f, total loss %.4f, smpl %.4f, mask %.4f, hfacing %.4f, depth %.4f, gamma %.4f, alpha %.4f, size %.3f, contact %.4f'
% (iteration, optimizer.param_groups[0]['lr'], loss.data, loss_meta['l_points'], loss_meta['l_mask'], loss_meta['l_direction'],
loss_meta['l_depth'],loss_meta['l_gamma'],loss_meta['l_alpha'], loss_meta['l_prior'],loss_meta['l_contact']))
optimizer.zero_grad()
loss.backward()
optimizer.step()
scheduler.step()
# save results
param_path = os.path.join(args.exp_name, 'params', video_name,cad_name, meta_info)
save_parameters(model, param_path)
final_losses.append(loss_meta['final_loss'])
folders.append(param_path)
# Only keep best result
best_run = final_losses.index(min(final_losses))
folders.remove(folders[best_run])
for folder in folders:
os.system('rm -r '+folder)
if __name__ == "__main__":
global available_category
parser = argparse.ArgumentParser()
parser.add_argument('--iter', type=int)
parser.add_argument('--use_gt_objscale', action='store_true')
parser.add_argument('--use_gt_objmodel', action='store_true')
parser.add_argument('--use_gt_objpart', action='store_true')
parser.add_argument('--objmask', type=float)
parser.add_argument('--hfacing', type=float)
parser.add_argument('--depth', type=float)
parser.add_argument('--gamma', type=float)
parser.add_argument('--alpha', type=float)
parser.add_argument('--range', type=float)
parser.add_argument('--smpl', type=float)
parser.add_argument('--contact', type=float)
parser.add_argument('--size', type=float)
parser.add_argument('--center', type=float)
parser.add_argument('--smooth', type=float)
parser.add_argument('--scale', type=float)
parser.add_argument('--category', type=str, help="which category to run")
parser.add_argument('--exp_name', type=str, help="experiment main folder")
parser.add_argument('--datapath', type=str, help="experiment data folder")
parser.add_argument('--cadpath', type=str, help="experiment data folder")
parser.add_argument("--device", type=int, help="CUDA Device Index")
args = parser.parse_args()
available_category = ['dishwasher', 'laptop', 'microwave', 'refrigerator', 'trashcan', 'washingmachine', 'oven', 'storage_revolute', 'storage_prismatic']
    if args.category not in available_category and args.category!='all':
        print('please choose a valid category')
        raise SystemExit(1)
# create main exp folder
args.exp_path = os.path.join(os.getcwd(), args.exp_name)
if not os.path.exists(args.exp_path):
os.makedirs(args.exp_path)
videopath = []
# run on single object category
if args.category != 'all':
videopath = sorted([(f.name,f.path) for f in os.scandir(os.path.join(args.datapath, args.category))])
# run on all object categories
else:
videopath = []
for obj_class in available_category:
videopath.append(sorted([(f.name, f.path) for f in os.scandir(os.path.join(args.datapath, obj_class))]))
videopath = sorted(list(itertools.chain.from_iterable(videopath)))
print('total of '+str(len(videopath))+' experiments...')
# run locally
for i in range(len(videopath)):
run_exp(videopath[i])
|
d3d-hoi-main
|
optimization/optimize.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
import torch
import numpy as np
from pytorch3d.renderer import (
look_at_view_transform, TexturesVertex
)
import math
from pytorch3d.structures import Meshes
import cv2
import matplotlib.pyplot as plt
from utils import rotation_matrix_batch
from scipy.ndimage.filters import gaussian_filter1d
from pytorch3d.io import save_obj
from pytorch3d.transforms import (
RotateAxisAngle, matrix_to_euler_angles, euler_angles_to_matrix
)
from pytorch3d.transforms.rotation_conversions import (
rotation_6d_to_matrix, matrix_to_rotation_6d
)
import time
from matplotlib.image import imsave
import os
from torch.autograd import Variable
import open3d as o3d
class JOHMRModel(nn.Module):
""" Differentiable render for fitting CAD model based on silhouette and human. """
def __init__(self, imgs, obj_verts, obj_faces, smpl_verts, smpl_faces, points,
diff_render, vis_render, normal, normal2, objmask,
rot_o, axis, rot_type, vertexSegs, faceSegs, limit_a, limit_b,
img_size_small ,focal_len, joints):
super(JOHMRModel, self).__init__()
self.imgs = imgs
self.objmask = objmask[..., 0]
self.objmask.requires_grad = False
self.device = smpl_verts.device
self.diff_render = diff_render
self.vis_render = vis_render
self.obj_verts_orig = obj_verts
self.obj_faces = obj_faces
self.smpl_verts_orig = smpl_verts
self.smpl_faces = smpl_faces
self.points = points
self.rot_origs = rot_o
self.rot_axises = axis
self.vertexSegs = vertexSegs
self.faceSegs = faceSegs
self.limit_as = limit_a
self.limit_bs = limit_b
self.rot_type = rot_type
self.bs = self.imgs.shape[0]
self.normal = normal
self.normal2 = normal2
self.img_h = self.imgs.shape[1]
self.img_w = self.imgs.shape[2]
self.new_s = int((max(self.img_h, self.img_w) - min(self.img_h, self.img_w))/2)-1
self.img_small = img_size_small
self.focal = focal_len
self.joints = joints
self.normalize = 1.0/(0.5*(self.img_h+self.img_w))
K = torch.from_numpy(np.array([[self.focal, 0, self.img_w/2],
[0, self.focal, self.img_h/2],
[0,0,1]]))
self.K = K.float().to(self.device)
# camera is at the center
self.R, self.T = look_at_view_transform(0.1, 0.0, 0.0, device=self.device)
self.T[0,2] = 0.0 # manually set to zero
# object CAD x, y, z offset in 3D
obj_offset = np.array([0.0, 0.0, 2.5], dtype=np.float32)
self.obj_offset = nn.Parameter(torch.from_numpy(obj_offset).to(self.device))
# object CAD scale in 3D
obj_scale = np.array([1.0, 1.0, 1.0], dtype=np.float32)
self.obj_scale = nn.Parameter(torch.from_numpy(obj_scale).to(self.device))
# SPIN mesh x, y, z offset in 3D
smpl_offset = np.zeros((self.bs,3), dtype=np.float32)
smpl_offset[:,0] = 0.0
smpl_offset[:,1] = 0.0
smpl_offset[:,2] = 2.5
self.smpl_offset = nn.Parameter(torch.from_numpy(smpl_offset).to(self.device))
# local rotation angle or translation offset for the parts
part_motion = 0.0*np.ones(self.bs, dtype=np.float32)
self.part_motion = nn.Parameter(torch.from_numpy(part_motion).to(self.device))
# global rotation angle for the object CAD
        yaw_degree = 0.0  # initial global object rotation (degrees, about the X axis)
rot_mat = RotateAxisAngle(yaw_degree, axis='X').get_matrix()
rot_mat = rot_mat[0,:3,:3].unsqueeze(0)
ortho6d = matrix_to_rotation_6d(rot_mat)
self.obj_rot_angle = nn.Parameter(ortho6d.to(self.device))
# curve rotation in 3D
        yaw_degree2 = 0.0  # initial curve rotation (degrees, about the Y axis)
rot_mat2 = RotateAxisAngle(yaw_degree2, axis='Y').get_matrix()
rot_mat2 = rot_mat2[0,:3,:3].unsqueeze(0)
ortho6d2 = matrix_to_rotation_6d(rot_mat2)
self.curve_rot_angle = nn.Parameter(ortho6d2.to(self.device))
curve_offset = np.array([0.0, 0.0, 0.0], dtype=np.float32)
self.curve_offset = nn.Parameter(torch.from_numpy(curve_offset).to(self.device))
self.cos = nn.CosineSimilarity(dim=1, eps=1e-6)
self.relu = nn.ReLU()
return
def forward(self, iteration, args, cad_name, care_idx, part_idx, handcontact_v, objcontact_v,
obj_x_center, obj_y_center, x_ratio, y_ratio, z_ratio):
# Predefined CAD segmentation and motion axis from SAPIEN
self.vertexStart = self.vertexSegs[part_idx]
self.vertexEnd = self.vertexSegs[part_idx+1]
faceStart = self.faceSegs[part_idx]
faceEnd = self.faceSegs[part_idx+1]
self.rot_o = self.rot_origs[part_idx].clone().to(self.device).detach()
self.axis = self.rot_axises[part_idx].clone().to(self.device).detach()
limit_a = self.limit_as[part_idx]
limit_b = self.limit_bs[part_idx]
self.rot_o.requires_grad = False
self.axis.requires_grad = False
# Transform pytorch3d -> world coordinate
self.rot_o[1:] *= -1
self.axis[1:] *= -1
####################
## fit human mesh ##
####################
self.smpl_verts = self.smpl_verts_orig.clone()
# Resize human mesh
smplmesh_calibrate_path = 'smplmesh-calibrate.obj'
smplmesh_calibrate = o3d.io.read_triangle_mesh(smplmesh_calibrate_path) # load smpl mesh
hverts_cal = torch.from_numpy(np.asarray(smplmesh_calibrate.vertices)).float()
human_height = 175 #cm
h_diff = torch.max(hverts_cal[:,1]) - torch.min(hverts_cal[:,1])
h_ratio = (human_height / h_diff).detach()
self.smpl_verts *= h_ratio
# Add x y z offsets to SMPL mesh (camera looking at positive depth z)
smpl_offset = self.smpl_offset.reshape(-1,1,3).repeat(1,self.smpl_verts_orig.shape[1],1) # (bs, 6890, 3)
self.smpl_verts[:,:,0] += args.scale*smpl_offset[:,:,0]
self.smpl_verts[:,:,1] += args.scale*smpl_offset[:,:,1]
        self.smpl_verts[:,:,2] += args.scale*smpl_offset[:,:,2]
# Compute projection matrix K
K_batch = self.K.expand(self.smpl_verts.shape[0],-1,-1)
        # Perspective projection
points_out_v = torch.bmm(self.smpl_verts, K_batch.permute(0,2,1))
self.smpl_2d = points_out_v[...,:2] / points_out_v[...,2:]
# Human fitting error
l_points = torch.mean(self.normalize*(self.points - self.smpl_2d)**2)
#####################
## optimize object ##
#####################
self.obj_rot_mat = rotation_6d_to_matrix(self.obj_rot_angle)[0].to(self.device)
# pitch, yaw, roll
alpha,beta,gamma = matrix_to_euler_angles(rotation_6d_to_matrix(self.obj_rot_angle), ["X","Y","Z"])[0]
obj_verts_batch = self.obj_verts_orig.reshape(1,-1,3).repeat(self.bs,1,1) # (bs, ver, 3)
# Step 1: rescale object and rotation orig
if not args.use_gt_objscale:
sx = self.obj_scale[0] * x_ratio
sy = self.obj_scale[1] * y_ratio
sz = self.obj_scale[2] * z_ratio
else:
sx = x_ratio
sy = y_ratio
sz = z_ratio
obj_verts_batch[:,:,0] *= sx
obj_verts_batch[:,:,1] *= sy
obj_verts_batch[:,:,2] *= sz
self.rot_o[0] *= sx
self.rot_o[1] *= sy
self.rot_o[2] *= sz
        # Object real-world dimension after scaling
self.x_dim = torch.max(obj_verts_batch[0,:,0]) - torch.min(obj_verts_batch[0,:,0])
self.y_dim = torch.max(obj_verts_batch[0,:,1]) - torch.min(obj_verts_batch[0,:,1])
self.z_dim = torch.max(obj_verts_batch[0,:,2]) - torch.min(obj_verts_batch[0,:,2])
# Step 2: add part motion (prismatic or revolute)
if cad_name == '45261' or cad_name == '45132':
obj_verts_batch_t1 = obj_verts_batch[:, self.vertexStart:self.vertexEnd, :] - self.rot_o
self.part_motion_scaled = self.part_motion * args.scale
batch_offset = self.axis.unsqueeze(0).repeat(self.bs,1) * self.part_motion_scaled.unsqueeze(-1).repeat(1,3)
obj_verts_batch_t2 = obj_verts_batch_t1 + batch_offset.unsqueeze(1).repeat(1,obj_verts_batch_t1.shape[1], 1)
obj_verts_batch[:, self.vertexStart:self.vertexEnd, :] = obj_verts_batch_t2 + self.rot_o
else:
self.part_rot_mat = rotation_matrix_batch(self.axis, self.part_motion, self.device)
obj_verts_batch_t1 = obj_verts_batch[:, self.vertexStart:self.vertexEnd, :] - self.rot_o
obj_verts_batch_t2 = torch.bmm(self.part_rot_mat, obj_verts_batch_t1.permute(0,2,1)).permute(0,2,1)
obj_verts_batch[:, self.vertexStart:self.vertexEnd, :] = obj_verts_batch_t2 + self.rot_o
# Step 3: add global object rotation
obj_verts_batch = torch.bmm(self.obj_rot_mat.reshape(1,3,3).repeat(self.bs,1,1),
obj_verts_batch.permute(0,2,1)).permute(0,2,1)
# Step 4: add global object translation
self.obj_verts_batch = obj_verts_batch + args.scale*self.obj_offset
# Object center error
obj_2d = torch.bmm(self.obj_verts_batch, K_batch.permute(0,2,1))
self.obj_2d = (obj_2d[...,:2] / (obj_2d[...,2:])) # (bs, objV, 2)
obj_2d_x_center = torch.mean(self.obj_2d[:,:,0])
obj_2d_y_center = torch.mean(self.obj_2d[:,:,1])
if self.img_w > self.img_h:
obj_2d_y_center += self.new_s
else:
obj_2d_x_center += self.new_s
l_mask_center = self.normalize*(obj_y_center - obj_2d_y_center)**2 + self.normalize*(obj_x_center - obj_2d_x_center)**2
# Object & human orientation error
if '10213' in cad_name or '9968' in cad_name:
# Difficult to predefine orientation for laptop
# Use CAD base part
front_vertex = self.obj_verts_orig[645+581].detach()
top_vertex = self.obj_verts_orig[645+285].detach()
base_center = self.obj_verts_orig[self.vertexSegs[-2]:self.vertexSegs[-1]].detach()
obj_norm = torch.mean(base_center, 0) - front_vertex
obj_norm_rot = torch.mm(self.obj_rot_mat, obj_norm.float().reshape(-1,1)).permute(1,0)
output = self.cos(self.normal, obj_norm_rot.repeat(self.bs, 1))
l_direction = torch.mean((1.0 - output)[care_idx])
obj_norm2 = top_vertex - torch.mean(base_center, 0)
obj_norm_rot2 = torch.mm(self.obj_rot_mat, obj_norm2.float().reshape(-1,1)).permute(1,0)
output2 = self.cos(self.normal2, obj_norm_rot2.repeat(self.bs, 1))
l_direction2 = torch.mean((1.0 - output2))
else:
obj_norm = torch.from_numpy(np.asarray([0,0,1])).to(self.device)
obj_norm2 = torch.from_numpy(np.asarray([0,-1,0])).to(self.device)
obj_norm_rot = torch.mm(self.obj_rot_mat, obj_norm.float().reshape(-1,1)).permute(1,0)
obj_norm_rot2 = torch.mm(self.obj_rot_mat, obj_norm2.float().reshape(-1,1)).permute(1,0)
output = self.cos(self.normal, obj_norm_rot.repeat(self.bs, 1))
output2 = self.cos(self.normal2, obj_norm_rot2.repeat(self.bs, 1))
l_direction = torch.mean((1.0 - output)[care_idx])
l_direction2 = torch.mean((1.0 - output2))
# Differentiable mask error
diff_images = []
for index in range(self.bs):
# convert object mesh for diff render, opengl -> pytorch3d
p3d_obj_verts = self.obj_verts_batch[index].clone()
p3d_obj_verts[:,1] *= -1
p3d_obj_verts[:,2] *= -1
# pass through diff render
tex = torch.ones_like(p3d_obj_verts).unsqueeze(0)
textures = TexturesVertex(verts_features=tex).to(self.device)
obj_mesh = Meshes(verts=[p3d_obj_verts],faces=[self.obj_faces],textures=textures)
diff_img = self.diff_render(meshes_world=obj_mesh, R=self.R, T=self.T)
diff_img = diff_img[..., 3:]
diff_img = diff_img.permute(0,3,1,2)[0,0,:,:] #(h,w)
diff_images.append(diff_img)
diff_images = torch.stack(diff_images) #(bs,h,w)
mask2 = (diff_images>0).detach()
l_gtMask = torch.mean(self.objmask*(diff_images-self.objmask)**2)
l_rendMask = torch.mean(mask2*((diff_images-self.objmask)**2))
mask_diff = torch.mean((diff_images-self.objmask)**2)
l_mask = 0.3*l_rendMask + 0.6*l_gtMask + 0.1*mask_diff
# Hand & object 3D contact error
self.curve_rot_angle_mat = rotation_6d_to_matrix(self.curve_rot_angle)[0].to(self.device)
l_contact = torch.zeros(1).to(self.device)
if '102156' not in cad_name and '103635' not in cad_name:
obj_contact_curve = self.obj_verts_batch[care_idx, objcontact_v, :].clone()
smpl_contact_curve = self.smpl_verts[care_idx, handcontact_v, :].clone().detach()
obj_contact_curve_after = torch.t(torch.mm(self.curve_rot_angle_mat, torch.t(obj_contact_curve))) + 5.0*self.curve_offset
l_contact = self.normalize * torch.mean((obj_contact_curve_after- smpl_contact_curve)**2)
# Smoothing error
nomotion_idx = list(set(list(range(0, len(self.part_motion)-1))) - set(care_idx.tolist()))
partrot_first = self.part_motion[:-1]
partrot_second = self.part_motion[1:]
l_smooth = torch.mean((partrot_first - partrot_second)[np.array(nomotion_idx)]**2)
# Motion range error
l_range = torch.mean(self.relu(limit_a - self.part_motion) + self.relu(self.part_motion-limit_b))
# Roll, pitch constraint (except for laptop)
if '10213' in cad_name or '9968' in cad_name:
l_gamma = torch.zeros(1).to(self.device)
l_alpha = torch.zeros(1).to(self.device)
else:
l_alpha = self.relu(-alpha-0.2)**2
l_gamma = self.relu(torch.abs(gamma)-0.2)**2
# Depth constraint
l_depth = torch.mean((self.smpl_offset[care_idx,2].detach() - self.obj_offset[2])**2)
# Object size constraint
#l_size = torch.sum(self.relu(0.1 - self.obj_scale))
l_size = torch.mean(self.relu(self.obj_scale-0.1)**2)
# Overall error
overall_loss = args.smpl*l_points + args.objmask*l_mask +\
args.depth*l_depth + args.smooth*l_smooth + args.range*l_range +\
args.gamma*l_gamma + args.alpha*l_alpha +\
args.hfacing*(l_direction+l_direction2) + args.contact*l_contact
if iteration <= int(0.5*args.iter):
overall_loss += args.center*l_mask_center
if iteration > int(0.5*args.iter):
overall_loss += (args.size*l_size )
loss_meta = {}
loss_meta['l_mask'] = args.objmask*l_mask.data.detach().cpu().numpy()
loss_meta['l_center'] = args.center*l_mask_center.data.detach().cpu().numpy()
loss_meta['l_contact'] = args.contact*l_contact.data.detach().cpu().numpy()
loss_meta['l_depth'] = args.depth*l_depth.data.detach().cpu().numpy()
loss_meta['l_gamma'] = args.gamma*l_gamma.data.detach().cpu().numpy()
loss_meta['l_alpha'] = args.alpha*l_alpha.data.detach().cpu().numpy()
        loss_meta['l_range'] = args.range*l_range.data.detach().cpu().numpy()
        loss_meta['l_smooth'] = args.smooth*l_smooth.data.detach().cpu().numpy()
loss_meta['l_prior'] = args.size*l_size.data.detach().cpu().numpy()
loss_meta['l_direction'] = args.hfacing*(l_direction.data.detach().cpu().numpy() + l_direction2.data.detach().cpu().numpy() )
loss_meta['l_points'] = args.smpl*l_points.data.detach().cpu().numpy()
loss_meta['overall_loss'] = overall_loss.data.detach().cpu().item()
loss_meta['final_loss'] = loss_meta['l_mask'] + 0.3*loss_meta['l_contact'] + loss_meta['l_depth'] + loss_meta['l_range'] + loss_meta['l_smooth'] +\
loss_meta['l_gamma'] + loss_meta['l_alpha'] + loss_meta['l_prior'] + 0.3*loss_meta['l_direction']
return overall_loss, loss_meta
def render(self, save_folder=None):
obj_meshes = []
smpl_meshes = []
for index in range(self.bs):
smpl_verts = self.smpl_verts[index]
obj_verts = self.obj_verts_batch[index]
# create SPIN mesh (opengl)
tex = torch.ones_like(smpl_verts).unsqueeze(0)
textures = TexturesVertex(verts_features=tex).to(self.device)
smpl_mesh = Meshes(verts=[smpl_verts],faces=[self.smpl_faces[index]],textures=textures).detach()
smpl_meshes.append(smpl_mesh)
# create object mesh for diff render and visualization
tex = torch.ones_like(obj_verts).unsqueeze(0)
textures = TexturesVertex(verts_features=tex).to(self.device)
obj_mesh = Meshes(verts=[obj_verts],faces=[self.obj_faces],textures=textures).detach()
obj_meshes.append(obj_mesh)
meshes = {'obj_mesh':obj_meshes, 'spin_mesh':smpl_meshes}
return meshes
|
d3d-hoi-main
|
optimization/model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import natsort
import glob
import open3d as o3d
# rendering components
from pytorch3d.renderer import (
FoVPerspectiveCameras,RasterizationSettings,
MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights, DirectionalLights,
PerspectiveCameras
)
from pytorch3d.io import save_obj, load_obj
import math
import cv2
import matplotlib.pyplot as plt
import os
import imageio
from decimal import Decimal
import json
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
from mpl_toolkits.mplot3d import axes3d
from scipy.ndimage.filters import gaussian_filter1d
from numpy.linalg import svd
from multiprocessing import Pool, Manager, cpu_count
from pytorch3d.transforms import Rotate, Translate
from matplotlib.image import imsave
from pathlib import Path
def planeFit(points):
"""
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
fit an d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
    points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trailing dimensions
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:,np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, svd(M)[0][:,-1]
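# Illustrative, hedged usage sketch (an assumption added for documentation, not
# part of the original pipeline): fitting a plane to points scattered on z = 0
# should recover the z-axis as the plane normal (up to sign).
def _example_planeFit():
    pts = np.zeros((3, 100))
    pts[0] = np.random.randn(100)  # x
    pts[1] = np.random.randn(100)  # y
    ctr, normal = planeFit(pts)    # ctr is the centroid, normal the unit plane normal
    assert abs(ctr[2]) < 1e-8
    assert np.allclose(np.abs(normal), [0.0, 0.0, 1.0], atol=1e-6)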
def initialize_render(device, focal_x, focal_y, img_square_size, img_small_size):
""" initialize camera, rasterizer, and shader. """
# Initialize an OpenGL perspective camera.
#cameras = FoVPerspectiveCameras(znear=1.0, zfar=9000.0, fov=20, device=device)
#cameras = FoVPerspectiveCameras(device=device)
#cam_proj_mat = cameras.get_projection_transform()
img_square_center = int(img_square_size/2)
shrink_ratio = int(img_square_size/img_small_size)
focal_x_small = int(focal_x/shrink_ratio)
focal_y_small = int(focal_y/shrink_ratio)
img_small_center = int(img_small_size/2)
camera_sfm = PerspectiveCameras(
focal_length=((focal_x, focal_y),),
principal_point=((img_square_center, img_square_center),),
image_size = ((img_square_size, img_square_size),),
device=device)
camera_sfm_small = PerspectiveCameras(
focal_length=((focal_x_small, focal_y_small),),
principal_point=((img_small_center, img_small_center),),
image_size = ((img_small_size, img_small_size),),
device=device)
    # To blend the rasterized faces we set a few parameters which control the opacity and the sharpness of
# edges. Refer to blending.py for more details.
blend_params = BlendParams(sigma=1e-4, gamma=1e-4)
    # Define the settings for rasterization and shading. The silhouette renderer outputs an
    # image of size img_small_size x img_small_size and blends 50 faces per pixel. Refer to
    # rasterize_meshes.py for explanations of these parameters and to docs/notes/renderer.md
    # for the difference between naive and coarse-to-fine rasterization.
raster_settings = RasterizationSettings(
image_size=img_small_size,
blur_radius=np.log(1. / 1e-4 - 1.) * blend_params.sigma,
faces_per_pixel=50,
)
# Create a silhouette mesh renderer by composing a rasterizer and a shader.
silhouette_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm_small,
raster_settings=raster_settings
),
shader=SoftSilhouetteShader(blend_params=blend_params)
)
# We will also create a phong renderer. This is simpler and only needs to render one face per pixel.
raster_settings = RasterizationSettings(
image_size=img_square_size,
blur_radius=0.0,
faces_per_pixel=1,
)
# We can add a point light in front of the object.
lights = PointLights(device=device, location=((2.0, 2.0, -2.0),))
#lights = DirectionalLights(device=device, direction=((0, 0, 1),))
phong_renderer = MeshRenderer(
rasterizer=MeshRasterizer(
cameras=camera_sfm,
raster_settings=raster_settings
),
shader=HardPhongShader(device=device, cameras=camera_sfm, lights=lights)
)
return silhouette_renderer, phong_renderer
def merge_meshes(obj_path):
""" helper function for loading and merging meshes. """
verts_list = torch.empty(0,3)
faces_list = torch.empty(0,3).long()
num_vtx = [0]
num_faces = [0]
# merge meshes, load in ascending order
meshes = natsort.natsorted(glob.glob(obj_path+'/final/*_rescaled_sapien.obj'))
for part_mesh in meshes:
verts, faces, aux = load_obj(part_mesh)
faces = faces.verts_idx
faces = faces + verts_list.shape[0]
verts_list = torch.cat([verts_list, verts])
faces_list = torch.cat([faces_list, faces])
num_vtx.append(verts_list.shape[0])
num_faces.append(faces_list.shape[0])
return verts_list, faces_list, num_vtx, num_faces
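# Illustrative note (inferred from the code above; the counts are made up): for a
# CAD model whose final/ folder holds three part meshes with 100, 50 and 80
# vertices, num_vtx == [0, 100, 150, 230] and part i occupies
# verts[num_vtx[i]:num_vtx[i+1]] (num_faces segments the faces the same way);
# these ranges become the vertexSegs / faceSegs consumed by JOHMRModel.forward.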
def load_motion(motions, device):
""" load rotation axis, origin, and limit. """
rot_origin = []
rot_axis = []
rot_type = []
limit_a = []
limit_b = []
contact_list = []
# load all meta data
for idx, key in enumerate(motions.keys()):
jointData = motions[key]
# if contains movable parts
if jointData is not None:
origin = torch.FloatTensor(jointData['axis']['origin']).to(device)
axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
mobility_type = jointData['type']
contact_list.append(jointData['contact'])
# convert to radians if necessary
if mobility_type == 'revolute':
mobility_a = math.pi*jointData['limit']['a'] / 180.0
mobility_b = math.pi*jointData['limit']['b'] / 180.0
else:
assert mobility_type == 'prismatic'
mobility_a = jointData['limit']['a']
mobility_b = jointData['limit']['b']
rot_origin.append(origin)
rot_axis.append(axis)
rot_type.append(mobility_type)
limit_a.append(mobility_a)
limit_b.append(mobility_b)
return rot_origin, rot_axis, rot_type, limit_a, limit_b, contact_list
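# Illustrative sketch of the motion.json layout that load_motion expects
# (inferred from the parsing above; key names and values are made up):
#
# {
#   "part_0": {
#     "type": "revolute",                          # or "prismatic"
#     "axis": {"origin": [0, 0, 0], "direction": [0, 0, 1]},
#     "limit": {"a": 0, "b": 90},                  # degrees for revolute, length units for prismatic
#     "contact": [12, 345]                         # candidate object contact vertex ids
#   },
#   "part_1": null                                 # non-movable parts are null
# }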
def save_object(id):
global obj_verts_dict
global obj_faces_dict
global save_path_mesh
verts = obj_verts_dict[str(id+1)]
faces = obj_faces_dict[str(id+1)]
path = os.path.join(save_path_mesh, str(id+1)+'_object.obj')
save_obj(path, torch.from_numpy(verts), torch.from_numpy(faces))
def save_human(id):
global human_verts_dict
global human_faces_dict
global save_path_mesh
verts = human_verts_dict[str(id+1)]
faces = human_faces_dict[str(id+1)]
path = os.path.join(save_path_mesh, str(id+1)+'_person.obj')
save_obj(path, torch.from_numpy(verts), torch.from_numpy(faces))
def save_meshes(meshes, save_folder, video_name, title):
global obj_verts_dict
global obj_faces_dict
global human_verts_dict
global human_faces_dict
global save_path_mesh
save_path_mesh = os.path.join(save_folder, title)
if not os.path.exists(save_path_mesh):
os.makedirs(save_path_mesh)
obj_meshes = meshes['obj_mesh']
spin_meshes = meshes['spin_mesh']
# merge object + SPIN meshes
obj_verts = {}
obj_faces = {}
human_verts = {}
human_faces = {}
for idx in range(len(obj_meshes)):
path = os.path.join(save_path_mesh, str(idx+1)+'_person.obj')
save_obj(path, spin_meshes[idx].verts_list()[0], spin_meshes[idx].faces_list()[0])
path = os.path.join(save_path_mesh, str(idx+1)+'_object.obj')
save_obj(path, obj_meshes[idx].verts_list()[0], obj_meshes[idx].faces_list()[0])
eft_cmd = 'python -m demo.demo_bodymocapnewnew --render solid --videoname '+video_name+' --vPath '+save_folder
os.chdir('/local-scratch/projects/d3dhoi/eft')
os.system(eft_cmd)
os.chdir('/local-scratch/projects/d3dhoi')
'''
save_path = os.path.join(save_folder, 'eft', 'front')
ffmpeg_cmd = 'ffmpeg -r 3 -i '+save_path+'/scene_%08d.jpg '+save_folder+'/frontview.mp4'
os.system(ffmpeg_cmd)
'''
return
def save_parameters(model, save_path):
if not os.path.exists(save_path):
os.makedirs(save_path)
obj_offset = model.obj_offset.detach().cpu().numpy()
x_dim = model.x_dim.item()
y_dim = model.y_dim.item()
z_dim = model.z_dim.item()
    obj_rot_angle = model.obj_rot_angle.detach().cpu().numpy() #(1,6) 6D rotation representation
part_motion = model.part_motion.detach().cpu().numpy()
parameters = {}
parameters['obj_offset'] = obj_offset
parameters['obj_dim'] = [x_dim, y_dim, z_dim]
parameters['obj_rot_angle'] = obj_rot_angle
parameters['part_motion'] = part_motion
np.save(os.path.join(save_path, 'params.npy'), parameters)
return
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b, c, d = -axis * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(3,3)
rot_mat[0,0] = aa + bb - cc - dd
rot_mat[0,1] = 2 * (bc + ad)
rot_mat[0,2] = 2 * (bd - ac)
rot_mat[1,0] = 2 * (bc - ad)
rot_mat[1,1] = aa + cc - bb - dd
rot_mat[1,2] = 2 * (cd + ab)
rot_mat[2,0] = 2 * (bd + ac)
rot_mat[2,1] = 2 * (cd - ab)
rot_mat[2,2] = aa + dd - bb - cc
return rot_mat
def rotation_matrix_batch(axis, theta, device):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b = -axis[0] * torch.sin(theta / 2.0)
c = -axis[1] * torch.sin(theta / 2.0)
d = -axis[2] * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(aa.shape[0],3,3).to(device)
rot_mat[:,0,0] = aa + bb - cc - dd
rot_mat[:,0,1] = 2 * (bc + ad)
rot_mat[:,0,2] = 2 * (bd - ac)
rot_mat[:,1,0] = 2 * (bc - ad)
rot_mat[:,1,1] = aa + cc - bb - dd
rot_mat[:,1,2] = 2 * (cd + ab)
rot_mat[:,2,0] = 2 * (bd + ac)
rot_mat[:,2,1] = 2 * (cd - ab)
rot_mat[:,2,2] = aa + dd - bb - cc
return rot_mat
|
d3d-hoi-main
|
optimization/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
import torch
from pytorch3d.transforms import (
so3_relative_angle,
euler_angles_to_matrix
)
from scipy.spatial.distance import cdist
import json
from utils import (
load_motion,
)
import re
import argparse
from pytorch3d.transforms.rotation_conversions import (
rotation_6d_to_matrix
)
def isfloat(x):
try:
a = float(x)
except (TypeError, ValueError):
return False
else:
return True
def isint(x):
try:
a = float(x)
b = int(a)
except (TypeError, ValueError):
return False
else:
return a == b
parser = argparse.ArgumentParser()
parser.add_argument('--cad_path', type=str, help="experiment cad folder")
parser.add_argument('--result_folder', type=str, help="experiment result folder")
parser.add_argument('--data_path', type=str, help="experiment data folder")
parser.add_argument('--scale', type=float)
args = parser.parse_args()
cad_path = args.cad_path
result_folder = args.result_folder
anno_path = args.data_path
videos = sorted([(f.name, f.path) for f in os.scandir(result_folder)])
results = {}
# loop through all videos
for idx, video in enumerate(videos):
vidname = video[0]
vidpath = video[1]
cads = sorted([(f.name, f.path) for f in os.scandir(vidpath)])
if(vidname[:4]=='b001'):
category = 'dishwasher'
elif(vidname[:4]=='b003'):
category = 'laptop'
elif(vidname[:4]=='b004'):
category = 'microwave'
elif(vidname[:4]=='b005'):
category = 'refrigerator'
elif(vidname[:4]=='b006'):
category = 'trashcan'
elif(vidname[:4]=='b007'):
category = 'washingmachine'
elif(vidname[:4]=='b008'):
category = 'storage_revolute'
elif(vidname[:4]=='b108'):
category = 'storage_prismatic'
    elif(vidname[:4]=='b009'):
        category = 'oven'
    else:
        print('unknown category prefix for '+vidname+', skipping')
        continue
# loop through all cad models
for cad in cads:
cadname = cad[0]
cadpath = cad[1]
settings = sorted([(f.name, f.path) for f in os.scandir(cadpath)])
# loop through all settings
for setting in settings:
expname = setting[0]
exppath = setting[1]
partid = int(setting[0][0])
# load experiment meta
if not os.path.exists(os.path.join(exppath, 'params.npy')):
print('missing '+vidname +' for setting '+expname)
continue
expmeta = np.load(os.path.join(exppath, 'params.npy'), allow_pickle=True)
expmeta = expmeta.item()
# load estimated global object rotation
exp_rot_angle = torch.from_numpy(expmeta['obj_rot_angle'])
exp_rot_mat = rotation_6d_to_matrix(exp_rot_angle)
# load estimated global object translation (cm)
exp_t = expmeta['obj_offset'] * args.scale
# load estimated object dimension (cm)
exp_dim = expmeta['obj_dim']
# load estimated part motion (degree or cm)
if cadname == '45132' or cadname == '45261':
exp_partmotion = expmeta['part_motion'] * args.scale
else:
exp_partmotion = expmeta['part_motion'] * 57.296
# load gt part motion values (degree or cm)
gt_partmotion = []
fp = open(os.path.join(anno_path, category, vidname, 'jointstate.txt'))
for i, line in enumerate(fp):
line = line.strip('\n')
if isfloat(line) or isint(line):
gt_partmotion.append(float(line))
gt_partmotion = np.asarray(gt_partmotion)
with open(os.path.join(anno_path, category, vidname, '3d_info.txt')) as myfile:
gt_data = [next(myfile).strip('\n') for x in range(14)]
# GT global object rotation
gt_alpha = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[3])[0])
gt_beta = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[4])[0])
gt_gamma = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[5])[0])
gt_alpha_tensor = torch.tensor(gt_alpha).reshape(-1)
gt_beta_tensor = torch.tensor(gt_beta).reshape(-1)
gt_gamma_tensor = torch.tensor(gt_gamma).reshape(-1)
euler_angle = torch.cat((gt_alpha_tensor,gt_beta_tensor,gt_gamma_tensor),0).reshape(1,3)
rot_mat_gt = euler_angles_to_matrix(euler_angle, ["X","Y","Z"])
# GT global object translation (cm)
gt_x = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[6])[0])*100.0
gt_y = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[7])[0])*100.0
gt_z = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[8])[0])*100.0
# GT object dimension (cm)
gt_xdim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[0])
gt_ydim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[1])
gt_zdim = float(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[9])[2])
gt_cad = re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[10])[0]
gt_part = int(re.findall(r"[-+]?\d*\.\d+|\d+", gt_data[11])[0])
# CAD model correctness
correctness = gt_cad==cadname #and gt_part == partid
# Avg part motion abs error (degree or cm)
motion_error = np.mean(np.abs(gt_partmotion - exp_partmotion))
            # Global object rotation error [relative angle (in degrees) between the rotation matrices in SO(3)]
R_dist = (so3_relative_angle(rot_mat_gt, exp_rot_mat, cos_angle=False).numpy()*57.296)[0]
# Global object translation error (in cm)
x_error = np.square(gt_x - exp_t[0])
y_error = np.square(gt_y - exp_t[1])
z_error = np.square(gt_z - exp_t[2])
T_dist = np.sqrt(x_error+y_error+z_error)
# Avg object dimension abs error (in cm)
xdim_error = np.abs(gt_xdim - exp_dim[0])
ydim_error = np.abs(gt_ydim - exp_dim[1])
zdim_error = np.abs(gt_zdim - exp_dim[2])
dim_error = (xdim_error + ydim_error + zdim_error)/3.0
# print per video result
with open(os.path.join(os.path.dirname(result_folder),"result.txt"), 'a') as f:
print(vidname+': ', file=f)
print('model: '+str(cadname)+', part: '+str(partid), file=f)
print('correctness: '+str(correctness), file=f)
print('orientation (degree): '+str(round(R_dist,4)), file=f)
print('location (cm): '+str(round(T_dist,4)), file=f)
if cadname == '45132' or cadname == '45261':
print('motion (cm): '+str(round(motion_error,4)), file=f)
else:
print('motion (degree): '+str(round(motion_error,4)), file=f)
print('dimension (cm): '+str(round(dim_error,4)), file=f)
print('--------------------------', file=f)
if not category in results:
results[category] = {}
results[category]['correctness'] = []
results[category]['orientation'] = []
results[category]['location'] = []
results[category]['motion'] = []
results[category]['dimension'] = []
results[category]['correctness'].append(int(correctness))
if not correctness:
continue
results[category]['orientation'].append(R_dist)
results[category]['location'].append(T_dist)
results[category]['motion'].append(motion_error)
results[category]['dimension'].append(dim_error)
# per-category results:
for key, value in results.items():
correct_percent = sum(value['correctness'])/len(value['correctness'])*100.0
motion_mean = sum(value['motion'])/len(value['motion'])
oriens_mean = sum(value['orientation'])/len(value['orientation'])
locs_mean = sum(value['location'])/len(value['location'])
dims_mean = sum(value['dimension'])/len(value['dimension'])
with open(os.path.join(os.path.dirname(result_folder),"result.txt"), 'a') as f:
print('--------------------------', file=f)
print(key+' model correctness: '+str(correct_percent)+'%', file=f)
print('motion_mean: '+str(motion_mean), file=f)
print('orientation_mean: '+str(oriens_mean), file=f)
print('location_mean: '+str(locs_mean), file=f)
print('dimension_mean: '+str(dims_mean), file=f)
print('--------------------------', file=f)
|
d3d-hoi-main
|
optimization/evaluate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from skimage import io
from torch.utils.data import Dataset
import json
import os
import numpy as np
import torch
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from PIL import Image
import cv2
from natsort import natsorted
from utils import planeFit
from numpy.linalg import norm
import glob
from pytorch3d.io import load_obj
class MyOwnDataset(Dataset):
""" My Own data loader. """
def __init__(self, root_dir):
"""
Args:
            root_dir (string): Directory with all the images, masks and meta files.
"""
self.img_paths = sorted(glob.glob(os.path.join(root_dir, 'frames', '*.jpg')))
self.smplv2d_paths = sorted(glob.glob(os.path.join(root_dir, 'smplv2d', '*.npy')))
self.smplmesh_paths = sorted(glob.glob(os.path.join(root_dir, 'smplmesh', '*.obj')))
self.joint3d_paths = sorted(glob.glob(os.path.join(root_dir, 'joints3d', '*.npy')))
self.objmask_paths = sorted(glob.glob(os.path.join(root_dir, 'gt_mask', '*object_mask.npy')))
# transformations
transform_list = [transforms.ToTensor()]
self.transforms = transforms.Compose(transform_list)
def correct_image_size(self,low,high):
# automatically finds a good ratio in the given range
image = np.array(Image.open(self.img_paths[0]))
img_h = image.shape[0]
img_w = image.shape[1]
img_square = max(img_h,img_w)
img_small = -1
for i in range(low, high):
if img_square % i == 0:
img_small = i
break
return img_square, img_small
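    # Worked example (illustrative): for a 1280x720 frame, img_square = 1280 and
    # correct_image_size(200, 300) returns img_small = 256, the first divisor of
    # 1280 inside [200, 300); callers treat img_small == -1 as "no valid size".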
def __len__(self):
return len(self.img_paths)
def getImgPath(self):
return self.img_paths
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
image = np.array(Image.open(self.img_paths[idx])) # load image
img_h = image.shape[0]
img_w = image.shape[1]
square_len = max(img_h, img_w) # compute add in region for square
new_s = int((max(img_h, img_w) - min(img_h, img_w))/2)-1
add_l = min(img_h, img_w)
objmask = np.load(self.objmask_paths[idx]).astype(np.uint8)
smplv2d = np.load(self.smplv2d_paths[idx]) # load 2D points
joint3d = np.load(self.joint3d_paths[idx])
        if (joint3d.shape[0] == 147):
            # no available frame: fall back to zero normals
            normal = np.zeros((3))
            normal2 = np.zeros((3))
else:
# estimate the body fitting plane and its normal vector
joints_np = np.transpose(joint3d) # (3xN)
lhip_to_rShoulder = joint3d[33] - joint3d[28]
rhip_to_lShoulder = joint3d[34] - joint3d[27]
normal = np.cross(lhip_to_rShoulder, rhip_to_lShoulder)
normal = normal / np.sqrt(np.sum(normal**2))
arm = joint3d[31,:] - joint3d[33,:]
cos_sim = np.inner(normal, arm)/(norm(normal)*norm(arm))
if cos_sim < 0:
normal *= -1
lankle_to_rtoe = joint3d[22] - joint3d[30]
rankle_to_ltoe = joint3d[19] - joint3d[25]
normal2 = np.cross(lankle_to_rtoe, rankle_to_ltoe)
normal2 = normal2 / np.sqrt(np.sum(normal2**2))
leg = joint3d[29,:] - joint3d[30,:]
cos_sim2 = np.inner(normal2, leg)/(norm(normal2)*norm(leg))
if cos_sim2 < 0:
normal2 *= -1
# SMPL mesh
verts, faces, aux = load_obj(self.smplmesh_paths[idx])
faces = faces.verts_idx
verts = verts.float()
faces = faces.long()
joints = torch.from_numpy(joint3d).float()
normal = torch.from_numpy(normal).float()
normal2 = torch.from_numpy(normal2).float()
# apply transformations
image = self.transforms(np.uint8(image))
objmask = self.transforms(np.uint8(objmask))
objmask[objmask>0.0] = 1.0
data = {'image': image, 'objmask': objmask,
'smplv2d': smplv2d, 'ver': verts, 'f': faces,
'normal': normal, 'normal2': normal2, 'joint3d': joints}
return data
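# Illustrative, hedged usage sketch (the path below is hypothetical): optimize.py
# consumes this dataset as a single full-video batch, roughly as follows.
#
#   dataset = MyOwnDataset('/path/to/processed_video')        # hypothetical path
#   img_square, img_small = dataset.correct_image_size(200, 300)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=len(dataset))
#   batch = next(iter(loader))  # keys: 'image', 'objmask', 'smplv2d', 'ver', 'f',
#                               #       'normal', 'normal2', 'joint3d'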
|
d3d-hoi-main
|
optimization/dataloader.py
|
import os
import argparse
import ntpath
import common
import pdb
import open3d as o3d
import numpy as np
class Simplification:
"""
Perform simplification of watertight meshes.
"""
def __init__(self):
"""
Constructor.
"""
parser = self.get_parser()
self.options = parser.parse_args()
self.simplification_script = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'simplification.mlx')
def get_parser(self):
"""
Get parser of tool.
:return: parser
"""
        parser = argparse.ArgumentParser(description='Simplify a set of watertight meshes using meshlabserver.')
parser.add_argument('--in_dir', type=str, help='Path to input directory.')
parser.add_argument('--out_dir', type=str, help='Path to output directory; files within are overwritten!')
return parser
def read_directory(self, directory):
"""
Read directory.
:param directory: path to directory
:return: list of files
"""
files = []
for filename in os.listdir(directory):
files.append(os.path.normpath(os.path.join(directory, filename)))
return files
def run(self):
"""
Run simplification.
"""
if not os.path.exists(self.options.in_dir):
return
common.makedir(self.options.out_dir)
files = self.read_directory(self.options.in_dir)
# count number of faces
num_faces = []
for filepath in files:
mesh = o3d.io.read_triangle_mesh(filepath)
faces = np.asarray(mesh.triangles).shape[0]
num_faces.append(faces)
num_faces = np.array(num_faces)
total_faces = np.sum(num_faces)
num_faces = np.around(2500 * (num_faces / (total_faces+0.0))).astype(int) # total 2500 faces
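        # Worked example (illustrative): two parts with 1000 and 3000 input faces
        # share the 2500-face budget proportionally, i.e. target counts of
        # around(2500*1000/4000) = 625 and around(2500*3000/4000) = 1875 faces.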
for idx, filepath in enumerate(files):
            # write a new simplification .mlx script with this mesh's target face count
with open(os.path.join(self.options.out_dir,'tmp.mlx'), 'w') as out_file:
with open(self.simplification_script, 'r') as in_file:
Lines = in_file.readlines()
for count, line in enumerate(Lines):
# modify target face number according to ratio
if count == 3:
front = line[:51]
back = line[57:]
line = front+"\""+str(num_faces[idx])+"\""+back
out_file.write(line)
os.system('meshlabserver -i %s -o %s -s %s' % (
filepath,
os.path.join(self.options.out_dir, ntpath.basename(filepath)),
os.path.join(self.options.out_dir,'tmp.mlx')
))
if __name__ == '__main__':
app = Simplification()
app.run()
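# Illustrative invocation (assumptions: meshlabserver is on PATH and
# simplification.mlx sits next to this script; the paths below are hypothetical):
#   python 3_simplify.py --in_dir watertight_meshes/ --out_dir simplified_meshes/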
|
d3d-hoi-main
|
preprocess/3_simplify.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
import torch
import numpy as np
from tqdm import tqdm_notebook
import imageio
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
import pdb
import glob
import natsort
from torch.autograd import Variable
import trimesh
import copy
import re
# io utils
from pytorch3d.io import load_obj, save_obj, save_ply, load_ply
# datastructures
from pytorch3d.structures import Meshes
# 3D transformations functions
from pytorch3d.transforms import Rotate, Translate
# rendering components
from pytorch3d.renderer import (
OpenGLPerspectiveCameras, look_at_view_transform, look_at_rotation,
RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights, HardFlatShader, DirectionalLights, cameras
)
import json
import csv
import open3d as o3d
device = torch.device("cuda:0")
torch.cuda.set_device(device)
# helper function for computing rotation matrix in 3D
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = axis / torch.sqrt(torch.dot(axis, axis))
a = torch.cos(theta / 2.0)
b, c, d = -axis * torch.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
rot_mat = torch.empty(3,3)
rot_mat[0,0] = aa + bb - cc - dd
rot_mat[0,1] = 2 * (bc + ad)
rot_mat[0,2] = 2 * (bd - ac)
rot_mat[1,0] = 2 * (bc - ad)
rot_mat[1,1] = aa + cc - bb - dd
rot_mat[1,2] = 2 * (cd + ab)
rot_mat[2,0] = 2 * (bd + ac)
rot_mat[2,1] = 2 * (cd - ab)
rot_mat[2,2] = aa + dd - bb - cc
return rot_mat
# helper function for loading and merging meshes
def merge_meshes(obj_path):
verts_list = torch.empty(0,3)
faces_list = torch.empty(0,3).long()
num_vtx = [0]
# merge meshes, load in ascending order
meshes = natsort.natsorted(glob.glob(obj_path+'/final/*_rescaled_sapien.obj'))
for part_mesh in meshes:
print('loading %s' %part_mesh)
mesh = o3d.io.read_triangle_mesh(part_mesh)
verts = torch.from_numpy(np.asarray(mesh.vertices)).float()
faces = torch.from_numpy(np.asarray(mesh.triangles)).long()
faces = faces + verts_list.shape[0]
verts_list = torch.cat([verts_list, verts])
faces_list = torch.cat([faces_list, faces])
num_vtx.append(verts_list.shape[0])
verts_list = verts_list.to(device)
faces_list = faces_list.to(device)
return verts_list, faces_list, num_vtx
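# Illustration of the face-index offset used in merge_meshes (hypothetical toy
# tensors, defined but never called): when two meshes are stacked into a single
# vertex buffer, the second mesh's face indices must be shifted by the number of
# vertices already stored.
def _face_offset_example():
    verts_a = torch.zeros(4, 3)                              # mesh A: 4 vertices
    faces_a = torch.tensor([[0, 1, 2], [0, 2, 3]])
    verts_b = torch.ones(3, 3)                               # mesh B: 3 vertices
    faces_b = torch.tensor([[0, 1, 2]]) + verts_a.shape[0]   # indices now start at 4
    verts = torch.cat([verts_a, verts_b])
    faces = torch.cat([faces_a, faces_b])
    assert faces.max().item() == verts.shape[0] - 1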
cad_folder = 'test' # cad data folder (after mesh fusion)
cad_classes = [f.name for f in os.scandir(cad_folder)]
for cad_category in cad_classes:
folder_path = os.path.join(cad_folder, cad_category)
object_paths = [f.path for f in os.scandir(folder_path)]
for obj_path in object_paths:
print('processing %s' % obj_path)
# load merged mesh and number of vtx for each part
verts_list, faces_list, num_vtx = merge_meshes(obj_path)
# load motion json file
with open(os.path.join(obj_path, 'motion.json')) as json_file:
motion = json.load(json_file)
# create gif writer
filename_output = os.path.join(obj_path, 'motion.gif')
writer = imageio.get_writer(filename_output, mode='I', duration=0.3)
vis = o3d.visualization.Visualizer()
vis.create_window(height=500, width=500)
distance = 2.4 # distance from camera to the object
elevation = 25 # angle of elevation in degrees
azimuth = 20 # azimuth angle in degrees
# at least render one frame
if len(motion) == 0:
motion['placeholder'] = {}
# rotate or translate individual part
for idx, key in enumerate(motion.keys()):
jointData = motion[key]
# rotation part
if jointData and jointData['type'] == 'revolute':
start = num_vtx[idx]
end = num_vtx[idx+1]
rot_orig = torch.FloatTensor(jointData['axis']['origin']).to(device)
rot_axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
aa = math.pi*jointData['limit']['a'] / 180.0
bb = math.pi*jointData['limit']['b'] / 180.0
print(aa)
print(bb)
rot_angles = np.linspace(aa, bb, num=5)
rot_angles_rev = np.linspace(bb, aa, num=5)
angles = np.concatenate((rot_angles, rot_angles_rev),0)
for angle in angles:
verts = verts_list.clone()
faces = faces_list.clone()
# world coordinate to local coordinate (rotation origin)
verts[start:end, 0] -= rot_orig[0]
verts[start:end, 1] -= rot_orig[1]
verts[start:end, 2] -= rot_orig[2]
# rotate around the joint axis by the current angle
init_value = torch.tensor([angle])
theta = Variable(init_value.cuda())
rot_mat = rotation_matrix(rot_axis, theta).float() # 3x3
verts[start:end,:] = torch.t(torch.mm(rot_mat.to(device),
torch.t(verts[start:end,:])))
# local coordinate to world coordinate
verts[start:end, 0] += rot_orig[0]
verts[start:end, 1] += rot_orig[1]
verts[start:end, 2] += rot_orig[2]
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
MM = R.compose(T)
opt_mesh = o3d.geometry.TriangleMesh()
# transform
tmp = MM.transform_points(verts).detach().cpu().numpy()
tmp[:,0] *= -1
tmp[:,2] *= -1
# visualize
opt_mesh.vertices = o3d.utility.Vector3dVector(tmp)
opt_mesh.triangles = o3d.utility.Vector3iVector(faces_list.cpu().numpy())
opt_mesh.compute_vertex_normals()
vis.clear_geometries()
vis.add_geometry(opt_mesh)
vis.poll_events()
img = np.asarray(vis.capture_screen_float_buffer(True))
image = img_as_ubyte(img)
writer.append_data(image)
# translation part
elif jointData and jointData['type'] == 'prismatic':
start = num_vtx[idx]
end = num_vtx[idx+1]
trans_orig = torch.FloatTensor(jointData['axis']['origin']).to(device)
trans_axis = torch.FloatTensor(jointData['axis']['direction']).to(device)
aa = jointData['limit']['a']
bb = jointData['limit']['b']
trans_len = np.linspace(aa, bb, num=5)
trans_len_rev = np.linspace(bb, aa, num=5)
trans_lens = np.concatenate((trans_len, trans_len_rev),0)
for tran_len in trans_lens:
verts = verts_list.clone()
faces = faces_list.clone()
# world coordinate to local coordinate (translation origin)
verts[start:end, 0] -= trans_orig[0]
verts[start:end, 1] -= trans_orig[1]
verts[start:end, 2] -= trans_orig[2]
# add value in translation direction
verts[start:end, 0] += (trans_axis[0] * tran_len)
verts[start:end, 1] += (trans_axis[1] * tran_len)
verts[start:end, 2] += (trans_axis[2] * tran_len)
# local coordinate to world coordinate
verts[start:end, 0] += trans_orig[0]
verts[start:end, 1] += trans_orig[1]
verts[start:end, 2] += trans_orig[2]
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
MM = R.compose(T)
opt_mesh = o3d.geometry.TriangleMesh()
# transform
tmp = MM.transform_points(verts).detach().cpu().numpy()
tmp[:,0] *= -1
tmp[:,2] *= -1
# visualize
opt_mesh.vertices = o3d.utility.Vector3dVector(tmp)
opt_mesh.triangles = o3d.utility.Vector3iVector(faces_list.cpu().numpy())
opt_mesh.compute_vertex_normals()
vis.clear_geometries()
vis.add_geometry(opt_mesh)
vis.poll_events()
img = np.asarray(vis.capture_screen_float_buffer(True))
image = img_as_ubyte(img)
writer.append_data(image)
# no motion
else:
assert not jointData
# world --> view coordinate
R, T = look_at_view_transform(distance, elevation, azimuth, device=device)
T = Translate(T, device=T.device)
R = Rotate(R, device=R.device)
MM = R.compose(T)
opt_mesh = o3d.geometry.TriangleMesh()
# transform
tmp = MM.transform_points(verts_list).detach().cpu().numpy()
tmp[:,0] *= -1
tmp[:,2] *= -1
# visualize
opt_mesh.vertices = o3d.utility.Vector3dVector(tmp)
opt_mesh.triangles = o3d.utility.Vector3iVector(faces_list.cpu().numpy())
opt_mesh.compute_vertex_normals()
vis.clear_geometries()
vis.add_geometry(opt_mesh)
vis.poll_events()
img = np.asarray(vis.capture_screen_float_buffer(True))
image = img_as_ubyte(img)
writer.append_data(image)
vis.destroy_window()
writer.close()
|
d3d-hoi-main
|
preprocess/visualize_data.py
|
import math
import numpy as np
import os
from scipy import ndimage
import common
import argparse
import ntpath
# Import shipped libraries.
import librender
import libmcubes
use_gpu = True
if use_gpu:
import libfusiongpu as libfusion
from libfusiongpu import tsdf_gpu as compute_tsdf
else:
import libfusioncpu as libfusion
from libfusioncpu import tsdf_cpu as compute_tsdf
class Fusion:
"""
Performs TSDF fusion.
"""
def __init__(self):
"""
Constructor.
"""
parser = self.get_parser()
self.options = parser.parse_args()
self.render_intrinsics = np.array([
self.options.focal_length_x,
self.options.focal_length_y,
self.options.principal_point_x,
self.options.principal_point_y
], dtype=float)
# Essentially the same as above, just a slightly different format.
self.fusion_intrisics = np.array([
[self.options.focal_length_x, 0, self.options.principal_point_x],
[0, self.options.focal_length_y, self.options.principal_point_y],
[0, 0, 1]
])
self.image_size = np.array([
self.options.image_height,
self.options.image_width,
], dtype=np.int32)
# Mesh will be centered at (0, 0, 1)!
self.znf = np.array([
1 - 0.75,
1 + 0.75
], dtype=float)
# Derive voxel size from resolution.
self.voxel_size = 1./self.options.resolution
self.truncation = self.options.truncation_factor*self.voxel_size
def get_parser(self):
"""
Get parser of tool.
:return: parser
"""
parser = argparse.ArgumentParser(description='Render and fuse a set of meshes stored as OFF files.')
parser.add_argument('--mode', type=str, default='render', help='Operation mode: render or fuse.')
parser.add_argument('--in_dir', type=str, help='Path to input directory.')
parser.add_argument('--depth_dir', type=str, help='Path to depth directory; files are overwritten!')
parser.add_argument('--out_dir', type=str, help='Path to output directory; files within are overwritten!')
parser.add_argument('--n_views', type=int, default=100, help='Number of views per model.')
parser.add_argument('--image_height', type=int, default=640, help='Depth image height.')
parser.add_argument('--image_width', type=int, default=640, help='Depth image width.')
parser.add_argument('--focal_length_x', type=float, default=640, help='Focal length in x direction.')
parser.add_argument('--focal_length_y', type=float, default=640, help='Focal length in y direction.')
parser.add_argument('--principal_point_x', type=float, default=320, help='Principal point location in x direction.')
parser.add_argument('--principal_point_y', type=float, default=320, help='Principal point location in y direction.')
parser.add_argument('--depth_offset_factor', type=float, default=1.5, help='The depth maps are offsetted using depth_offset_factor*voxel_size.')
parser.add_argument('--resolution', type=float, default=256, help='Resolution for fusion.')
parser.add_argument('--truncation_factor', type=float, default=10, help='Truncation for fusion is derived as truncation_factor*voxel_size.')
return parser
def read_directory(self, directory):
"""
Read directory.
:param directory: path to directory
:return: list of files
"""
files = []
for filename in os.listdir(directory):
files.append(os.path.normpath(os.path.join(directory, filename)))
return files
def get_points(self):
"""
See https://stackoverflow.com/questions/9600801/evenly-distributing-n-points-on-a-sphere.
:param n_points: number of points
:type n_points: int
:return: list of points
:rtype: numpy.ndarray
"""
rnd = 1.
points = []
offset = 2. / self.options.n_views
increment = math.pi * (3. - math.sqrt(5.));
for i in range(self.options.n_views):
y = ((i * offset) - 1) + (offset / 2);
r = math.sqrt(1 - pow(y, 2))
phi = ((i + rnd) % self.options.n_views) * increment
x = math.cos(phi) * r
z = math.sin(phi) * r
points.append([x, y, z])
# visualization.plot_point_cloud(np.array(points))
return np.array(points)
def get_views(self):
"""
Generate a set of views to generate depth maps from.
:param n_views: number of views per axis
:type n_views: int
:return: rotation matrices
:rtype: [numpy.ndarray]
"""
Rs = []
points = self.get_points()
for i in range(points.shape[0]):
# https://math.stackexchange.com/questions/1465611/given-a-point-on-a-sphere-how-do-i-find-the-angles-needed-to-point-at-its-ce
longitude = - math.atan2(points[i, 0], points[i, 1])
latitude = math.atan2(points[i, 2], math.sqrt(points[i, 0] ** 2 + points[i, 1] ** 2))
R_x = np.array([[1, 0, 0], [0, math.cos(latitude), -math.sin(latitude)], [0, math.sin(latitude), math.cos(latitude)]])
R_y = np.array([[math.cos(longitude), 0, math.sin(longitude)], [0, 1, 0], [-math.sin(longitude), 0, math.cos(longitude)]])
R = R_y.dot(R_x)
Rs.append(R)
return Rs
def render(self, mesh, Rs):
"""
Render the given mesh using the generated views.
:param base_mesh: mesh to render
:type base_mesh: mesh.Mesh
:param Rs: rotation matrices
:type Rs: [numpy.ndarray]
:return: depth maps
:rtype: numpy.ndarray
"""
depthmaps = []
for i in range(len(Rs)):
np_vertices = Rs[i].dot(mesh.vertices.astype(np.float64).T)
np_vertices[2, :] += 1
np_faces = mesh.faces.astype(np.float64)
np_faces += 1
depthmap, mask, img = librender.render(np_vertices.copy(), np_faces.T.copy(), self.render_intrinsics, self.znf, self.image_size)
# This is mainly result of experimenting.
# The core idea is that the volume of the object is enlarged slightly
# (by subtracting a constant from the depth map).
# Dilation additionally enlarges thin structures (e.g. for chairs).
depthmap -= self.options.depth_offset_factor * self.voxel_size
depthmap = ndimage.morphology.grey_erosion(depthmap, size=(3, 3))
depthmaps.append(depthmap)
return depthmaps
def fusion(self, depthmaps, Rs):
"""
Fuse the rendered depth maps.
:param depthmaps: depth maps
:type depthmaps: numpy.ndarray
:param Rs: rotation matrices corresponding to views
:type Rs: [numpy.ndarray]
:return: (T)SDF
:rtype: numpy.ndarray
"""
Ks = self.fusion_intrisics.reshape((1, 3, 3))
Ks = np.repeat(Ks, len(depthmaps), axis=0).astype(np.float32)
Ts = []
for i in range(len(Rs)):
Rs[i] = Rs[i]
Ts.append(np.array([0, 0, 1]))
Ts = np.array(Ts).astype(np.float32)
Rs = np.array(Rs).astype(np.float32)
depthmaps = np.array(depthmaps).astype(np.float32)
views = libfusion.PyViews(depthmaps, Ks, Rs, Ts)
# Note that this is an alias defined as libfusiongpu.tsdf_gpu or libfusioncpu.tsdf_cpu!
return compute_tsdf(views, self.options.resolution, self.options.resolution, self.options.resolution,
self.voxel_size, self.truncation, False)
def run(self):
"""
Run the tool.
"""
if self.options.mode == 'render':
self.run_render()
elif self.options.mode == 'fuse':
self.run_fuse()
else:
print('Invalid mode, choose render or fuse.')
exit()
def run_render(self):
"""
Run rendering.
"""
assert os.path.exists(self.options.in_dir)
common.makedir(self.options.depth_dir)
files = self.read_directory(self.options.in_dir)
timer = common.Timer()
Rs = self.get_views()
for filepath in files:
timer.reset()
mesh = common.Mesh.from_off(filepath)
depths = self.render(mesh, Rs)
depth_file = os.path.join(self.options.depth_dir, os.path.basename(filepath) + '.h5')
common.write_hdf5(depth_file, np.array(depths))
print('[Data] wrote %s (%f seconds)' % (depth_file, timer.elapsed()))
def run_fuse(self):
"""
Run fusion.
"""
assert os.path.exists(self.options.depth_dir)
common.makedir(self.options.out_dir)
files = self.read_directory(self.options.depth_dir)
timer = common.Timer()
Rs = self.get_views()
for filepath in files:
# As rendering might be slower, we wait for rendering to finish.
# This allows rendering and fusion to run in parallel (more or less).
depths = common.read_hdf5(filepath)
timer.reset()
tsdf = self.fusion(depths, Rs)
tsdf = tsdf[0]
vertices, triangles = libmcubes.marching_cubes(-tsdf, 0)
vertices /= self.options.resolution
vertices -= 0.5
off_file = os.path.join(self.options.out_dir, ntpath.basename(filepath)[:-3])
libmcubes.export_off(vertices, triangles, off_file)
print('[Data] wrote %s (%f seconds)' % (off_file, timer.elapsed()))
if __name__ == '__main__':
app = Fusion()
app.run()
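# Standalone sketch of the spiral-on-the-sphere sampling used by Fusion.get_points
# (illustrative only; n_views is a plain argument here instead of the parsed
# option, and the function is never called). Every generated view point lies on
# the unit sphere, which is what makes the simple look-at geometry above work.
def _sphere_points_sketch(n_views=16):
    rnd = 1.0
    offset = 2.0 / n_views
    increment = math.pi * (3.0 - math.sqrt(5.0))   # golden angle
    points = []
    for i in range(n_views):
        y = (i * offset) - 1 + (offset / 2)
        r = math.sqrt(1 - y * y)
        phi = ((i + rnd) % n_views) * increment
        points.append([math.cos(phi) * r, y, math.sin(phi) * r])
    points = np.array(points)
    assert np.allclose(np.linalg.norm(points, axis=1), 1.0)
    return points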
|
d3d-hoi-main
|
preprocess/2_fusion.py
|
import os
import subprocess
from tqdm import tqdm
from multiprocessing import Pool
def convert(obj_path):
try:
load_folder = os.path.join(obj_path, 'parts_ply')
save_folder = os.path.join(obj_path, 'parts_off')
part_paths = [f.path for f in os.scandir(load_folder)]
if not os.path.exists(save_folder):
os.makedirs(save_folder)
for part in part_paths:
target_mesh = save_folder+'/'+part[-5:-3]+'off'
subprocess.run(["meshlabserver", "-i", part, "-o", target_mesh])
except Exception as ex:
return
cad_folder = './cad_sapien'
cad_classes = [f.name for f in os.scandir(cad_folder)]
for cad_category in cad_classes:
folder_path = os.path.join(cad_folder, cad_category)
object_paths = [f.path for f in os.scandir(folder_path)]
# Parallel
threads = 16 # number of threads in your computer
convert_iter = Pool(threads).imap(convert, object_paths)
for _ in tqdm(convert_iter, total=len(object_paths)):
pass
|
d3d-hoi-main
|
preprocess/convert_off.py
|
import os
import pdb
import subprocess
import scandir
from multiprocessing import Pool
import json
import common
def remesh(obj_path):
in_dir = os.path.join(obj_path, 'parts_off/')
scaled_dir = os.path.join(obj_path, 'parts_scaled_off/')
depth_dir = os.path.join(obj_path, 'parts_depth_off/')
fused_dir = os.path.join(obj_path, 'parts_watertight_off/')
out_dir = os.path.join(obj_path, 'parts_out_off/')
final_dir = os.path.join(obj_path, 'final/')
rescale_dir = os.path.join(obj_path, 'rescale/')
# scale to .5 cube
subprocess.call(["python", "1_scale.py", "--in_dir", in_dir, "--out_dir", scaled_dir])
# re-mesh using tsdf
subprocess.call(["python", "2_fusion.py", "--mode", "render", "--in_dir", scaled_dir, "--depth_dir", depth_dir, "--out_dir", fused_dir])
subprocess.call(["python", "2_fusion.py", "--mode", "fuse", "--in_dir", scaled_dir, "--depth_dir", depth_dir, "--out_dir", fused_dir])
# simplify mesh
subprocess.call(["python", "3_simplify.py", "--in_dir", fused_dir, "--out_dir", out_dir])
if not os.path.exists(final_dir):
os.makedirs(final_dir)
for file in os.listdir(rescale_dir):
if file.endswith("rescale.json"):
with open(os.path.join(rescale_dir, file)) as json_file:
# load rescale value
rescale_dict = json.load(json_file)
scales = (1.0/rescale_dict['scales'][0], 1.0/rescale_dict['scales'][1], 1.0/rescale_dict['scales'][2])
translation = (-rescale_dict['translation'][2], -rescale_dict['translation'][1], -rescale_dict['translation'][0])
# load mesh
mesh = common.Mesh.from_off(os.path.join(out_dir, file[0]+'.off'))
# apply rescaling
mesh.scale(scales)
mesh.translate(translation)
mesh.to_off(os.path.join(final_dir, file[0]+'_rescaled.off'))
# change axis
apply_script = "change_axis.mlx"
source_mesh = os.path.join(final_dir, file[0]+'_rescaled.off')
target_mesh = os.path.join(final_dir, file[0]+'_rescaled_sapien.off')
subprocess.call(["meshlabserver", "-i", source_mesh, "-o", target_mesh, "-s", apply_script])
# convert to obj
source_mesh = os.path.join(final_dir, file[0]+'_rescaled_sapien.off')
target_mesh = os.path.join(final_dir, file[0]+'_rescaled_sapien.obj')
subprocess.call(["meshlabserver", "-i", source_mesh, "-o", target_mesh])
return
cad_folder = 'test' # cad data path (after convert_off)
cad_classes = [f.name for f in scandir.scandir(cad_folder)]
Processors = 10 # n of processors you want to use
for cad_category in cad_classes:
folder_path = os.path.join(cad_folder, cad_category)
object_paths = [f.path for f in scandir.scandir(folder_path)]
pool = Pool(processes=Processors)
pool.map(remesh, object_paths)
print('All jobs finished...')
|
d3d-hoi-main
|
preprocess/re-meshing.py
|
"""
Some I/O utilities.
"""
import os
import time
import h5py
import math
import numpy as np
def write_hdf5(file, tensor, key = 'tensor'):
"""
Write a simple tensor, i.e. a numpy array, to HDF5.
:param file: path to file to write
:type file: str
:param tensor: tensor to write
:type tensor: numpy.ndarray
:param key: key to use for tensor
:type key: str
"""
assert type(tensor) == np.ndarray, 'expects numpy.ndarray'
h5f = h5py.File(file, 'w')
chunks = list(tensor.shape)
if len(chunks) > 2:
chunks[2] = 1
if len(chunks) > 3:
chunks[3] = 1
if len(chunks) > 4:
chunks[4] = 1
h5f.create_dataset(key, data = tensor, chunks = tuple(chunks), compression = 'gzip')
h5f.close()
def read_hdf5(file, key = 'tensor'):
"""
Read a tensor, i.e. numpy array, from HDF5.
:param file: path to file to read
:type file: str
:param key: key to read
:type key: str
:return: tensor
:rtype: numpy.ndarray
"""
assert os.path.exists(file), 'file %s not found' % file
h5f = h5py.File(file, 'r')
assert key in h5f.keys(), 'key %s not found in file %s' % (key, file)
tensor = h5f[key][()]
h5f.close()
return tensor
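# A minimal round-trip sketch for write_hdf5/read_hdf5 (illustrative only, defined
# but never called; it writes to a fresh temporary directory so nothing is left
# behind in the working tree).
def _hdf5_roundtrip_example():
    import tempfile
    tensor = np.random.rand(2, 3, 4).astype(np.float32)
    path = os.path.join(tempfile.mkdtemp(), 'example.h5')
    write_hdf5(path, tensor)
    restored = read_hdf5(path)
    assert np.allclose(tensor, restored)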
def write_off(file, vertices, faces):
"""
Writes the given vertices and faces to OFF.
:param vertices: vertices as tuples of (x, y, z) coordinates
:type vertices: [(float)]
:param faces: faces as tuples of (num_vertices, vertex_id_1, vertex_id_2, ...)
:type faces: [(int)]
"""
num_vertices = len(vertices)
num_faces = len(faces)
assert num_vertices > 0
assert num_faces > 0
with open(file, 'w') as fp:
fp.write('OFF\n')
fp.write(str(num_vertices) + ' ' + str(num_faces) + ' 0\n')
for vertex in vertices:
assert len(vertex) == 3, 'invalid vertex with %d dimensions found (%s)' % (len(vertex), file)
fp.write(str(vertex[0]) + ' ' + str(vertex[1]) + ' ' + str(vertex[2]) + '\n')
for face in faces:
assert face[0] == 3, 'only triangular faces supported (%s)' % file
assert len(face) == 4, 'faces need to have 3 vertices, but found %d (%s)' % (len(face), file)
for i in range(len(face)):
assert face[i] >= 0 and face[i] < num_vertices, 'invalid vertex index %d (of %d vertices) (%s)' % (face[i], num_vertices, file)
fp.write(str(face[i]))
if i < len(face) - 1:
fp.write(' ')
fp.write('\n')
# add empty line to be sure
fp.write('\n')
def read_off(file):
"""
Reads vertices and faces from an off file.
:param file: path to file to read
:type file: str
:return: vertices and faces as lists of tuples
:rtype: [(float)], [(int)]
"""
assert os.path.exists(file), 'file %s not found' % file
with open(file, 'r') as fp:
lines = fp.readlines()
lines = [line.strip() for line in lines]
# Fix for a ModelNet bug where 'OFF' and the number of vertices and faces are
# all in the first line.
if len(lines[0]) > 3:
assert lines[0][:3] == 'OFF' or lines[0][:3] == 'off', 'invalid OFF file %s' % file
parts = lines[0][3:].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 1
# This is the regular case!
else:
assert lines[0] == 'OFF' or lines[0] == 'off', 'invalid OFF file %s' % file
parts = lines[1].split(' ')
assert len(parts) == 3
num_vertices = int(parts[0])
assert num_vertices > 0
num_faces = int(parts[1])
assert num_faces > 0
start_index = 2
vertices = []
for i in range(num_vertices):
vertex = lines[start_index + i].split(' ')
vertex = [float(point.strip()) for point in vertex if point != '']
assert len(vertex) == 3
vertices.append(vertex)
faces = []
for i in range(num_faces):
face = lines[start_index + num_vertices + i].split(' ')
face = [index.strip() for index in face if index != '']
# check to be sure
for index in face:
assert index != '', 'found empty vertex index: %s (%s)' % (lines[start_index + num_vertices + i], file)
face = [int(index) for index in face]
assert face[0] == len(face) - 1, 'face should have %d vertices but has %d (%s)' % (face[0], len(face) - 1, file)
assert face[0] == 3, 'only triangular meshes supported (%s)' % file
for index in face:
assert index >= 0 and index < num_vertices, 'vertex %d (of %d vertices) does not exist (%s)' % (index, num_vertices, file)
assert len(face) > 1
faces.append(face)
return vertices, faces
assert False, 'could not open %s' % file
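# For reference, a minimal (hypothetical) OFF file accepted by read_off/write_off:
#
#   OFF
#   3 1 0
#   0.0 0.0 0.0
#   1.0 0.0 0.0
#   0.0 1.0 0.0
#   3 0 1 2
#
# i.e. a header line, a "num_vertices num_faces num_edges" line, one vertex per
# line, and one face per line prefixed with its vertex count (always 3 here).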
def write_obj(file, vertices, faces):
"""
Writes the given vertices and faces to OBJ.
:param vertices: vertices as tuples of (x, y, z) coordinates
:type vertices: [(float)]
:param faces: faces as tuples of (num_vertices, vertex_id_1, vertex_id_2, ...)
:type faces: [(int)]
"""
num_vertices = len(vertices)
num_faces = len(faces)
assert num_vertices > 0
assert num_faces > 0
with open(file, 'w') as fp:
for vertex in vertices:
assert len(vertex) == 3, 'invalid vertex with %d dimensions found (%s)' % (len(vertex), file)
fp.write('v' + ' ' + str(vertex[0]) + ' ' + str(vertex[1]) + ' ' + str(vertex[2]) + '\n')
for face in faces:
assert len(face) == 3, 'only triangular faces supported (%s)' % file
fp.write('f ')
for i in range(len(face)):
assert face[i] >= 0 and face[i] < num_vertices, 'invalid vertex index %d (of %d vertices) (%s)' % (face[i], num_vertices, file)
# face indices are 1-based
fp.write(str(face[i] + 1))
if i < len(face) - 1:
fp.write(' ')
fp.write('\n')
# add empty line to be sure
fp.write('\n')
def read_obj(file):
"""
Reads vertices and faces from an obj file.
:param file: path to file to read
:type file: str
:return: vertices and faces as lists of tuples
:rtype: [(float)], [(int)]
"""
assert os.path.exists(file), 'file %s not found' % file
with open(file, 'r') as fp:
lines = fp.readlines()
lines = [line.strip() for line in lines if line.strip()]
vertices = []
faces = []
for line in lines:
parts = line.split(' ')
parts = [part.strip() for part in parts if part]
if parts[0] == 'v':
assert len(parts) == 4, \
'vertex should be of the form v x y z, but found %d parts instead (%s)' % (len(parts), file)
assert parts[1] != '', 'vertex x coordinate is empty (%s)' % file
assert parts[2] != '', 'vertex y coordinate is empty (%s)' % file
assert parts[3] != '', 'vertex z coordinate is empty (%s)' % file
vertices.append([float(parts[1]), float(parts[2]), float(parts[3])])
elif parts[0] == 'f':
assert len(parts) == 4, \
'face should be of the form f v1/vt1/vn1 v2/vt2/vn2 v3/vt3/vn3, but found %d parts (%s) instead (%s)' % (len(parts), line, file)
components = parts[1].split('/')
assert len(components) >= 1 and len(components) <= 3, \
'face component should have the forms v, v/vt or v/vt/vn, but found %d components instead (%s)' % (len(components), file)
assert components[0].strip() != '', \
'face component is empty (%s)' % file
v1 = int(components[0])
components = parts[2].split('/')
assert len(components) >= 1 and len(components) <= 3, \
'face component should have the forms v, v/vt or v/vt/vn, but found %d components instead (%s)' % (len(components), file)
assert components[0].strip() != '', \
'face component is empty (%s)' % file
v2 = int(components[0])
components = parts[3].split('/')
assert len(components) >= 1 and len(components) <= 3, \
'face component should have the forms v, v/vt or v/vt/vn, but found %d components instead (%s)' % (len(components), file)
assert components[0].strip() != '', \
'face component is empty (%s)' % file
v3 = int(components[0])
#assert v1 != v2 and v2 != v3 and v1 != v3, 'degenerate face detected: %d %d %d (%s)' % (v1, v2, v3, file)
if v1 == v2 or v2 == v3 or v1 == v3:
print('[Info] skipping degenerate face in %s' % file)
else:
faces.append([v1 - 1, v2 - 1, v3 - 1]) # indices are 1-based!
else:
assert False, 'expected either vertex or face but got line: %s (%s)' % (line, file)
return vertices, faces
assert False, 'could not open %s' % file
def makedir(dir):
"""
Creates directory if it does not exist.
:param dir: directory path
:type dir: str
"""
if not os.path.exists(dir):
os.makedirs(dir)
class Mesh:
"""
Represents a mesh.
"""
def __init__(self, vertices = [[]], faces = [[]]):
"""
Construct a mesh from vertices and faces.
:param vertices: list of vertices, or numpy array
:type vertices: [[float]] or numpy.ndarray
:param faces: list of faces or numpy array, i.e. the indices of the corresponding vertices per triangular face
:type faces: [[int]] or numpy.ndarray
"""
self.vertices = np.array(vertices, dtype = float)
""" (numpy.ndarray) Vertices. """
self.faces = np.array(faces, dtype = int)
""" (numpy.ndarray) Faces. """
assert self.vertices.shape[1] == 3
assert self.faces.shape[1] == 3
def extents(self):
"""
Get the extents.
:return: (min_x, min_y, min_z), (max_x, max_y, max_z)
:rtype: (float, float, float), (float, float, float)
"""
min = [0]*3
max = [0]*3
for i in range(3):
min[i] = np.min(self.vertices[:, i])
max[i] = np.max(self.vertices[:, i])
return tuple(min), tuple(max)
def switch_axes(self, axis_1, axis_2):
"""
Switch two axes; this is usually useful for switching the y and z axes.
:param axis_1: index of first axis
:type axis_1: int
:param axis_2: index of second axis
:type axis_2: int
"""
temp = np.copy(self.vertices[:, axis_1])
self.vertices[:, axis_1] = self.vertices[:, axis_2]
self.vertices[:, axis_2] = temp
def mirror(self, axis):
"""
Mirror given axis.
:param axis: axis to mirror
:type axis: int
"""
self.vertices[:, axis] *= -1
def scale(self, scales):
"""
Scale the mesh in all dimensions.
:param scales: tuple of length 3 with scale for (x, y, z)
:type scales: (float, float, float)
"""
assert len(scales) == 3
for i in range(3):
self.vertices[:, i] *= scales[i]
def translate(self, translation):
"""
Translate the mesh.
:param translation: translation as (x, y, z)
:type translation: (float, float, float)
"""
assert len(translation) == 3
for i in range(3):
self.vertices[:, i] += translation[i]
def _rotate(self, R):
self.vertices = np.dot(R, self.vertices.T)
self.vertices = self.vertices.T
def rotate(self, rotation):
"""
Rotate the mesh.
:param rotation: rotation in (angle_x, angle_y, angle_z); angles in radians
:type rotation: (float, float, float)
:return:
"""
assert len(rotation) == 3
x = rotation[0]
y = rotation[1]
z = rotation[2]
# rotation around the x axis
R = np.array([[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]])
self._rotate(R)
# rotation around the y axis
R = np.array([[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]])
self._rotate(R)
# rotation around the z axis
R = np.array([[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]])
self._rotate(R)
def inv_rotate(self, rotation):
"""
Rotate the mesh.
:param rotation: rotation in (angle_x, angle_y, angle_z); angles in radians
:type rotation: (float, float, float)
:return:
"""
assert len(rotation) == 3
x = rotation[0]
y = rotation[1]
z = rotation[2]
# rotation around the x axis
R = np.array([[1, 0, 0], [0, math.cos(x), -math.sin(x)], [0, math.sin(x), math.cos(x)]])
R = R.T
self._rotate(R)
# rotation around the y axis
R = np.array([[math.cos(y), 0, math.sin(y)], [0, 1, 0], [-math.sin(y), 0, math.cos(y)]])
R = R.T
self._rotate(R)
# rotation around the z axis
R = np.array([[math.cos(z), -math.sin(z), 0], [math.sin(z), math.cos(z), 0], [0, 0, 1]])
R = R.T
self._rotate(R)
def copy(self):
"""
Copy the mesh.
:return: copy of the mesh
:rtype: Mesh
"""
mesh = Mesh(self.vertices.copy(), self.faces.copy())
return mesh
@staticmethod
def from_off(filepath):
"""
Read a mesh from OFF.
:param filepath: path to OFF file
:type filepath: str
:return: mesh
:rtype: Mesh
"""
vertices, faces = read_off(filepath)
real_faces = []
for face in faces:
assert len(face) == 4
real_faces.append([face[1], face[2], face[3]])
return Mesh(vertices, real_faces)
def to_off(self, filepath):
"""
Write mesh to OFF.
:param filepath: path to write file to
:type filepath: str
"""
faces = np.ones((self.faces.shape[0], 4), dtype = int)*3
faces[:, 1:4] = self.faces[:, :]
write_off(filepath, self.vertices.tolist(), faces.tolist())
@staticmethod
def from_obj(filepath):
"""
Read a mesh from OBJ.
:param filepath: path to OFF file
:type filepath: str
:return: mesh
:rtype: Mesh
"""
vertices, faces = read_obj(filepath)
return Mesh(vertices, faces)
def to_obj(self, filepath):
"""
Write mesh to OBJ file.
:param filepath: path to OBJ file
:type filepath: str
"""
write_obj(filepath, self.vertices.tolist(), self.faces.tolist())
class Timer:
"""
Simple wrapper around time.perf_counter().
"""
def __init__(self):
"""
Initialize and start timer.
"""
self.start = time.perf_counter()  # time.clock() was removed in Python 3.8
""" (float) Seconds. """
def reset(self):
"""
Reset timer.
"""
self.start = time.perf_counter()
def elapsed(self):
"""
Get elapsed time in seconds
:return: elapsed time in seconds
:rtype: float
"""
return time.perf_counter() - self.start
|
d3d-hoi-main
|
preprocess/common.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import os
import torch
import numpy as np
from tqdm import tqdm_notebook
import imageio
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
from skimage import img_as_ubyte
from tqdm import tqdm
import re
import open3d as o3d
import itertools
# io utils
from pytorch3d.io import load_obj, save_obj
# datastructures
from pytorch3d.structures import Meshes
# 3D transformations functions
from pytorch3d.transforms import Rotate, Translate
# rendering components
from pytorch3d.renderer import (
OpenGLPerspectiveCameras, look_at_view_transform, look_at_rotation,
RasterizationSettings, MeshRenderer, MeshRasterizer, BlendParams,
SoftSilhouetteShader, HardPhongShader, PointLights
)
import json
import csv
SAPIEN_FOLDER = './partnet-mobility-v0'
OUT_FOLDER = './cad_sapien'
# helper function for computing a rotation matrix in 3D
def rotation_matrix(axis, theta):
"""
Return the rotation matrix associated with counterclockwise rotation about
the given axis by theta radians.
"""
axis = np.asarray(axis)
axis = axis / math.sqrt(np.dot(axis, axis))
a = math.cos(theta / 2.0)
b, c, d = -axis * math.sin(theta / 2.0)
aa, bb, cc, dd = a * a, b * b, c * c, d * d
bc, ad, ac, ab, bd, cd = b * c, a * d, a * c, a * b, b * d, c * d
return np.array([[aa + bb - cc - dd, 2 * (bc + ad), 2 * (bd - ac)],
[2 * (bc - ad), aa + cc - bb - dd, 2 * (cd + ab)],
[2 * (bd + ac), 2 * (cd - ab), aa + dd - bb - cc]])
# helper function for traversing a tree
def traverse_tree(current_node, mesh_dict):
# further traverse the tree if not at leaf node yet
if 'children' in current_node.keys():
for idx in range(len(current_node['children'])):
traverse_tree(current_node['children'][idx], mesh_dict)
else:
# insert meshes associated with a unique part id
assert current_node['id'] not in mesh_dict.keys()
mesh_dict[current_node['id']] = current_node['objs']
return
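# Tiny illustration of traverse_tree on a hypothetical result.json-style tree
# (defined but never called): only leaf nodes carry 'objs', and each leaf id is
# mapped to its list of mesh names.
def _traverse_tree_example():
    root = {'id': 0, 'children': [
        {'id': 1, 'objs': ['part-a']},
        {'id': 2, 'children': [{'id': 3, 'objs': ['part-b', 'part-c']}]},
    ]}
    mesh_dict = {}
    traverse_tree(root, mesh_dict)
    assert mesh_dict == {1: ['part-a'], 3: ['part-b', 'part-c']}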
# helper function for loading and merging meshes
def merge_meshes(save_folder, ids, mesh_dict):
for count, part_ids in enumerate(ids):
part_meshes = [mesh_dict[x] for x in part_ids]
part_meshes = list(itertools.chain(*part_meshes))
verts_list = np.empty((0,3))
faces_list = np.empty((0,3))#.long()
for part_mesh in part_meshes:
obj_path = os.path.join(part_folder, 'textured_objs', part_mesh,)+'.obj'
# check if mesh exist
if not os.path.exists(obj_path):
print(obj_path)
continue
mesh = o3d.io.read_triangle_mesh(obj_path)
verts = np.asarray(mesh.vertices)
faces = np.asarray(mesh.triangles)
faces = faces + verts_list.shape[0]
verts_list = np.concatenate([verts_list, verts])
faces_list = np.concatenate([faces_list, faces])
mesh = o3d.geometry.TriangleMesh(vertices=o3d.utility.Vector3dVector(verts_list),
triangles=o3d.utility.Vector3iVector(faces_list))
mesh.compute_vertex_normals()
save_path = os.path.join(save_folder, 'parts_ply')
if not os.path.exists(save_path):
os.makedirs(save_path)
o3d.io.write_triangle_mesh(save_path+'/'+str(count)+'.ply', mesh)
return
part_home = SAPIEN_FOLDER
save_home = OUT_FOLDER
classes = ['StorageFurniture','Microwave','Laptop','WashingMachine','TrashCan','Oven',
'Dishwasher','Refrigerator'] # 8 categories
# Manually verify the part category
careParts = {}
careParts['Refrigerator'] = ['door', 'other_leaf', 'display_panel', 'door_frame',
'control_panel', 'glass']
careParts['Microwave'] = ['door']
careParts['Laptop'] = ['shaft', 'other_leaf', 'screen_side', 'screen', 'screen_frame']
careParts['WashingMachine'] = ['door']
careParts['TrashCan'] = ['opener', 'lid', 'drawer', 'cover', 'cover_lid',
'frame_vertical_bar', 'container', 'other_leaf']
careParts['Oven'] = ['door', 'door_frame']
careParts['Dishwasher'] = ['door', 'shelf', 'display_panel', 'door_frame']
careParts['StorageFurniture'] = ['cabinet_door', 'mirror', 'drawer', 'drawer_box',
'door', 'shelf', 'handle', 'glass', 'cabinet_door_surface',
'other_leaf', 'countertop']
careParts['Toilet'] = ['lid', 'seat']
#careParts['Table'] = ['drawer', 'cabinet_door_surface', 'drawer_box', 'handle',
#'drawer_front', 'board', 'cabinet_door', 'shelf', 'keyboard_tray_surface']
#careParts['Box'] = ['rotation_lid', 'drawer', 'countertop', 'lid_surface'] # font on top
#careParts['FoldingChair'] = ['seat']
#careParts['Suitcase'] = ['lid', 'pull-out_handle']
count = 0
# all dirIDs within this class
with open('partnetsim.models.csv', 'r') as file:
reader = csv.DictReader(file)
for row in reader:
if row['category'] in classes:
part_dir = row['category']
part_id = row['dirId']
part_folder = os.path.join(part_home, str(part_id))
save_folder = os.path.join(save_home, part_dir, str(part_id))
if not os.path.exists(save_folder):
os.makedirs(save_folder)
count+=1
# load the json file that references the part meshes
if not os.path.isfile(os.path.join(part_folder, 'result.json')):
continue
with open(os.path.join(part_folder, 'result.json')) as json_file:
part_meshes = json.load(json_file)
# traverse through a tree
mesh_dict = {}
root = part_meshes[0]
traverse_tree(root, mesh_dict)
types = []
with open(os.path.join(part_folder, 'mobility.urdf')) as f:
our_lines = f.readlines()
for line in our_lines:
myString = re.sub('\s+',' ',line)
if '<joint name=' in myString:
m_type = myString.split("type=",1)[1][1:-3]
types.append(m_type)
type_idx = 0
details = {}
details_saved = {}
# load mobility_v2 json file
with open(os.path.join(part_folder, 'mobility_v2.json')) as json_file:
mobility_parts = json.load(json_file)
print('processing %s' % part_folder)
part_div = []
for idx, joint_part in enumerate(mobility_parts):
# visual names belonging to one joint part
joint_part_names = joint_part['parts']
assert(joint_part_names) # make sure not empty
# parse ids for each part
ids = [x['id'] for x in joint_part_names]
part_div.append(ids)
# save motion information
details[str(idx)] = joint_part['jointData'].copy()
details_saved[str(idx)] = joint_part['jointData'].copy()
# set type for care part
if type_idx<len(types):
if joint_part['name'] in careParts[part_dir]:
details[str(idx)]['type'] = types[type_idx]
details_saved[str(idx)]['type'] = types[type_idx]
type_idx += 1
else:
if details[str(idx)]:
assert type_idx>=len(types)
assert joint_part['name'] not in careParts[part_dir]
# remove non-care part
if not joint_part['jointData'] or joint_part['name'] not in careParts[part_dir]:
details[str(idx)] = {}
details_saved.pop(str(idx), None)
with open(os.path.join(save_folder, 'motion.json'), 'w') as outfile:
json.dump(details_saved, outfile)
assert len(details) == len(part_div)
part_idx = 0
fix_part = []
parts = []
for key, value in details.items():
if value == {}:
fix_part.append(part_div[part_idx])
else:
parts.append(part_div[part_idx])
part_idx += 1
fix_part = list(itertools.chain(*fix_part))
parts.append(fix_part)
# load, merge, and save part mesh file
merge_meshes(save_folder, parts, mesh_dict)
print(count)
print('all done...')
|
d3d-hoi-main
|
preprocess/process_data.py
|
import os
import common
import argparse
import numpy as np
import json
class Scale:
"""
Scales a bunch of meshes.
"""
def __init__(self):
"""
Constructor.
"""
parser = self.get_parser()
self.options = parser.parse_args()
def get_parser(self):
"""
Get parser of tool.
:return: parser
"""
parser = argparse.ArgumentParser(description='Scale a set of meshes stored as OFF files.')
parser.add_argument('--in_dir', type=str, help='Path to input directory.')
parser.add_argument('--out_dir', type=str, help='Path to output directory; files within are overwritten!')
parser.add_argument('--padding', type=float, default=0.1, help='Relative padding applied on each side.')
return parser
def read_directory(self, directory):
"""
Read directory.
:param directory: path to directory
:return: list of files
"""
files = []
for filename in os.listdir(directory):
files.append(os.path.normpath(os.path.join(directory, filename)))
return files
def run(self):
"""
Run the tool, i.e. scale all found OFF files.
"""
assert os.path.exists(self.options.in_dir)
common.makedir(self.options.out_dir)
files = self.read_directory(self.options.in_dir)
for filepath in files:
mesh = common.Mesh.from_off(filepath)
# Get extents of model.
min, max = mesh.extents()
total_min = np.min(np.array(min))
total_max = np.max(np.array(max))
# Set the center (although this should usually be the origin already).
centers = (
(min[0] + max[0]) / 2,
(min[1] + max[1]) / 2,
(min[2] + max[2]) / 2
)
# Scales all dimensions equally.
sizes = (
total_max - total_min,
total_max - total_min,
total_max - total_min
)
translation = (
-centers[0],
-centers[1],
-centers[2]
)
scales = (
1 / (sizes[0] + 2 * self.options.padding * sizes[0]),
1 / (sizes[1] + 2 * self.options.padding * sizes[1]),
1 / (sizes[2] + 2 * self.options.padding * sizes[2])
)
mesh.translate(translation)
mesh.scale(scales)
print('[Data] %s extents before %f - %f, %f - %f, %f - %f' % (os.path.basename(filepath), min[0], max[0], min[1], max[1], min[2], max[2]))
min, max = mesh.extents()
print('[Data] %s extents after %f - %f, %f - %f, %f - %f' % (os.path.basename(filepath), min[0], max[0], min[1], max[1], min[2], max[2]))
# May also switch axes if necessary.
#mesh.switch_axes(1, 2)
mesh.to_off(os.path.join(self.options.out_dir, os.path.basename(filepath)))
# save parameters
rescale = {}
rescale['scales'] = scales
rescale['translation'] = translation
if not os.path.exists(self.options.out_dir[:-18]+'/rescale'):
os.makedirs(self.options.out_dir[:-18]+'/rescale')
path = self.options.out_dir[:-18]+'/rescale/'+os.path.basename(filepath)[0]+'_rescale.json'
with open(path, 'w') as outfile:
json.dump(rescale, outfile)
if __name__ == '__main__':
app = Scale()
app.run()
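# Worked example of the normalization above (hypothetical numbers): for a mesh
# with extents [-1, 1] x [-2, 2] x [-3, 3], total_min = -3 and total_max = 3, so
# sizes = (6, 6, 6) and centers = (0, 0, 0). With the default padding of 0.1 each
# axis is scaled by 1 / (6 + 2 * 0.1 * 6) = 1 / 7.2 ~= 0.139, so the largest extent
# maps to roughly [-0.417, 0.417] and the mesh fits inside the origin-centred unit
# cube with some padding to spare.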
|
d3d-hoi-main
|
preprocess/1_scale.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os
import time
import re
mode="analyze"
m=3
pre_k=1
main_k=5
def run_infer(infer_out, k, model, quiet):
total_time, total_alarms = 0, 0
try:
if not os.path.isdir(infer_out):
print(f' * Error: infer-out does not exist for {infer_out}')
exit(1)
else:
start_t = time.time()
use_model = f'--pulse-join-select {model}'
threads = "-j 1"
threads = ""
verbose_opt = ""
if quiet:
verbose_opt = " -q 2>&1 > /dev/null"
cmd = f'infer analyze {threads} --pulse-only --pulse-max-disjuncts {str(k)} -o {infer_out} {use_model} {verbose_opt}'
print(f" - cmd: {cmd}", file=sys.stderr)
os.system(cmd)
end_t = time.time()
elapsed_time = end_t - start_t
report = os.path.join(infer_out, "report.txt")
f = open (report, "r")
text = f.read().replace('\n', '')
issues = re.findall('Found ([0-9]+) issue', text)
if len(issues) == 0:
issues_count = 0
else:
issues_count = int(issues[0])
total_time = total_time + elapsed_time
total_alarms = total_alarms + issues_count
return total_time, total_alarms
except:
print(f"Skipping {p} due to unknown exceptions")
return 0, 0
def run_infer_pre(path, k, model, quiet):
return run_infer(path, k, model, quiet)
def run_infer_main(path, k, model, quiet):
return run_infer(path, k, model, quiet)
def pre_analysis(pgm, pre_k, models):
opt_model = None
opt_alarms = -1
pt = 0
if len(models) == 1:
return models[0], 0, 0
for model in models:
print(f" * Pre-analysis with model {model}")
t, a = run_infer_pre(pgm, pre_k, model, True)
print(f" # time(sec): {t}")
print(f" # alarms(#): {a}")
if opt_alarms < a:
opt_alarms = a
opt_model = model
pt = pt + t
return opt_model, opt_alarms, pt
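# Worked example (hypothetical numbers): with three candidate models reporting
# 4, 9 and 7 alarms in the cheap pre-analysis (pre_k=1), pre_analysis returns the
# second model, its 9 alarms, and the summed pre-analysis time; run_dd_infer then
# runs the expensive main analysis (main_k=5) with that model only.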
def run_dd_infer(path, pre_k, main_k, models):
print("* Pre-analysis")
model, alarms, pretime = pre_analysis(path, pre_k, models)
print(f"# total pre-time(sec): {pretime}")
print("* Main analysis")
maintime, mainalarms = run_infer_main(path, main_k, model, False)
print(f"# Main analysis time(sec): {maintime}")
print(f"# alarms(#): {mainalarms}")
def main(target_path, model_path):
files = os.listdir(f'{model_path}/{m}')
pattern = ".*\.model"
models = [f'{model_path}/{m}/{s}' for s in files if re.match(pattern, s)]
print(f"prek = {pre_k}, maink = {main_k}, models = {models}", flush=True)
run_dd_infer(target_path, pre_k, main_k, models)
def usage():
print("usage:")
print("python DDInfer.py ~/best_models ~/infer-outs/gawk-5.1.0")
if len(sys.argv) < 3:
usage()
exit(1)
model_path = sys.argv[1]
target_path = sys.argv[2]
if not os.path.isdir(model_path):
print(f'Cannot find a model in {model_path}')
usage()
exit(1)
if not os.path.isdir(target_path):
print(f'Cannot find a captured target in {target_path}')
usage()
exit(1)
main(target_path, model_path)
|
data_driven_infer-main
|
bin/DDInfer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os
import time
import re
def run(path, p, k, model):
total_time, total_alarms = 0, 0
try:
infer_out = path + p
if not os.path.isdir(infer_out):
print(" * Error: infer-out does not exist for " + p)
exit(1)
else:
start_t = time.time()
use_model = ""
if model != None:
use_model = "--pulse-join-select " + model
os.system("infer analyze -q -j 1 --pulse-only --pulse-max-disjuncts " + str(k) + " -o " + infer_out + " " + use_model)
end_t = time.time()
elapsed_time = end_t - start_t
report = path + p + "/report.txt"
f = open (report, "r")
text = f.read().replace('\n', '')
issues = re.findall('Found ([0-9]+) issue', text)
if len(issues) == 0:
issues_count = 0
else:
issues_count = int(issues[0])
os.system(f"cp {infer_out}/report.json ./data/{p}_1_{k}.json")
total_time = total_time + elapsed_time
total_alarms = total_alarms + issues_count
return total_time, total_alarms
except:
print(f"Skipping {p} due to unkonwn exceptions")
return 0, 0
pgm = None
k = 10
model = None
filename = None
if len(sys.argv) < 4:
print("Insufficient arguments")
exit(1)
elif len(sys.argv) == 4:
filename = sys.argv[1]
model = sys.argv[2]
k = sys.argv[3]
else:
print("Invalid arguments")
exit(1)
path = "/home/vagrant/infer-outs/"
f = open(filename, "r")
pgms_str = f.read().replace('\n', ' ')
pgms = pgms_str.split()[:]
for pgm in pgms:
t0, a0 = run(path, pgm, k, model)
print(f'** {pgm}\t{a0}\t{t0}')
|
data_driven_infer-main
|
Table2/bin/eval_ml_infer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
import pickle
import itertools
import sys, os
import time
import random
## usage) python3 collect.py programs_training.txt training_data 20 (use the pgms in 'programs_training.txt', write to 'training_data', with k=20)
training_data_folder = ""
if len(sys.argv) < 4:
print("Insufficient arguments")
exit(1)
elif len(sys.argv) == 4:
filename = str(sys.argv[1])
trials = 1
training_data_folder = str(sys.argv[2])
k = str(sys.argv[3])
else:
print("Invalid arguments")
exit(1)
if not os.path.isdir(f'./{training_data_folder}'):
os.system(f'mkdir ./{training_data_folder}')
f = open(filename, "r")
pgms_str = f.read().replace('\n', ' ')
pgms = pgms_str.split()[:]
print(pgms)
random.seed()
pre_classifier = GradientBoostingClassifier(n_estimators=100, learning_rate=0.1, max_depth=2)
def read_file(data_file):
X, y = [], []
with open(data_file, "r") as f:
for line in f:
data = list(map(lambda x: int(x), line.split()))
fv = data[:len(data)-1]
label = data[len(data)-1]
X.append(fv)
y.append(label)
return X, y
def make_balance(X, y):
lst = list(zip(X, y))
pos = [(x,y) for (x,y) in lst if y == 1]
neg = [(x,y) for (x,y) in lst if y == 0]
#print(f'Original: #pos = {len(pos)}, #neg = {len(neg)}')
assert (len(neg) >= len(pos))
random.shuffle(neg)
neg = neg[:len(pos)]
#print(f'Balanced: #pos = {len(pos)}, #neg = {len(neg)}')
pos.extend(neg)
return zip(*pos)
def unique_sorted(values):
"Return a sorted list of the given values, without duplicates."
values = sorted(values)
if not values:
return []
consecutive_pairs = zip(values, itertools.islice(values, 1, len(values)))
result = [a for (a, b) in consecutive_pairs if a != b]
result.append(values[-1])
return result
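# Worked example: unique_sorted([3, 1, 3, 2]) sorts to [1, 2, 3, 3], keeps the
# first element of each consecutive pair only when the two values differ, appends
# the last value, and returns [1, 2, 3].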
def uniq(X, y):
lst = list(zip(X, y))
lst_uniq = unique_sorted(lst)
print(f'before: {len(lst)}, uniq: {len(lst_uniq)}')
return zip(*lst_uniq)
def preprocess(X, y):
X, y = make_balance(X, y)
return X, y
def trim_data(X, y, r):
lst = list(zip(X, y))
res = []
for e in lst:
if random.random() <= r:
res.append(e)
return zip(*res)
def train_clf (clf, X, y):
X = np.array(X)
y = np.array(y)
clf.fit(X, y)
return clf
def train_run(X, y, model_name):
trained_clf = train_clf (pre_classifier, X, y)
pickle.dump(trained_clf, open(model_name, "wb"))
def model(accum, model_path):
train_X, train_y = read_file(accum)
if (train_X == []):
return
train_X, train_y = preprocess(train_X, train_y)
train_X, train_y = trim_data(train_X, train_y, 0.7)
train_run(train_X, train_y, model_path)
mode = " --pulse-random-mode"
if os.path.isfile(f"./{training_data_folder}/acc.model"):
#mode = f' --pulse-random-mode --pulse-cover-load history.txt'
#mode = f' --pulse-join-train ./{training_data_folder}/acc.model --pulse-cover-load history.txt'
mode = f' --pulse-join-train ./{training_data_folder}/acc.model --pulse-cover-load history.txt --pulse-repeat-mode'
def train(path, pgms, k):
total_time = 0
os.system(f'touch ./{training_data_folder}/accum.txt')
for p in pgms:
print(f"Training for {p}")
infer_out = path + p
if not os.path.isdir(infer_out):
print("Error: infer-out does not exist for " + p)
continue
else:
if os.path.isfile(f'./{training_data_folder}/{p}/history.dat'):
os.system(f'mv ./{training_data_folder}/{p}/history.dat ./history.txt')
start_t = time.time()
cmd = ("infer analyze -j 1 --pulse-train-mode --pulse-only --pulse-max-disjuncts " + str(k) + " -o " + infer_out + " --pulse-cover-mode" + mode)
print(cmd)
os.system(cmd)
end_t = time.time()
elapsed_time = end_t - start_t
if not os.path.isfile("./train.txt"):
print("Error: train.txt does not exist for " + p)
continue
os.system(f'sort -u -r ./{training_data_folder}/accum.txt train.txt -o temp.txt')
os.system(f'mv temp.txt ./{training_data_folder}/accum.txt')
r = random.randint(1,100000)
if not os.path.isdir(f'./{training_data_folder}/{p}'):
os.system(f'mkdir ./{training_data_folder}/{p}')
os.system(f'mv train.txt ./{training_data_folder}/{p}/{r}.txt')
os.system(f'mv history.txt ./{training_data_folder}/{p}/history.dat')
path = "/home/vagrant/infer-outs/"
train(path, pgms, k)
model(f'./{training_data_folder}/accum.txt', f'./{training_data_folder}/acc.model')
|
data_driven_infer-main
|
Table2/bin/collect.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
from multiprocessing import Manager, Process
import sklearn
from sklearn.ensemble import GradientBoostingClassifier
import sys
import random
import time
import pickle
import os
import itertools
from os.path import exists
from infer import *
random.seed()
#m0 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=1)
#m1 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=2)
#m2 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=4)
#m3 |-> GradientBoostingClassifier(max_depth=1)
#m4 |-> GradientBoostingClassifier(max_depth=2)
#m5 |-> GradientBoostingClassifier(max_depth=4)
#m6 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=1)
#m7 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=2)
#m8 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=4)
#m9 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=1, n_estimators=200)
#m10 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=2, n_estimators=200)
#m11 |-> GradientBoostingClassifier(learning_rate=0.01, max_depth=4, n_estimators=200)
#m12 |-> GradientBoostingClassifier(max_depth=1, n_estimators=200)
#m13 |-> GradientBoostingClassifier(max_depth=2, n_estimators=200)
#m14 |-> GradientBoostingClassifier(max_depth=4, n_estimators=200)
#m15 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=1, n_estimators=200)
#m16 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=2, n_estimators=200)
#m17 |-> GradientBoostingClassifier(learning_rate=1.0, max_depth=4, n_estimators=200)
classifiers = [GradientBoostingClassifier(n_estimators=ns, learning_rate=lr, max_depth=md)
for ns in [100, 200]
for lr in [0.01, 0.1, 1.0]
for md in [1, 2, 4]]
classifiers = list(zip (range(len(classifiers)), classifiers))
for idx, clf in classifiers:
print(f'm{idx} |-> {clf}')
def get_model_filename (folder, model_id):
filename = folder + "/" + str(model_id) + ".model"
return filename
def train_and_save (model_id, clf, X, y, folder):
filename = get_model_filename (folder, model_id)
if exists(filename):
print(f'Skip training {model_id} {clf}: model already exists in {filename}')
else:
start_t = time.time()
trained_clf = train_clf (clf, X, y)
end_t = time.time()
print(f'Training {model_id} {clf} finishes in {end_t-start_t} seconds', flush=True)
pickle.dump(trained_clf, open(filename, "wb"))
return
def split_list(a, n):
k, m = divmod(len(a), n)
return list(a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
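# Worked example: split_list(list(range(7)), 3) == [[0, 1, 2], [3, 4], [5, 6]];
# divmod spreads the remainder so the first len(a) % n chunks get one extra element.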
def train_and_save_models (classifiers, X, y, folder):
for model_id, clf in classifiers:
train_and_save (model_id, clf, X, y, folder)
def train_parallel (classifiers, X, y, folder, cpus):
clfs = classifiers[:]
random.shuffle(clfs)
splits = split_list(clfs, cpus)
i = 0
for split in splits:
i = i + 1
print(f'CPU {i} : {split}')
jobs = []
for i in range(cpus):
th = Process(target=train_and_save_models, args=(splits[i], X, y, folder))
jobs.append(th)
for i in range(cpus):
jobs[i].start()
for i in range(cpus):
jobs[i].join()
def get_pgm_name(fullpath):
basename = os.path.basename(fullpath)
assert (basename.endswith(".merged.txt"))
return basename[:-11]
def unique_sorted(values):
"Return a sorted list of the given values, without duplicates."
values = sorted(values)
if not values:
return []
consecutive_pairs = zip(values, itertools.islice(values, 1, len(values)))
result = [a for (a, b) in consecutive_pairs if a != b]
result.append(values[-1])
return result
def train_clf (clf, X, y):
X = np.array(X)
y = np.array(y)
clf.fit(X, y)
return clf
def read_file(data_file):
X, y = [], []
with open(data_file, "r") as f:
for line in f:
data = list(map(lambda x: int(x), line.split()))
fv = data[:len(data)-1]
label = data[len(data)-1]
X.append(fv)
y.append(label)
return X, y
def read_files(data_files):
X, y = [], []
for data_file in data_files:
_X, _y = read_file(data_file)
X.extend(_X)
y.extend(_y)
return X, y
def get_pos_neg(X, y):
lst = list(zip(X, y))
pos = [(x,y) for (x,y) in lst if y == 1]
neg = [(x,y) for (x,y) in lst if y == 0]
return pos, neg
def make_balance(X, y):
lst = list(zip(X, y))
pos = [(x,y) for (x,y) in lst if y == 1]
neg = [(x,y) for (x,y) in lst if y == 0]
assert (len(neg) >= len(pos))
random.shuffle(neg)
neg = neg[:len(pos)]
pos.extend(neg)
return zip(*pos)
def uniq(X, y):
lst = list(zip(X, y))
lst_uniq = unique_sorted(lst)
print(f'before: {len(lst)}, uniq: {len(lst_uniq)}')
return zip(*lst_uniq)
def preprocess(X, y):
X, y = uniq(X, y)
X, y = make_balance(X, y)
return X, y
def trim_data(X, y, r):
lst = list(zip(X, y))
res = []
for e in lst:
if random.random() <= r:
res.append(e)
return zip(*res)
def get_train_data(train_files, ratio):
train_X, train_y = read_files(train_files)
assert (train_X != [])
train_X, train_y = preprocess(train_X, train_y)
pos, neg = get_pos_neg (train_X, train_y)
print(f'#pos : {len(pos)}, #neg : {len(neg)}')
train_X, train_y = trim_data(train_X, train_y, ratio)
pos, neg = get_pos_neg (train_X, train_y)
print(f'#pos : {len(pos)}, #neg : {len(neg)}')
return train_X, train_y
def evaluate_clf_for_parallel(clf, valid_files, return_dict):
for valid_file in valid_files:
valid_X, valid_y = read_file(valid_file)
try:
valid_X, valid_y = make_balance(valid_X, valid_y)
except:
return_dict[valid_file] = (0, 0, 0, 0, 0)
return
predict_y = clf.predict(valid_X)
pgm = get_pgm_name(valid_file)
path = "/home/vagrant/infer-outs/"
model = f"/tmp/model_tmp_{pgm}.model"
if exists(model):
os.system(f"rm {model}")
pickle.dump(clf, open(model, "wb"))
infer_time, infer_alarms = run_infer_main(path, pgm, 5, model, True)
TP, FN, FP, TN = 0, 0, 0, 0
for (gt, predict) in zip(valid_y, predict_y):
if gt == 1:
if predict == 1:
TP = TP + 1
else:
FN = FN + 1
else:
if predict == 1:
FP = FP + 1
else:
TN = TN + 1
n_pos = sum(predict_y)
n_neg = len(predict_y) - n_pos
if TP + FN == 0:
recall = -1
else:
recall = int(TP / (TP + FN) * 100)
f1score = 0
if TP + FP == 0:
precision = -1
f1score = 0
else:
precision = int(TP / (TP + FP) * 100)
if precision + recall > 0:
f1score = int(2 * recall * precision / (precision + recall))
else:
f1score = 0
print(f' - validation on {valid_file}', flush=True)
print(f' - predict: #pos={n_pos}, #neg={n_neg}')
print(f' - TP={TP}, FN={FN}, FP={FP}, TN={TN}')
print(f' - Recall={recall}, Precision={precision}, f1score={f1score}')
print(f' - Infer alarms={infer_alarms}, Infer time={infer_time}')
return_dict[valid_file] = (TP, FN, FP, TN, infer_alarms)
def evaluate_clf_parallel(clf, valid_files, cpus):
files = valid_files[:]
random.shuffle(files)
splits = split_list(files, cpus)
print(splits)
i = 0
for split in splits:
i = i + 1
print(f'CPU {i} : {split}')
jobs = []
manager = Manager()
return_dict = manager.dict()
for i in range(cpus):
th = Process(target=evaluate_clf_for_parallel, args=(clf, splits[i], return_dict))
jobs.append(th)
for i in range(cpus):
jobs[i].start()
for i in range(cpus):
jobs[i].join()
return return_dict
def report(header, TP, FN, FP, TN, IA):
sensitivity = 0
if TP + FN != 0:
sensitivity = TP / (TP + FN)
recall = sensitivity
specitivity = 0
if TN + FP != 0:
specitivity = TN / (TN + FP)
precision = 0
if TP + FP != 0:
precision = TP / (TP + FP)
accuracy = 0
if TP + FP + FN + TN != 0:
accuracy = (TP + TN) / (TP + FP + FN + TN)
f1score = 0
if recall + precision != 0:
f1score = 2 * (recall * precision) / (recall + precision)
print()
print("**********************************")
print(header)
print("**********************************")
print(f'TP={TP}, FP={FP}, FN={FN}, TN={TN}')
print("Sensitivity/Recall = TP / (TP + FN) = %.2f" % sensitivity)
print("Specificity = TN / (TN + FP) = %.2f" % specitivity)
print("Precision = TP / (TP + FP) = %.2f" % precision)
print("Accuracy = (TP + TN) / (TP+FP+FN+TN) = %.2f" % accuracy)
print("F1-score = %.2f" % f1score)
print("Infer alarms = %d" % IA)
print("**********************************")
def load_clf(clf_id, folder):
model_file = get_model_filename (folder, clf_id)
clf = pickle.load(open(model_file, 'rb'))
return clf
def run_cv(data_files, folder_to_save_models, cpus, ratio_data, b_eval):
train_files = data_files[:int(len(data_files)*0.7)]
valid_files = [f for f in data_files if not f in train_files]
print(f'training programs ({len(train_files)}) = {train_files}')
print(f'validation programs ({len(valid_files)}) = {valid_files}')
print(f'Processing training data', flush=True)
start = time.time()
train_X, train_y = get_train_data(train_files, ratio_data)
end = time.time()
print(f'Processing training data finishes in {end-start}s', flush=True)
print(f'Training begins', flush=True)
start = time.time()
train_parallel(classifiers, train_X, train_y, folder_to_save_models, cpus)
end = time.time()
print(f'Training finished in {end-start} seconds', flush=True)
result = []
i = 0
if b_eval == False:
return result
for clf_idx, clf in classifiers:
i = i + 1
TP, FN, FP, TN, IA = 0, 0, 0, 0, 0
print()
print(f'Evaluating {clf_idx} {clf}', flush=True)
clf = load_clf(clf_idx, folder_to_save_models)
log = {}
log = evaluate_clf_parallel(clf, valid_files, cpus)
result.append(((clf_idx, clf), log))
return result
### use the result of Infer as metric
def clf_metric(TP, FN, FP, TN, IA):
return IA
def alarms_of_model (pgms, m, M):
s = 0
for p in pgms:
s = s + M[m][p]
return s
def max_alarms (p, models, M):
max = 0
for m in models:
if M[m][p] > max:
max = M[m][p]
return max
def sum_of_max_alarms (pgms, models, M):
sum = 0
for p in pgms:
sum = sum + max_alarms (p, models, M)
return sum
def best_model (pgms, models, M):
max_alarms = 0
max_model = None
for m in models:
alarms = alarms_of_model (pgms, m, M)
if max_alarms < alarms:
max_alarms = alarms
max_model = m
return max_model, max_alarms
def opt_model_comb (k, pgms, models, M):
combs = list(itertools.combinations (models, k))
opt_comb = None
opt_alarms = 0
for comb in combs:
alarms = sum_of_max_alarms (pgms, comb, M)
if opt_alarms < alarms:
opt_alarms = alarms
opt_comb = comb
return opt_comb, opt_alarms
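# Illustrative example (toy numbers, not from any experiment): with
#   M = {'m1': {'p1': 3, 'p2': 0}, 'm2': {'p1': 1, 'p2': 4}}
# best_model(['p1', 'p2'], ['m1', 'm2'], M) returns ('m2', 5), whereas
# opt_model_comb(2, ['p1', 'p2'], ['m1', 'm2'], M) returns (('m1', 'm2'), 7),
# i.e. the portfolio value obtained by taking the best model per program.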
def select_models(result, folder_to_save_models):
print()
best_clf = None
best_clf_metric = -1
dic = {}
for (clf_idx, clf),log in result:
print(f'{clf_idx}. {clf}')
TP, FN, FP, TN, IA = 0, 0, 0, 0, 0
for pgm,(tp,fn,fp,tn,ia) in log.items():
print(f' - {pgm}: TP={tp}, FN={fn}, FP={fp}, TN={tn}, IA={ia}')
TP, FN, FP, TN, IA = TP + tp, FN + fn, FP + fp, TN + tn, IA + ia
subdic = dic.get(pgm, {})
subdic[(clf_idx, clf)] = (tp, fn, fp, tn, ia)
dic[pgm] = subdic
if clf_metric(TP, FN, FP, TN, IA) > best_clf_metric:
best_clf_metric = clf_metric(TP, FN, FP, TN, IA)
best_clf = ((clf_idx, clf), TP, FN, FP, TN, IA)
print()
print("----------------------------------------------------")
print(" Best model")
print("----------------------------------------------------")
(clf_idx, clf), TP, FN, FP, TN, IA = best_clf
report(f"best clf : {clf_idx}. {clf}", TP, FN, FP, TN, IA)
os.system("mkdir best_models")
pickle.dump(clf, open("./best_models/best.model", "wb"))
print("----------------------------------------------------")
print(" Best models per program")
print("----------------------------------------------------")
best_alarms_sum = 0
alarms = {}
for pgm, subdic in dic.items():
print(pgm)
best_clf = None
best_clf_metric = -1
alarms[pgm] = {}
for (clf_idx, clf), (TP, FN, FP, TN, IA) in subdic.items():
alarms[pgm][clf_idx] = IA
if clf_metric(TP, FN, FP, TN, IA) > best_clf_metric:
best_clf_metric = clf_metric(TP, FN, FP, TN, IA)
best_clf = ((clf_idx, clf), TP, FN, FP, TN, IA)
(clf_idx, clf), TP, FN, FP, TN, IA = best_clf
basename = os.path.basename(pgm)
report(f'best clf for {basename} : {clf_idx} {clf}', TP, FN, FP, TN, IA)
pickle.dump(clf, open(f"./best_models/{basename}.model", "wb"))
best_alarms_sum = best_alarms_sum + IA
print()
print("----------------------------------------------------")
print(f'#Alarms of optimal Infer: {best_alarms_sum}')
print("----------------------------------------------------")
M = {}
pgms = []
models = []
    for pgm in alarms:
        pgms.append(pgm)
    # build the model list once (previously rebuilt inside the loop above, duplicating entries)
    for (clf_id, _) in classifiers:
        models.append(clf_id)
print(f'pgms : {pgms}')
print(f'models: {models}')
for m in models:
M[m] = {}
for p in pgms:
if p in alarms and m in alarms[p]:
M[m][p] = alarms[p][m]
else:
M[m][p] = 0
bm, ba = best_model (pgms, models, M)
print("-----------------------------------")
print(f'best model: {bm}, #alarms: {ba}')
print("-----------------------------------")
for k in range(1, 4):
opt_comb, opt_alarms = opt_model_comb(k, pgms, models, M)
print(f'comb size: {k}, optimal combination: {opt_comb}, #alarms: {opt_alarms}')
folder = folder_to_save_models + "/" + str(k)
os.system("mkdir " + folder)
for m in opt_comb:
mfile = get_model_filename (folder_to_save_models, m)
os.system("cp " + mfile + " " + folder)
for pgm in alarms:
for clf_idx in alarms[pgm]:
basename = get_pgm_name(pgm)
print(f'{basename} # m{clf_idx} # {alarms[pgm][clf_idx]}')
for p in pgms:
for m in models:
print(f'M[{m}][{p}] : {M[m][p]}')
if len(sys.argv) < 5:
    print("Error: insufficient arguments")
    print("usage: python learn_classifier.py <folder_to_save_models> <ratio_of_data_to_use> <num_of_cpus> <programs_file>")
    exit(1)
folder_to_save_models = sys.argv[1]
ratio_of_data_to_use = float(sys.argv[2])
num_of_cpus = int(sys.argv[3])
filename = sys.argv[4]
f = open(filename, "r")
pgms_str = f.read().replace('\n', ' ')
pgms = pgms_str.split()[:]
print(pgms)
data_files = []
for p in pgms:
name="./merged_training_data/" + p + ".merged.txt"
if exists(name):
data_files.append(name)
if not exists(folder_to_save_models):
os.system(f"mkdir {folder_to_save_models}")
b_eval = True
print(f'save models in {folder_to_save_models}, using {num_of_cpus} cpus')
result = run_cv(data_files, folder_to_save_models, num_of_cpus, ratio_of_data_to_use, b_eval)
if b_eval:
select_models(result, folder_to_save_models)
|
data_driven_infer-main
|
Table2/bin/learn_classifier.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os
import infer
if len(sys.argv) < 6:
print("usage:")
print("python run_ml_infer.py bin/programs_test.txt 1 5 1 models")
exit(1)
filename = sys.argv[1]
pre_k = int(sys.argv[2])
main_k = int(sys.argv[3])
ncpus = int(sys.argv[4])
models = []
for model in sys.argv[5:]:
models.append(model)
path = "/home/vagrant/infer-outs/"
if os.path.exists(filename):
txtfile = open(filename, "r")
pgms = txtfile.read().splitlines()
else:
pgms = [filename]
print(f"prek = {pre_k}, maink = {main_k}, models = {models}", flush=True)
t, pret, a = infer.run_dd_infer_parallel(path, pgms, pre_k, main_k, models, ncpus, True)
print(f"k: {pre_k} {main_k}, alarms: {a}, pre_time: {pret}, main_time: {t}, total_time: {t+pret}, with model: {models}", flush=True)
|
data_driven_infer-main
|
Table2/bin/run_ml_infer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys, os
import time
import re
import random
from multiprocessing import Process, Queue, Manager
def split_list(a, n):
k, m = divmod(len(a), n)
return list(a[i*k+min(i, m):(i+1)*k+min(i+1, m)] for i in range(n))
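# Example: split_list([1, 2, 3, 4, 5], 2) == [[1, 2, 3], [4, 5]]
# (chunk sizes differ by at most one element).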
def run_random_infer(path, p, k, quiet):
total_time, total_alarms = 0, 0
try:
infer_out = path + p
if not os.path.isdir(infer_out):
print(" * Error: infer-out does not exist for " + p)
exit(1)
else:
start_t = time.time()
            verbose_opt = ""
            if quiet:
                verbose_opt = " 2>&1 > /dev/null"
            os.system("infer analyze -q -j 1 --pulse-random-mode --pulse-only --pulse-max-disjuncts " + str(k) + " -o " + infer_out + " --pulse-cg-load " + "/vagrant/cgs/" + p + " " + verbose_opt)
end_t = time.time()
elapsed_time = end_t - start_t
report = path + p + "/report.txt"
f = open (report, "r")
text = f.read().replace('\n', '')
issues = re.findall('Found ([0-9]+) issue', text)
if len(issues) == 0:
issues_count = 0
else:
issues_count = int(issues[0])
total_time = total_time + elapsed_time
total_alarms = total_alarms + issues_count
return total_time, total_alarms
except:
print(f"Skipping {p} due to unkonwn exceptions")
return 0, 0
def run_infer(path, p, k, model, quiet, limit_fn, threads=1):
total_time, total_alarms = 0, 0
try:
infer_out = os.path.join(path, p)
if not os.path.isdir(infer_out):
print(" * Error: infer-out does not exist for " + infer_out)
exit(1)
else:
start_t = time.time()
use_model = ""
if model != None:
use_model = "--pulse-join-select " + model
limit_functions = ""
if limit_fn != 0:
limit_functions = " --pulse-limit-fn " + str(limit_fn) + " "
verbose_opt = ""
if quiet:
verbose_opt = " 2>&1 > /dev/null"
print(p, model)
cmd = f"infer analyze -q -j {threads} --pulse-only --pulse-max-disjuncts " + str(k) + " -o " + infer_out + " " + use_model + limit_functions + verbose_opt
print(cmd, file=sys.stderr)
os.system(cmd)
end_t = time.time()
elapsed_time = end_t - start_t
            report = os.path.join(infer_out, "report.txt")
f = open (report, "r")
text = f.read().replace('\n', '')
issues = re.findall('Found ([0-9]+) issue', text)
if len(issues) == 0:
issues_count = 0
else:
issues_count = int(issues[0])
total_time = total_time + elapsed_time
total_alarms = total_alarms + issues_count
return total_time, total_alarms
except:
print(f"Skipping {p} due to unkonwn exceptions")
return 0, 0
def run_infer_main(path, p, k, model, quiet, threads=1):
return run_infer(path, p, k, model, quiet, 0, threads=threads)
def run_infer_pre(path, p, k, model, quiet, limit_fn, threads=1):
return run_infer(path, p, k, model, quiet, limit_fn, threads=threads)
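# run_infer_main and run_infer_pre are thin wrappers around run_infer: the main
# analysis passes limit_fn=0 (no --pulse-limit-fn flag), while the pre-analysis
# may cap the analysis via a non-zero limit_fn.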
def work(path, pgms, k, model, quiet, return_dict):
for pgm in pgms:
t, a = run_infer_main(path, pgm, k, model, quiet)
return_dict[pgm] = (a, t)
def run_infer_parallel(path, pgms, k, model, ncpus, quiet):
pgms_orig = pgms[:]
random.shuffle(pgms)
splits = split_list(pgms, ncpus)
for i in range(ncpus):
print(f'cpu {i}: {splits[i]}', flush=True)
manager = Manager()
return_dict = manager.dict()
jobs = []
for i in range(ncpus):
th = Process(target=work, args=(path, splits[i], k, model, quiet, return_dict))
jobs.append(th)
th.start()
for job in jobs:
job.join()
t_sum, a_sum = 0, 0
for pgm in return_dict:
a, t = return_dict[pgm]
t_sum = t_sum + t
a_sum = a_sum + a
for pgm in pgms_orig:
a, t = return_dict[pgm]
print(f'** {pgm}\t\t{a}\t\t{t}', flush=True)
return t_sum, a_sum
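# Note: t_sum aggregates per-program analysis times across all worker processes,
# i.e. it reports total analysis time rather than wall-clock time.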
def pre_analysis(path, pgm, pre_k, models, threads=1):
opt_model = None
opt_alarms = -1
pt = 0
if len(models) == 1:
return models[0], 0, 0
for model in models:
t, a = run_infer_pre(path, pgm, pre_k, model, True, 0, threads=threads)
if opt_alarms < a:
opt_alarms = a
opt_model = model
pt = pt + t
return opt_model, opt_alarms, pt
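# work_dd implements the two-phase, data-driven pipeline per program: a cheap
# pre-analysis with pre_k disjuncts scores every candidate model by the number
# of alarms it reports, and the main analysis with main_k disjuncts then runs
# once using the winning model.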
def work_dd(path, pgms, pre_k, main_k, models, quiet, threads, return_dict):
for pgm in pgms:
model, alarms, pretime = pre_analysis(path, pgm, pre_k, models, threads=threads)
t, a = run_infer_main(path, pgm, main_k, model, quiet, threads=threads)
infer_out = os.path.join(path, pgm)
os.system(f"cp {infer_out}/report.json ./data/{pgm}_3_{pre_k}_{main_k}.json")
return_dict[pgm] = (a, t, pretime)
def run_dd_infer_parallel(path, pgms, pre_k, main_k, models, ncpus, quiet, threads=1):
pgms_orig = pgms[:]
random.shuffle(pgms)
splits = split_list(pgms, ncpus)
for i in range(ncpus):
print(f'cpu {i}: {splits[i]}', flush=True)
manager = Manager()
return_dict = manager.dict()
jobs = []
for i in range(ncpus):
th = Process(target=work_dd, args=(path, splits[i], pre_k, main_k, models, quiet, threads, return_dict))
jobs.append(th)
th.start()
for job in jobs:
job.join()
t_sum, pret_sum, a_sum = 0, 0, 0
for pgm in return_dict:
a, t, pret = return_dict[pgm]
t_sum = t_sum + t
a_sum = a_sum + a
pret_sum = pret_sum + pret
for pgm in pgms_orig:
a, t, pret = return_dict[pgm]
print(f'** {pgm}\t{a}\t{t}\t{pret}\t{pret+t}', flush=True)
return t_sum, pret_sum, a_sum
def work_random(path, pgms, k, quiet, return_dict):
for pgm in pgms:
t, a = run_random_infer(path, pgm, k, quiet)
return_dict[pgm] = (a, t)
def run_random_infer_parallel(path, pgms, k, ncpus, quiet):
pgms_orig = pgms[:]
random.shuffle(pgms)
splits = split_list(pgms, ncpus)
for i in range(ncpus):
print(f'cpu {i}: {splits[i]}', flush=True)
manager = Manager()
return_dict = manager.dict()
jobs = []
for i in range(ncpus):
th = Process(target=work_random, args=(path, splits[i], k, quiet, return_dict))
jobs.append(th)
th.start()
for job in jobs:
job.join()
t_sum, a_sum = 0, 0
for pgm in return_dict:
a, t = return_dict[pgm]
t_sum = t_sum + t
a_sum = a_sum + a
for pgm in pgms_orig:
a, t = return_dict[pgm]
print(f'** {pgm}\t\t{a}\t\t{t}', flush=True)
return t_sum, a_sum
|
data_driven_infer-main
|
Table2/bin/infer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Modified from github.com/openai/CLIP
from collections import OrderedDict
import numpy as np
import timm
import torch
from torch import nn
import torch.nn.functional as F
import torch.distributed as dist
import losses
import utils
import vit
def drop_path(x, drop_prob: float = 0., training: bool = False, scale_by_keep: bool = True):
if drop_prob == 0. or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor
class DropPath(nn.Module):
def __init__(self, drop_prob: float = 0., scale_by_keep: bool = True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x):
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
def extra_repr(self):
return f'drop_prob={round(self.drop_prob,3):0.3f}'
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
ret = super().forward(x.type(torch.float32))
return ret.type(orig_type)
class QuickGELU(nn.Module):
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
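# QuickGELU is the sigmoid-based GELU approximation (x * sigmoid(1.702 * x))
# used in the original CLIP transformer.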
class ResidualAttentionBlock(nn.Module):
def __init__(self, d_model: int, n_head: int, attn_mask: torch.Tensor = None, dropout_prob=0.0, drop_path_prob=0.0):
super().__init__()
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ln_1 = LayerNorm(d_model)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, d_model * 4)),
("gelu", QuickGELU()),
("c_proj", nn.Linear(d_model * 4, d_model))
]))
self.ln_2 = LayerNorm(d_model)
self.attn_mask = attn_mask
self.dropout = nn.Dropout(p=dropout_prob, inplace=True)
self.drop_path = DropPath(drop_prob=drop_path_prob)
def attention(self, x: torch.Tensor):
self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) if self.attn_mask is not None else None
return self.attn(x, x, x, need_weights=False, attn_mask=self.attn_mask)[0]
def forward(self, inp):
x, mode = inp
if mode == 'local':
self.dropout(x)
x = x + self.drop_path(self.attention(self.ln_1(x)))
x = x + self.drop_path(self.mlp(self.ln_2(x)))
else:
x = x + self.attention(self.ln_1(x))
x = x + self.mlp(self.ln_2(x))
return (x, mode)
class Transformer(nn.Module):
def __init__(self, width: int, layers: int, heads: int, attn_mask: torch.Tensor = None, dropout_prob=0.0, drop_path_prob=0.0):
super().__init__()
self.width = width
self.layers = layers
self.resblocks = nn.Sequential(*[
ResidualAttentionBlock(
width,
heads,
attn_mask,
dropout_prob,
drop_path_prob
) for _ in range(layers)
])
def forward(self, x: torch.Tensor, mode='global'):
return self.resblocks((x, mode))[0]
class CLIP(nn.Module):
def __init__(
self,
embed_dim: int,
# vision
vision_width: int,
vision_model: nn.Module,
# text
context_length: int,
vocab_size: int,
transformer_width: int,
transformer_heads: int,
transformer_layers: int = 12,
detach_proj: bool = False,
no_share_token=False,
clip_proj_type='linear',
clip_hidden_dim=4096,
global_text_mask_prob=1.0,
local_text_mask_prob=0.5,
text_dropout_prob=0.0,
text_drop_path_prob=0.0,
**kwargs,
):
super().__init__()
self.context_length = context_length
self.vision_width = vision_width
self.transformer_width = transformer_width
self.embed_dim = embed_dim
self.detach_proj = detach_proj
self.clip_proj_type = clip_proj_type
self.visual = vision_model
self.no_share_token = no_share_token
self.transformer = Transformer(
width=transformer_width,
layers=transformer_layers,
heads=transformer_heads,
attn_mask=self.build_attention_mask(),
dropout_prob=text_dropout_prob,
drop_path_prob=text_drop_path_prob,
)
self.vocab_size = vocab_size
self.local_text_mask_prob = local_text_mask_prob
self.global_text_mask_prob = global_text_mask_prob
self.token_embedding = nn.Embedding(vocab_size, transformer_width)
self.positional_embedding = nn.Parameter(torch.empty(self.context_length, transformer_width))
self.ln_final = LayerNorm(transformer_width)
if clip_proj_type == 'mlp':
self.image_projector = self._build_mlp(
in_dim=self.vision_width,
mlp_dim=clip_hidden_dim,
out_dim=embed_dim
)
self.text_projector = self._build_mlp(
in_dim=self.transformer_width,
mlp_dim=clip_hidden_dim,
out_dim=embed_dim
)
else:
self.image_projector = nn.Linear(self.vision_width, embed_dim, bias=False)
self.text_projector = nn.Linear(self.transformer_width, embed_dim, bias=False)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.initialize_parameters()
def _build_mlp(self, in_dim, mlp_dim, out_dim, num_layers=3):
mlp = [
("layer1", nn.Linear(in_dim, mlp_dim)),
("bn1", utils.infer_batchnorm_class()(mlp_dim)),
("relu1", nn.ReLU(inplace=True))
]
i = 1
for i in range(2, num_layers):
mlp.extend([
(f"layer{i}", nn.Linear(mlp_dim, mlp_dim)),
(f"bn{i}", utils.infer_batchnorm_class()(mlp_dim)),
(f"relu{i}", nn.ReLU(inplace=True))
])
mlp.append((f"layer{i+1}", nn.Linear(mlp_dim, out_dim)))
return nn.Sequential(OrderedDict(mlp))
@torch.no_grad()
def initialize_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.clip_proj_type == 'linear':
nn.init.normal_(self.image_projector.weight, std=self.vision_width ** -0.5)
nn.init.normal_(self.text_projector.weight, std=self.transformer.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the vision tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
        mask.triu_(1)  # keep -inf strictly above the diagonal, zero elsewhere (causal mask)
return mask
def encode_image(self, image):
feats = self.visual(image)
z = self.image_projector(feats)
return {'feats_image': feats, 'z_image': z}
def encode_text(self, text, mode='global', forward_proj=True):
range_index = torch.arange(text.size(0))
eot_index = text.argmax(dim=-1)
x = self.token_embedding(text) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, mode=mode)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
feats = x[range_index, eot_index]
out = {'feats_text': feats}
if forward_proj:
out['z_text'] = self.text_projector(feats.detach() if self.detach_proj else feats)
return out
def forward(self, image, text):
out_image = self.encode_image(image)
out_text = self.encode_text(text)
return {**out_image, **out_text, 'logit_scale': self.logit_scale.exp()}
@torch.no_grad()
def predict_zeroshot(self, image_feats, text_feats):
z_image = image_feats['z_image']
z_text = text_feats['z_text']
z_image = z_image / z_image.norm(dim=-1, keepdim=True)
z_text = z_text / z_text.norm(dim=-1, keepdim=True)
similarity = z_image @ z_text.t()
return {'z_sim': similarity}
def encode_image_val(self, image):
out = self.encode_image(image)
return out
def encode_text_val(self, text):
out = self.encode_text(text)
return out
class CL2L(CLIP):
def __init__(self, separate_proj=False, cl2l_txt_proj_type='mlp', cl2l_img_proj_type='mlp', **kwargs):
super().__init__(separate_proj=False, **kwargs)
self.separate_proj = separate_proj
if separate_proj:
self.l2l_logit_scale = nn.Parameter(
torch.ones([]) * np.log(1 / 0.1))
if cl2l_img_proj_type == 'mlp':
self.l2l_image_projector = self._build_mlp(
in_dim=self.vision_width,
mlp_dim=4096,
out_dim=self.embed_dim
)
else:
self.l2l_image_projector = nn.Linear(self.vision_width, self.embed_dim, bias=False)
if cl2l_txt_proj_type == 'mlp':
self.l2l_text_projector = self._build_mlp(
in_dim=self.transformer_width,
mlp_dim=4096,
out_dim=self.embed_dim
)
else:
self.l2l_text_projector = nn.Linear(self.transformer_width, self.embed_dim, bias=False)
else:
self.l2l_image_projector = self.image_projector
self.l2l_text_projector = self.text_projector
def encode_image_val(self, image):
out = self.encode_image(image)
out['h_image'] = self.l2l_image_projector(out['feats_image'])
return out
def encode_text_val(self, text):
out = super().encode_text(text)
out['h_text'] = self.l2l_text_projector(out['feats_text'])
return out
def forward(self, image_global, text, *image_local):
text_global, *text_local = text.unbind(1)
out = super().forward(image_global, text_global)
# forward backbone
out['feats_image_local'] = [self.visual(l) for l in image_local]
out['feats_text_local'] = [
self.encode_text(t, mode='local', forward_proj=False)['feats_text']
for t in text_local
]
# forward projector
out['h_image_local'] = [self.l2l_image_projector(l) for l in out['feats_image_local']]
out['h_text_local'] = [self.l2l_text_projector(l) for l in out['feats_text_local']]
# fix names
out['z_image_global'] = out.pop('z_image')
out['z_text_global'] = out.pop('z_text')
out['h_logit_scale'] = self.l2l_logit_scale.exp() if self.separate_proj else out['logit_scale']
return out
@torch.no_grad()
def predict_zeroshot(self, image_feats, text_feats):
outs = super().predict_zeroshot(image_feats, text_feats)
z_image = image_feats['h_image']
z_text = text_feats['h_text']
z_image = z_image / z_image.norm(dim=-1, keepdim=True)
z_text = z_text / z_text.norm(dim=-1, keepdim=True)
similarity = z_image @ z_text.t()
return {**outs, 'h_sim': similarity}
class BARLIP(CL2L):
def __init__(self, barlip_proj_dim, barlip_hidden_dim, **kwargs):
super().__init__(**kwargs)
self.barlip_image_projector_global = nn.Sequential(
nn.Linear(kwargs['vision_width'], barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_proj_dim),
utils.infer_batchnorm_class()(barlip_proj_dim)
)
self.barlip_text_projector_global = nn.Sequential(
nn.Linear(kwargs['transformer_width'], barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_proj_dim),
utils.infer_batchnorm_class()(barlip_proj_dim)
)
if 'separate_proj_child' in kwargs and kwargs['separate_proj_child']:
self.barlip_image_projector_local = nn.Sequential(
nn.Linear(kwargs['vision_width'], barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_proj_dim),
utils.infer_batchnorm_class()(barlip_proj_dim)
)
self.barlip_text_projector_local = nn.Sequential(
nn.Linear(kwargs['transformer_width'], barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_hidden_dim),
utils.infer_batchnorm_class()(barlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(barlip_hidden_dim, barlip_proj_dim),
utils.infer_batchnorm_class()(barlip_proj_dim)
)
else:
self.barlip_image_projector_local = self.barlip_image_projector_global
self.barlip_text_projector_local = self.barlip_text_projector_global
def forward(self, image, text, *image_local):
out = super().forward(image, text, *image_local)
out['v_image'] = self.barlip_image_projector_global(out['feats_image'])
out['v_text'] = self.barlip_text_projector_global(out['feats_text'])
out['v_image_local'] = [self.barlip_image_projector_local(l) for l in out['feats_image_local']]
out['v_text_local'] = [self.barlip_text_projector_local(l) for l in out['feats_text_local']]
return out
class SIAMLIP(CL2L):
def __init__(self, siamlip_proj_dim, siamlip_hidden_dim, siamlip_no_last_bn, **kwargs):
super().__init__(**kwargs)
self.siamlip_image_projector_global = nn.Sequential(
nn.Linear(kwargs['vision_width'], siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
# nn.Linear(siamlip_hidden_dim, siamlip_hidden_dim, bias=False),
# utils.infer_batchnorm_class()(siamlip_hidden_dim),
# nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim, bias=False),
)
self.siamlip_text_projector_global = nn.Sequential(
nn.Linear(kwargs['transformer_width'], siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
# nn.Linear(siamlip_hidden_dim, siamlip_hidden_dim, bias=False),
# utils.infer_batchnorm_class()(siamlip_hidden_dim),
# nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim, bias=False),
)
if 'separate_proj_child' in kwargs and kwargs['separate_proj_child']:
self.siamlip_image_projector_local = nn.Sequential(
nn.Linear(kwargs['vision_width'], siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
# nn.Linear(siamlip_hidden_dim, siamlip_hidden_dim, bias=False),
# utils.infer_batchnorm_class()(siamlip_hidden_dim),
# nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim, bias=False),
)
self.siamlip_text_projector_local = nn.Sequential(
nn.Linear(kwargs['transformer_width'], siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
# nn.Linear(siamlip_hidden_dim, siamlip_hidden_dim, bias=False),
# utils.infer_batchnorm_class()(siamlip_hidden_dim),
# nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim, bias=False),
)
else:
self.siamlip_image_projector_local = self.siamlip_image_projector_global
self.siamlip_text_projector_local = self.siamlip_text_projector_global
if not siamlip_no_last_bn:
self.siamlip_image_projector_global = nn.Sequential(
self.siamlip_image_projector_global,
utils.infer_batchnorm_class()(siamlip_proj_dim, affine=False)
)
self.siamlip_text_projector_global = nn.Sequential(
self.siamlip_text_projector_global,
utils.infer_batchnorm_class()(siamlip_proj_dim, affine=False)
)
self.siamlip_image_projector_local = nn.Sequential(
self.siamlip_image_projector_local,
utils.infer_batchnorm_class()(siamlip_proj_dim, affine=False)
)
self.siamlip_text_projector_local = nn.Sequential(
self.siamlip_text_projector_local,
utils.infer_batchnorm_class()(siamlip_proj_dim, affine=False)
)
# predictors
self.image_text_predictor_global = nn.Sequential(
nn.Linear(siamlip_proj_dim, siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim),
)
self.text_image_predictor_global = nn.Sequential(
nn.Linear(siamlip_proj_dim, siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim),
)
if 'separate_proj_child' in kwargs and kwargs['separate_proj_child']:
self.image_text_predictor_local = nn.Sequential(
nn.Linear(siamlip_proj_dim, siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim),
)
self.text_image_predictor_local = nn.Sequential(
nn.Linear(siamlip_proj_dim, siamlip_hidden_dim, bias=False),
utils.infer_batchnorm_class()(siamlip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(siamlip_hidden_dim, siamlip_proj_dim),
)
else:
self.image_text_predictor_local = self.image_text_predictor_global
self.text_image_predictor_local = self.text_image_predictor_global
def forward(self, image, text, *image_local):
out = super().forward(image, text, *image_local)
out['v_image'] = self.siamlip_image_projector_global(out['feats_image'])
out['p_image'] = self.image_text_predictor_global(out['v_image'])
out['v_text'] = self.siamlip_text_projector_global(out['feats_text'])
out['p_text'] = self.text_image_predictor_global(out['v_text'])
out['v_image_local'] = [self.siamlip_image_projector_local(l) for l in out['feats_image_local']]
out['p_image_local'] = [self.image_text_predictor_local(l) for l in out['v_image_local']]
out['v_text_local'] = [self.siamlip_text_projector_local(l) for l in out['feats_text_local']]
out['p_text_local'] = [self.text_image_predictor_local(l) for l in out['v_text_local']]
return out
class SWALIPV1(CLIP):
def __init__(
self,
swalip_proj_dim,
swalip_hidden_dim,
swalip_num_proto,
swalip_no_shared_proto,
swalip_temperature,
swalip_learn_temperature,
**kwargs
):
super().__init__(**kwargs)
self.swalip_image_projector = nn.Sequential(
nn.Linear(kwargs['vision_width'], swalip_hidden_dim),
utils.infer_batchnorm_class()(swalip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(swalip_hidden_dim, swalip_proj_dim)
)
self.swalip_text_projector = nn.Sequential(
nn.Linear(kwargs['transformer_width'], swalip_hidden_dim),
utils.infer_batchnorm_class()(swalip_hidden_dim),
nn.ReLU(inplace=True),
nn.Linear(swalip_hidden_dim, swalip_proj_dim)
)
# prototypes
if swalip_no_shared_proto:
self.image_prototypes = self.create_prototypes(swalip_proj_dim, swalip_num_proto)
self.text_prototypes = self.create_prototypes(swalip_proj_dim, swalip_num_proto)
else:
self.image_prototypes = self.create_prototypes(swalip_proj_dim, swalip_num_proto)
self.text_prototypes = self.image_prototypes
self.swalip_logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / swalip_temperature))
self.swalip_logit_scale.requires_grad = swalip_learn_temperature
def create_prototypes(self, swalip_proj_dim, swalip_num_proto):
prototypes = nn.utils.weight_norm(nn.Linear(swalip_proj_dim, swalip_num_proto, bias=False))
prototypes.weight_g.data.fill_(1)
prototypes.weight_g.requires_grad = False
return prototypes
def encode_image(self, image):
out = super().encode_image(image)
h_image = self.swalip_image_projector(out['feats_image'])
p_image = self.image_prototypes(F.normalize(h_image))
return {**out, 'h_image': h_image, 'p_image': p_image}
def encode_text(self, text):
out = super().encode_text(text)
h_text = self.swalip_text_projector(out['feats_text'])
p_text = self.text_prototypes(F.normalize(h_text))
return {**out, 'h_text': h_text, 'p_text': p_text}
def forward(self, image, text):
return {
**super().forward(image, text),
'swalip_logit_scale': self.swalip_logit_scale.exp(),
}
def get_model(args, **kwargs):
arch, model_name = args.model.rsplit('_', 1)
model_class = {
'BARLIP': BARLIP,
'SWALIP': CL2L,
'SWALIPV1': SWALIPV1,
'SIAMLIP': SIAMLIP,
'CLIP': CLIP,
'CL2L': CL2L,
}[model_name]
model = globals()[arch](model_class, **vars(args), **kwargs)
return model
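# Example: a name like 'CL2L_VITB16_BARLIP' is split into the factory name
# 'CL2L_VITB16' (looked up in globals() and defined further below) and the
# wrapper class name 'BARLIP'.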
def get_loss(args):
if args.model.startswith('CLIP'):
if args.model.endswith('SWALIPV1'):
return losses.SwALIPV1Loss(
sk_iters=args.sk_iters,
target_epsilon=args.target_epsilon,
swalip_weight=args.swalip_weight,
temperature=args.swalip_temperature,
)
else:
return losses.CLIPLoss()
if args.model.startswith('CL2L'):
if args.model.endswith('BARLIP'):
return losses.BarLIPLoss(
loss_avg_or_sum=args.loss_avg_or_sum,
label_smoothing=args.label_smoothing,
lamb=args.barlip_lamb,
scale_loss=args.barlip_scale_loss,
)
elif args.model.endswith('SIAMLIP'):
return losses.SiamLIPLoss(
loss_avg_or_sum=args.loss_avg_or_sum,
label_smoothing=args.label_smoothing,
)
elif args.model.endswith('SWALIP'):
return losses.SwALIPLoss(
loss_avg_or_sum=args.loss_avg_or_sum,
label_smoothing=args.label_smoothing,
sk_iters=args.sk_iters,
target_epsilon=args.target_epsilon,
swalip_weight=args.swalip_weight,
)
else:
return losses.CL2LLoss(
loss_avg_or_sum=args.loss_avg_or_sum,
label_smoothing=args.label_smoothing
)
def get_metric_names(model):
parent_model, _, child_model = model.split('_')
parent_metric_names = {
'CL2L': ['loss', 'clip_loss', 'clip_loss_image', 'clip_loss_text', 'clip_loss_image_global', 'clip_loss_text_global', 'clip_loss_image_local', 'clip_loss_text_local', 'clip_acc', 'clip_acc_image_local', 'clip_acc_text_local', 'clip_acc_image_global', 'clip_acc_text_global', 'h_logit_scale'],
'CLIP': ['loss', 'clip_loss', 'clip_acc'],
}[parent_model]
child_metric_names = {
'BARLIP': ['barlip_loss'],
'SWALIP': ['swalip_loss'],
'SIAMLIP': ['siamlip_loss'],
'CLIP': ['clip_loss', 'clip_acc'],
'CL2L': ['clip_loss', 'clip_acc'],
}[child_model]
return sorted(set(parent_metric_names + child_metric_names))
@timm.models.registry.register_model
def vit_small_mocov3_patch16_224(**kwargs):
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_small_patch16_224', **model_kwargs)
return model
@timm.models.registry.register_model
def vit_tiny_patch16_224(pretrained=False, **kwargs):
""" ViT-Tiny (Vit-Ti/16)
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = vit._create_vision_transformer('vit_tiny_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_tiny_patch16_384(pretrained=False, **kwargs):
""" ViT-Tiny (Vit-Ti/16) @ 384x384.
"""
model_kwargs = dict(patch_size=16, embed_dim=192, depth=12, num_heads=3, **kwargs)
model = vit._create_vision_transformer('vit_tiny_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_small_patch32_224(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/32)
"""
model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = vit._create_vision_transformer('vit_small_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_small_patch32_384(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/32) at 384x384.
"""
model_kwargs = dict(patch_size=32, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = vit._create_vision_transformer('vit_small_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_small_patch16_224(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = vit._create_vision_transformer('vit_small_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_small_patch16_384(pretrained=False, **kwargs):
""" ViT-Small (ViT-S/16)
NOTE I've replaced my previous 'small' model definition and weights with the small variant from the DeiT paper
"""
model_kwargs = dict(patch_size=16, embed_dim=384, depth=12, num_heads=6, **kwargs)
model = vit._create_vision_transformer('vit_small_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch32_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch32_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch16_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch16_384(pretrained=False, **kwargs):
""" ViT-Base model (ViT-B/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_base_patch8_224(pretrained=False, **kwargs):
""" ViT-Base (ViT-B/8) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=8, embed_dim=768, depth=12, num_heads=12, **kwargs)
model = vit._create_vision_transformer('vit_base_patch8_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch32_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929). No pretrained weights.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch32_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch32_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/32) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=32, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch32_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch16_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 224x224, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch16_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch16_384(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/16) from original paper (https://arxiv.org/abs/2010.11929).
ImageNet-1k weights fine-tuned from in21k @ 384x384, source https://github.com/google-research/vision_transformer.
"""
model_kwargs = dict(patch_size=16, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch16_384', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_large_patch14_224(pretrained=False, **kwargs):
""" ViT-Large model (ViT-L/14)
"""
model_kwargs = dict(patch_size=14, embed_dim=1024, depth=24, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_large_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_huge_patch14_224(pretrained=False, **kwargs):
""" ViT-Huge model (ViT-H/14) from original paper (https://arxiv.org/abs/2010.11929).
"""
model_kwargs = dict(patch_size=14, embed_dim=1280, depth=32, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_huge_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_giant_patch14_224(pretrained=False, **kwargs):
""" ViT-Giant (little-g) model (ViT-g/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
"""
model_kwargs = dict(patch_size=14, embed_dim=1408, mlp_ratio=48/11, depth=40, num_heads=16, **kwargs)
model = vit._create_vision_transformer('vit_giant_patch14_224', pretrained=pretrained, **model_kwargs)
return model
@timm.models.registry.register_model
def vit_gigantic_patch14_224(pretrained=False, **kwargs):
""" ViT-Gigantic (big-G) model (ViT-G/14) from `Scaling Vision Transformers` - https://arxiv.org/abs/2106.04560
"""
model_kwargs = dict(patch_size=14, embed_dim=1664, mlp_ratio=64/13, depth=48, num_heads=16, **kwargs)
model = vit.vit._create_vision_transformer('vit_gigantic_patch14_224', pretrained=pretrained, **model_kwargs)
return model
def CL2L_CNEXTT(model_class, **kwargs):
vision_model = timm.create_model('convnext_tiny', num_classes=0)
if dist.is_available() and dist.is_initialized():
vision_model = nn.SyncBatchNorm.convert_sync_batchnorm(vision_model)
model = model_class(vision_width=vision_model.num_features, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITS16MOCO(model_class, attn_layer, **kwargs):
vision_model = timm.create_model('vit_small_mocov3_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(vision_width=384, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITS16(model_class, attn_layer, **kwargs):
vision_model = timm.create_model('vit_small_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(vision_width=384, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITS32(model_class, attn_layer, **kwargs):
vision_model = timm.create_model('vit_small_patch32_224', num_classes=0, attn_layer=attn_layer)
model = model_class(vision_width=384, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_R50(model_class, **kwargs):
vision_model = timm.create_model('resnet50', num_classes=0)
if dist.is_available() and dist.is_initialized():
vision_model = nn.SyncBatchNorm.convert_sync_batchnorm(vision_model)
model = model_class(vision_width=2048, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CLIP_R50(model_class, **kwargs):
vision_model = timm.create_model('resnet50', num_classes=0)
if dist.is_available() and dist.is_initialized():
vision_model = nn.SyncBatchNorm.convert_sync_batchnorm(vision_model)
model = model_class(vision_width=2048, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_CNEXTS(model_class, **kwargs):
vision_model = timm.create_model('convnext_small', num_classes=0)
if dist.is_available() and dist.is_initialized():
vision_model = nn.SyncBatchNorm.convert_sync_batchnorm(vision_model)
model = model_class(vision_width=vision_model.num_features, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CLIP_VITB16(model_class, attn_layer, **kwargs):
vision_model = timm.create_model('vit_base_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(vision_width=768, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITB32(model_class, attn_layer, embed_dim=512, **kwargs):
vision_model = timm.create_model('vit_base_patch32_224', num_classes=0, attn_layer=attn_layer)
model = model_class(embed_dim=embed_dim, vision_width=768, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITB16(model_class, attn_layer, embed_dim=512, **kwargs):
vision_model = timm.create_model('vit_base_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(embed_dim=embed_dim, vision_width=768, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CLIP_VITL16(model_class, attn_layer, embed_dim=512, **kwargs):
vision_model = timm.create_model('vit_large_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(embed_dim=embed_dim, vision_width=1024, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
def CL2L_VITL16(model_class, attn_layer, embed_dim=512, **kwargs):
vision_model = timm.create_model('vit_large_patch16_224', num_classes=0, attn_layer=attn_layer)
model = model_class(embed_dim=embed_dim, vision_width=1024, vision_model=vision_model, context_length=77, vocab_size=49408,
transformer_width=512, transformer_heads=8, **kwargs)
return model
|
clip-rocket-main
|
models.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""
A script to run multinode training with submitit.
"""
import argparse
import os
import uuid
from pathlib import Path
import main as main_slip
import submitit
def parse_args():
parser = main_slip.get_args_parser()
parser = argparse.ArgumentParser("Submitit for CL2L pre-training", parents=[parser])
parser.add_argument("--ngpus", default=8, type=int, help="Number of gpus to request on each node")
parser.add_argument("--nodes", default=8, type=int, help="Number of nodes to request")
parser.add_argument("--timeout", default=2800, type=int, help="Duration of the job")
parser.add_argument("--job_dir", default="", type=str, help="Job dir. Leave empty for automatic.")
parser.add_argument("--partition", default="", type=str, help="Partition where to submit")
parser.add_argument("--use_volta32", action='store_true', help="Big models? Use this")
parser.add_argument('--comment', default="", type=str,
help='Comment to pass to scheduler, e.g. priority message')
return parser.parse_args()
def get_shared_folder() -> Path:
user = os.getenv("USER")
if Path("/checkpoint/").is_dir():
p = Path(f"/checkpoint/{user}/experiments/cl2l")
p.mkdir(exist_ok=True)
return p
raise RuntimeError("No shared folder available")
def get_init_file():
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder()), exist_ok=True)
init_file = get_shared_folder() / f"{uuid.uuid4().hex}_init"
if init_file.exists():
os.remove(str(init_file))
return init_file
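# The fresh init file's URI is later assigned to args.dist_url (see Trainer.checkpoint
# and main() below), which presumably serves as the torch.distributed init_method.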
class Trainer(object):
def __init__(self, args):
self.args = args
def __call__(self):
import main as main_slip
self._setup_gpu_args()
main_slip.main(self.args)
def checkpoint(self):
import os
import submitit
self.args.dist_url = get_init_file().as_uri()
print("Requeuing ", self.args)
empty_trainer = type(self)(self.args)
return submitit.helpers.DelayedSubmission(empty_trainer)
def _setup_gpu_args(self):
import submitit
from pathlib import Path
job_env = submitit.JobEnvironment()
self.args.output_dir = Path(str(self.args.output_dir).replace("%j", str(job_env.job_id)))
self.args.gpu = job_env.local_rank
self.args.rank = job_env.global_rank
self.args.world_size = job_env.num_tasks
print(f"Process group: {job_env.num_tasks} tasks, rank: {job_env.global_rank}")
def main():
args = parse_args()
if args.job_dir == "":
args.job_dir = get_shared_folder() / "%j"
# Note that the folder will depend on the job_id, to easily track experiments
executor = submitit.AutoExecutor(folder=args.job_dir, slurm_max_num_timeout=30)
num_gpus_per_node = args.ngpus
nodes = args.nodes
timeout_min = args.timeout
partition = args.partition
kwargs = {}
if args.use_volta32:
kwargs['slurm_constraint'] = 'volta32gb'
if args.comment:
kwargs['slurm_comment'] = args.comment
executor.update_parameters(
mem_gb=40 * num_gpus_per_node,
gpus_per_node=num_gpus_per_node,
tasks_per_node=num_gpus_per_node, # one task per GPU
cpus_per_task=10,
nodes=nodes,
timeout_min=timeout_min, # max is 60 * 72
# Below are cluster dependent parameters
slurm_partition=partition,
slurm_signal_delay_s=120,
**kwargs
)
executor.update_parameters(name="slip")
args.dist_url = get_init_file().as_uri()
args.output_dir = args.job_dir
trainer = Trainer(args)
job = executor.submit(trainer)
print("Submitted job_id:", job.job_id)
if __name__ == "__main__":
main()
|
clip-rocket-main
|
run_with_submitit.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from collections import defaultdict
import json
import os
import pickle
import zipfile
import numpy as np
from PIL import Image, ImageFile
import torch
from torchvision import transforms
from torchvision import datasets as t_datasets
from torchvision.datasets import ImageFolder
import utils
ImageFile.LOAD_TRUNCATED_IMAGES = True
def pil_loader(path):
# open path as file to avoid ResourceWarning (https://github.com/python-pillow/Pillow/issues/835)
with open(path, 'rb') as f:
img = Image.open(f)
return img.convert('RGB')
def yfcc_loader(root, index):
index = format(index, "0>8d")
repo = index[:2]
z = index[2: 5]
file_img = index[5:] + '.jpg'
path_zip = os.path.join(root, 'images', repo, z) + '.zip'
with zipfile.ZipFile(path_zip, 'r') as myzip:
img = Image.open(myzip.open(file_img))
return img.convert('RGB')
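# Example: index 1234567 is zero-padded to "01234567" and resolved to
# <root>/images/01/234.zip, from which the member "567.jpg" is read.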
class ImageCaptionDatasetBase(torch.utils.data.Dataset):
def __init__(self, dataset, root, metadata, caption_sampling='single'):
self.dataset = dataset
self.root = root
self.caption_sampling = caption_sampling
if self.dataset == 'yfcc15m':
with open(metadata, 'rb') as f:
self.samples = pickle.load(f)
elif self.dataset == 'coco':
samples = defaultdict(list)
with open(metadata) as f:
annotations = json.load(f)['annotations']
for ann in annotations:
samples[ann['image_id']].append(ann['caption'])
self.samples = [(k, v) for k, v in samples.items()]
elif self.dataset == 'cc12m' or self.dataset == 'cc3m':
self.samples = np.load(metadata, allow_pickle=True)
elif self.dataset == 'merged_opendata':
self.samples = []
self.roots = []
for md, r in zip(metadata.split("---"), root.split("---")):
self.samples.append(np.load(md, allow_pickle=True))
self.roots.append(r)
elif self.dataset == 'redcaps':
with open(metadata) as f:
annotations = json.load(f)
self.samples = [(ann['image_id'], ann['subreddit'], ann['caption']) for ann in annotations]
def get_raw_item(self, i):
if self.dataset == 'yfcc15m':
index, title, desc = self.samples[i]
caption = [c for c in [title, desc] if c != '']
caption = [''] if len(caption) == 0 else caption
caption = tuple(caption if self.caption_sampling == 'multi' else [np.random.choice(caption)])
img = yfcc_loader(self.root, index)
elif self.dataset == 'coco':
index, captions = self.samples[i]
path = os.path.join(self.root, 'train2017', '{:012d}.jpg'.format(index))
img = pil_loader(path)
caption = tuple(captions if self.caption_sampling == 'multi' else [np.random.choice(captions)])
elif self.dataset == 'cc3m':
ann = self.samples[i]
filename, captions = ann['image_id'], ann['captions']
path = os.path.join(self.root, str(filename))
img = pil_loader(path)
caption = tuple(captions if self.caption_sampling == 'multi' else [np.random.choice(captions)])
elif self.dataset == 'cc12m':
ann = self.samples[i]
filename, captions = ann['image_name'], ann['captions']
path = os.path.join(self.root, filename)
img = pil_loader(path)
caption = tuple(captions if self.caption_sampling == 'multi' else [np.random.choice(captions)])
elif self.dataset == 'merged_opendata':
datasets = ['cc3m', 'cc12m', 'yfcc15m']
cum_lens = np.array([len(s) for s in self.samples]).cumsum()
d_idx = [idx for idx, l in enumerate(cum_lens) if i < l][0]
offset = cum_lens[d_idx - 1] if d_idx > 0 else 0
samples_list = self.samples
self.samples = self.samples[d_idx]
self.dataset = datasets[d_idx]
self.root = self.roots[d_idx]
img, caption = self.get_raw_item(i - offset)
self.dataset = 'merged_opendata'
self.samples = samples_list
elif self.dataset == 'redcaps':
image_id, subreddit, caption = self.samples[i]
path = os.path.join(self.root, subreddit, f"{image_id}.jpg")
img = pil_loader(path)
elif 'pmd' in self.dataset:
img, captions = self.pmd[i]
# if isinstance(captions, str):
# caption = captions
assert isinstance(captions, list)
caption = tuple(captions if self.caption_sampling == 'multi' else [np.random.choice(captions)])
return img, caption
def __getitem__(self, i):
raise NotImplementedError
def __len__(self):
if 'pmd' in self.dataset:
return len(self.pmd)
elif 'merged_opendata' in self.dataset:
return sum([len(s) for s in self.samples])
else:
return len(self.samples)
class ImageCaptionDatasetCLIP(ImageCaptionDatasetBase):
def __init__(self, dataset, root, metadata, transform=None, tokenizer=None):
super().__init__(dataset, root, metadata)
self.transform = transform
self.tokenizer = tokenizer
def __getitem__(self, i):
img, caption = self.get_raw_item(i)
# apply transformation
if self.transform is not None:
image = self.transform(img)
# tokenize caption
if self.tokenizer is not None:
caption = self.tokenizer(caption)
return image, caption
class ImageCaptionDatasetCL2L(ImageCaptionDatasetBase):
def __init__(
self,
dataset,
root,
metadata,
transform,
augment,
num_augs=2,
tokenizer=None,
augs_only=False,
caption_sampling='single'
):
super().__init__(dataset, root, metadata, caption_sampling=caption_sampling)
self.transform = transform
self.num_augs = num_augs
self.augment = augment if isinstance(augment, list) else [augment] * num_augs
self.tokenizer = tokenizer
self.augs_only = augs_only
def __getitem__(self, i):
img, caption = self.get_raw_item(i)
augs = [self.augment[i](img) for i in range(self.num_augs)]
if self.augs_only:
return augs
image = self.transform(img)
# tokenize caption
if self.tokenizer is not None:
caption = self.tokenizer(caption)
return image, caption, *augs
class FileListDataset(torch.utils.data.Dataset):
def __init__(self, images, labels, transform=None, target_transform=None):
self.transform = transform
self.target_transform = target_transform
self.images = np.load(images)
self.labels = np.load(labels)
def __getitem__(self, index):
img = pil_loader(self.images[index])
target = self.labels[index]
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.images)
def get_downstream_dataset(catalog, name, is_train, transform):
entry = catalog[name]
root = entry['path']
if entry['type'] == 'imagefolder':
dataset = t_datasets.ImageFolder(os.path.join(root, entry['train'] if is_train else entry['test']),
transform=transform)
elif entry['type'] == 'special':
if name == 'cifar10':
dataset = t_datasets.CIFAR10(root, train=is_train,
transform=transform, download=True)
elif name == 'cifar100':
dataset = t_datasets.CIFAR100(root, train=is_train,
transform=transform, download=True)
elif name == 'stl10':
dataset = t_datasets.STL10(root, split='train' if is_train else 'test',
transform=transform, download=True)
elif name == 'mnist':
dataset = t_datasets.MNIST(root, train=is_train,
transform=transform, download=True)
elif entry['type'] == 'filelist':
path = entry['train'] if is_train else entry['test']
val_images = os.path.join(root, path + '_images.npy')
val_labels = os.path.join(root, path + '_labels.npy')
if name == 'clevr_counts':
target_transform = lambda x: ['count_10', 'count_3', 'count_4', 'count_5', 'count_6', 'count_7', 'count_8', 'count_9'].index(x)
else:
target_transform = None
dataset = FileListDataset(val_images, val_labels, transform, target_transform)
else:
raise Exception('Unknown dataset')
return dataset
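# Illustrative usage sketch (hypothetical helper, not part of the original file):
# a minimal catalog entry for one of the 'special' torchvision datasets. The real
# catalog lives in dataset_catalog.json; the path below is a placeholder and the
# call downloads CIFAR-10 into it when executed.
def _downstream_dataset_sketch():
    catalog = {'cifar10': {'type': 'special', 'path': './data'}}
    return get_downstream_dataset(
        catalog, name='cifar10', is_train=False, transform=transforms.ToTensor()
    )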
def get_train_dataset(args, tokenizer, metadata, augs_only=False):
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
train_transform = transforms.Compose([
transforms.RandomResizedCrop(
224,
scale=(args.weak_min_scale, 1.0),
interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.ToTensor(),
normalize
])
augment = transforms.Compose([
transforms.RandomResizedCrop(
args.multicrop_resize,
scale=(0.08, args.multicrop_max_scale),
interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.4, 0.1) # not strengthened
], p=0.8),
transforms.RandomGrayscale(p=args.grayscale_prob),
transforms.RandomApply([utils.GaussianBlur([.1, 2.])], p=args.blur_prob),
transforms.RandomApply([utils.Solarization()], p=args.solarize_prob),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
if args.byol_augment:
assert args.num_augs == 2
augment = []
asym_blur_prob = [1.0, 0.1]
asym_solarize_prob = [0.0, 0.2]
for blur_prob, solarize_prob in zip(asym_blur_prob, asym_solarize_prob):
augment.append(transforms.Compose([
transforms.RandomResizedCrop(
args.multicrop_resize,
scale=(0.08, args.multicrop_max_scale),
interpolation=transforms.InterpolationMode.BICUBIC
),
transforms.RandomApply([
transforms.ColorJitter(0.4, 0.4, 0.2, 0.1)
], p=0.8),
transforms.RandomGrayscale(p=0.2),
transforms.RandomApply([utils.GaussianBlur([.1, 2.])], p=blur_prob),
transforms.RandomApply([utils.Solarization()], p=solarize_prob),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
assert not (args.weak_augment and args.strong_augment)
if args.weak_augment:
augment = train_transform
if args.strong_augment:
train_transform = augment
if args.randaugment:
train_transform = transforms.RandomChoice([train_transform, augment])
if args.model.startswith('CLIP'):
return ImageCaptionDatasetCLIP(args.dataset, args.root, metadata, train_transform, tokenizer)
elif args.model.startswith('CL2L'):
return ImageCaptionDatasetCL2L(
args.dataset,
args.root,
metadata,
train_transform,
augment,
args.num_augs,
tokenizer=tokenizer,
augs_only=augs_only,
caption_sampling=args.caption_sampling
)
def get_val_dataset():
val_transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
cwd = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(cwd, 'dataset_catalog.json')) as f:
root = json.load(f)['imagenet']['path']
return ImageFolder(os.path.join(root, 'val'), val_transform)
|
clip-rocket-main
|
datasets.py
|
# Taken from https://github.com/rwightman/timm
""" Vision Transformer (ViT) in PyTorch
A PyTorch implementation of Vision Transformers as described in:
'An Image Is Worth 16 x 16 Words: Transformers for Image Recognition at Scale'
- https://arxiv.org/abs/2010.11929
`How to train your ViT? Data, Augmentation, and Regularization in Vision Transformers`
- https://arxiv.org/abs/2106.10270
The official jax code is released and available at https://github.com/google-research/vision_transformer
Acknowledgments:
* The paper authors for releasing code and weights, thanks!
* I fixed my class token impl based on Phil Wang's https://github.com/lucidrains/vit-pytorch ... check it out
for some einops/einsum fun
* Simple transformer style inspired by Andrej Karpathy's https://github.com/karpathy/minGPT
* Bert reference code checks against Huggingface Transformers and Tensorflow Bert
Hacked together by / Copyright 2020, Ross Wightman
"""
import math
from functools import partial
from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint
from timm.models.helpers import build_model_with_cfg, resolve_pretrained_cfg, named_apply, adapt_input_conv, checkpoint_seq
from timm.models.layers import PatchEmbed, Mlp, DropPath, trunc_normal_, lecun_normal_
from xformers.ops import memory_efficient_attention, unbind
class Attention(nn.Module):
def __init__(self, dim, num_heads=8, qkv_bias=False, attn_drop=0., proj_drop=0.):
super().__init__()
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim ** -0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
q, k, v = qkv.unbind(0)
attn = (q @ k.transpose(-2, -1)) * self.scale
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = (attn @ v).transpose(1, 2).reshape(B, N, C)
x = self.proj(x)
x = self.proj_drop(x)
return x
class MemEffAttention(nn.Module):
def __init__(
self,
dim: int,
num_heads: int = 8,
qkv_bias: bool = False,
attn_drop: float = 0.0,
proj_drop: float = 0.0,
) -> None:
super().__init__()
self.num_heads = num_heads
head_dim = dim // num_heads
self.scale = head_dim**-0.5
self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
self.attn_drop = nn.Dropout(attn_drop)
self.proj = nn.Linear(dim, dim)
self.proj_drop = nn.Dropout(proj_drop)
def forward(self, x):
B, N, C = x.shape
qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads)
q, k, v = unbind(qkv, 2)
x = memory_efficient_attention(q, k, v)
x = x.reshape([B, N, C])
x = self.proj(x)
x = self.proj_drop(x)
return x
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class Block(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
drop=0.,
attn_drop=0.,
init_values=None,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm,
attn_layer=Attention
):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = attn_layer(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.ls1 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
# NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
self.ls2 = LayerScale(dim, init_values=init_values) if init_values else nn.Identity()
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
def forward(self, x):
x = x + self.drop_path1(self.ls1(self.attn(self.norm1(x))))
x = x + self.drop_path2(self.ls2(self.mlp(self.norm2(x))))
return x
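# Illustrative shape-check sketch (hypothetical helper, not part of the original file):
# a transformer Block is residual and therefore shape-preserving over
# (batch, tokens, dim) inputs; the sizes below are arbitrary.
def _block_shape_sketch():
    blk = Block(dim=64, num_heads=4)
    x = torch.randn(2, 197, 64)  # e.g. 196 patch tokens + 1 class token
    y = blk(x)
    assert y.shape == x.shape
    return y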
class ResPostBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
mlp_ratio=4.,
qkv_bias=False,
drop=0.,
attn_drop=0.,
init_values=None,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm
):
super().__init__()
self.init_values = init_values
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)
self.norm1 = norm_layer(dim)
self.drop_path1 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.mlp = Mlp(in_features=dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)
self.norm2 = norm_layer(dim)
self.drop_path2 = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.init_weights()
def init_weights(self):
# NOTE this init overrides that base model init with specific changes for the block type
if self.init_values is not None:
nn.init.constant_(self.norm1.weight, self.init_values)
nn.init.constant_(self.norm2.weight, self.init_values)
def forward(self, x):
x = x + self.drop_path1(self.norm1(self.attn(x)))
x = x + self.drop_path2(self.norm2(self.mlp(x)))
return x
class ParallelBlock(nn.Module):
def __init__(
self,
dim,
num_heads,
num_parallel=2,
mlp_ratio=4.,
qkv_bias=False,
init_values=None,
drop=0.,
attn_drop=0.,
drop_path=0.,
act_layer=nn.GELU,
norm_layer=nn.LayerNorm
):
super().__init__()
self.num_parallel = num_parallel
self.attns = nn.ModuleList()
self.ffns = nn.ModuleList()
for _ in range(num_parallel):
self.attns.append(nn.Sequential(OrderedDict([
('norm', norm_layer(dim)),
('attn', Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, attn_drop=attn_drop, proj_drop=drop)),
('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()),
('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity())
])))
self.ffns.append(nn.Sequential(OrderedDict([
('norm', norm_layer(dim)),
('mlp', Mlp(dim, hidden_features=int(dim * mlp_ratio), act_layer=act_layer, drop=drop)),
('ls', LayerScale(dim, init_values=init_values) if init_values else nn.Identity()),
('drop_path', DropPath(drop_path) if drop_path > 0. else nn.Identity())
])))
def _forward_jit(self, x):
x = x + torch.stack([attn(x) for attn in self.attns]).sum(dim=0)
x = x + torch.stack([ffn(x) for ffn in self.ffns]).sum(dim=0)
return x
@torch.jit.ignore
def _forward(self, x):
x = x + sum(attn(x) for attn in self.attns)
x = x + sum(ffn(x) for ffn in self.ffns)
return x
def forward(self, x):
if torch.jit.is_scripting() or torch.jit.is_tracing():
return self._forward_jit(x)
else:
return self._forward(x)
class VisionTransformer(nn.Module):
""" Vision Transformer
A PyTorch impl of : `An Image is Worth 16x16 Words: Transformers for Image Recognition at Scale`
- https://arxiv.org/abs/2010.11929
"""
def __init__(
self,
img_size=224,
patch_size=16,
in_chans=3,
num_classes=1000,
global_pool='token',
embed_dim=768,
depth=12,
num_heads=12,
mlp_ratio=4.,
qkv_bias=True,
init_values=None,
class_token=True,
no_embed_class=False,
pre_norm=False,
fc_norm=None,
drop_rate=0.,
attn_drop_rate=0.,
drop_path_rate=0.,
weight_init='',
embed_layer=PatchEmbed,
norm_layer=None,
act_layer=None,
block_fn=Block,
attn_layer=Attention
):
"""
Args:
img_size (int, tuple): input image size
patch_size (int, tuple): patch size
in_chans (int): number of input channels
num_classes (int): number of classes for classification head
global_pool (str): type of global pooling for final sequence (default: 'token')
embed_dim (int): embedding dimension
depth (int): depth of transformer
num_heads (int): number of attention heads
mlp_ratio (int): ratio of mlp hidden dim to embedding dim
qkv_bias (bool): enable bias for qkv if True
init_values: (float): layer-scale init values
class_token (bool): use class token
            fc_norm (Optional[bool]): pre-fc norm after pool; if None, it is enabled when global_pool == 'avg' (default: None)
drop_rate (float): dropout rate
attn_drop_rate (float): attention dropout rate
drop_path_rate (float): stochastic depth rate
weight_init (str): weight init scheme
embed_layer (nn.Module): patch embedding layer
norm_layer: (nn.Module): normalization layer
act_layer: (nn.Module): MLP activation layer
"""
super().__init__()
assert global_pool in ('', 'avg', 'token')
assert class_token or global_pool != 'token'
use_fc_norm = global_pool == 'avg' if fc_norm is None else fc_norm
norm_layer = norm_layer or partial(nn.LayerNorm, eps=1e-6)
act_layer = act_layer or nn.GELU
self.num_classes = num_classes
self.global_pool = global_pool
self.num_features = self.embed_dim = embed_dim # num_features for consistency with other models
self.num_prefix_tokens = 1 if class_token else 0
self.no_embed_class = no_embed_class
self.grad_checkpointing = False
self.patch_embed = embed_layer(
img_size=img_size,
patch_size=patch_size,
in_chans=in_chans,
embed_dim=embed_dim,
bias=not pre_norm, # disable bias if pre-norm is used (e.g. CLIP)
)
num_patches = self.patch_embed.num_patches
self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim)) if class_token else None
embed_len = num_patches if no_embed_class else num_patches + self.num_prefix_tokens
self.pos_embed = nn.Parameter(torch.randn(1, embed_len, embed_dim) * .02)
self.pos_drop = nn.Dropout(p=drop_rate)
self.norm_pre = norm_layer(embed_dim) if pre_norm else nn.Identity()
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)] # stochastic depth decay rule
self.blocks = nn.Sequential(*[
block_fn(
dim=embed_dim,
num_heads=num_heads,
mlp_ratio=mlp_ratio,
qkv_bias=qkv_bias,
init_values=init_values,
drop=drop_rate,
attn_drop=attn_drop_rate,
drop_path=dpr[i],
norm_layer=norm_layer,
act_layer=act_layer,
attn_layer=attn_layer
)
for i in range(depth)])
self.norm = norm_layer(embed_dim) if not use_fc_norm else nn.Identity()
# Classifier Head
self.fc_norm = norm_layer(embed_dim) if use_fc_norm else nn.Identity()
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
if weight_init != 'skip':
self.init_weights(weight_init)
def init_weights(self, mode=''):
assert mode in ('jax', 'jax_nlhb', 'moco', '')
head_bias = -math.log(self.num_classes) if 'nlhb' in mode else 0.
trunc_normal_(self.pos_embed, std=.02)
if self.cls_token is not None:
nn.init.normal_(self.cls_token, std=1e-6)
named_apply(get_init_weights_vit(mode, head_bias), self)
def _init_weights(self, m):
# this fn left here for compat with downstream users
init_weights_vit_timm(m)
@torch.jit.ignore()
def load_pretrained(self, checkpoint_path, prefix=''):
_load_weights(self, checkpoint_path, prefix)
@torch.jit.ignore
def no_weight_decay(self):
return {'pos_embed', 'cls_token', 'dist_token'}
@torch.jit.ignore
def group_matcher(self, coarse=False):
return dict(
stem=r'^cls_token|pos_embed|patch_embed', # stem and embed
blocks=[(r'^blocks\.(\d+)', None), (r'^norm', (99999,))]
)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
@torch.jit.ignore
def get_classifier(self):
return self.head
def reset_classifier(self, num_classes: int, global_pool=None):
self.num_classes = num_classes
if global_pool is not None:
assert global_pool in ('', 'avg', 'token')
self.global_pool = global_pool
self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()
def _pos_embed(self, x):
if self.no_embed_class:
# deit-3, updated JAX (big vision)
# position embedding does not overlap with class token, add then concat
x = x + self.pos_embed
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
else:
# original timm, JAX, and deit vit impl
# pos_embed has entry for class token, concat then add
if self.cls_token is not None:
x = torch.cat((self.cls_token.expand(x.shape[0], -1, -1), x), dim=1)
x = x + self.pos_embed
return self.pos_drop(x)
def forward_features(self, x):
x = self.patch_embed(x)
x = self._pos_embed(x)
x = self.norm_pre(x)
if self.grad_checkpointing and not torch.jit.is_scripting():
x = checkpoint_seq(self.blocks, x)
else:
x = self.blocks(x)
x = self.norm(x)
return x
def forward_head(self, x, pre_logits: bool = False):
if self.global_pool:
x = x[:, self.num_prefix_tokens:].mean(dim=1) if self.global_pool == 'avg' else x[:, 0]
x = self.fc_norm(x)
return x if pre_logits else self.head(x)
def forward(self, x):
x = self.forward_features(x)
x = self.forward_head(x)
return x
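# Illustrative forward-pass sketch (hypothetical helper, not part of the original file):
# with num_classes=0 the head is nn.Identity, so a tiny ViT returns the pooled
# class-token embedding of shape (batch, embed_dim). Assumes a timm version whose
# PatchEmbed accepts the `bias` argument, as the class above already requires.
def _vit_forward_sketch():
    model = VisionTransformer(
        img_size=224, patch_size=16, embed_dim=192, depth=2, num_heads=3, num_classes=0
    )
    feats = model(torch.randn(1, 3, 224, 224))
    assert feats.shape == (1, 192)
    return feats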
def init_weights_vit_timm(module: nn.Module, name: str = ''):
""" ViT weight initialization, original timm impl (for reproducibility) """
if isinstance(module, nn.Linear):
trunc_normal_(module.weight, std=.02)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def init_weights_vit_jax(module: nn.Module, name: str = '', head_bias: float = 0.):
""" ViT weight initialization, matching JAX (Flax) impl """
if isinstance(module, nn.Linear):
if name.startswith('head'):
nn.init.zeros_(module.weight)
nn.init.constant_(module.bias, head_bias)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.normal_(module.bias, std=1e-6) if 'mlp' in name else nn.init.zeros_(module.bias)
elif isinstance(module, nn.Conv2d):
lecun_normal_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def init_weights_vit_moco(module: nn.Module, name: str = ''):
""" ViT weight initialization, matching moco-v3 impl minus fixed PatchEmbed """
if isinstance(module, nn.Linear):
if 'qkv' in name:
# treat the weights of Q, K, V separately
val = math.sqrt(6. / float(module.weight.shape[0] // 3 + module.weight.shape[1]))
nn.init.uniform_(module.weight, -val, val)
else:
nn.init.xavier_uniform_(module.weight)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif hasattr(module, 'init_weights'):
module.init_weights()
def get_init_weights_vit(mode='jax', head_bias: float = 0.):
if 'jax' in mode:
return partial(init_weights_vit_jax, head_bias=head_bias)
elif 'moco' in mode:
return init_weights_vit_moco
else:
return init_weights_vit_timm
@torch.no_grad()
def _load_weights(model: VisionTransformer, checkpoint_path: str, prefix: str = ''):
""" Load weights from .npz checkpoints for official Google Brain Flax implementation
"""
import numpy as np
def _n2p(w, t=True):
if w.ndim == 4 and w.shape[0] == w.shape[1] == w.shape[2] == 1:
w = w.flatten()
if t:
if w.ndim == 4:
w = w.transpose([3, 2, 0, 1])
elif w.ndim == 3:
w = w.transpose([2, 0, 1])
elif w.ndim == 2:
w = w.transpose([1, 0])
return torch.from_numpy(w)
w = np.load(checkpoint_path)
if not prefix and 'opt/target/embedding/kernel' in w:
prefix = 'opt/target/'
if hasattr(model.patch_embed, 'backbone'):
# hybrid
backbone = model.patch_embed.backbone
stem_only = not hasattr(backbone, 'stem')
stem = backbone if stem_only else backbone.stem
stem.conv.weight.copy_(adapt_input_conv(stem.conv.weight.shape[1], _n2p(w[f'{prefix}conv_root/kernel'])))
stem.norm.weight.copy_(_n2p(w[f'{prefix}gn_root/scale']))
stem.norm.bias.copy_(_n2p(w[f'{prefix}gn_root/bias']))
if not stem_only:
for i, stage in enumerate(backbone.stages):
for j, block in enumerate(stage.blocks):
bp = f'{prefix}block{i + 1}/unit{j + 1}/'
for r in range(3):
getattr(block, f'conv{r + 1}').weight.copy_(_n2p(w[f'{bp}conv{r + 1}/kernel']))
getattr(block, f'norm{r + 1}').weight.copy_(_n2p(w[f'{bp}gn{r + 1}/scale']))
getattr(block, f'norm{r + 1}').bias.copy_(_n2p(w[f'{bp}gn{r + 1}/bias']))
if block.downsample is not None:
block.downsample.conv.weight.copy_(_n2p(w[f'{bp}conv_proj/kernel']))
block.downsample.norm.weight.copy_(_n2p(w[f'{bp}gn_proj/scale']))
block.downsample.norm.bias.copy_(_n2p(w[f'{bp}gn_proj/bias']))
embed_conv_w = _n2p(w[f'{prefix}embedding/kernel'])
else:
embed_conv_w = adapt_input_conv(
model.patch_embed.proj.weight.shape[1], _n2p(w[f'{prefix}embedding/kernel']))
model.patch_embed.proj.weight.copy_(embed_conv_w)
model.patch_embed.proj.bias.copy_(_n2p(w[f'{prefix}embedding/bias']))
model.cls_token.copy_(_n2p(w[f'{prefix}cls'], t=False))
pos_embed_w = _n2p(w[f'{prefix}Transformer/posembed_input/pos_embedding'], t=False)
if pos_embed_w.shape != model.pos_embed.shape:
pos_embed_w = resize_pos_embed( # resize pos embedding when different size from pretrained weights
pos_embed_w,
model.pos_embed,
getattr(model, 'num_prefix_tokens', 1),
model.patch_embed.grid_size
)
model.pos_embed.copy_(pos_embed_w)
model.norm.weight.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/scale']))
model.norm.bias.copy_(_n2p(w[f'{prefix}Transformer/encoder_norm/bias']))
if isinstance(model.head, nn.Linear) and model.head.bias.shape[0] == w[f'{prefix}head/bias'].shape[-1]:
model.head.weight.copy_(_n2p(w[f'{prefix}head/kernel']))
model.head.bias.copy_(_n2p(w[f'{prefix}head/bias']))
# NOTE representation layer has been removed, not used in latest 21k/1k pretrained weights
# if isinstance(getattr(model.pre_logits, 'fc', None), nn.Linear) and f'{prefix}pre_logits/bias' in w:
# model.pre_logits.fc.weight.copy_(_n2p(w[f'{prefix}pre_logits/kernel']))
# model.pre_logits.fc.bias.copy_(_n2p(w[f'{prefix}pre_logits/bias']))
for i, block in enumerate(model.blocks.children()):
block_prefix = f'{prefix}Transformer/encoderblock_{i}/'
mha_prefix = block_prefix + 'MultiHeadDotProductAttention_1/'
block.norm1.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/scale']))
block.norm1.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_0/bias']))
block.attn.qkv.weight.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/kernel'], t=False).flatten(1).T for n in ('query', 'key', 'value')]))
block.attn.qkv.bias.copy_(torch.cat([
_n2p(w[f'{mha_prefix}{n}/bias'], t=False).reshape(-1) for n in ('query', 'key', 'value')]))
block.attn.proj.weight.copy_(_n2p(w[f'{mha_prefix}out/kernel']).flatten(1))
block.attn.proj.bias.copy_(_n2p(w[f'{mha_prefix}out/bias']))
for r in range(2):
getattr(block.mlp, f'fc{r + 1}').weight.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/kernel']))
getattr(block.mlp, f'fc{r + 1}').bias.copy_(_n2p(w[f'{block_prefix}MlpBlock_3/Dense_{r}/bias']))
block.norm2.weight.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/scale']))
block.norm2.bias.copy_(_n2p(w[f'{block_prefix}LayerNorm_2/bias']))
def resize_pos_embed(posemb, posemb_new, num_prefix_tokens=1, gs_new=()):
# Rescale the grid of position embeddings when loading from state_dict. Adapted from
# https://github.com/google-research/vision_transformer/blob/00883dd691c63a6830751563748663526e811cee/vit_jax/checkpoint.py#L224
ntok_new = posemb_new.shape[1]
if num_prefix_tokens:
posemb_prefix, posemb_grid = posemb[:, :num_prefix_tokens], posemb[0, num_prefix_tokens:]
ntok_new -= num_prefix_tokens
else:
posemb_prefix, posemb_grid = posemb[:, :0], posemb[0]
gs_old = int(math.sqrt(len(posemb_grid)))
if not len(gs_new): # backwards compatibility
gs_new = [int(math.sqrt(ntok_new))] * 2
assert len(gs_new) >= 2
posemb_grid = posemb_grid.reshape(1, gs_old, gs_old, -1).permute(0, 3, 1, 2)
posemb_grid = F.interpolate(posemb_grid, size=gs_new, mode='bicubic', align_corners=False)
posemb_grid = posemb_grid.permute(0, 2, 3, 1).reshape(1, gs_new[0] * gs_new[1], -1)
posemb = torch.cat([posemb_prefix, posemb_grid], dim=1)
return posemb
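# Illustrative worked example (hypothetical helper, not part of the original file):
# resizing a position embedding trained for a 14x14 patch grid (196 tokens + 1 class
# token) to a 16x16 grid via bicubic interpolation of the 2D grid.
def _resize_pos_embed_sketch():
    posemb_old = torch.randn(1, 1 + 14 * 14, 768)
    posemb_new = torch.zeros(1, 1 + 16 * 16, 768)  # only its shape is used
    resized = resize_pos_embed(posemb_old, posemb_new, num_prefix_tokens=1, gs_new=(16, 16))
    assert resized.shape == posemb_new.shape
    return resized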
def _convert_openai_clip(state_dict, model):
out_dict = {}
swaps = [
('visual.', ''), ('conv1', 'patch_embed.proj'), ('positional_embedding', 'pos_embed'),
('transformer.resblocks.', 'blocks.'), ('ln_pre', 'norm_pre'), ('ln_post', 'norm'), ('ln_', 'norm'),
('in_proj_', 'qkv.'), ('out_proj', 'proj'), ('mlp.c_fc', 'mlp.fc1'), ('mlp.c_proj', 'mlp.fc2'),
]
for k, v in state_dict.items():
if not k.startswith('visual.'):
continue
for sp in swaps:
k = k.replace(sp[0], sp[1])
if k == 'proj':
k = 'head.weight'
v = v.transpose(0, 1)
out_dict['head.bias'] = torch.zeros(v.shape[0])
elif k == 'class_embedding':
k = 'cls_token'
v = v.unsqueeze(0).unsqueeze(1)
elif k == 'pos_embed':
v = v.unsqueeze(0)
if v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(
v,
model.pos_embed,
                    0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1),
model.patch_embed.grid_size
)
out_dict[k] = v
return out_dict
def checkpoint_filter_fn(state_dict, model, adapt_layer_scale=False):
""" convert patch embedding weight from manual patchify + linear proj to conv"""
import re
out_dict = {}
if 'model' in state_dict:
# For deit models
state_dict = state_dict['model']
if 'visual.class_embedding' in state_dict:
return _convert_openai_clip(state_dict, model)
for k, v in state_dict.items():
if 'patch_embed.proj.weight' in k and len(v.shape) < 4:
# For old models that I trained prior to conv based patchification
O, I, H, W = model.patch_embed.proj.weight.shape
v = v.reshape(O, -1, H, W)
elif k == 'pos_embed' and v.shape[1] != model.pos_embed.shape[1]:
# To resize pos embedding when using model at different size from pretrained weights
v = resize_pos_embed(
v,
model.pos_embed,
                0 if getattr(model, 'no_embed_class', False) else getattr(model, 'num_prefix_tokens', 1),
model.patch_embed.grid_size
)
elif adapt_layer_scale and 'gamma_' in k:
# remap layer-scale gamma into sub-module (deit3 models)
k = re.sub(r'gamma_([0-9])', r'ls\1.gamma', k)
elif 'pre_logits' in k:
# NOTE representation layer removed as not used in latest 21k/1k pretrained weights
continue
out_dict[k] = v
return out_dict
def _create_vision_transformer(variant, pretrained=False, **kwargs):
if kwargs.get('features_only', None):
raise RuntimeError('features_only not implemented for Vision Transformer models.')
    kwargs['attn_layer'] = MemEffAttention if kwargs.get('attn_layer') == "flash" else Attention
pretrained_cfg = resolve_pretrained_cfg(variant, pretrained_cfg=kwargs.pop('pretrained_cfg', None))
model = build_model_with_cfg(
VisionTransformer, variant, pretrained,
pretrained_cfg=pretrained_cfg,
pretrained_filter_fn=checkpoint_filter_fn,
pretrained_custom_load='npz' in pretrained_cfg['url'],
**kwargs)
return model
|
clip-rocket-main
|
vit.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# Modified from github.com/openai/CLIP
import gzip
import html
import os
from functools import lru_cache
import ftfy
import regex as re
import torch
from textaugment import EDA
import random
from nltk.tokenize import word_tokenize
import nltk
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
    Returns a dict mapping utf-8 bytes to corresponding unicode strings.
    The reversible bpe codes work on unicode strings.
    This means you need a large number of unicode characters in your vocab if you want to avoid UNKs.
    When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
    This is a significant percentage of your normal, say, 32K bpe vocab.
    To avoid that, we want lookup tables between utf-8 bytes and unicode strings
    that avoid mapping to the whitespace/control characters the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
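# Illustrative sketch (hypothetical helper, not part of the original file): the table
# returned above is a bijection over all 256 byte values, so any utf-8 byte sequence
# can be mapped to printable unicode characters and decoded back without loss.
def _bytes_to_unicode_sketch():
    table = bytes_to_unicode()
    inverse = {v: k for k, v in table.items()}
    assert len(table) == 256
    assert all(inverse[table[b]] == b for b in range(256))
    return table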
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
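# Illustrative sketch (hypothetical helper, not part of the original file): the BPE
# merge candidates for a word are simply its adjacent symbol pairs.
def _get_pairs_sketch():
    pairs = get_pairs(('l', 'o', 'w</w>'))
    assert pairs == {('l', 'o'), ('o', 'w</w>')}
    return pairs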
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(
self,
bpe_path: str = default_bpe(),
text_augment=False,
no_text_augment_prob=0.0,
remove_stopwords_prob=0.5,
synonym_replacement_prob=0.2,
random_swap_prob=0.2,
random_deletion_prob=0.1,
clean_before_augment=False,
num_augs=2,
):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
vocab.extend(['<|startoftext|>', '<|endoftext|>'])
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {'<|startoftext|>': '<|startoftext|>', '<|endoftext|>': '<|endoftext|>'}
self.pat = re.compile(r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
self.clean_before_augment = clean_before_augment
self.remove_stopwords_prob = remove_stopwords_prob
self.stopwords = set(nltk.corpus.stopwords.words('english'))
self.remove_stopwords = lambda x: " ".join([w for w in word_tokenize(x) if w.lower() not in self.stopwords])
if text_augment:
eda = EDA()
identity = lambda x: x
self.text_augment = lambda x: random.choices(
[
identity,
eda.synonym_replacement,
eda.random_swap,
eda.random_deletion
],
weights=[
no_text_augment_prob,
synonym_replacement_prob,
random_swap_prob,
random_deletion_prob
],
k=1
)[0](x)
else:
self.text_augment = None
self.num_augs = num_augs
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
                except ValueError:  # `first` does not occur again after position i
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
def weak_augment(self, text):
if len(text) == 0:
return text
if random.random() < self.remove_stopwords_prob:
stripped_texts = self.remove_stopwords(text)
text = stripped_texts if len(stripped_texts) > 0 else text
return text
def strong_augment(self, text):
if len(text) == 0:
return text
if random.random() < self.remove_stopwords_prob:
stripped_texts = self.remove_stopwords(text)
text = stripped_texts if len(stripped_texts) > 0 else text
if self.text_augment is not None:
augmented_texts = self.text_augment(text)
augmented_texts = augmented_texts[0] if isinstance(augmented_texts, list) else augmented_texts
text = augmented_texts if len(augmented_texts) > 0 else text
return text
def __call__(self, texts, context_length=77):
if isinstance(texts, tuple): # training time
texts = list(texts)
if self.clean_before_augment:
for i, txt in enumerate(texts):
texts[i] = whitespace_clean(basic_clean(txt)).lower()
texts = [
self.weak_augment(random.choice(texts)),
*[self.strong_augment(random.choice(texts)) for _ in range(self.num_augs)],
]
sot_token = self.encoder["<|startoftext|>"]
eot_token = self.encoder["<|endoftext|>"]
all_tokens = [[sot_token] + self.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
tokens = tokens[:context_length]
if tokens[-1] != eot_token:
tokens[-1] = eot_token
result[i, :len(tokens)] = torch.tensor(tokens)
if len(result) == 1:
return result[0]
return result
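# Illustrative usage sketch (hypothetical helper, not part of the original file).
# Assumes the bundled BPE vocab file and the NLTK 'stopwords' corpus are available
# locally. At evaluation time a plain list of strings bypasses the augmentation
# branch and is tokenized into a (num_texts, 77) LongTensor padded with zeros.
def _tokenizer_usage_sketch():
    tokenizer = SimpleTokenizer()
    tokens = tokenizer(["a photo of a cat", "a photo of a dog"])
    assert tokens.shape == (2, 77) and tokens.dtype == torch.long
    return tokens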
|
clip-rocket-main
|
tokenizer.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import os
import random
import shutil
import torch
import torch.distributed as dist
import torch.autograd as autograd
from PIL import ImageFilter, ImageOps
def get_model_parallel(model):
if isinstance(model, torch.nn.DataParallel) \
or isinstance(model, torch.nn.parallel.DistributedDataParallel):
return model.module
else:
return model
def setup_for_distributed(is_master):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(state, is_best, output_dir):
if is_main_process():
ckpt_path = f'{output_dir}/checkpoint.pt'
best_path = f'{output_dir}/checkpoint_best.pt'
torch.save(state, ckpt_path)
if is_best:
shutil.copyfile(ckpt_path, best_path)
def init_distributed_mode(args):
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
args.rank = int(os.environ["RANK"])
args.world_size = int(os.environ['WORLD_SIZE'])
args.gpu = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
args.rank = int(os.environ['SLURM_PROCID'])
args.gpu = args.rank % torch.cuda.device_count()
else:
print('Not using distributed mode')
args.distributed = False
return
args.distributed = True
torch.cuda.set_device(args.gpu)
args.dist_backend = 'nccl'
print('| distributed init (rank {}): {}'.format(
args.rank, args.dist_url), flush=True)
torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
world_size=args.world_size, rank=args.rank)
torch.distributed.barrier()
setup_for_distributed(args.rank == 0)
def scaled_all_reduce(tensors, is_scale=True):
"""Performs the scaled all_reduce operation on the provided tensors.
The input tensors are modified in-place. Currently supports only the sum
reduction operator. The reduced values are scaled by the inverse size of the
world size.
"""
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
# Queue the reductions
reductions = []
for tensor in tensors:
reduction = dist.all_reduce(tensor, async_op=True)
reductions.append(reduction)
# Wait for reductions to finish
for reduction in reductions:
reduction.wait()
# Scale the results
if is_scale:
for tensor in tensors:
tensor.mul_(1.0 / world_size)
return tensors
def all_gather_batch(tensors):
"""
Performs all_gather operation on the provided tensors.
"""
# Queue the gathered tensors
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
tensor_list = []
output_tensor = []
for tensor in tensors:
tensor_all = [torch.ones_like(tensor) for _ in range(world_size)]
dist.all_gather(
tensor_all,
tensor,
            async_op=False  # synchronous gather: results are ready when the call returns
)
tensor_list.append(tensor_all)
for tensor_all in tensor_list:
output_tensor.append(torch.cat(tensor_all, dim=0))
return output_tensor
class GatherLayer(autograd.Function):
"""
Gather tensors from all workers with support for backward propagation:
This implementation does not cut the gradients as torch.distributed.all_gather does.
"""
@staticmethod
def forward(ctx, x):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
def all_gather_batch_with_grad(tensors):
"""
Performs all_gather operation on the provided tensors.
Graph remains connected for backward grad computation.
"""
# Queue the gathered tensors
world_size = get_world_size()
# There is no need for reduction in the single-proc case
if world_size == 1:
return tensors
tensor_list = []
output_tensor = []
for tensor in tensors:
tensor_all = GatherLayer.apply(tensor)
tensor_list.append(tensor_all)
for tensor_all in tensor_list:
output_tensor.append(torch.cat(tensor_all, dim=0))
return output_tensor
def cosine_scheduler(base_value, final_value, epochs, niter_per_ep, warmup_epochs=0, start_warmup_value=0):
warmup_schedule = np.array([])
warmup_iters = warmup_epochs * niter_per_ep
if warmup_epochs > 0:
warmup_schedule = np.linspace(start_warmup_value, base_value, warmup_iters)
iters = np.arange(epochs * niter_per_ep - warmup_iters)
schedule = final_value + 0.5 * (base_value - final_value) * (1 + np.cos(np.pi * iters / len(iters)))
schedule = np.concatenate((warmup_schedule, schedule))
assert len(schedule) == epochs * niter_per_ep
return schedule
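# Illustrative sketch (hypothetical helper, not part of the original file): a 10-epoch
# run at 100 iterations/epoch with 2 warmup epochs yields one value per iteration,
# ramping linearly to the base value and then decaying to the final value on a cosine.
def _cosine_scheduler_sketch():
    schedule = cosine_scheduler(
        base_value=1e-3, final_value=1e-5, epochs=10, niter_per_ep=100, warmup_epochs=2
    )
    assert len(schedule) == 10 * 100
    assert np.isclose(schedule[199], 1e-3)            # end of warmup reaches the base value
    assert np.isclose(schedule[-1], 1e-5, rtol=1e-2)  # last step is near the final value
    return schedule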
class GaussianBlur(object):
"""Gaussian blur augmentation in SimCLR https://arxiv.org/abs/2002.05709"""
def __init__(self, sigma=[.1, 2.]):
self.sigma = sigma
def __call__(self, x):
sigma = random.uniform(self.sigma[0], self.sigma[1])
x = x.filter(ImageFilter.GaussianBlur(radius=sigma))
return x
class Solarization:
"""Solarization as a callable object."""
def __call__(self, img):
return ImageOps.solarize(img)
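# Illustrative sketch (hypothetical helper, not part of the original file): both
# augmentations take and return PIL images of the same size, so they compose with
# the torchvision pipelines built in datasets.py.
def _pixel_aug_sketch():
    from PIL import Image
    img = Image.new('RGB', (32, 32), color=(128, 64, 32))
    out = Solarization()(GaussianBlur(sigma=[0.1, 2.0])(img))
    assert out.size == img.size
    return out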
@torch.no_grad()
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
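# Illustrative worked example (hypothetical helper, not part of the original file):
# 4 samples over 3 classes with targets [0, 0, 2, 2] give top-1 = 50% (rows 0 and 2
# are argmax hits) and top-2 = 75% (row 1 is additionally covered at rank 2).
def _topk_accuracy_sketch():
    logits = torch.tensor([
        [0.9, 0.1, 0.0],   # argmax 0, target 0 -> top-1 hit
        [0.2, 0.7, 0.1],   # argmax 1, target 0 -> only a top-2 hit
        [0.1, 0.2, 0.7],   # argmax 2, target 2 -> top-1 hit
        [0.6, 0.3, 0.1],   # argmax 0, target 2 -> miss even at top-2
    ])
    targets = torch.tensor([0, 0, 2, 2])
    top1, top2 = accuracy(logits, targets, topk=(1, 2))
    assert top1.item() == 50.0 and top2.item() == 75.0
    return top1, top2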
def infer_batchnorm_class():
if dist.is_available() and dist.is_initialized():
return torch.nn.SyncBatchNorm
else:
return torch.nn.BatchNorm1d
def cycle(iterable, sampler=None):
epoch = 0
while True:
if sampler is not None:
sampler.set_epoch(epoch)
for x in iterable:
yield x
epoch += 1
|
clip-rocket-main
|
utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from collections import OrderedDict, defaultdict
import json
import os
from sklearn import metrics
import numpy as np
from tqdm import tqdm
import torch
import torch.backends.cudnn as cudnn
import torch.utils.data
import torchvision.transforms as transforms
import datasets
import models
from tokenizer import SimpleTokenizer
import utils
def get_args_parser():
parser = argparse.ArgumentParser(description='SLIP 0-shot evaluations', add_help=False)
parser.add_argument('--output-dir', default='./', type=str, help='output dir')
parser.add_argument('--batch-size', default=256, type=int, help='batch_size')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N',
help='number of data loading workers per process')
parser.add_argument('--resume', default='', type=str, help='path to latest checkpoint')
parser.add_argument('--local_rank', type=int, default=0)
parser.add_argument('--model-name', type=str, default='')
return parser
def main(args):
# optionally resume from a checkpoint (takes precedence over autoresume)
if args.resume:
ckpt_path = args.resume
elif os.path.isfile(os.path.join(args.output_dir, 'checkpoint_best.pt')):
ckpt_path = os.path.join(args.output_dir, 'checkpoint_best.pt')
else:
raise Exception('no checkpoint found')
ckpt = torch.load(ckpt_path, map_location='cpu')
state_dict = OrderedDict()
for k, v in ckpt['state_dict'].items():
state_dict[k.replace('module.', '')] = v
# create model
old_args = ckpt['args']
print("=> creating model: {}".format(old_args.model))
model = models.get_model(old_args, rand_embed=False)
model.cuda()
msg = model.load_state_dict(state_dict, strict=False)
print(msg)
print("=> loaded resume checkpoint '{}' (epoch {})".format(args.resume, ckpt['epoch']))
cudnn.benchmark = True
cwd = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(cwd, 'dataset_catalog.json')) as f:
catalog = json.load(f)
with open(os.path.join(cwd, 'templates.json')) as f:
all_templates = json.load(f)
with open(os.path.join(cwd, 'labels.json')) as f:
all_labels = json.load(f)
# Data loading code
print("=> creating dataset")
tokenizer = SimpleTokenizer(bpe_path=old_args.bpe_path)
val_transform = transforms.Compose([
transforms.Resize(224),
transforms.CenterCrop(224),
lambda x: x.convert('RGB'),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
])
results = {}
for d in catalog:
if d == 'kinetics700_frames':
continue
print('Evaluating {}'.format(d))
val_dataset = datasets.get_downstream_dataset(catalog, name=d, is_train=False, transform=val_transform)
val_loader = torch.utils.data.DataLoader(
val_dataset, batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True, drop_last=False)
templates = all_templates[d]
labels = all_labels[d]
accs = validate_zeroshot(val_loader, templates, labels, model, tokenizer, d, old_args)
results[d] = accs
print('metric:', accs)
print('All results:')
for d, x in results.items():
print('{}:\n{}'.format(d, x))
res_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'zeroshot_results')
os.makedirs(res_dir, exist_ok=True)
exp_id = os.path.basename(args.output_dir)
ckpt_name = os.path.basename(ckpt_path).rsplit('.', 1)[0]
with open('{}/{}_{}_{}.txt'.format(res_dir, args.model_name, exp_id, ckpt_name), 'w') as f:
f.write(json.dumps(results))
def validate_zeroshot(val_loader, templates, labels, model, tokenizer, dataset_name, args):
# switch to evaluate mode
model.eval()
is_acc = dataset_name not in ['aircraft', 'pets', 'caltech101', 'flowers', 'kinetics700_frames', 'hateful_memes']
print('is_acc:', is_acc)
ensemble_weights = np.linspace(0.1, 0.9, 9).round(decimals=1)
results = defaultdict(lambda: defaultdict(int if is_acc else list))
with torch.no_grad():
print('=> encoding captions')
text_features = defaultdict(list)
for label in tqdm(labels):
if isinstance(label, list):
texts = [t.format(l) for t in templates for l in label]
else:
texts = [t.format(label) for t in templates]
texts = tokenizer(texts).cuda(non_blocking=True)
texts = texts.view(-1, 77).contiguous()
class_embeddings = utils.get_model_parallel(model).encode_text_val(texts)
embed_names = {'z_text', 'h_text'}.intersection(class_embeddings.keys())
for embed_name in embed_names:
cls_embed = class_embeddings[embed_name]
cls_embed = cls_embed / cls_embed.norm(dim=-1, keepdim=True)
cls_embed = cls_embed.mean(dim=0)
cls_embed = cls_embed / cls_embed.norm(dim=-1, keepdim=True)
text_features[embed_name].append(cls_embed)
text_features = {k: torch.stack(v, dim=0) for k, v in text_features.items()}
print('=> encoding images')
for images, target in tqdm(val_loader):
images = images.cuda(non_blocking=True)
target = target.cuda(non_blocking=True)
# encode images
image_features = utils.get_model_parallel(model).encode_image_val(images)
# cosine similarity as logits
similarities = utils.get_model_parallel(model).predict_zeroshot(image_features, text_features)
# measure accuracy
for name, similarity in similarities.items():
if is_acc:
# measure accuracy and record loss
pred = similarity.argmax(dim=1)
correct = pred.eq(target).sum()
results[f'acc1_{name[0]}']['correct'] += correct.item()
results[f'acc1_{name[0]}']['total'] += images.size(0)
else:
results[name[0]]['outputs'].append(similarity.cpu())
results[name[0]]['targets'].append(target.cpu())
if is_acc and not args.model.startswith('CLIP'):
# ensemble accuracy
for w in ensemble_weights:
similarity = w * similarities['z_sim'] + (1-w) * similarities['h_sim']
# measure accuracy and record loss
pred = similarity.argmax(dim=1)
correct = pred.eq(target).sum()
results[f'acc1_zh_{w}']['correct'] += correct.item()
results[f'acc1_zh_{w}']['total'] += images.size(0)
if is_acc:
return {k: 100 * d['correct'] / d['total'] for k, d in results.items()}
else:
results = {
k: (torch.cat(d['outputs']), torch.cat(d['targets']))
for k, d in results.items()
}
results = {**results, **{
f'zh_{w}': (w * results['z'][0] + (1-w) * results['h'][0], results['z'][1])
for w in ensemble_weights
}}
if dataset_name in ['aircraft', 'pets', 'caltech101', 'flowers']:
return {k: mean_per_class(*r) for k, r in results.items()}
elif dataset_name == 'kinetics700_frames':
return {k: (sum(accuracy(*r, topk=(1, 5))) / 2).item() for k, r in results.items()}
elif dataset_name == 'hateful_memes':
return {k: roc_auc(*r) for k, r in results.items()}
def accuracy(output, target, topk=(1,)):
"""Computes the accuracy over the k top predictions for the specified values of k"""
with torch.no_grad():
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.reshape(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def mean_per_class(outputs, targets):
pred = outputs.argmax(1)
confusion_matrix = metrics.confusion_matrix(targets, pred)
per_classes = confusion_matrix.diagonal() / confusion_matrix.sum(axis=1)
return 100 * per_classes.mean()
def roc_auc(outputs, targets):
pos_score = outputs[:, 1] - outputs[:, 0]
metric = metrics.roc_auc_score(targets, pos_score)
return 100 * metric
if __name__ == '__main__':
parser = argparse.ArgumentParser('SLIP 0-shot evaluations', parents=[get_args_parser()])
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
main(args)
|
clip-rocket-main
|
eval_zeroshot.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.distributed as dist
import utils
class CLIPLoss(nn.Module):
def __init__(self):
super().__init__()
self.labels = None
self.last_local_batch_size = None
def forward(self, outputs):
image_embed = outputs['z_image']
text_embed = outputs['z_text']
logit_scale = outputs['logit_scale']
local_batch_size = image_embed.size(0)
if local_batch_size != self.last_local_batch_size:
self.labels = local_batch_size * utils.get_rank() + torch.arange(
local_batch_size, device=image_embed.device
)
self.last_local_batch_size = local_batch_size
# normalized features
image_embed = F.normalize(image_embed, dim=-1, p=2)
text_embed = F.normalize(text_embed, dim=-1, p=2)
# gather features from all GPUs
image_embed_all, text_embed_all = \
utils.all_gather_batch([image_embed, text_embed])
# cosine similarity as logits
logits_per_image = logit_scale * image_embed @ text_embed_all.t()
logits_per_text = logit_scale * text_embed @ image_embed_all.t()
loss = (F.cross_entropy(logits_per_image, self.labels) + \
F.cross_entropy(logits_per_text, self.labels)) / 2
# compute accuracy
with torch.no_grad():
pred = torch.argmax(logits_per_image, dim=-1)
correct = pred.eq(self.labels).sum()
acc = 100 * correct / local_batch_size
return {'loss': loss, 'clip_loss': loss, 'clip_acc': acc}
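# Illustrative single-process sketch (hypothetical helper, not part of the original
# file): without distributed initialization the gather is a no-op and the loss is the
# standard symmetric InfoNCE over the local batch, with positives on the diagonal of
# the (B, B) image-text similarity matrix. The embedding sizes below are placeholders.
def _clip_loss_sketch():
    batch, dim = 8, 16
    outputs = {
        'z_image': torch.randn(batch, dim),
        'z_text': torch.randn(batch, dim),
        'logit_scale': torch.tensor(100.0),  # exp of the learned temperature
    }
    out = CLIPLoss()(outputs)
    assert set(out) == {'loss', 'clip_loss', 'clip_acc'}
    return out['loss']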
class CL2LLoss(nn.Module):
def __init__(self, loss_avg_or_sum, label_smoothing):
super().__init__()
self.labels = None
self.last_local_batch_size = None
self.loss_avg_or_sum = loss_avg_or_sum
self.label_smoothing = label_smoothing
def forward(self, outputs):
z_image_global = outputs['z_image_global']
z_text_global = outputs['z_text_global']
h_image_local = outputs['h_image_local']
h_text_local = outputs['h_text_local']
logit_scale = outputs['logit_scale']
h_logit_scale = outputs['h_logit_scale']
local_batch_size = z_image_global.size(0)
assert len(h_image_local) == len(h_text_local)
num_augs = len(h_image_local)
if local_batch_size != self.last_local_batch_size:
self.labels = local_batch_size * utils.get_rank() + torch.arange(
local_batch_size, device=z_image_global.device
)
self.last_local_batch_size = local_batch_size
# normalized features
z_image_global = F.normalize(z_image_global)
z_text_global = F.normalize(z_text_global)
h_image_local = [F.normalize(z) for z in h_image_local]
h_text_local = [F.normalize(z) for z in h_text_local]
# gather features from all GPUs
z_image_global_all, z_text_global_all = utils.all_gather_batch([z_image_global, z_text_global])
h_image_local_all = utils.all_gather_batch(h_image_local)
h_text_local_all = utils.all_gather_batch(h_text_local)
# compute global loss
image_global_logits = logit_scale * z_image_global @ z_text_global_all.t()
text_global_logits = logit_scale * z_text_global @ z_image_global_all.t()
clip_loss_image_global = F.cross_entropy(image_global_logits, self.labels)
clip_loss_text_global = F.cross_entropy(text_global_logits, self.labels)
# compute local loss
clip_loss_image_local, clip_loss_text_local = 0, 0
if num_augs > 0:
image_local_logits = []
text_local_logits = []
for i in range(num_augs):
image_local_logits += [h_logit_scale * h @ h_text_local_all[i].t() for h in h_image_local]
text_local_logits += [h_logit_scale * h @ h_image_local_all[i].t() for h in h_text_local]
clip_loss_image_local = sum([F.cross_entropy(l, self.labels, label_smoothing=self.label_smoothing) for l in image_local_logits]) / len(image_local_logits)
clip_loss_text_local = sum([F.cross_entropy(l, self.labels, label_smoothing=self.label_smoothing) for l in text_local_logits]) / len(text_local_logits)
# compute total losses
clip_loss_image = (clip_loss_image_global + clip_loss_image_local * num_augs) / (1 + num_augs)
clip_loss_text = (clip_loss_text_global + clip_loss_text_local * num_augs) / (1 + num_augs)
clip_loss = (clip_loss_image + clip_loss_text) / 2
# compute accuracy
with torch.no_grad():
pred = torch.argmax(image_global_logits, dim=-1)
correct = pred.eq(self.labels).sum()
clip_acc_image_global = 100 * correct / local_batch_size
pred = torch.argmax(text_global_logits, dim=-1)
correct = pred.eq(self.labels).sum()
clip_acc_text_global = 100 * correct / local_batch_size
clip_acc_image_local, clip_acc_text_local = 0, 0
if num_augs > 0:
for aug_logits in image_local_logits:
pred = torch.argmax(aug_logits, dim=-1)
correct = pred.eq(self.labels).sum()
clip_acc_image_local += 100 * correct / local_batch_size
clip_acc_image_local /= len(image_local_logits)
for aug_logits in text_local_logits:
pred = torch.argmax(aug_logits, dim=-1)
correct = pred.eq(self.labels).sum()
clip_acc_text_local += 100 * correct / local_batch_size
                clip_acc_text_local /= len(text_local_logits)
loss = clip_loss * (2 if self.loss_avg_or_sum == 'sum' else 1)
clip_local_dict = {
'clip_loss_image_local': clip_loss_image_local,
'clip_loss_text_local': clip_loss_text_local,
'clip_acc_image_local': clip_acc_image_local,
'clip_acc_text_local': clip_acc_text_local,
} if num_augs > 0 else {}
return {
'loss': loss,
'clip_loss_image': clip_loss_image,
'clip_loss_text': clip_loss_text,
'clip_loss_image_global': clip_loss_image_global,
'clip_loss_text_global': clip_loss_text_global,
'clip_loss': clip_loss,
'clip_acc': clip_acc_image_global,
'clip_acc_image_global': clip_acc_image_global,
'clip_acc_text_global': clip_acc_text_global,
'h_logit_scale': h_logit_scale,
**clip_local_dict,
}
class BarLIPLoss(CL2LLoss):
def __init__(self, loss_avg_or_sum, label_smoothing, lamb=5e-3, scale_loss=0.025):
super().__init__(loss_avg_or_sum, label_smoothing)
self.lamb = lamb
self.scale_loss = scale_loss
def barlip_loss(self, z1, z2):
N, D = z1.size()
corr = torch.einsum("bi, bj -> ij", z1, z2) / N
if dist.is_available() and dist.is_initialized():
dist.all_reduce(corr)
world_size = dist.get_world_size()
corr /= world_size
diag = torch.eye(D, device=corr.device)
cdif = (corr - diag).pow(2)
cdif[~diag.bool()] *= self.lamb
loss = self.scale_loss * cdif.sum()
return loss
def forward(self, outputs):
# global to global
num_losses = 1
barlip_loss = self.barlip_loss(outputs['v_image'], outputs['v_text'])
# local to local
for v_image in outputs['v_image_local']:
for v_text in outputs['v_text_local']:
barlip_loss += self.barlip_loss(v_image, v_text)
num_losses += 1
barlip_loss /= num_losses
# online eval with clip loss
clip_loss_out = super().forward(outputs)
loss = barlip_loss + clip_loss_out.pop('loss')
return {
'loss': loss,
'barlip_loss': barlip_loss,
**clip_loss_out
}
class SiamLIPLoss(CL2LLoss):
def __init__(self, loss_avg_or_sum, label_smoothing):
super().__init__(loss_avg_or_sum, label_smoothing)
def negative_cosine_similarity(self, p, v):
p = F.normalize(p, dim=-1)
v = F.normalize(v, dim=-1)
return 2 - 2 * (p * v.detach()).sum(dim=1).mean()
def forward(self, outputs):
p_image_global = outputs['p_image']
p_text_global = outputs['p_text']
p_image_local = outputs['p_image_local']
p_text_local = outputs['p_text_local']
if any('momentum' in k for k in outputs):
v_image_global = outputs['v_image_momentum']
v_text_global = outputs['v_text_momentum']
v_image_local = outputs['v_image_local_momentum']
v_text_local = outputs['v_text_local_momentum']
else:
v_image_global = outputs['v_image']
v_text_global = outputs['v_text']
v_image_local = outputs['v_image_local']
v_text_local = outputs['v_text_local']
# global to global
num_losses = 2
siamlip_loss = (
self.negative_cosine_similarity(p_image_global, v_text_global.detach()) + \
self.negative_cosine_similarity(p_text_global, v_image_global.detach())
)
# local to local
for p in p_image_local:
for v in v_text_local:
siamlip_loss += self.negative_cosine_similarity(p, v.detach())
num_losses += 1
for p in p_text_local:
for v in v_image_local:
siamlip_loss += self.negative_cosine_similarity(p, v.detach())
num_losses += 1
siamlip_loss /= num_losses
# online eval with clip loss
clip_loss_out = super().forward(outputs)
loss = siamlip_loss + clip_loss_out.pop('loss')
return {
'loss': loss,
'siamlip_loss': siamlip_loss,
**clip_loss_out
}
class SwALIPLoss(CL2LLoss):
def __init__(
self,
loss_avg_or_sum,
label_smoothing,
sk_iters=3,
target_epsilon=0.05,
swalip_weight=0.2,
):
assert label_smoothing == 0.0
super().__init__(loss_avg_or_sum, label_smoothing)
self.sk_iters = sk_iters
self.target_epsilon = target_epsilon
self.swalip_weight = swalip_weight
self.labels = None
self.last_local_batch_size = None
self.set_world_size()
def set_world_size(self):
if dist.is_available() and dist.is_initialized():
self.world_size = dist.get_world_size()
else:
self.world_size = 1
@torch.no_grad()
def sinkhorn_knopp(self, Q: torch.Tensor) -> torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
Applies the entropy regularization, normalizes the Q matrix and then normalizes rows and
        columns in an alternating fashion for sk_iters times. Before returning, it normalizes again
the columns in order for the output to be an assignment of samples to prototypes.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.target_epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0] # num prototypes
# make the matrix sum to 1
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for _ in range(self.sk_iters):
# normalize each row: total weight per prototype must be 1/K
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
# normalize each column: total weight per sample must be 1/B
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
        Q *= B  # the columns must sum to 1 so that Q is an assignment
return Q.t()
def cross_entropy(self, logits, targets):
return -torch.mean(torch.sum(targets * torch.log_softmax(logits, dim=1), dim=1))
def forward(self, outputs):
# online eval with clip loss
clip_loss_out = super().forward(outputs)
# cl2l
h_image_local = [F.normalize(h) for h in outputs['h_image_local']]
h_text_local = [F.normalize(h) for h in outputs['h_text_local']]
h_logit_scale = outputs['h_logit_scale']
num_augs = len(h_image_local)
h_image_local_all = utils.all_gather_batch(h_image_local)
h_text_local_all = utils.all_gather_batch(h_text_local)
logits_per_image_local = [[h @ h_all.t() for h_all in h_text_local_all] for h in h_image_local]
logits_per_text_local = [[h @ h_all.t() for h_all in h_image_local_all] for h in h_text_local]
# generate pseudo-label
with torch.no_grad():
targets_per_image_local = [[self.sinkhorn_knopp(t.detach()) for t in i] for i in logits_per_image_local]
targets_per_text_local = [[self.sinkhorn_knopp(i.detach()) for i in t] for t in logits_per_text_local]
        # compute the loss between all pairs of local views
        # (the hard-coded pairing below assumes exactly two augmentations per modality)
        assert num_augs == 2, 'SwALIP local pairing assumes num_augs == 2'
        swalip_loss = 0
        for l1 in range(2):
            for l2 in range(2):
t1, t2 = abs(l1 - 1), abs(l2 - 1)
swalip_loss += (
self.cross_entropy(logits_per_image_local[l1][l2] * h_logit_scale, targets_per_image_local[t1][t2]) + \
self.cross_entropy(logits_per_text_local[l1][l2] * h_logit_scale, targets_per_text_local[t1][t2])
) / 2
swalip_loss /= num_augs ** 2
loss = self.swalip_weight * swalip_loss + clip_loss_out.pop('loss')
return {**clip_loss_out, 'loss': loss, 'swalip_loss': swalip_loss}
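# Illustrative single-process sketch (hypothetical helper, not part of the original
# file): the Sinkhorn-Knopp step turns a matrix of cosine similarities into soft
# assignments in which every sample receives a proper distribution over its targets,
# i.e. each row of the returned matrix sums to 1.
def _sinkhorn_knopp_sketch():
    loss_fn = SwALIPLoss(loss_avg_or_sum='avg', label_smoothing=0.0)
    a = F.normalize(torch.randn(8, 16))
    b = F.normalize(torch.randn(8, 16))
    assign = loss_fn.sinkhorn_knopp(a @ b.t())  # (8, 8) cosine similarities
    assert assign.shape == (8, 8)
    assert torch.allclose(assign.sum(dim=1), torch.ones(8), atol=1e-4)
    return assign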
class SwALIPV1Loss(nn.Module):
def __init__(self, sk_iters, target_epsilon, temperature, swalip_weight=1.0):
super().__init__()
self.sk_iters = sk_iters
self.target_epsilon = target_epsilon
self.temperature = temperature
self.swalip_weight = swalip_weight
self.clip_loss = CLIPLoss()
self.set_world_size()
def set_world_size(self):
if dist.is_available() and dist.is_initialized():
self.world_size = dist.get_world_size()
else:
self.world_size = 1
@torch.no_grad()
def sinkhorn_knopp(self, Q: torch.Tensor) -> torch.Tensor:
"""Produces assignments using Sinkhorn-Knopp algorithm.
        Applies the entropy regularization, normalizes the Q matrix, and then alternately
        normalizes its rows and columns for self.sk_iters iterations. Before returning, it
        normalizes the columns once more so that the output is an assignment of samples to prototypes.
Args:
Q (torch.Tensor): cosine similarities between the features of the
samples and the prototypes.
Returns:
torch.Tensor: assignment of samples to prototypes according to optimal transport.
"""
Q = torch.exp(Q / self.target_epsilon).t()
B = Q.shape[1] * self.world_size
K = Q.shape[0] # num prototypes
        # make the matrix sum to 1
sum_Q = torch.sum(Q)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_Q)
Q /= sum_Q
for _ in range(self.sk_iters):
# normalize each row: total weight per prototype must be 1/K
sum_of_rows = torch.sum(Q, dim=1, keepdim=True)
if dist.is_available() and dist.is_initialized():
dist.all_reduce(sum_of_rows)
Q /= sum_of_rows
Q /= K
# normalize each column: total weight per sample must be 1/B
Q /= torch.sum(Q, dim=0, keepdim=True)
Q /= B
        Q *= B  # the columns must sum to 1 so that Q is an assignment
return Q.t()
def cross_entropy(self, logits, assign):
return -torch.mean(torch.sum(assign * torch.log_softmax(logits, dim=1), dim=1))
def forward(self, outputs):
image_logits = outputs['p_image']
text_logits = outputs['p_text']
logit_scale = outputs['swalip_logit_scale']
if any('momentum' in k for k in outputs):
image_targets = outputs['p_image_momentum']
text_targets = outputs['p_text_momentum']
else:
image_targets = outputs['p_image'].detach()
text_targets = outputs['p_text'].detach()
image_assign = self.sinkhorn_knopp(image_targets.detach())
text_assign = self.sinkhorn_knopp(text_targets.detach())
image_logits *= logit_scale
text_logits *= logit_scale
swalip_loss = (
self.cross_entropy(image_logits, text_assign) + \
self.cross_entropy(text_logits, image_assign)
) / 2
# online eval with clip loss
clip_loss_out = self.clip_loss(outputs)
loss = self.swalip_weight * swalip_loss + clip_loss_out.pop('loss')
return {
'loss': loss,
'swalip_loss': swalip_loss,
'swalip_logit_scale': logit_scale,
**clip_loss_out
}
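# Minimal sanity check for the Sinkhorn-Knopp routine above (illustrative sketch, not part of
# the original file; it assumes CLIPLoss() -- defined earlier in this file -- constructs without
# requiring distributed initialization). Each row of the returned assignment should sum to 1.
if __name__ == "__main__":
    _loss = SwALIPV1Loss(sk_iters=3, target_epsilon=0.05, temperature=0.1)
    _sims = torch.randn(64, 128)  # 64 samples x 128 prototypes cosine similarities
    _assign = _loss.sinkhorn_knopp(_sims)
    assert _assign.shape == (64, 128)
    assert torch.allclose(_assign.sum(dim=1), torch.ones(64), atol=1e-4)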
|
clip-rocket-main
|
losses.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
from collections import OrderedDict, defaultdict
import json
import math
import os
import sys
import time
try:
import wandb
except ImportError:
print("wandb not found")
import numpy as np
import torch
import torch.cuda.amp as amp
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
import datasets
import models
import utils
from tokenizer import SimpleTokenizer
def get_args_parser():
parser = argparse.ArgumentParser(description='CL2L training and evaluation', add_help=False)
# Data
parser.add_argument('--dataset', default='yfcc15m', type=str, choices=['yfcc15m', 'cc3m', 'cc12m', 'merged_opendata'])
parser.add_argument('--root', default='datasets/yfcc100m', type=str,
help='path to dataset root')
parser.add_argument('--metadata', default='datasets/yfcc15m_v1/yfcc15m.pkl', type=str,
help='path to metadata file (see README for details)')
    parser.add_argument('--metadata-unpaired', default='datasets/yfcc15m_v1/yfcc85m.pkl', type=str,
                        help='path to the metadata file of the unpaired data (see README for details)')
parser.add_argument('--bpe-path', default='datasets/yfcc15m_v1/bpe_simple_vocab_16e6.txt.gz', type=str,
help='path to the bpe file (see README for details)')
parser.add_argument('--output-dir', default='./', type=str, help='output dir')
# Model
parser.add_argument('--model', default='CL2L_VITB16', type=str)
parser.add_argument('--attn-layer', default='flash', type=str, choices=["flash", "standard"])
parser.add_argument('--no-share-token', action='store_true')
parser.add_argument('--semi-paired', action='store_true')
parser.add_argument('--unpaired-ratio', default=4, type=int)
parser.add_argument('--embed-dim', default=256, type=int,
help='output dim for the language-image projector')
parser.add_argument('--clip-hidden-dim', default=4096, type=int,
help='hidden dim of CLIP mlp projection head')
parser.add_argument('--ssl-scale', default=1.0, type=float,
help='loss scale for SimCLR objective')
parser.add_argument('--ssl-temp', default=0.1, type=float,
help='softmax temperature for SimCLR objective')
parser.add_argument('--resume', default='', type=str, help='path to resume from')
parser.add_argument('--init-text-encoder', default=None, type=str, help='path to init from')
parser.add_argument('--detach-proj', action='store_true',
help='whether or not to detach the clip projector')
parser.add_argument('--momentum', action='store_true',
help='whether or not to use the momentum encoder')
    parser.add_argument('--momentum-tau', type=float, default=0.99,
                        help='EMA decay rate of the momentum encoder')
parser.add_argument('--transformer-layers', default=12, type=int,
help='number of layers for the text transformer')
parser.add_argument('--clip-proj-type', default='linear', type=str,
choices=['mlp', 'linear'], help='type of projector for clip')
parser.add_argument('--cl2l-txt-proj-type', default='mlp', type=str,
choices=['mlp', 'linear'], help='type of text projector for cl2l')
parser.add_argument('--cl2l-img-proj-type', default='mlp', type=str,
choices=['mlp', 'linear'], help='type of vision projector for cl2l')
parser.add_argument('--separate-proj', default=False, action='store_true',
help='different heads')
parser.add_argument('--separate-proj-child', default=False, action='store_true',
help='different heads in non-contrastive stream')
# BarLIP
parser.add_argument('--barlip-proj-dim', default=8192, type=int,
help='output dim for the barlip projector')
parser.add_argument('--barlip-hidden-dim', default=3000, type=int,
help='hidden dim for the barlip projector')
parser.add_argument('--barlip-lamb', default=5e-3, type=float,
help='lambda for BarLIP loss')
parser.add_argument('--barlip-scale-loss', default=0.025, type=float,
help='loss scaling factor for BarLIP')
# SwALIP
parser.add_argument('--swalip-proj-dim', default=128, type=int,
help='output dim for the swalip projector')
    parser.add_argument('--swalip-hidden-dim', default=2048, type=int,
                        help='hidden dim for the swalip projector')
parser.add_argument('--swalip-num-proto', default=3000, type=int,
help='number of prototypes for swalip')
parser.add_argument('--swalip-temperature', default=0.1, type=float,
help='softmax temperature for swalip')
parser.add_argument('--swalip-learn-temperature', action='store_true',
help='whether to learn softmax temperature for swalip')
    parser.add_argument('--sk-iters', default=3, type=int,
                        help='number of Sinkhorn-Knopp iterations')
    parser.add_argument('--target-epsilon', default=0.05, type=float,
                        help='entropy regularization epsilon for the Sinkhorn-Knopp targets')
parser.add_argument('--swalip-freeze-proto-iters', default=100, type=int,
help='number of iters to freeze swalip prototypes')
parser.add_argument('--swalip-no-shared-proto', action='store_true',
help='whether or not to share prototypes between modalities')
parser.add_argument('--swalip-weight', default=0.2, type=float,
help='weight for the swalip loss')
# SiamLIP
parser.add_argument('--siamlip-proj-dim', default=128, type=int,
help='output dim for the siamlip projector')
    parser.add_argument('--siamlip-hidden-dim', default=2048, type=int,
                        help='hidden dim for the siamlip projector')
    parser.add_argument('--siamlip-no-last-bn', action='store_true',
                        help='disable batchnorm at the end of the siamlip projector')
# Image Augmentations
parser.add_argument('--num-augs', default=2, type=int,
help='number of augmentations in cl2l')
parser.add_argument('--multicrop-resize', default=224, type=int)
parser.add_argument('--multicrop-max-scale', default=1.0, type=float)
parser.add_argument('--weak-min-scale', default=0.5, type=float)
parser.add_argument('--blur-prob', default=0.5, type=float)
parser.add_argument('--solarize-prob', default=0.0, type=float)
parser.add_argument('--grayscale-prob', default=0.2, type=float)
parser.add_argument('--byol-augment', default=False, action='store_true',
help='byol-like asymmetric augment. It overrides other augment probs')
parser.add_argument('--weak-augment', default=False, action='store_true',
help='make all augmentations weak, including the ones of cl2l')
    parser.add_argument('--strong-augment', default=False, action='store_true',
                        help='make all augmentations strong, including the ones of the baseline clip')
parser.add_argument('--randaugment', default=False, action='store_true',
help='add randaugment to base augmentations')
# Text Augmentations
parser.add_argument('--caption-sampling', default='single', type=str,
choices=['single', 'multi'], help='how to sample captions')
parser.add_argument('--text-dropout-prob', default=0.0, type=float,
help='dropout probability')
    parser.add_argument('--text-drop-path-prob', default=0.0, type=float,
                        help='drop path probability')
parser.add_argument('--label-smoothing', default=0.0, type=float,
help='label smoothing')
parser.add_argument('--text-augment', default=False, action='store_true',
help='text augmentations')
parser.add_argument('--clean-before-augment', default=False, action='store_true',
help='clean before text augmentations')
parser.add_argument('--no-text-augment-prob', default=0.0, type=float,
help='prob not to augment text')
parser.add_argument('--remove-stopwords-prob', default=0.8, type=float,
help='prob to remove stopwords from text')
parser.add_argument('--synonym-replacement-prob', default=0.4, type=float,
help='prob to replace synonym in text')
parser.add_argument('--random-swap-prob', default=0.4, type=float,
help='prob to randomly swap in text')
parser.add_argument('--random-deletion-prob', default=0.2, type=float,
help='prob to randomly delete text')
# Training
parser.add_argument('--epochs', default=25, type=int)
parser.add_argument('--warmup-epochs', default=1, type=int)
parser.add_argument('--start-epoch', default=0, type=int)
parser.add_argument('--batch-size', default=64, type=int,
help='number of samples per-device/per-gpu')
parser.add_argument('--lr', default=3e-3, type=float)
parser.add_argument('--lr-start', default=1e-6, type=float,
help='initial warmup lr')
parser.add_argument('--lr-end', default=1e-5, type=float,
help='minimum final lr')
parser.add_argument('--update-freq', default=1, type=int,
help='optimizer update frequency (i.e. gradient accumulation steps)')
parser.add_argument('--wd', default=0.1, type=float)
parser.add_argument('--betas', default=(0.9, 0.98), nargs=2, type=float)
parser.add_argument('--eps', default=1e-8, type=float)
parser.add_argument('--eval-freq', default=1, type=int)
parser.add_argument('--disable-amp', action='store_true',
help='disable mixed-precision training (requires more memory and compute)')
parser.add_argument('--loss-avg-or-sum', default='avg', type=str)
parser.add_argument('--checkpoint-grad', action='store_true',
help='enable gradient checkpointing')
# System
parser.add_argument('--print-freq', default=25, type=int, help='print frequency')
parser.add_argument('-j', '--workers', default=10, type=int, metavar='N',
help='number of data loading workers per process')
parser.add_argument('--workers-unpaired', default=5, type=int, metavar='N',
help='number of data loading workers per process')
parser.add_argument('--evaluate', action='store_true', help='eval only')
parser.add_argument('--world-size', default=1, type=int,
help='number of nodes for distributed training')
parser.add_argument('--rank', default=0, type=int,
help='node rank for distributed training')
parser.add_argument("--local_rank", type=int, default=0)
parser.add_argument('--dist-url', default='env://', type=str,
help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str)
parser.add_argument('--seed', default=0, type=int)
parser.add_argument('--gpu', default=None, type=int, help='GPU id to use.')
parser.add_argument('--wandb', action='store_true', help='Enable WandB logging')
parser.add_argument('--offline', action='store_true', help='WandB will not log online')
parser.add_argument('--name', default='CLIP_ROCKET', type=str)
return parser
best_acc1 = 0
def main(args):
utils.init_distributed_mode(args)
global best_acc1
# fix the seed for reproducibility
seed = args.seed + utils.get_rank()
torch.manual_seed(seed)
np.random.seed(seed)
# create model
print("=> creating model: {}".format(args.model))
model = models.get_model(args)
model.visual.set_grad_checkpointing(enable=args.checkpoint_grad)
model.cuda(args.gpu)
print(model)
if args.distributed:
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.gpu], bucket_cap_mb=200, static_graph=True)
if args.momentum:
momentum_model = models.get_model(args, is_momentum=True)
momentum_model.cuda(args.gpu)
if args.distributed:
momentum_model = torch.nn.parallel.DistributedDataParallel(
momentum_model, device_ids=[args.gpu], bucket_cap_mb=200, static_graph=True)
msg = utils.get_model_parallel(momentum_model).load_state_dict(
utils.get_model_parallel(model).state_dict(), strict=False)
print(msg)
for p in momentum_model.parameters():
p.requires_grad = False
# define loss function (criterion) and optimizer
criterion = models.get_loss(args).cuda(args.gpu)
p_wd, p_non_wd = [], []
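    # split parameters into two groups: weight decay is applied only to >=2-D weights,
    # not to biases or normalization (LayerNorm/BatchNorm) parameters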
for n, p in model.named_parameters():
if not p.requires_grad:
continue # frozen weights
if p.ndim < 2 or 'bias' in n or 'ln' in n or 'bn' in n:
p_non_wd.append(p)
else:
p_wd.append(p)
optim_params = [{"params": p_wd, "weight_decay": args.wd},
{"params": p_non_wd, "weight_decay": 0}]
optimizer = torch.optim.AdamW(optim_params, lr=args.lr, betas=args.betas,
eps=args.eps, weight_decay=args.wd)
scaler = amp.GradScaler(enabled=not args.disable_amp)
# optionally load pre-trained text encoder
if args.init_text_encoder is not None:
cp_text_encoder = torch.load(args.init_text_encoder)['state_dict']
cp_text_encoder = {k.replace('module.', ''): v for k, v in cp_text_encoder.items() if 'transformer' in k}
result = utils.get_model_parallel(model).load_state_dict(cp_text_encoder, strict=False)
print(result)
del cp_text_encoder
# optionally resume from a checkpoint (takes precedence over autoresume)
if args.resume:
if os.path.isfile(args.resume):
print("=> loading resume checkpoint '{}'".format(args.resume))
checkpoint = torch.load(args.resume, map_location='cpu')
epoch = checkpoint['epoch'] if 'epoch' in checkpoint else 0
args.start_epoch = epoch
result = model.load_state_dict(
checkpoint['state_dict'], strict=False)
print(result)
if args.momentum:
print("=> loading momentum encoder from '{}'".format(args.resume))
result = momentum_model.load_state_dict(
checkpoint['momentum_state_dict'], strict=False)
print(result)
            if 'optimizer' in checkpoint:
                optimizer.load_state_dict(checkpoint['optimizer'])
            if 'scaler' in checkpoint:
                scaler.load_state_dict(checkpoint['scaler'])
best_acc1 = checkpoint['best_acc1']
print("=> loaded resume checkpoint '{}' (epoch {})"
.format(args.resume, epoch))
del checkpoint
else:
print("=> no checkpoint found at '{}'".format(args.resume))
else:
# auto-resume from latest checkpoint in output directory
latest = os.path.join(args.output_dir, 'checkpoint.pt')
if os.path.isfile(latest):
print("=> loading latest checkpoint '{}'".format(latest))
latest_checkpoint = torch.load(latest, map_location='cpu')
args.start_epoch = latest_checkpoint['epoch']
model.load_state_dict(latest_checkpoint['state_dict'])
if args.momentum:
momentum_model.load_state_dict(latest_checkpoint['momentum_state_dict'])
optimizer.load_state_dict(latest_checkpoint['optimizer'])
scaler.load_state_dict(latest_checkpoint['scaler'])
best_acc1 = latest_checkpoint['best_acc1']
print("=> loaded latest checkpoint '{}' (epoch {})"
.format(latest, latest_checkpoint['epoch']))
del latest_checkpoint
cudnn.benchmark = True
# build tokenizer
tokenizer = SimpleTokenizer(
bpe_path=args.bpe_path,
text_augment=args.text_augment,
no_text_augment_prob=args.no_text_augment_prob,
remove_stopwords_prob=args.remove_stopwords_prob,
synonym_replacement_prob=args.synonym_replacement_prob,
random_swap_prob=args.random_swap_prob,
random_deletion_prob=args.random_deletion_prob,
clean_before_augment=args.clean_before_augment,
num_augs=args.num_augs,
)
# build datasets
print("=> creating paired datasets")
train_dataset = datasets.get_train_dataset(args, tokenizer, metadata=args.metadata)
val_dataset = datasets.get_val_dataset()
# dist eval resamples data to pad uneven batch sizes
# make sure num_samples = 0 mod num_gpus for exact acc
train_sampler = DistributedSampler(train_dataset) if args.distributed else None
val_sampler = DistributedSampler(val_dataset) if args.distributed else None
train_loader = DataLoader(
train_dataset,
batch_size=args.batch_size,
shuffle=(train_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=train_sampler,
drop_last=True
)
val_loader = DataLoader(
val_dataset,
batch_size=args.batch_size,
shuffle=(val_sampler is None),
num_workers=args.workers,
pin_memory=True,
sampler=val_sampler,
drop_last=False
)
# optionally also load unpaired data
if args.semi_paired:
print("=> creating unpaired dataset")
unpaired_dataset = datasets.get_train_dataset(
args,
tokenizer,
metadata=args.metadata_unpaired,
augs_only=False
)
unpaired_sampler = DistributedSampler(unpaired_dataset) if args.distributed else None
unpaired_loader = DataLoader(
unpaired_dataset,
batch_size=args.batch_size // args.unpaired_ratio,
shuffle=(unpaired_sampler is None),
num_workers=args.workers_unpaired,
pin_memory=True,
sampler=unpaired_sampler,
drop_last=True
)
unpaired_iterable = utils.cycle(unpaired_loader, unpaired_sampler)
if args.evaluate:
zero_stats = validate_zeroshot(val_loader, model, tokenizer, args)
if utils.is_main_process():
with open(os.path.join(args.output_dir, 'eval_log.txt'), 'a') as f:
f.write(json.dumps(zero_stats) + '\n')
return
lr_schedule = utils.cosine_scheduler(args.lr, args.lr_end, args.epochs,
len(train_loader) // args.update_freq, warmup_epochs=args.warmup_epochs, start_warmup_value=args.lr_start)
if utils.is_main_process() and args.output_dir != './':
with open(os.path.join(args.output_dir, 'command.txt'), 'w') as f:
f.write(' '.join(sys.argv))
json.dump(
vars(args),
open(os.path.join(args.output_dir, 'args.json'), 'w'),
default=lambda o: "<not serializable>",
indent=4
)
if args.wandb:
wandb_id = os.path.split(args.output_dir)[-1]
wandb.init(
project='clip-rocket',
id=wandb_id,
config=args,
resume='allow',
name=args.name,
save_code=True,
notes=' '.join(sys.argv),
mode='offline' if args.offline else 'online',
dir=args.output_dir
)
wandb.watch(model)
print(args)
print("=> beginning training")
for epoch in range(args.start_epoch, args.epochs):
if args.distributed:
train_sampler.set_epoch(epoch)
# train for one epoch
train_stats = train(
train_loader,
model, criterion,
optimizer,
scaler,
epoch,
lr_schedule,
args,
momentum_model if args.momentum else None,
unpaired_iterable if args.semi_paired else None
)
if (epoch + 1) % args.eval_freq != 0:
continue
val_stats = validate_zeroshot(val_loader, model, tokenizer, args)
acc1 = val_stats['acc1_z']
is_best = acc1 > best_acc1
best_acc1 = max(acc1, best_acc1)
print("=> saving checkpoint")
checkpoint_dict = {
'epoch': epoch + 1,
'state_dict': model.state_dict(),
'optimizer' : optimizer.state_dict(),
'scaler': scaler.state_dict(),
'best_acc1': best_acc1,
'args': args,
}
if args.momentum:
checkpoint_dict['momentum_state_dict'] = momentum_model.state_dict()
utils.save_on_master(checkpoint_dict, is_best, args.output_dir)
log_stats = {**{f'train_{k}': v for k, v in train_stats.items()},
**{f'test_{k}': v for k, v in val_stats.items()},
'epoch': epoch}
if utils.is_main_process():
if args.wandb:
wandb.log(log_stats)
with open(os.path.join(args.output_dir, 'log.txt'), 'a') as f:
f.write(json.dumps(log_stats) + '\n')
    if utils.is_main_process() and args.wandb:
        wandb.finish()
def train(
train_loader,
model,
criterion,
optimizer,
scaler,
epoch,
lr_schedule,
args,
momentum_model=None,
unpaired_iterable=None,
):
batch_time = AverageMeter('Time', ':6.2f')
data_time = AverageMeter('Data', ':6.2f')
mem = AverageMeter('Mem (GB)', ':6.1f')
metric_names = models.get_metric_names(args.model)
iters_per_epoch = len(train_loader) // args.update_freq
metrics = OrderedDict([(name, AverageMeter(name, ':.2e')) for name in metric_names])
progress = ProgressMeter(
iters_per_epoch,
[batch_time, data_time, mem, *metrics.values()],
prefix="Epoch: [{}]".format(epoch))
assert (momentum_model is not None) == args.momentum
# switch to train mode
model.train()
if args.momentum:
momentum_model.train()
end = time.time()
for data_iter, inputs in enumerate(train_loader):
optim_iter = data_iter // args.update_freq
# optionally load unpaired data
if args.semi_paired:
inputs_unpaired = next(unpaired_iterable)
inputs = [
torch.cat([inputs[0], inputs_unpaired[0]]),
inputs[1],
*[torch.cat([inputs[a+2], inputs_unpaired[a+2]])
for a in range(args.num_augs)]
]
# measure data loading time
data_time.update(time.time() - end)
# update weight decay and learning rate according to their schedule
it = iters_per_epoch * epoch + optim_iter # global training iteration
for k, param_group in enumerate(optimizer.param_groups):
param_group['lr'] = lr_schedule[it]
inputs = [t.cuda(args.gpu, non_blocking=True) for t in inputs]
# compute output
with amp.autocast(enabled=not args.disable_amp):
outputs = model(*inputs)
if args.momentum:
with torch.no_grad():
momentum_outputs = momentum_model(*inputs)
momentum_outputs = {k + '_momentum' : v for k, v in momentum_outputs.items()}
outputs = {**outputs, **momentum_outputs}
loss_dict = criterion(outputs)
loss = loss_dict['loss']
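            # gradient accumulation: scale the loss so that update_freq accumulated
            # micro-batches contribute the average gradient of one effective batch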
loss /= args.update_freq
if not math.isfinite(loss.item()):
torch.save(
{"inputs": utils.all_gather_batch(inputs), "outputs": utils.all_gather_batch(outputs), "losses": loss_dict, "state_dict": model.state_dict()},
os.path.join(args.output_dir, "dump_loss_nan.pgz")
)
print("Loss is {}, stopping training".format(loss.item()))
time.sleep(5)
sys.exit(1)
scaler.scale(loss).backward()
if (data_iter + 1) % args.update_freq != 0:
continue
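        # keep the SwALIP prototypes frozen for the first swalip_freeze_proto_iters
        # iterations by dropping their gradients before the optimizer step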
if args.model.endswith('SWALIPV1') and it < args.swalip_freeze_proto_iters:
for name, p in model.named_parameters():
if "prototypes" in name:
p.grad = None
# compute gradient and do SGD step
scaler.step(optimizer)
scaler.update()
model.zero_grad(set_to_none=True)
# momentum update
if args.momentum:
with torch.no_grad():
m = args.momentum_tau
for p, p_mom in zip(
utils.get_model_parallel(model).parameters(),
utils.get_model_parallel(momentum_model).parameters()
):
p_mom.data.mul_(m).add_((1 - m) * p.detach().data)
        # clamp the learned log logit scale to [0, ln(100)] so the effective scale stays in [1, 100]
utils.get_model_parallel(model).logit_scale.data.clamp_(0, 4.6052)
logit_scale = utils.get_model_parallel(model).logit_scale.exp().item()
utils.get_model_parallel(model).l2l_logit_scale.data.clamp_(0, 4.6052)
for k in loss_dict:
metrics[k].update(loss_dict[k].item(), args.batch_size)
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
mem.update(torch.cuda.max_memory_allocated() // 1e9)
if optim_iter % args.print_freq == 0:
if utils.is_main_process() and args.wandb:
wandb.log({**{k: v.item() for k, v in loss_dict.items()},
'scaler': scaler.get_scale(),
'logit': logit_scale})
progress.display(optim_iter)
progress.synchronize()
return {**{k: v.avg for k, v in metrics.items()},
'lr': optimizer.param_groups[0]['lr'],
'logit_scale': logit_scale}
def validate_zeroshot(val_loader, model, tokenizer, args):
batch_time = AverageMeter('Time', ':6.3f')
if args.model.startswith('SLIP') or args.model.startswith('CLIP'):
metrics = {
'acc1_z': AverageMeter('Acc@1_z', ':6.2f'),
'acc5_z': AverageMeter('Acc@5_z', ':6.2f')
}
else:
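        # CL2L models report zero-shot accuracy for both the z and h heads, plus
        # ensembles of their similarities at several mixing weights w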
ensemble_weights = np.linspace(0.1, 0.9, 9).round(decimals=1)
metric_suffixes = ['z', 'h'] + [f'zh_{w}' for w in ensemble_weights]
metrics = {
f'acc{k}_{s}': AverageMeter(f'Acc@{k}_{s}', ':6.2f')
for s in metric_suffixes for k in [1, 5]
}
progress = ProgressMeter(
len(val_loader),
[batch_time, *metrics.values()],
prefix='Test: '
)
# switch to evaluate mode
model.eval()
print('=> encoding captions')
cwd = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(cwd, 'templates.json')) as f:
templates = json.load(f)['imagenet']
with open(os.path.join(cwd, 'labels.json')) as f:
labels = json.load(f)['imagenet']
with torch.no_grad():
text_features = defaultdict(list)
for l in labels:
texts = [t.format(l) for t in templates]
texts = tokenizer(texts).cuda(args.gpu, non_blocking=True)
class_embeddings = utils.get_model_parallel(model).encode_text_val(texts)
embed_names = {'z_text', 'h_text', 'p_text'}.intersection(class_embeddings.keys())
for embed_name in embed_names:
cls_embed = class_embeddings[embed_name]
cls_embed = cls_embed / cls_embed.norm(dim=-1, keepdim=True)
cls_embed = cls_embed.mean(dim=0)
cls_embed = cls_embed / cls_embed.norm(dim=-1, keepdim=True)
text_features[embed_name].append(cls_embed)
text_features = {k: torch.stack(v, dim=0) for k, v in text_features.items()}
end = time.time()
for i, (images, target) in enumerate(val_loader):
images = images.cuda(args.gpu, non_blocking=True)
target = target.cuda(args.gpu, non_blocking=True)
# encode images
image_features = utils.get_model_parallel(model).encode_image_val(images)
# compute similarities
similarities = utils.get_model_parallel(model).predict_zeroshot(image_features, text_features)
# measure accuracy
for name, similarity in similarities.items():
acc1, acc5 = utils.accuracy(similarity, target, topk=(1, 5))
acc1, acc5 = utils.scaled_all_reduce([acc1, acc5])
metrics[f'acc1_{name[0]}'].update(acc1.item(), images.size(0))
metrics[f'acc5_{name[0]}'].update(acc5.item(), images.size(0))
if not (args.model.startswith('SLIP')) and not (args.model.startswith('CLIP')):
# ensemble accuracy
for w in ensemble_weights:
similarity = w * similarities['z_sim'] + (1-w) * similarities['h_sim']
acc1, acc5 = utils.accuracy(similarity, target, topk=(1, 5))
acc1, acc5 = utils.scaled_all_reduce([acc1, acc5])
metrics[f'acc1_zh_{w}'].update(acc1.item(), images.size(0))
metrics[f'acc5_zh_{w}'].update(acc5.item(), images.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
progress.display(i)
progress.synchronize()
print(f'0-shot * Acc@1 {metrics["acc1_z"].avg:.3f} Acc@5 {metrics["acc5_z"].avg:.3f}')
return {k: v.avg for k, v in metrics.items()}
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self, name, fmt=':f'):
self.name = name
self.fmt = fmt
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def synchronize(self):
if not utils.is_dist_avail_and_initialized():
return
t = torch.tensor([self.sum, self.count], dtype=torch.float64, device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.sum = int(t[0])
self.count = t[1]
self.avg = self.sum / self.count
def __str__(self):
fmtstr = '{name} {val' + self.fmt + '} ({avg' + self.fmt + '})'
return fmtstr.format(**self.__dict__)
class ProgressMeter(object):
def __init__(self, num_batches, meters=None, prefix=""):
self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
self.meters = meters
self.prefix = prefix
def display(self, batch):
entries = [self.prefix + self.batch_fmtstr.format(batch)]
entries += [str(meter) for meter in self.meters]
print('\t'.join(entries))
def synchronize(self):
for meter in self.meters:
if meter.count == 0:
continue
meter.synchronize()
def _get_batch_fmtstr(self, num_batches):
num_digits = len(str(num_batches // 1))
fmt = '{:' + str(num_digits) + 'd}'
return '[' + fmt + '/' + fmt.format(num_batches) + ']'
if __name__ == '__main__':
    parser = argparse.ArgumentParser('CL2L training and evaluation', parents=[get_args_parser()])
args = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
main(args)
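# Example single-node launch (illustrative sketch, not from the original repo; it assumes 8 GPUs
# and that utils.init_distributed_mode picks up the torchrun environment -- adjust paths/flags):
#
#   torchrun --nproc_per_node=8 main.py \
#       --dataset yfcc15m --root datasets/yfcc100m \
#       --metadata datasets/yfcc15m_v1/yfcc15m.pkl \
#       --model CL2L_VITB16 --batch-size 64 --epochs 25 \
#       --output-dir ./output/clip_rocket_vitb16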
|
clip-rocket-main
|
main.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from absl import app
from absl import flags
import cv2
import os.path as osp
import sys
sys.path.insert(0,'third_party')
import pdb
import time
import numpy as np
import torch
import torch.backends.cudnn as cudnn
cudnn.benchmark = True
from nnutils.train_utils import v2s_trainer
opts = flags.FLAGS
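# NOTE: this script expects one process per GPU (e.g. launched via torch.distributed.launch),
# with --local_rank set per process and --ngpu equal to the total number of processes.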
def main(_):
torch.cuda.set_device(opts.local_rank)
world_size = opts.ngpu
torch.distributed.init_process_group(
'nccl',
init_method='env://',
world_size=world_size,
rank=opts.local_rank,
)
print('%d/%d'%(world_size,opts.local_rank))
torch.manual_seed(0)
torch.cuda.manual_seed(1)
torch.manual_seed(0)
trainer = v2s_trainer(opts)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
trainer.init_training()
trainer.train()
if __name__ == '__main__':
app.run(main)
|
banmo-main
|
main.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from absl import flags, app
import sys
sys.path.insert(0,'third_party')
import numpy as np
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from utils.io import save_vid, str_to_frame, save_bones
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
opts = flags.FLAGS
def save_output(rendered_seq, aux_seq, seqname, save_flo):
save_dir = '%s/'%(opts.model_path.rsplit('/',1)[0])
length = len(aux_seq['mesh'])
mesh_rest = aux_seq['mesh_rest']
len_max = (mesh_rest.vertices.max(0) - mesh_rest.vertices.min(0)).max()
mesh_rest.export('%s/mesh-rest.obj'%save_dir)
if 'mesh_rest_skin' in aux_seq.keys():
aux_seq['mesh_rest_skin'].export('%s/mesh-rest-skin.obj'%save_dir)
if 'bone_rest' in aux_seq.keys():
bone_rest = aux_seq['bone_rest']
save_bones(bone_rest, len_max, '%s/bone-rest.obj'%save_dir)
flo_gt_vid = []
flo_p_vid = []
for i in range(length):
impath = aux_seq['impath'][i]
seqname = impath.split('/')[-2]
save_prefix = '%s/%s'%(save_dir,seqname)
idx = int(impath.split('/')[-1].split('.')[-2])
mesh = aux_seq['mesh'][i]
rtk = aux_seq['rtk'][i]
        # convert bones to meshes TODO: wrap this in a function
if 'bone' in aux_seq.keys() and len(aux_seq['bone'])>0:
bones = aux_seq['bone'][i]
bone_path = '%s-bone-%05d.obj'%(save_prefix, idx)
save_bones(bones, len_max, bone_path)
mesh.export('%s-mesh-%05d.obj'%(save_prefix, idx))
np.savetxt('%s-cam-%05d.txt' %(save_prefix, idx), rtk)
img_gt = rendered_seq['img'][i]
flo_gt = rendered_seq['flo'][i]
mask_gt = rendered_seq['sil'][i][...,0]
flo_gt[mask_gt<=0] = 0
img_gt[mask_gt<=0] = 1
if save_flo: img_gt = cat_imgflo(img_gt, flo_gt)
else: img_gt*=255
cv2.imwrite('%s-img-gt-%05d.jpg'%(save_prefix, idx), img_gt[...,::-1])
flo_gt_vid.append(img_gt)
img_p = rendered_seq['img_coarse'][i]
flo_p = rendered_seq['flo_coarse'][i]
mask_gt = cv2.resize(mask_gt, flo_p.shape[:2][::-1]).astype(bool)
flo_p[mask_gt<=0] = 0
img_p[mask_gt<=0] = 1
if save_flo: img_p = cat_imgflo(img_p, flo_p)
else: img_p*=255
cv2.imwrite('%s-img-p-%05d.jpg'%(save_prefix, idx), img_p[...,::-1])
flo_p_vid.append(img_p)
flo_gt = cv2.resize(flo_gt, flo_p.shape[:2])
flo_err = np.linalg.norm( flo_p - flo_gt ,2,-1)
flo_err_med = np.median(flo_err[mask_gt])
flo_err[~mask_gt] = 0.
cv2.imwrite('%s-flo-err-%05d.jpg'%(save_prefix, idx),
128*flo_err/flo_err_med)
img_gt = rendered_seq['img'][i]
img_p = rendered_seq['img_coarse'][i]
img_gt = cv2.resize(img_gt, img_p.shape[:2][::-1])
img_err = np.power(img_gt - img_p,2).sum(-1)
img_err_med = np.median(img_err[mask_gt])
img_err[~mask_gt] = 0.
cv2.imwrite('%s-img-err-%05d.jpg'%(save_prefix, idx),
128*img_err/img_err_med)
# fps = 1./(5./len(flo_p_vid))
upsample_frame = min(30, len(flo_p_vid))
save_vid('%s-img-p' %(save_prefix), flo_p_vid, upsample_frame=upsample_frame)
save_vid('%s-img-gt' %(save_prefix),flo_gt_vid,upsample_frame=upsample_frame)
def transform_shape(mesh,rtk):
"""
(deprecated): absorb rt into mesh vertices,
"""
vertices = torch.Tensor(mesh.vertices)
Rmat = torch.Tensor(rtk[:3,:3])
Tmat = torch.Tensor(rtk[:3,3])
vertices = obj_to_cam(vertices, Rmat, Tmat)
rtk[:3,:3] = np.eye(3)
rtk[:3,3] = 0.
mesh = trimesh.Trimesh(vertices.numpy(), mesh.faces)
return mesh, rtk
def main(_):
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
trainer.define_model(data_info)
seqname=opts.seqname
dynamic_mesh = opts.flowbw or opts.lbs
idx_render = str_to_frame(opts.test_frames, data_info)
# idx_render[0] += 50
# idx_render[0] += 374
# idx_render[0] += 292
# idx_render[0] += 10
# idx_render[0] += 340
# idx_render[0] += 440
# idx_render[0] += 540
# idx_render[0] += 640
# idx_render[0] += trainer.model.data_offset[4]-4 + 37
# idx_render[0] += 36
trainer.model.img_size = opts.render_size
chunk = opts.frame_chunk
for i in range(0, len(idx_render), chunk):
rendered_seq, aux_seq = trainer.eval(idx_render=idx_render[i:i+chunk],
dynamic_mesh=dynamic_mesh)
rendered_seq = tensor2array(rendered_seq)
save_output(rendered_seq, aux_seq, seqname, save_flo=opts.use_corresp)
#TODO merge the outputs
if __name__ == '__main__':
app.run(main)
|
banmo-main
|
extract.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import cv2
import glob
import numpy as np
import pdb
import os
import shutil
import detectron2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
coco_metadata = MetadataCatalog.get("coco_2017_val")
import torch
import torch.nn.functional as F
import torchvision
import sys
curr_dir = os.path.abspath(os.getcwd())
sys.path.insert(0,curr_dir)
try:
detbase='./third_party/detectron2/'
sys.path.insert(0,'%s/projects/PointRend/'%detbase)
import point_rend
except:
detbase='./third_party/detectron2_old/'
sys.path.insert(0,'%s/projects/PointRend/'%detbase)
import point_rend
sys.path.insert(0,'third_party/ext_utils')
from utils.io import save_vid
from util_flow import write_pfm
seqname=sys.argv[1]
ishuman=sys.argv[2] # 'y/n'
datadir='tmp/%s/images/'%seqname
odir='database/DAVIS/'
imgdir= '%s/JPEGImages/Full-Resolution/%s'%(odir,seqname)
maskdir='%s/Annotations/Full-Resolution/%s'%(odir,seqname)
#if os.path.exists(imgdir): shutil.rmtree(imgdir)
#if os.path.exists(maskdir): shutil.rmtree(maskdir)
#os.mkdir(imgdir)
#os.mkdir(maskdir)
cfg = get_cfg()
point_rend.add_pointrend_config(cfg)
cfg.merge_from_file('%s/projects/PointRend/configs/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_coco.yaml'%(detbase))
cfg.MODEL.WEIGHTS ='https://dl.fbaipublicfiles.com/detectron2/PointRend/InstanceSegmentation/pointrend_rcnn_X_101_32x8d_FPN_3x_coco/28119989/model_final_ba17b9.pkl'
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST=0.9
predictor = DefaultPredictor(cfg)
counter=0
frames = []
for i,path in enumerate(sorted(glob.glob('%s/*'%datadir))):
print(path)
img = cv2.imread(path)
h,w = img.shape[:2]
# store at most 1080p videos
scale = np.sqrt(1920*1080/(h*w))
if scale<1:
img = cv2.resize(img, (int(w*scale), int(h*scale)) )
h,w = img.shape[:2]
# resize to some empirical size
if h>w: h_rszd,w_rszd = 1333, 1333*w//h
else: h_rszd,w_rszd = 1333*h//w, 1333
img_rszd = cv2.resize(img,(w_rszd,h_rszd))
# pad borders to make sure detection works when obj is out-of-frame
pad=100
img_rszd = cv2.copyMakeBorder(img_rszd,pad,pad,pad,pad,cv2.BORDER_REPLICATE)
# pointrend
outputs = predictor(img_rszd)
outputs = outputs['instances'].to('cpu')
mask_rszd = np.zeros((h_rszd+pad*2,w_rszd+pad*2))
for it,ins_cls in enumerate(outputs.pred_classes):
print(ins_cls)
#if ins_cls ==15: # cat
#if ins_cls==0 or (ins_cls >= 14 and ins_cls <= 23):
if ishuman=='y':
if ins_cls ==0:
mask_rszd += np.asarray(outputs.pred_masks[it])
else:
if ins_cls >= 14 and ins_cls <= 23:
mask_rszd += np.asarray(outputs.pred_masks[it])
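    # keep only the largest connected component of the accumulated masks
    # (drops small spurious detections)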
nb_components, output, stats, centroids = \
cv2.connectedComponentsWithStats(mask_rszd.astype(np.uint8), connectivity=8)
if nb_components>1:
max_label, max_size = max([(i, stats[i, cv2.CC_STAT_AREA]) for i in range(1, nb_components)], key=lambda x: x[1])
mask_rszd = output == max_label
mask_rszd = mask_rszd.astype(bool).astype(int)
if (mask_rszd.sum())<1000: continue
mask_rszd = mask_rszd [pad:-pad,pad:-pad]
img_rszd = img_rszd [pad:-pad,pad:-pad]
outputs.pred_masks=outputs.pred_masks[:,pad:-pad,pad:-pad]
outputs.pred_boxes.tensor[:,0:2] -= pad
outputs.pred_boxes.tensor[:,2:4] -= 2*pad
mask_rszd = np.concatenate([mask_rszd[:,:,np.newaxis]* 128,
np.zeros((h_rszd, w_rszd, 1)),
np.zeros((h_rszd, w_rszd, 1))],-1)
mask = cv2.resize(mask_rszd,(w,h))
cv2.imwrite('%s/%05d.jpg'%(imgdir,counter), img)
cv2.imwrite('%s/%05d.png'%(maskdir,counter), mask)
# vis
v = Visualizer(img_rszd, coco_metadata, scale=1, instance_mode=ColorMode.IMAGE_BW)
#outputs.remove('pred_masks')
vis = v.draw_instance_predictions(outputs)
vis = vis.get_image()
cv2.imwrite('%s/vis-%05d.jpg'%(maskdir,counter), vis)
counter+=1
frames.append(vis[:,:,::-1])
save_vid('%s/vis'%maskdir, frames, suffix='.mp4')
save_vid('%s/vis'%maskdir, frames, suffix='.gif')
|
banmo-main
|
preprocess/mask.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
"""
python img2lines.py --seqname xx
"""
from absl import flags, app
import sys
sys.path.insert(0,'third_party')
sys.path.insert(0,'./')
import numpy as np
import torch
import os
import glob
import pdb
import cv2
import trimesh
from scipy.spatial.transform import Rotation as R
import imageio
from utils.io import save_vid, str_to_frame, save_bones
from utils.colors import label_colormap
from nnutils.train_utils import v2s_trainer
from nnutils.geom_utils import obj_to_cam, tensor2array, vec_to_sim3
from utils.io import mkdir_p
from ext_utils.util_flow import write_pfm
from ext_utils.flowlib import cat_imgflo
from utils.io import config_to_dataloader
from torch.utils.data import DataLoader
opts = flags.FLAGS
def dict2pix(dict_array, idy):
dict_px = {}
dict_px['img'] = dict_array['img'][...,idy,:]
dict_px['mask'] = dict_array['mask'][...,idy,:]
dict_px['vis2d'] = dict_array['vis2d'][...,idy,:]
dict_px['flow'] = dict_array['flow'][...,idy,:]
dict_px['occ'] = dict_array['occ'][...,idy,:]
dict_px['dp'] = dict_array['dp'][...,idy,:]
dict_px['dp_feat_rsmp'] = dict_array['dp_feat_rsmp'][...,idy,:]
return dict_px
def dict2rtk(dict_array):
dict_out = {}
dict_out['rtk'] = dict_array['rtk']
dict_out['kaug'] = dict_array['kaug']
return dict_out
def main(_):
seqname=opts.seqname
trainer = v2s_trainer(opts, is_eval=True)
data_info = trainer.init_dataset()
impaths = data_info['impath']
data_offset = data_info['offset']
opts_dict = {}
opts_dict['seqname'] = opts.seqname
opts_dict['img_size'] = opts.img_size
opts_dict['rtk_path'] = opts.rtk_path
opts_dict['batch_size'] = 1
opts_dict['ngpu'] = 1
opts_dict['preload'] = False
opts_dict['dframe'] = [1,2,4,8,16,32]
dataset = config_to_dataloader(opts_dict,is_eval=True)
#dataset = config_to_dataloader(opts_dict,is_eval=False)
dataset = DataLoader(dataset,
batch_size= 1, num_workers=0, drop_last=False,
pin_memory=True, shuffle=False)
for dat in dataset.dataset.datasets:
dat.spec_dt = 1
#TODO
#overwrite=False
overwrite=True
# hardcoded path
base_path = 'database/DAVIS/Pixels/Full-Resolution/'
for i, batch in enumerate(dataset):
frameid = batch['frameid']
dataid = batch['dataid']
dt = frameid[0,1] - frameid[0,0]
frameid = frameid + data_offset[dataid[0,0].long()]
        if dt<0: continue # only save forward pair (backward pair is equivalent)
impath = impaths[frameid.long()[0,0]]
seqname_sub = impath.split('/')[-2]
frameid_sub = impath.split('/')[-1].split('.')[0]
save_dir = '%s/%s'%(base_path, seqname_sub)
save_dir_t = '%s/%d_%s'%(save_dir, dt, frameid_sub)
print(save_dir_t)
if (not overwrite) and os.path.exists(save_dir_t):
continue
mkdir_p(save_dir_t)
dict_array = tensor2array(batch)
# save each pixel: 00_00000/0000.npy # t,h
dict_rtk = dict2rtk(dict_array)
save_path_rtk = '%s/rtk.npy'%(save_dir_t)
np.save(save_path_rtk, dict_rtk)
for idy in range(opts.img_size):
save_path = '%s/%04d.npy'%(save_dir_t, idy)
dict_px = dict2pix(dict_array, idy)
np.save(save_path, dict_px)
if __name__ == '__main__':
app.run(main)
|
banmo-main
|
preprocess/img2lines.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import configparser
import cv2
import glob
import pdb
import sys
seqname_pre=sys.argv[1]
ishuman=sys.argv[2] # 'y/n'
silroot='database/DAVIS/Annotations/Full-Resolution/'
config = configparser.ConfigParser()
config['data'] = {
'dframe': '1',
'init_frame': '0',
'end_frame': '-1',
'can_frame': '-1'}
seqname_all = sorted(glob.glob('%s/%s[0-9][0-9][0-9]'%(silroot, seqname_pre)))
total_vid = 0
for i,seqname in enumerate(seqname_all):
seqname = seqname.split('/')[-1]
img = cv2.imread('%s/%s/00000.png'%(silroot,seqname),0)
if img is None:continue
num_fr = len(glob.glob('%s/%s/*.png'%(silroot,seqname)))
if num_fr < 16:continue
fl = max(img.shape)
px = img.shape[1]//2
py = img.shape[0]//2
camtxt = [fl,fl,px,py]
config['data_%d'%total_vid] = {
'ishuman': ishuman,
'ks': ' '.join( [str(i) for i in camtxt] ),
'datapath': 'database/DAVIS/JPEGImages/Full-Resolution/%s/'%seqname,
}
total_vid += 1
with open('configs/%s.config'%(seqname_pre), 'w') as configfile:
config.write(configfile)
|
banmo-main
|
preprocess/write_config.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import cv2
import glob
import numpy as np
import pdb
import os
import shutil
import detectron2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from detectron2.utils.visualizer import Visualizer, ColorMode
from detectron2.data import MetadataCatalog
coco_metadata = MetadataCatalog.get("coco_2017_val")
import torch
import torch.nn.functional as F
import torchvision
import sys
curr_dir = os.path.abspath(os.getcwd())
sys.path.insert(0,curr_dir)
try:
detbase='./third_party/detectron2/'
sys.path.insert(0,'%s/projects/DensePose/'%detbase)
from utils.cselib import create_cse, run_cse
except:
detbase='./third_party/detectron2_old/'
sys.path.insert(0,'%s/projects/DensePose/'%detbase)
from utils.cselib import create_cse, run_cse
sys.path.insert(0,'third_party/ext_utils')
from utils.io import save_vid, visObj
from util_flow import write_pfm
seqname=sys.argv[1]
ishuman=sys.argv[2] # 'y/n'
odir='database/DAVIS/'
imgdir= '%s/JPEGImages/Full-Resolution/%s'%(odir,seqname)
maskdir='%s/Annotations/Full-Resolution/%s'%(odir,seqname)
dpdir='%s/Densepose/Full-Resolution/%s'%(odir,seqname)
if os.path.exists(dpdir): shutil.rmtree(dpdir)
os.mkdir(dpdir)
if ishuman=='y':
#human
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml'%(detbase)
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x/250713061/model_final_1d3314.pkl'
mesh_name = 'smpl_27554'
elif ishuman=='n':
#quadrupeds
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml'%(detbase)
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k/253498611/model_final_6d69b7.pkl'
mesh_name = 'sheep_5004'
else:
print('y/n, exiting')
exit()
predictor_dp, embedder, mesh_vertex_embeddings = create_cse(config_path,
weight_path)
counter=0
frames = []
for i,path in enumerate(sorted(glob.glob('%s/*'%imgdir))):
print(path)
img = cv2.imread(path)
msk = cv2.imread(path.replace('JPEGImages', 'Annotations').replace('.jpg', '.png'),0)
h,w = img.shape[:2]
    # recompute mask
msk = msk/np.sort(np.unique(msk))[1]
occluder = msk==255
msk[occluder] = 0
# resize to some empirical size
if h>w: h_rszd,w_rszd = 1333, 1333*w//h
else: h_rszd,w_rszd = 1333*h//w, 1333
img_rszd = cv2.resize(img,(w_rszd,h_rszd))
msk_rszd = cv2.resize(msk,(w_rszd,h_rszd))
# densepose
clst_verts, image_bgr1, embedding, embedding_norm, bbox = run_cse(
predictor_dp, embedder,
mesh_vertex_embeddings,
img_rszd, msk_rszd,
mesh_name=mesh_name)
# resize to original size
bbox[0] *= w / clst_verts.shape[1]
bbox[2] *= w / clst_verts.shape[1]
bbox[1] *= h / clst_verts.shape[0]
bbox[3] *= h / clst_verts.shape[0]
np.savetxt( '%s/bbox-%05d.txt'%(dpdir,counter) , bbox)
clst_verts = cv2.resize(clst_verts, (w,h), interpolation=cv2.INTER_NEAREST)
    # assume at most ~10k vertex ids; divide by 50 so values stay below ~200
clst_verts = (clst_verts/50.).astype(np.float32)
write_pfm( '%s/%05d.pfm'%(dpdir,counter), clst_verts)
embedding_norm = cv2.resize(embedding_norm, (w,h))
write_pfm( '%s/norm-%05d.pfm'%(dpdir,counter), embedding_norm)
embedding = embedding.reshape((-1,embedding.shape[-1]))
write_pfm( '%s/feat-%05d.pfm'%(dpdir,counter), embedding)
# vis
#v = Visualizer(img_rszd, coco_metadata, scale=1, instance_mode=ColorMode.IMAGE_BW)
#outvis = visObj()
#outvis.image_height = h
#outvis.image_width = w
#outvis._fields = {}
#outvis._fields["pred_boxes"] = np.asarray([[0,0,h,w,1.]])
#vis = v.draw_instance_predictions(outvis)
#vis = vis.get_image()
vis=img_rszd
alpha_mask = 0.8*(msk_rszd>0)[...,None]
mask_result = vis*(1-alpha_mask) + image_bgr1 * alpha_mask
cv2.imwrite('%s/vis-%05d.jpg'%(dpdir,counter), mask_result)
counter+=1
frames.append(mask_result[:,:,::-1])
save_vid('%s/vis'%dpdir, frames, suffix='.mp4')
save_vid('%s/vis'%dpdir, frames, suffix='.gif')
|
banmo-main
|
preprocess/compute_dp.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import os
import errno
from typing import Any, Dict, List, Tuple, Union
import cv2
import pdb
import configparser
import torch
import numpy as np
import imageio
import trimesh
import glob
import matplotlib.cm
import torch.nn.functional as F
from scipy.spatial.transform import Rotation as R
from torch.utils.data import Dataset
import sys
sys.path.insert(0,'third_party')
import dataloader.vidbase as base_data
from ext_utils.flowlib import flow_to_image
from utils.colors import label_colormap
def draw_lines(img, xy1s, xy2s):
device = img.device
colormap = label_colormap()
len_colormap = colormap.shape[0]
img = img.permute(1,2,0).cpu().numpy()*255
img = img.astype(np.uint8)[:,:,::-1].copy()
for i in range(len(xy1s)):
color = tuple([int(x) for x in colormap[i%len_colormap]])
p1 = tuple(xy1s[i].detach().cpu().numpy())
p2 = tuple(xy2s[i].detach().cpu().numpy())
cv2.circle(img,p1,3, color)
cv2.circle(img,p2,3, color)
cv2.line(img, p1, p2, color, thickness=1)
#pdb.set_trace()
#cv2.imwrite('tmp/0.png', img)
#img = torch.Tensor(img).to(device).permute(2,0,1)[None]
return img
def draw_pts(img, xys):
device = img.device
img = img.permute(1,2,0).cpu().numpy()*255
img = img.astype(np.uint8)[:,:,::-1].copy()
for point in xys:
point = point.detach().cpu().numpy()
cv2.circle(img,tuple(point),1,(0,0,255))
#pdb.set_trace()
#cv2.imwrite('tmp/0.png', img)
#img = torch.Tensor(img).to(device).permute(2,0,1)[None]
return img
def save_bones(bones, len_max, path):
B = len(bones)
elips_list = []
elips = trimesh.creation.uv_sphere(radius=len_max/20,count=[16, 16])
# remove identical vertices
elips = trimesh.Trimesh(vertices=elips.vertices, faces=elips.faces)
N_elips = len(elips.vertices)
for bone in bones:
center = bone[None,:3]
orient = bone[3:7] # real first
orient = orient / np.linalg.norm(orient, 2,-1)
orient = orient[[1,2,3,0]]
orient = R.from_quat(orient).as_matrix() # real first
orient = orient.T # transpose R
scale = np.exp(bone[None, 7:10])
elips_verts = elips.vertices
elips_verts = elips_verts / scale
elips_verts = elips_verts.dot(orient)
elips_verts = elips_verts+center
elips_list.append( trimesh.Trimesh(vertices = elips_verts,
faces=elips.faces) )
elips = trimesh.util.concatenate(elips_list)
colormap = label_colormap()[:B]
colormap= np.tile(colormap[:,None], (1,N_elips,1)).reshape((-1,3))
elips.visual.vertex_colors[:len(colormap),:3] = colormap
elips.export(path)
def vis_match(results, masks, imgs, bs,img_size,ndepth):
# show error images
bs = imgs.shape[0]
for i in range(bs):
mask_rszd = F.interpolate(masks[None],(img_size,img_size))[0,i].bool()
img_rszd = F.interpolate(imgs ,(img_size,img_size))[i].permute(1,2,0)
img_mskd = img_rszd[mask_rszd].cpu().numpy()
if 'feat_err' in results.keys():
feat_errs = results['feat_err']
feat_err = feat_errs[i].view(img_size,img_size)
feat_err[~mask_rszd] = 0.
med = feat_err[mask_rszd].median()
print('%d-median:%f' %(i,med))
cv2.imwrite('tmp/match_err-%d.png'%i, (feat_err/med).cpu().numpy()*128)
# draw lines
if 'xyz_camera_vis' in results.keys() and 'pts_exp_vis' in results.keys():
mask_rszd = F.interpolate(masks[None],(img_size,img_size))[0,0].bool()
img_rszd = F.interpolate(imgs ,(img_size,img_size))[0].permute(1,2,0)
xyz_coarse_frame = results['xyz_camera_vis']
color_plane = torch.stack([img_rszd, torch.ones_like(img_rszd)],0).view(-1,3)
color_plane = color_plane.cpu().numpy()
near_plane= xyz_coarse_frame.view(bs,-1,ndepth,3)[0,:,0]
d_near = near_plane[:,2].mean()
near_plane[...,-1] -= d_near*0.01
far_plane = xyz_coarse_frame.view(bs,-1,ndepth,3)[0,:,-1]
nf_plane = torch.cat([near_plane, far_plane],0)
#trimesh.Trimesh(nf_plane.cpu().numpy(), vertex_colors=color_plane).\
trimesh.Trimesh(near_plane.cpu().numpy(), vertex_colors=img_rszd.view(-1,3).cpu().numpy()).\
export('tmp/match_plane.obj')
near_plane_mskd = near_plane[mask_rszd.view(-1)].cpu()
pts_pred = results['pts_pred_vis']
pts_pred = pts_pred[0].view(img_size,img_size,3)[mask_rszd].cpu().numpy()
draw_lines_ray_canonical(near_plane_mskd, pts_pred,img_mskd,
'tmp/match_line_pred.obj')
pts_exp = results['pts_exp_vis']
pts_exp = pts_exp[0].view(img_size,img_size,3)[mask_rszd].cpu().numpy()
draw_lines_ray_canonical(pts_pred, pts_exp,img_mskd,
'tmp/match_line_exp.obj')
#pts_pred_col=results['pts_pred'][0][mask_rszd].cpu().numpy()
#pts_exp_col = results['pts_exp'][0][mask_rszd].cpu().numpy()
#trimesh.Trimesh(pts_pred, vertex_colors=img_mskd).export('tmp/viser_pred.obj')
#trimesh.Trimesh(pts_exp ,vertex_colors=img_mskd).export('tmp/viser_exp.obj')
def draw_lines_ray_canonical(near_plane_mskd, pts_exp, img_mskd, path):
colormap = label_colormap()
len_color = len(colormap)
meshes = []
idx=0
num_pts = len(near_plane_mskd)
for i in range(0,num_pts, num_pts//50): # display 50 points
## only plot idx=5
#if idx!=5:
# idx+=1
# continue
segment = np.stack([near_plane_mskd[i], pts_exp[i]])
line = trimesh.creation.cylinder(0.0001,
segment=segment,sections=5, vertex_colors=colormap[idx%len_color])
meshes.append(line)
idx+=1
meshes = trimesh.util.concatenate(meshes)
meshes.export(path)
def merge_dict(dict_list):
out_dict = {}
for k in dict_list[0].keys():
out_dict[k] = []
for i in range(len(dict_list)):
for k in out_dict.keys():
out_dict[k] += dict_list[i][k]
return out_dict
def render_root_txt(cam_dir, cap_frame):
# read all the data
camlist = load_root(cam_dir, cap_frame)
# construct camera mesh
mesh = draw_cams(camlist)
save_dir,seqname=cam_dir.rsplit('/',1)
mesh.export('%s/mesh-%s.obj'%(save_dir, seqname))
def load_sils(root_dir, cap_frame):
"""
    load all the silhouette images under root_dir
    expected naming: ...-(00000.png)
"""
imglist = []
img_path = '%s*.png'%(root_dir)
#img_path = '%s0*.png'%(root_dir)
all_path = sorted(glob.glob(img_path))
if cap_frame>0:
all_path = all_path[:cap_frame]
for idx,path in enumerate(all_path):
img = cv2.imread(path,0)
imglist.append(img)
imglist = np.asarray(imglist)
return imglist
def load_root(root_dir, cap_frame):
"""
    load all the root se(3) camera matrices under root_dir
    expected naming: ...-(00000.txt)
"""
camlist = []
#cam_path = '%s0*.txt'%(root_dir)
cam_path = '%s*.txt'%(root_dir)
all_path = sorted(glob.glob(cam_path))
if cap_frame>0:
all_path = all_path[:cap_frame]
for idx,path in enumerate(all_path):
rtk = np.loadtxt(path)
camlist.append(rtk)
camlist = np.asarray(camlist)
return camlist
def draw_cams(all_cam, color='cool', axis=True,
color_list = None):
"""
all_cam: a list of 4x4 cameras
"""
# scale: the scene bound
cmap = matplotlib.cm.get_cmap(color)
all_cam = np.asarray(all_cam)
trans_norm = np.linalg.norm(all_cam[:,:3,3],2,-1)
valid_cams = trans_norm>0
trans_max = np.median(trans_norm[valid_cams])
scale=trans_max
traj_len = len(all_cam)
cam_list = []
if color_list is None:
color_list = np.asarray(range(traj_len))/float(traj_len)
for j in range(traj_len):
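        # invert the extrinsics: recover the camera orientation and center in the world (object) frame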
cam_rot = all_cam[j][:3,:3].T
cam_tran = -cam_rot.dot(all_cam[j][:3,3:])[:,0]
radius = 0.02*scale
cam = trimesh.creation.uv_sphere(radius=radius,count=[2, 2])
if axis:
#TODO draw axis
extents = np.asarray([radius*20, radius*10, radius*0.1])
axis = trimesh.creation.axis(origin_size = radius,
origin_color = cmap(color_list[j]),
axis_radius = radius* 0.1,
axis_length = radius*5)
#extents=extents)
#axis.vertices[:,2] += radius * 5
#cam = trimesh.util.concatenate([elips, axis])
cam = axis
#cam.vertices = cam.vertices + cam_tran
cam.vertices = cam.vertices.dot(cam_rot.T) + cam_tran
#cam.visual.vertex_colors = cmap(float(j)/traj_len)
cam_list.append(cam)
mesh_cam = trimesh.util.concatenate(cam_list)
return mesh_cam
def draw_cams_pair(cam1,cam2, color='cool', axis=True,
color_list = None):
frame_num = cam1.shape[0]
cam_mesh1 = draw_cams(cam1, color=color,axis=axis,color_list=color_list)
cam_mesh2 = draw_cams(cam2, color=color,axis=axis,color_list=color_list)
# draw line
lines = []
for i in range(frame_num):
cam1_c = -cam1[i,:3,:3].T.dot(cam1[i,:3,3:])[:,0]
cam2_c = -cam2[i,:3,:3].T.dot(cam2[i,:3,3:])[:,0]
segment = np.stack([cam1_c, cam2_c])
line = trimesh.creation.cylinder(0.001,segment=segment,sections=5)
lines.append(line)
lines = trimesh.util.concatenate(lines)
return cam_mesh1, cam_mesh2, lines
def save_vid(outpath, frames, suffix='.gif',upsample_frame=150., fps=10,
is_flow=False):
"""
    save a sequence of frames to a video file
    frames: an (n, h, w, c) array or a list of n frames
"""
# convert to 150 frames
if upsample_frame<1: upsample_frame = len(frames)
frame_150=[]
for i in range(int(upsample_frame)):
fid = int(i/upsample_frame*len(frames))
frame = frames[fid]
if is_flow:
frame = flow_to_image(frame)
if frame.max()<=1:
frame=frame*255
frame = frame.astype(np.uint8)
if suffix=='.gif':
h,w=frame.shape[:2]
fxy = np.sqrt(4e5/(h*w))
frame = cv2.resize(frame,None,fx=fxy, fy=fxy)
frame_150.append(frame)
imageio.mimsave('%s%s'%(outpath,suffix), frame_150, fps=fps)
class visObj(object):
"""
a class for detectron2 vis
"""
def has(self, name: str) -> bool:
return name in self._fields
def __getattr__(self, name: str) -> Any:
if name == "_fields" or name not in self._fields:
raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
return self._fields[name]
def config_to_dataloader(opts, is_eval=False):
"""
    build a concatenated pytorch dataset from a dict of options {seqname, batch_size, ngpu, ...}
"""
config = configparser.RawConfigParser()
config.read('configs/%s.config'%opts['seqname'])
numvid = len(config.sections())-1
datalist = []
for i in range(numvid):
dataset = get_config_info(opts, config, 'data_%d'%i, i, is_eval=is_eval)
datalist = datalist + dataset
dataset = torch.utils.data.ConcatDataset(datalist)
return dataset
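# Minimal usage sketch (illustrative; assumes configs/<seqname>.config exists, e.g. as written
# by preprocess/write_config.py, and that the listed datapaths contain frames):
#
#   opts = {'seqname': 'my-seq', 'img_size': 512, 'rtk_path': '', 'batch_size': 1,
#           'ngpu': 1, 'preload': False}
#   dataset = config_to_dataloader(opts, is_eval=True)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False)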
def get_config_info(opts, config, name, dataid, is_eval=False):
def load_attr(attrs, config, dataname):
try:attrs['datapath'] = '%s'%(str(config.get(dataname, 'datapath')))
except:pass
try:attrs['dframe'] = [int(i) for i in config.get(dataname, 'dframe').split(',')]
except:pass
try:attrs['can_frame']= int(config.get(dataname, 'can_frame'))
except:pass
try:attrs['init_frame']=int(config.get(dataname, 'init_frame'))
except:pass
try:attrs['end_frame'] =int(config.get(dataname, 'end_frame'))
except:pass
try:attrs['rtk_path'] =config.get(dataname, 'rtk_path')
except:pass
return
attrs={}
attrs['rtk_path'] = None
load_attr(attrs, config, 'data')
load_attr(attrs, config, name)
datapath = attrs['datapath']
if 'dframe' in opts.keys():
dframe = opts['dframe'] # only in preload
else:
dframe = attrs['dframe']
can_frame =attrs['can_frame']
init_frame=attrs['init_frame']
end_frame= attrs['end_frame']
rtk_path=opts['rtk_path']
numvid = len(config.sections())-1
if numvid==1 and not config.has_option(name, 'datapath'):
datapath='%s/%s'%(datapath, opts['seqname'])
# opts rtk_path
if rtk_path =='':
# rtk path from config
rtk_path= attrs['rtk_path']
elif not os.path.isfile('%s-00000.txt'%rtk_path):
print('loading cameras from init-cam')
rtk_path = '%s/%s'%(rtk_path, datapath.strip('/').split('/')[-1])
imglist = sorted(glob.glob('%s/*'%datapath))
try: flip=int(config.get(name, 'flip'))
except: flip=0
if end_frame >0:
imglist = imglist[:end_frame]
print('init:%d, end:%d'%(init_frame, end_frame))
# load dataset
datasets = []
for df in dframe:
if 'lineload' in opts.keys() and opts['lineload']:
# per-line loader
#TODO
dataset= LineDataset(opts, imglist = imglist, can_frame = can_frame,
dframe=df, init_frame=init_frame,
dataid=dataid, numvid=numvid, flip=flip, is_eval=is_eval,
rtk_path=rtk_path)
else:
# per-image loader
try:
dataset = VidDataset(opts, imglist = imglist, can_frame = can_frame,
dframe=df, init_frame=init_frame,
dataid=dataid, numvid=numvid, flip=flip, is_eval=is_eval,
rtk_path=rtk_path)
except: continue
if rtk_path is None:
dataset.has_prior_cam = False
else:
dataset.has_prior_cam = True
# whether to use preloaded data
if 'preload' in opts.keys():
dataset.preload = opts['preload']
else:
dataset.preload = False
if 'multiply' in opts.keys():
# duplicate the dataset so that one epoch covers more than 200 iterations
dup_num = 200/(len(dataset)/opts['ngpu']/opts['batch_size'])
if 'accu_steps' in opts.keys():
dup_num = dup_num*opts['accu_steps']
dup_num = int(dup_num)+1
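# Worked example of the duplication factor (illustrative numbers): with
# len(dataset)=300, ngpu=1, batch_size=4 and accu_steps=1, 300/1/4 = 75 batches per
# pass, so dup_num = 200/75 ~ 2.7 and int(2.7)+1 = 3 copies are appended, which
# guarantees more than 200 iterations per epoch.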
for i in range(dup_num):
datasets.append(dataset)
else:
datasets.append(dataset)
return datasets
class LineDataset(Dataset):
'''
per-line dataset: each sample is one row of pixels from a frame pair,
loaded by the line-based training loader
'''
def __init__(self, opts, filter_key=None, imglist=None, can_frame=0,
dframe=1,init_frame=0, dataid=0, numvid=1, flip=0,
is_eval=False, rtk_path=None):
super(LineDataset, self).__init__()
self.crop_factor = 1.2
self.imglist = imglist
self.img_size = opts['img_size']
self.num_lines = (len(imglist)-1) * self.img_size # last img not saved
seqname = imglist[0].split('/')[-2]
if rtk_path is not None:
self.rtklist =['%s-%05d.txt'%(rtk_path, i) for i in range(len(self.imglist))]
else:
self.rtklist =[i.replace('JPEGImages', 'Cameras').replace('.jpg', '.txt') for i in self.imglist]
# Load the annotation file.
self.dataid = dataid
print('%d lines' % self.num_lines)
def __len__(self):
return self.num_lines
def __getitem__(self, index):
try:dataid = self.dataid
except: dataid=0
#TODO localize file
idt = index // self.img_size# idt, idy
idy = index % self.img_size# idt, idy
save_dir = self.imglist[0].replace('JPEGImages', 'Pixels').rsplit('/',1)[0]
dframe_list = [2,4,8,16,32]
max_id = len(self.imglist)-1
dframe_list = [1] + [i for i in dframe_list if (idt%i==0) and \
int(idt+i) <= max_id]
dframe = np.random.choice(dframe_list)
data_path = '%s/%d_%05d/%04d.npy'%(save_dir, dframe, idt, idy)
elem = np.load(data_path,allow_pickle=True).item()
# modify dataid according to training time ones
# reload rtk based on rtk predictions
# add RTK: [R_3x3|T_3x1]
# [fx,fy,px,py], to the ndc space
# always forward flow
idtn = idt + dframe
try:
rtk_path = self.rtklist[idt]
rtk = np.loadtxt(rtk_path)
rtkn_path = self.rtklist[idtn]
rtkn = np.loadtxt(rtkn_path)
rtk = np.stack([rtk, rtkn])
except:
print('warning: loading empty camera')
print(rtk_path)
rtk = np.zeros((4,4))
rtk[:3,:3] = np.eye(3)
rtk[:3, 3] = np.asarray([0,0,10])
rtk[3, :] = np.asarray([512,512,256,256])
rtkn = rtk.copy()
rtk = np.stack([rtk, rtkn])
kaug_path = '%s/%d_%05d/rtk.npy'%(save_dir, dframe, idt)
kaug = np.load(kaug_path,allow_pickle=True).item()['kaug']
#TODO fill elems
elem['rtk'] = rtk[None] # 1,2,x
elem['kaug'] = kaug
elem['dataid'] = np.stack([dataid, dataid])[None]
elem['frameid'] = np.stack([idt, idtn])[None]
elem['lineid'] = np.stack([idy, idy])[None]
return elem
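# Descriptive note (not original code): the returned elem packs paired-frame data for
# one pixel row: elem['rtk'] is (1,2,4,4) with the cameras of frames idt and idt+dframe,
# 'kaug' the crop/scale intrinsics loaded from rtk.npy, and 'dataid'/'frameid'/'lineid'
# are (1,2) index pairs consumed by the line-based loader.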
class VidDataset(base_data.BaseDataset):
'''
per-image dataset: each sample is a pair of frames from a single video,
together with their mask, flow, densepose and camera annotations
'''
def __init__(self, opts, filter_key=None, imglist=None, can_frame=0,
dframe=1,init_frame=0, dataid=0, numvid=1, flip=0,
is_eval=False, rtk_path=None):
super(VidDataset, self).__init__(opts, filter_key=filter_key)
self.flip=flip
self.imglist = imglist
self.can_frame = can_frame
self.dframe = dframe
seqname = imglist[0].split('/')[-2]
self.masklist = [i.replace('JPEGImages', 'Annotations').replace('.jpg', '.png') for i in self.imglist]
self.camlist = [i.replace('JPEGImages', 'Camera').replace('.jpg', '.txt') for i in self.imglist]
if dframe==1:
self.flowfwlist = [i.replace('JPEGImages', 'FlowFW').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/flo-'%seqname) for i in self.imglist]
self.flowbwlist = [i.replace('JPEGImages', 'FlowBW').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/flo-'%seqname) for i in self.imglist]
else:
self.flowfwlist = [i.replace('JPEGImages', 'FlowFW').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/flo-'%(seqname)) for i in self.imglist]
self.flowbwlist = [i.replace('JPEGImages', 'FlowBW').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/flo-'%(seqname)) for i in self.imglist]
self.featlist = [i.replace('JPEGImages', 'Densepose').replace('.jpg', '.pfm').replace('.png', '.pfm').replace('%s/'%seqname, '%s/feat-'%seqname) for i in self.imglist]
self.featlist = ['%s/feat-%05d.pfm'%(i.rsplit('/',1)[0], int(i.split('feat-')[-1].split('.pfm')[0])) for i in self.featlist]
self.bboxlist = ['%s/bbox-%05d.txt'%(i.rsplit('/',1)[0], int(i.split('feat-')[-1].split('.pfm')[0])) for i in self.featlist]
self.kplist = [i.replace('JPEGImages', 'KP').replace('.jpg', '_keypoints.json').replace('.png', '_keypoints.json') for i in self.imglist]
self.dplist = [i.replace('JPEGImages', 'Densepose').replace('.jpg', '.pfm').replace('.png', '.pfm') for i in self.imglist]
if rtk_path is not None:
self.rtklist =['%s-%05d.txt'%(rtk_path, i) for i in range(len(self.imglist))]
else:
self.rtklist =[i.replace('JPEGImages', 'Cameras').replace('.jpg', '.txt') for i in self.imglist]
self.baselist = [i for i in range(len(self.imglist)-self.dframe)] + [i+self.dframe for i in range(len(self.imglist)-self.dframe)]
self.directlist = [1] * (len(self.imglist)-self.dframe) + [0]* (len(self.imglist)-self.dframe)
# to skip frames
self.odirectlist = self.directlist.copy()
len_list = len(self.baselist)//2
self.fw_list = self.baselist[:len_list][init_frame::self.dframe]
self.bw_list = self.baselist[len_list:][init_frame::self.dframe]
self.dir_fwlist = self.directlist[:len_list][init_frame::self.dframe]
self.dir_bwlist = self.directlist[len_list:][init_frame::self.dframe]
if is_eval:
self.baselist = self.fw_list
self.directlist = self.dir_fwlist
else:
self.baselist = self.fw_list + self.bw_list
self.directlist = self.dir_fwlist + self.dir_bwlist
self.baselist = [self.baselist[0]] + self.baselist + [self.baselist[-1]]
self.directlist = [self.directlist[0]] + self.directlist + [self.directlist[-1]]
fac = (opts['batch_size']*opts['ngpu']*200)//len(self.directlist) // numvid
if fac==0: fac=1
self.directlist = self.directlist*fac
self.baselist = self.baselist*fac
# Load the annotation file.
self.num_imgs = len(self.directlist)
self.dataid = dataid
print('%d pairs of images' % self.num_imgs)
def str_to_frame(test_frames, data_info):
if test_frames[0]=='{':
# render a list of videos
idx_render = []
for i in test_frames[1:-1].split(','):
vid_idx = int(i)
idx_render += range(data_info['offset'][vid_idx]-vid_idx,
data_info['offset'][vid_idx+1]-vid_idx-1)
else:
test_frames = int(test_frames)
if test_frames==0:
test_frames = data_info['len_evalloader']-1
# render specific number of frames
idx_render = np.linspace(0,data_info['len_evalloader']-1,
test_frames, dtype=int)
return idx_render
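# Usage sketch (argument values are illustrative):
# idx_render = str_to_frame('{0,2}', data_info)  # frames of videos 0 and 2
# idx_render = str_to_frame('30', data_info)     # 30 evenly spaced eval frames
# idx_render = str_to_frame('0', data_info)      # expands to len_evalloader-1 frames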
def extract_data_info(loader):
data_info = {}
dataset_list = loader.dataset.datasets
data_offset = [0]
impath = []
for dataset in dataset_list:
impath += dataset.imglist
data_offset.append(len(dataset.imglist))
data_info['offset'] = np.asarray(data_offset).cumsum()
data_info['impath'] = impath
data_info['len_evalloader'] = len(loader)
return data_info
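# Descriptive note (not original code): data_info['offset'] holds cumulative per-video
# image counts (starting at 0), 'impath' the flattened list of image paths, and
# 'len_evalloader' the length of the eval loader; str_to_frame() above consumes it.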
def mkdir_p(path):
try:
os.makedirs(path)
except OSError as exc: # Python >2.5
if exc.errno == errno.EEXIST and os.path.isdir(path):
pass
else:
raise
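# Note (not original code): on Python 3.2+ this is equivalent to
# os.makedirs(path, exist_ok=True); the errno check just tolerates a directory that
# already exists.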
def get_vertex_colors(model, mesh, frame_idx=0, view_dir=None):
# assign color to mesh verts according to current frame
xyz_query = torch.cuda.FloatTensor(mesh.vertices, device=model.device)
xyz_embedded = model.embedding_xyz(xyz_query) # (N, embed_xyz_channels)
# use the env code of the requested frame (frame_idx, default: first frame)
env_code = model.env_code(torch.Tensor([frame_idx]).long().to(model.device))
env_code = env_code.expand(xyz_query.shape[0],-1)
if view_dir is None:
# use view direction of (0,0,-1)
dir_query = torch.zeros_like(xyz_query)
dir_query[:,2] = -1
else:
dir_query = F.normalize(view_dir, 2,-1)
dir_embedded = model.embedding_dir(dir_query) # (N, embed_xyz_channels)
xyz_embedded = torch.cat([xyz_embedded, dir_embedded, env_code],-1)
#xyz_embedded = torch.cat([xyz_embedded, env_code],-1)
vis = model.nerf_coarse(xyz_embedded)[:,:3].cpu().numpy()
vis = np.clip(vis, 0, 1)
return vis
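# Usage sketch (illustrative; `model` must expose embedding_xyz/embedding_dir,
# env_code and nerf_coarse as in the banmo model):
# colors = get_vertex_colors(model, mesh_rest, frame_idx=0)   # (V,3) floats in [0,1]
# mesh_rest.visual.vertex_colors[:, :3] = colors * 255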
|
banmo-main
|
utils/io.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import pickle
import cv2
import numpy as np
import os
import torch
import torch.nn.functional as F
import pdb
import trimesh
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.structures import Boxes as create_boxes
import sys
try:
sys.path.insert(0,'./third_party/detectron2//projects/DensePose/')
from densepose import add_densepose_config
from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
from densepose.data.build import get_class_to_mesh_name_mapping
from densepose.modeling import build_densepose_embedder
from densepose.vis.densepose_outputs_vertex import get_xyz_vertex_embedding
from densepose.vis.base import Boxes, Image, MatrixVisualizer
except:
sys.path.insert(0,'./third_party/detectron2_old//projects/DensePose/')
from densepose import add_densepose_config
from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
from densepose.data.build import get_class_to_mesh_name_mapping
from densepose.modeling import build_densepose_embedder
from densepose.vis.densepose_outputs_vertex import get_xyz_vertex_embedding
from densepose.vis.base import Boxes, Image, MatrixVisualizer
# load model
def create_cse(config_fpath, weights_fpath):
cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file(config_fpath)
cfg.MODEL.WEIGHTS = weights_fpath
model = build_model(cfg) # returns a torch.nn.Module
DetectionCheckpointer(model).load(cfg.MODEL.WEIGHTS) # load a file, usually from cfg.MODEL.WEIGHTS
embedder = build_densepose_embedder(cfg)
class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
mesh_vertex_embeddings = {
mesh_name: embedder(mesh_name).cuda()
for mesh_name in class_to_mesh_name.values()
if embedder.has_embeddings(mesh_name)
}
return model, embedder, mesh_vertex_embeddings
def run_cse(model, embedder, mesh_vertex_embeddings, image, mask, mesh_name='smpl_27554'):
h,w,_=image.shape
# resize
max_size=1333
if h>w:
h_rszd, w_rszd = max_size, max_size*w//h
else:
h_rszd, w_rszd = max_size*h//w, max_size
image = cv2.resize(image, (w_rszd, h_rszd))
mask = cv2.resize(mask.astype(float), (w_rszd, h_rszd)).astype(np.uint8)
# pad
h_pad = (1+h_rszd//32)*32
w_pad = (1+w_rszd//32)*32
image_tmp = np.zeros((h_pad,w_pad,3)).astype(np.uint8)
mask_tmp = np.zeros((h_pad,w_pad)).astype(np.uint8)
image_tmp[:h_rszd,:w_rszd] = image
mask_tmp[:h_rszd,:w_rszd] = mask
image = image_tmp
mask = mask_tmp
image_raw = image.copy()
# preprocess image and box
indices = np.where(mask>0); xid = indices[1]; yid = indices[0]
center = ( (xid.max()+xid.min())//2, (yid.max()+yid.min())//2)
length = ( int((xid.max()-xid.min())*1.//2), int((yid.max()-yid.min())*1.//2))
bbox = [center[0]-length[0], center[1]-length[1],length[0]*2, length[1]*2]
bboxw = bbox[2]
bboxh = bbox[3]
bbox = [max(0,bbox[0]),
max(0,bbox[1]),
min(w_pad, bbox[0]+bbox[2]),
min(h_pad, bbox[1]+bbox[3])]
image=torch.Tensor(image).cuda().permute(2,0,1)[None]
image = torch.stack([(x - model.pixel_mean) / model.pixel_std for x in image])
pred_boxes = torch.Tensor([bbox]).cuda()
pred_boxes = create_boxes(pred_boxes)
# inference
model.eval()
with torch.no_grad():
features = model.backbone(image)
features = [features[f] for f in model.roi_heads.in_features]
features = [model.roi_heads.decoder(features)]
features_dp = model.roi_heads.densepose_pooler(features, [pred_boxes])
densepose_head_outputs = model.roi_heads.densepose_head(features_dp)
densepose_predictor_outputs = model.roi_heads.densepose_predictor(densepose_head_outputs)
coarse_segm_resized = densepose_predictor_outputs.coarse_segm[0]
embedding_resized = densepose_predictor_outputs.embedding[0]
# use input mask
x, y, xx, yy= bbox
mask_box = mask[y:yy, x:xx]
mask_box = torch.Tensor(mask_box).cuda()[None,None]
mask_box = F.interpolate(mask_box, coarse_segm_resized.shape[1:3], mode='bilinear')[0,0]>0
# find closest match (in the cropped/resized coordinate)
clst_verts_pad = torch.zeros(h_pad, w_pad).long().cuda()
clst_verts_box = torch.zeros(mask_box.shape, dtype=torch.long).cuda()
all_embeddings = embedding_resized[:, mask_box].t()
assign_mat = squared_euclidean_distance_matrix(all_embeddings, mesh_vertex_embeddings[mesh_name])
clst_verts_box[mask_box] = assign_mat.argmin(dim=1)
clst_verts_box = F.interpolate(clst_verts_box[None,None].float(), (yy-y,xx-x),mode='nearest')[0,0].long()
clst_verts_pad[y:yy,x:xx] = clst_verts_box
# output embedding
embedding = embedding_resized # size does not matter for an image code
embedding = embedding * mask_box.float()[None]
# embedding norm
embedding_norm = embedding.norm(2,0)
embedding_norm_pad = torch.zeros(h_rszd, w_rszd).cuda()
embedding_norm_box = F.interpolate(embedding_norm[None,None], (yy-y,xx-x),mode='bilinear')[0,0]
embedding_norm_pad[y:yy,x:xx] = embedding_norm_box
embedding_norm = embedding_norm_pad[:h_rszd, :w_rszd]
embedding_norm = F.interpolate(embedding_norm[None,None], (h,w),mode='bilinear')[0][0]
embedding = embedding.cpu().numpy()
embedding_norm = embedding_norm.cpu().numpy()
# visualization
embed_map = get_xyz_vertex_embedding(mesh_name, 'cuda')
vis = (embed_map[clst_verts_pad].clip(0, 1) * 255.0).cpu().numpy()
mask_visualizer = MatrixVisualizer(
inplace=False, cmap=cv2.COLORMAP_JET, val_scale=1.0, alpha=0.7
)
image_bgr = mask_visualizer.visualize(image_raw, mask, vis, [0,0,w_pad,h_pad])
image_bgr = image_bgr[:h_rszd,:w_rszd]
image_bgr = cv2.resize(image_bgr, (w,h))
clst_verts =clst_verts_pad[:h_rszd, :w_rszd]
clst_verts = F.interpolate(clst_verts[None,None].float(), (h,w),mode='nearest')[0,0].long()
clst_verts =clst_verts.cpu().numpy()
return clst_verts, image_bgr, embedding, embedding_norm, bbox
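# End-to-end usage sketch (config/weight/image paths are assumptions):
# model, embedder, mesh_vertex_embeddings = create_cse('configs/densepose_cse.yaml',
#                                                      'weights/model_final.pkl')
# image = cv2.imread('frame.jpg')              # HxWx3 uint8 image
# mask = cv2.imread('frame_mask.png', 0) > 0   # HxW foreground mask
# clst_verts, vis_bgr, embedding, embedding_norm, bbox = run_cse(
#     model, embedder, mesh_vertex_embeddings, image, mask, mesh_name='smpl_27554')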
|
banmo-main
|
utils/cselib.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
def label_colormap():
"""
colormap for visualizing bones
"""
return np.asarray(
[[155, 122, 157],
[ 45, 245, 50],
[ 71, 25, 64],
[231, 176, 35],
[125, 249, 245],
[ 32, 75, 253],
[241, 31, 111],
[218, 71, 252],
[248, 220, 197],
[ 34, 194, 198],
[108, 178, 96],
[ 33, 101, 119],
[125, 100, 26],
[209, 235, 102],
[116, 105, 241],
[100, 50, 147],
[193, 159, 222],
[ 95, 254, 138],
[197, 130, 75],
[144, 31, 211],
[ 46, 150, 26],
[242, 90, 174],
[179, 41, 38],
[118, 204, 174],
[145, 209, 38],
[188, 74, 125],
[ 95, 158, 210],
[237, 152, 130],
[ 53, 151, 157],
[ 69, 86, 193],
[ 60, 204, 122],
[251, 77, 58],
[174, 248, 170],
[ 28, 81, 36],
[252, 134, 243],
[ 62, 254, 193],
[ 68, 209, 254],
[ 44, 25, 184],
[131, 58, 80],
[188, 251, 27],
[156, 25, 132],
[248, 36, 225],
[ 95, 130, 63],
[222, 204, 244],
[185, 186, 134],
[160, 146, 44],
[244, 196, 89],
[ 39, 60, 87],
[134, 239, 87],
[ 25, 166, 97],
[ 79, 36, 229],
[ 45, 130, 216],
[177, 90, 200],
[ 86, 218, 30],
[ 97, 115, 165],
[159, 104, 99],
[168, 220, 219],
[134, 76, 180],
[ 31, 238, 157],
[ 79, 140, 253],
[124, 23, 27],
[245, 234, 46],
[188, 30, 174],
[253, 246, 148],
[228, 94, 92],]
)
|
banmo-main
|
utils/colors.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import os
import os.path as osp
import sys
sys.path.insert(0,'third_party')
import time
import pdb
import numpy as np
from absl import flags
import cv2
import time
import mcubes
from nnutils import banmo
import subprocess
from torch.utils.tensorboard import SummaryWriter
from kmeans_pytorch import kmeans
import torch.distributed as dist
import torch.nn.functional as F
import trimesh
import torchvision
from torch.autograd import Variable
from collections import defaultdict
from pytorch3d import transforms
from torch.nn.utils import clip_grad_norm_
from matplotlib.pyplot import cm
from nnutils.geom_utils import lbs, reinit_bones, warp_bw, warp_fw, vec_to_sim3,\
obj_to_cam, get_near_far, near_far_to_bound, \
compute_point_visibility, process_so3_seq, \
ood_check_cse, align_sfm_sim3, gauss_mlp_skinning, \
correct_bones
from nnutils.nerf import grab_xyz_weights
from ext_utils.flowlib import flow_to_image
from utils.io import mkdir_p
from nnutils.vis_utils import image_grid
from dataloader import frameloader
from utils.io import save_vid, draw_cams, extract_data_info, merge_dict,\
render_root_txt, save_bones, draw_cams_pair, get_vertex_colors
from utils.colors import label_colormap
class DataParallelPassthrough(torch.nn.parallel.DistributedDataParallel):
"""
for multi-gpu access
"""
def __getattr__(self, name):
try:
return super().__getattr__(name)
except AttributeError:
return getattr(self.module, name)
def __delattr__(self, name):
try:
return super().__delattr__(name)
except AttributeError:
return delattr(self.module, name)
class v2s_trainer():
def __init__(self, opts, is_eval=False):
self.opts = opts
self.is_eval=is_eval
self.local_rank = opts.local_rank
self.save_dir = os.path.join(opts.checkpoint_dir, opts.logname)
self.accu_steps = opts.accu_steps
# write logs
if opts.local_rank==0:
if not os.path.exists(self.save_dir): os.makedirs(self.save_dir)
log_file = os.path.join(self.save_dir, 'opts.log')
if not self.is_eval:
if os.path.exists(log_file):
os.remove(log_file)
opts.append_flags_into_file(log_file)
def define_model(self, data_info):
opts = self.opts
self.device = torch.device('cuda:{}'.format(opts.local_rank))
self.model = banmo.banmo(opts, data_info)
self.model.forward = self.model.forward_default
self.num_epochs = opts.num_epochs
# load model
if opts.model_path!='':
self.load_network(opts.model_path, is_eval=self.is_eval)
if self.is_eval:
self.model = self.model.to(self.device)
else:
# ddp
self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model)
self.model = self.model.to(self.device)
self.model = DataParallelPassthrough(
self.model,
device_ids=[opts.local_rank],
output_device=opts.local_rank,
find_unused_parameters=True,
)
return
def init_dataset(self):
opts = self.opts
opts_dict = {}
opts_dict['n_data_workers'] = opts.n_data_workers
opts_dict['batch_size'] = opts.batch_size
opts_dict['seqname'] = opts.seqname
opts_dict['img_size'] = opts.img_size
opts_dict['ngpu'] = opts.ngpu
opts_dict['local_rank'] = opts.local_rank
opts_dict['rtk_path'] = opts.rtk_path
opts_dict['preload']= False
opts_dict['accu_steps'] = opts.accu_steps
if self.is_eval and opts.rtk_path=='' and opts.model_path!='':
# automatically load cameras in the logdir
model_dir = opts.model_path.rsplit('/',1)[0]
cam_dir = '%s/init-cam/'%model_dir
if os.path.isdir(cam_dir):
opts_dict['rtk_path'] = cam_dir
self.dataloader = frameloader.data_loader(opts_dict)
if opts.lineload:
opts_dict['lineload'] = True
opts_dict['multiply'] = True # multiple samples in dataset
self.trainloader = frameloader.data_loader(opts_dict)
opts_dict['lineload'] = False
del opts_dict['multiply']
else:
opts_dict['multiply'] = True
self.trainloader = frameloader.data_loader(opts_dict)
del opts_dict['multiply']
opts_dict['img_size'] = opts.render_size
self.evalloader = frameloader.eval_loader(opts_dict)
# compute data offset
data_info = extract_data_info(self.evalloader)
return data_info
def init_training(self):
opts = self.opts
# set as module attributes since they do not change across gpus
self.model.module.final_steps = self.num_epochs * \
min(200,len(self.trainloader)) * opts.accu_steps
# ideally should be greater than 200 batches
params_nerf_coarse=[]
params_nerf_beta=[]
params_nerf_feat=[]
params_nerf_beta_feat=[]
params_nerf_fine=[]
params_nerf_unc=[]
params_nerf_flowbw=[]
params_nerf_skin=[]
params_nerf_vis=[]
params_nerf_root_rts=[]
params_nerf_body_rts=[]
params_root_code=[]
params_pose_code=[]
params_env_code=[]
params_vid_code=[]
params_bones=[]
params_skin_aux=[]
params_ks=[]
params_nerf_dp=[]
params_csenet=[]
for name,p in self.model.named_parameters():
if 'nerf_coarse' in name and 'beta' not in name:
params_nerf_coarse.append(p)
elif 'nerf_coarse' in name and 'beta' in name:
params_nerf_beta.append(p)
elif 'nerf_feat' in name and 'beta' not in name:
params_nerf_feat.append(p)
elif 'nerf_feat' in name and 'beta' in name:
params_nerf_beta_feat.append(p)
elif 'nerf_fine' in name:
params_nerf_fine.append(p)
elif 'nerf_unc' in name:
params_nerf_unc.append(p)
elif 'nerf_flowbw' in name or 'nerf_flowfw' in name:
params_nerf_flowbw.append(p)
elif 'nerf_skin' in name:
params_nerf_skin.append(p)
elif 'nerf_vis' in name:
params_nerf_vis.append(p)
elif 'nerf_root_rts' in name:
params_nerf_root_rts.append(p)
elif 'nerf_body_rts' in name:
params_nerf_body_rts.append(p)
elif 'root_code' in name:
params_root_code.append(p)
elif 'pose_code' in name or 'rest_pose_code' in name:
params_pose_code.append(p)
elif 'env_code' in name:
params_env_code.append(p)
elif 'vid_code' in name:
params_vid_code.append(p)
elif 'module.bones' == name:
params_bones.append(p)
elif 'module.skin_aux' == name:
params_skin_aux.append(p)
elif 'module.ks_param' == name:
params_ks.append(p)
elif 'nerf_dp' in name:
params_nerf_dp.append(p)
elif 'csenet' in name:
params_csenet.append(p)
else: continue
if opts.local_rank==0:
print('optimized params: %s'%name)
self.optimizer = torch.optim.AdamW(
[{'params': params_nerf_coarse},
{'params': params_nerf_beta},
{'params': params_nerf_feat},
{'params': params_nerf_beta_feat},
{'params': params_nerf_fine},
{'params': params_nerf_unc},
{'params': params_nerf_flowbw},
{'params': params_nerf_skin},
{'params': params_nerf_vis},
{'params': params_nerf_root_rts},
{'params': params_nerf_body_rts},
{'params': params_root_code},
{'params': params_pose_code},
{'params': params_env_code},
{'params': params_vid_code},
{'params': params_bones},
{'params': params_skin_aux},
{'params': params_ks},
{'params': params_nerf_dp},
{'params': params_csenet},
],
lr=opts.learning_rate,betas=(0.9, 0.999),weight_decay=1e-4)
if self.model.root_basis=='exp':
lr_nerf_root_rts = 10
elif self.model.root_basis=='cnn':
lr_nerf_root_rts = 0.2
elif self.model.root_basis=='mlp':
lr_nerf_root_rts = 1
elif self.model.root_basis=='expmlp':
lr_nerf_root_rts = 1
else: print('error'); exit()
self.scheduler = torch.optim.lr_scheduler.OneCycleLR(self.optimizer,\
[opts.learning_rate, # params_nerf_coarse
opts.learning_rate, # params_nerf_beta
opts.learning_rate, # params_nerf_feat
10*opts.learning_rate, # params_nerf_beta_feat
opts.learning_rate, # params_nerf_fine
opts.learning_rate, # params_nerf_unc
opts.learning_rate, # params_nerf_flowbw
opts.learning_rate, # params_nerf_skin
opts.learning_rate, # params_nerf_vis
lr_nerf_root_rts*opts.learning_rate, # params_nerf_root_rts
opts.learning_rate, # params_nerf_body_rts
lr_nerf_root_rts*opts.learning_rate, # params_root_code
opts.learning_rate, # params_pose_code
opts.learning_rate, # params_env_code
opts.learning_rate, # params_vid_code
opts.learning_rate, # params_bones
10*opts.learning_rate, # params_skin_aux
10*opts.learning_rate, # params_ks
opts.learning_rate, # params_nerf_dp
opts.learning_rate, # params_csenet
],
int(self.model.module.final_steps/self.accu_steps),
pct_start=2./self.num_epochs, # use 2 epochs to warm up
cycle_momentum=False,
anneal_strategy='linear',
final_div_factor=1./5, div_factor = 25,
)
def save_network(self, epoch_label, prefix=''):
if self.opts.local_rank==0:
param_path = '%s/%sparams_%s.pth'%(self.save_dir,prefix,epoch_label)
save_dict = self.model.state_dict()
torch.save(save_dict, param_path)
var_path = '%s/%svars_%s.npy'%(self.save_dir,prefix,epoch_label)
latest_vars = self.model.latest_vars.copy()
del latest_vars['fp_err']
del latest_vars['flo_err']
del latest_vars['sil_err']
del latest_vars['flo_err_hist']
np.save(var_path, latest_vars)
return
@staticmethod
def rm_module_prefix(states, prefix='module'):
new_dict = {}
for i in states.keys():
v = states[i]
if i[:len(prefix)] == prefix:
i = i[len(prefix)+1:]
new_dict[i] = v
return new_dict
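# Illustrative behaviour (not original code): keys starting with the prefix are
# stripped, all other keys pass through unchanged, e.g.
#   {'module.nerf_coarse.w': t, 'near_far': v} -> {'nerf_coarse.w': t, 'near_far': v}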
def load_network(self,model_path=None, is_eval=True, rm_prefix=True):
opts = self.opts
states = torch.load(model_path,map_location='cpu')
if rm_prefix: states = self.rm_module_prefix(states)
var_path = model_path.replace('params', 'vars').replace('.pth', '.npy')
latest_vars = np.load(var_path,allow_pickle=True)[()]
if is_eval:
# load variables
self.model.latest_vars = latest_vars
# if size mismatch, delete all related variables
if rm_prefix and states['near_far'].shape[0] != self.model.near_far.shape[0]:
print('!!!deleting video specific dicts due to size mismatch!!!')
self.del_key( states, 'near_far')
self.del_key( states, 'root_code.weight') # only applies to root_basis=mlp
self.del_key( states, 'pose_code.weight')
self.del_key( states, 'pose_code.basis_mlp.weight')
self.del_key( states, 'nerf_body_rts.0.weight')
self.del_key( states, 'nerf_body_rts.0.basis_mlp.weight')
self.del_key( states, 'nerf_root_rts.0.weight')
self.del_key( states, 'nerf_root_rts.root_code.weight')
self.del_key( states, 'nerf_root_rts.root_code.basis_mlp.weight')
self.del_key( states, 'nerf_root_rts.delta_rt.0.basis_mlp.weight')
self.del_key( states, 'nerf_root_rts.base_rt.se3')
self.del_key( states, 'nerf_root_rts.delta_rt.0.weight')
self.del_key( states, 'env_code.weight')
self.del_key( states, 'env_code.basis_mlp.weight')
if 'vid_code.weight' in states.keys():
self.del_key( states, 'vid_code.weight')
if 'ks_param' in states.keys():
self.del_key( states, 'ks_param')
# delete pose basis(backbones)
if not opts.keep_pose_basis:
del_key_list = []
for k in states.keys():
if 'nerf_body_rts' in k or 'nerf_root_rts' in k:
del_key_list.append(k)
for k in del_key_list:
print(k)
self.del_key( states, k)
if rm_prefix and opts.lbs and states['bones'].shape[0] != self.model.bones.shape[0]:
self.del_key(states, 'bones')
states = self.rm_module_prefix(states, prefix='nerf_skin')
states = self.rm_module_prefix(states, prefix='nerf_body_rts')
# load some variables
# this is important for volume matching
if latest_vars['obj_bound'].size==1:
latest_vars['obj_bound'] = latest_vars['obj_bound'] * np.ones(3)
self.model.latest_vars['obj_bound'] = latest_vars['obj_bound']
# load nerf_coarse, nerf_bone/root (not code), nerf_vis, nerf_feat, nerf_unc
#TODO loading will reset the batch stats of a pretrained cse model;
# to keep them, we manually copy them into states
if opts.ft_cse and \
'csenet.net.backbone.fpn_lateral2.weight' not in states.keys():
self.add_cse_to_states(self.model, states)
self.model.load_state_dict(states, strict=False)
return
@staticmethod
def add_cse_to_states(model, states):
states_init = model.state_dict()
for k in states_init.keys():
v = states_init[k]
if 'csenet' in k:
states[k] = v
def eval_cam(self, idx_render=None):
"""
idx_render: list of frame indices to render
"""
opts = self.opts
with torch.no_grad():
self.model.eval()
# load data
for dataset in self.evalloader.dataset.datasets:
dataset.load_pair = False
batch = []
for i in idx_render:
batch.append( self.evalloader.dataset[i] )
batch = self.evalloader.collate_fn(batch)
for dataset in self.evalloader.dataset.datasets:
dataset.load_pair = True
#TODO can be further accelerated
self.model.convert_batch_input(batch)
if opts.unc_filter:
# process densepose features
valid_list, error_list = ood_check_cse(self.model.dp_feats,
self.model.dp_embed,
self.model.dps.long())
valid_list = valid_list.cpu().numpy()
error_list = error_list.cpu().numpy()
else:
valid_list = np.ones( len(idx_render))
error_list = np.zeros(len(idx_render))
self.model.convert_root_pose()
rtk = self.model.rtk
kaug = self.model.kaug
#TODO may need to recompute after removing the invalid predictions
# need to keep this to compute near-far planes
self.model.save_latest_vars()
# extract mesh sequences
aux_seq = {
'is_valid':[],
'err_valid':[],
'rtk':[],
'kaug':[],
'impath':[],
'masks':[],
}
for idx,_ in enumerate(idx_render):
frameid=self.model.frameid[idx]
if opts.local_rank==0:
print('extracting frame %d'%(frameid.cpu().numpy()))
aux_seq['rtk'].append(rtk[idx].cpu().numpy())
aux_seq['kaug'].append(kaug[idx].cpu().numpy())
aux_seq['masks'].append(self.model.masks[idx].cpu().numpy())
aux_seq['is_valid'].append(valid_list[idx])
aux_seq['err_valid'].append(error_list[idx])
impath = self.model.impath[frameid.long()]
aux_seq['impath'].append(impath)
return aux_seq
def eval(self, idx_render=None, dynamic_mesh=False):
"""
idx_render: list of frame indices to render
dynamic_mesh: whether to extract the posed (dynamic) shape instead of the canonical shape
"""
opts = self.opts
with torch.no_grad():
self.model.eval()
# run marching cubes on canonical shape
mesh_dict_rest = self.extract_mesh(self.model, opts.chunk, \
opts.sample_grid3d, opts.mc_threshold)
# choose a grid of images or the whole video
if idx_render is None: # render 9 frames
idx_render = np.linspace(0,len(self.evalloader)-1, 9, dtype=int)
# render
chunk=opts.rnd_frame_chunk
rendered_seq = defaultdict(list)
aux_seq = {'mesh_rest': mesh_dict_rest['mesh'],
'mesh':[],
'rtk':[],
'impath':[],
'bone':[],}
for j in range(0, len(idx_render), chunk):
batch = []
idx_chunk = idx_render[j:j+chunk]
for i in idx_chunk:
batch.append( self.evalloader.dataset[i] )
batch = self.evalloader.collate_fn(batch)
rendered = self.render_vid(self.model, batch)
for k, v in rendered.items():
rendered_seq[k] += [v]
hbs=len(idx_chunk)
sil_rszd = F.interpolate(self.model.masks[:hbs,None],
(opts.render_size, opts.render_size))[:,0,...,None]
rendered_seq['img'] += [self.model.imgs.permute(0,2,3,1)[:hbs]]
rendered_seq['sil'] += [self.model.masks[...,None] [:hbs]]
rendered_seq['flo'] += [self.model.flow.permute(0,2,3,1)[:hbs]]
rendered_seq['dpc'] += [self.model.dp_vis[self.model.dps.long()][:hbs]]
rendered_seq['occ'] += [self.model.occ[...,None] [:hbs]]
rendered_seq['feat']+= [self.model.dp_feats.std(1)[...,None][:hbs]]
rendered_seq['flo_coarse'][-1] *= sil_rszd
rendered_seq['img_loss_samp'][-1] *= sil_rszd
if 'frame_cyc_dis' in rendered_seq.keys() and \
len(rendered_seq['frame_cyc_dis'])>0:
rendered_seq['frame_cyc_dis'][-1] *= 255/rendered_seq['frame_cyc_dis'][-1].max()
rendered_seq['frame_rigloss'][-1] *= 255/rendered_seq['frame_rigloss'][-1].max()
if opts.use_embed:
rendered_seq['pts_pred'][-1] *= sil_rszd
rendered_seq['pts_exp'] [-1] *= rendered_seq['sil_coarse'][-1]
rendered_seq['feat_err'][-1] *= sil_rszd
rendered_seq['feat_err'][-1] *= 255/rendered_seq['feat_err'][-1].max()
if opts.use_proj:
rendered_seq['proj_err'][-1] *= sil_rszd
rendered_seq['proj_err'][-1] *= 255/rendered_seq['proj_err'][-1].max()
if opts.use_unc:
rendered_seq['unc_pred'][-1] -= rendered_seq['unc_pred'][-1].min()
rendered_seq['unc_pred'][-1] *= 255/rendered_seq['unc_pred'][-1].max()
# extract mesh sequences
for idx in range(len(idx_chunk)):
frameid=self.model.frameid[idx].long()
embedid=self.model.embedid[idx].long()
print('extracting frame %d'%(frameid.cpu().numpy()))
# run marching cubes
if dynamic_mesh:
if not opts.queryfw:
mesh_dict_rest=None
mesh_dict = self.extract_mesh(self.model,opts.chunk,
opts.sample_grid3d, opts.mc_threshold,
embedid=embedid, mesh_dict_in=mesh_dict_rest)
mesh=mesh_dict['mesh']
if mesh_dict_rest is not None and opts.ce_color:
mesh.visual.vertex_colors = mesh_dict_rest['mesh'].\
visual.vertex_colors # assign rest surface color
else:
# get view direction
obj_center = self.model.rtk[idx][:3,3:4]
cam_center = -self.model.rtk[idx][:3,:3].T.matmul(obj_center)[:,0]
view_dir = torch.cuda.FloatTensor(mesh.vertices, device=self.device) \
- cam_center[None]
vis = get_vertex_colors(self.model, mesh_dict_rest['mesh'],
frame_idx=idx, view_dir=view_dir)
mesh.visual.vertex_colors[:,:3] = vis*255
# save bones
if 'bones' in mesh_dict.keys():
bone = mesh_dict['bones'][0].cpu().numpy()
aux_seq['bone'].append(bone)
else:
mesh=mesh_dict_rest['mesh']
aux_seq['mesh'].append(mesh)
# save cams
aux_seq['rtk'].append(self.model.rtk[idx].cpu().numpy())
# save image list
impath = self.model.impath[frameid]
aux_seq['impath'].append(impath)
# save canonical mesh and extract skinning weights
mesh_rest = aux_seq['mesh_rest']
if len(mesh_rest.vertices)>100:
self.model.latest_vars['mesh_rest'] = mesh_rest
if opts.lbs:
bones_rst = self.model.bones
bones_rst,_ = correct_bones(self.model, bones_rst)
# compute skinning color
if mesh_rest.vertices.shape[0]>100:
rest_verts = torch.Tensor(mesh_rest.vertices).to(self.device)
nerf_skin = self.model.nerf_skin if opts.nerf_skin else None
rest_pose_code = self.model.rest_pose_code(torch.Tensor([0])\
.long().to(self.device))
skins = gauss_mlp_skinning(rest_verts[None],
self.model.embedding_xyz,
bones_rst, rest_pose_code,
nerf_skin, skin_aux=self.model.skin_aux)[0]
skins = skins.cpu().numpy()
num_bones = skins.shape[-1]
colormap = label_colormap()
# TODO use a larger color map
colormap = np.repeat(colormap[None],4,axis=0).reshape(-1,3)
colormap = colormap[:num_bones]
colormap = (colormap[None] * skins[...,None]).sum(1)
mesh_rest_skin = mesh_rest.copy()
mesh_rest_skin.visual.vertex_colors = colormap
aux_seq['mesh_rest_skin'] = mesh_rest_skin
aux_seq['bone_rest'] = bones_rst.cpu().numpy()
# draw camera trajectory
suffix_id=0
if hasattr(self.model, 'epoch'):
suffix_id = self.model.epoch
if opts.local_rank==0:
mesh_cam = draw_cams(aux_seq['rtk'])
mesh_cam.export('%s/mesh_cam-%02d.obj'%(self.save_dir,suffix_id))
mesh_path = '%s/mesh_rest-%02d.obj'%(self.save_dir,suffix_id)
mesh_rest.export(mesh_path)
if opts.lbs:
bone_rest = aux_seq['bone_rest']
bone_path = '%s/bone_rest-%02d.obj'%(self.save_dir,suffix_id)
save_bones(bone_rest, 0.1, bone_path)
# save images
for k,v in rendered_seq.items():
rendered_seq[k] = torch.cat(rendered_seq[k],0)
##TODO
#if opts.local_rank==0:
# print('saving %s to gif'%k)
# is_flow = self.isflow(k)
# upsample_frame = min(30,len(rendered_seq[k]))
# save_vid('%s/%s'%(self.save_dir,k),
# rendered_seq[k].cpu().numpy(),
# suffix='.gif', upsample_frame=upsample_frame,
# is_flow=is_flow)
return rendered_seq, aux_seq
def train(self):
opts = self.opts
if opts.local_rank==0:
log = SummaryWriter('%s/%s'%(opts.checkpoint_dir,opts.logname), comment=opts.logname)
else: log=None
self.model.module.total_steps = 0
self.model.module.progress = 0
torch.manual_seed(8) # do it again
torch.cuda.manual_seed(1)
# disable bones before warmup epochs are finished
if opts.lbs:
self.model.num_bone_used = 0
del self.model.module.nerf_models['bones']
if opts.lbs and opts.nerf_skin:
del self.model.module.nerf_models['nerf_skin']
# warmup shape
if opts.warmup_shape_ep>0:
self.warmup_shape(log)
# CNN pose warmup or load CNN
if opts.warmup_pose_ep>0 or opts.pose_cnn_path!='':
self.warmup_pose(log, pose_cnn_path=opts.pose_cnn_path)
else:
# save cameras to latest vars and file
if opts.use_rtk_file:
self.model.module.use_cam=True
self.extract_cams(self.dataloader)
self.model.module.use_cam=opts.use_cam
else:
self.extract_cams(self.dataloader)
#TODO train mlp
if opts.warmup_rootmlp:
# set se3 directly
rmat = torch.Tensor(self.model.latest_vars['rtk'][:,:3,:3])
quat = transforms.matrix_to_quaternion(rmat).to(self.device)
self.model.module.nerf_root_rts.base_rt.se3.data[:,3:] = quat
# clear buffers for pytorch1.10+
try: self.model._assign_modules_buffers()
except: pass
# set near-far plane
if opts.model_path=='':
self.reset_nf()
# reset idk in latest_vars
self.model.module.latest_vars['idk'][:] = 0.
#TODO save loaded wts of posecs
if opts.freeze_coarse:
self.model.module.shape_xyz_wt = \
grab_xyz_weights(self.model.module.nerf_coarse, clone=True)
self.model.module.skin_xyz_wt = \
grab_xyz_weights(self.model.module.nerf_skin, clone=True)
self.model.module.feat_xyz_wt = \
grab_xyz_weights(self.model.module.nerf_feat, clone=True)
#TODO reset beta
if opts.reset_beta:
self.model.module.nerf_coarse.beta.data[:] = 0.1
# start training
for epoch in range(0, self.num_epochs):
self.model.epoch = epoch
# evaluation
torch.cuda.empty_cache()
self.model.module.img_size = opts.render_size
rendered_seq, aux_seq = self.eval()
self.model.module.img_size = opts.img_size
if epoch==0: self.save_network('0') # to save some cameras
if opts.local_rank==0: self.add_image_grid(rendered_seq, log, epoch)
self.reset_hparams(epoch)
torch.cuda.empty_cache()
## TODO hard-coded
#if opts.freeze_proj:
# if self.model.module.progress<0.8:
# #opts.nsample=64
# opts.ndepth=2
# else:
# #opts.nsample = nsample
# opts.ndepth = self.model.module.ndepth_bk
self.train_one_epoch(epoch, log)
print('saving the model at the end of epoch {:d}, iters {:d}'.\
format(epoch, self.model.module.total_steps))
self.save_network('latest')
self.save_network(str(epoch+1))
@staticmethod
def save_cams(opts,aux_seq, save_prefix, latest_vars,datasets, evalsets, obj_scale,
trainloader=None, unc_filter=True):
"""
save cameras to dir and modify dataset
"""
mkdir_p(save_prefix)
dataset_dict={dataset.imglist[0].split('/')[-2]:dataset for dataset in datasets}
evalset_dict={dataset.imglist[0].split('/')[-2]:dataset for dataset in evalsets}
if trainloader is not None:
line_dict={dataset.imglist[0].split('/')[-2]:dataset for dataset in trainloader}
length = len(aux_seq['impath'])
valid_ids = aux_seq['is_valid']
idx_combine = 0
for i in range(length):
impath = aux_seq['impath'][i]
seqname = impath.split('/')[-2]
rtk = aux_seq['rtk'][i]
if unc_filter:
# in the same sequence, find the closest valid frame and replace it
seq_idx = np.asarray([seqname == i.split('/')[-2] \
for i in aux_seq['impath']])
valid_ids_seq = np.where(valid_ids * seq_idx)[0]
if opts.local_rank==0 and i==0:
print('%s: %d frames are valid'%(seqname, len(valid_ids_seq)))
if len(valid_ids_seq)>0 and not aux_seq['is_valid'][i]:
closest_valid_idx = valid_ids_seq[np.abs(i-valid_ids_seq).argmin()]
rtk[:3,:3] = aux_seq['rtk'][closest_valid_idx][:3,:3]
# rescale translation according to input near-far plane
rtk[:3,3] = rtk[:3,3]*obj_scale
rtklist = dataset_dict[seqname].rtklist
idx = int(impath.split('/')[-1].split('.')[-2])
save_path = '%s/%s-%05d.txt'%(save_prefix, seqname, idx)
np.savetxt(save_path, rtk)
rtklist[idx] = save_path
evalset_dict[seqname].rtklist[idx] = save_path
if trainloader is not None:
line_dict[seqname].rtklist[idx] = save_path
# save to rt_raw
latest_vars['rt_raw'][idx_combine] = rtk[:3,:4]
latest_vars['rtk'][idx_combine,:3,:3] = rtk[:3,:3]
if idx==len(rtklist)-2:
# to cover the last frame
save_path = '%s/%s-%05d.txt'%(save_prefix, seqname, idx+1)
if opts.local_rank==0: print('writing cam %s'%save_path)
np.savetxt(save_path, rtk)
rtklist[idx+1] = save_path
evalset_dict[seqname].rtklist[idx+1] = save_path
if trainloader is not None:
line_dict[seqname].rtklist[idx+1] = save_path
idx_combine += 1
latest_vars['rt_raw'][idx_combine] = rtk[:3,:4]
latest_vars['rtk'][idx_combine,:3,:3] = rtk[:3,:3]
idx_combine += 1
def extract_cams(self, full_loader):
# store cameras
opts = self.opts
idx_render = range(len(self.evalloader))
chunk = 50
aux_seq = []
for i in range(0, len(idx_render), chunk):
aux_seq.append(self.eval_cam(idx_render=idx_render[i:i+chunk]))
aux_seq = merge_dict(aux_seq)
aux_seq['rtk'] = np.asarray(aux_seq['rtk'])
aux_seq['kaug'] = np.asarray(aux_seq['kaug'])
aux_seq['masks'] = np.asarray(aux_seq['masks'])
aux_seq['is_valid'] = np.asarray(aux_seq['is_valid'])
aux_seq['err_valid'] = np.asarray(aux_seq['err_valid'])
save_prefix = '%s/init-cam'%(self.save_dir)
trainloader=self.trainloader.dataset.datasets
self.save_cams(opts,aux_seq, save_prefix,
self.model.module.latest_vars,
full_loader.dataset.datasets,
self.evalloader.dataset.datasets,
self.model.obj_scale, trainloader=trainloader,
unc_filter=opts.unc_filter)
dist.barrier() # wait until all processes have finished
if opts.local_rank==0:
# draw camera trajectory
for dataset in full_loader.dataset.datasets:
seqname = dataset.imglist[0].split('/')[-2]
render_root_txt('%s/%s-'%(save_prefix,seqname), 0)
def reset_nf(self):
opts = self.opts
# save near-far plane
shape_verts = self.model.dp_verts_unit / 3 * self.model.near_far.mean()
shape_verts = shape_verts * 1.2
# save object bound if first stage
if opts.model_path=='' and opts.bound_factor>0:
shape_verts = shape_verts*opts.bound_factor
self.model.module.latest_vars['obj_bound'] = \
shape_verts.abs().max(0)[0].detach().cpu().numpy()
if self.model.near_far[:,0].sum()==0: # if no valid nf plane loaded
self.model.near_far.data = get_near_far(self.model.near_far.data,
self.model.latest_vars,
pts=shape_verts.detach().cpu().numpy())
save_path = '%s/init-nf.txt'%(self.save_dir)
save_nf = self.model.near_far.data.cpu().numpy() * self.model.obj_scale
np.savetxt(save_path, save_nf)
def warmup_shape(self, log):
opts = self.opts
# force using warmup forward, dataloader, cnn root
self.model.module.forward = self.model.module.forward_warmup_shape
full_loader = self.trainloader # store original loader
self.trainloader = range(200)
self.num_epochs = opts.warmup_shape_ep
# training
self.init_training()
for epoch in range(0, opts.warmup_shape_ep):
self.model.epoch = epoch
self.train_one_epoch(epoch, log, warmup=True)
self.save_network(str(epoch+1), 'mlp-')
# restore dataloader, rts, forward function
self.model.module.forward = self.model.module.forward_default
self.trainloader = full_loader
self.num_epochs = opts.num_epochs
# start from low learning rate again
self.init_training()
self.model.module.total_steps = 0
self.model.module.progress = 0.
def warmup_pose(self, log, pose_cnn_path):
opts = self.opts
# force using warmup forward, dataloader, cnn root
self.model.module.root_basis = 'cnn'
self.model.module.use_cam = False
self.model.module.forward = self.model.module.forward_warmup
full_loader = self.dataloader # store original loader
self.dataloader = range(200)
original_rp = self.model.module.nerf_root_rts
self.model.module.nerf_root_rts = self.model.module.dp_root_rts
del self.model.module.dp_root_rts
self.num_epochs = opts.warmup_pose_ep
self.model.module.is_warmup_pose=True
if pose_cnn_path=='':
# training
self.init_training()
for epoch in range(0, opts.warmup_pose_ep):
self.model.epoch = epoch
self.train_one_epoch(epoch, log, warmup=True)
self.save_network(str(epoch+1), 'cnn-')
# eval
#_,_ = self.model.forward_warmup(None)
# rendered_seq = self.model.warmup_rendered
# if opts.local_rank==0: self.add_image_grid(rendered_seq, log, epoch)
else:
pose_states = torch.load(opts.pose_cnn_path, map_location='cpu')
pose_states = self.rm_module_prefix(pose_states,
prefix='module.nerf_root_rts')
self.model.module.nerf_root_rts.load_state_dict(pose_states,
strict=False)
# extract camera and near far planes
self.extract_cams(full_loader)
# restore dataloader, rts, forward function
self.model.module.root_basis=opts.root_basis
self.model.module.use_cam = opts.use_cam
self.model.module.forward = self.model.module.forward_default
self.dataloader = full_loader
del self.model.module.nerf_root_rts
self.model.module.nerf_root_rts = original_rp
self.num_epochs = opts.num_epochs
self.model.module.is_warmup_pose=False
# start from low learning rate again
self.init_training()
self.model.module.total_steps = 0
self.model.module.progress = 0.
def train_one_epoch(self, epoch, log, warmup=False):
"""
training loop for one epoch
"""
opts = self.opts
self.model.train()
dataloader = self.trainloader
if not warmup: dataloader.sampler.set_epoch(epoch) # necessary for shuffling
for i, batch in enumerate(dataloader):
if i==200*opts.accu_steps:
break
if opts.debug:
if 'start_time' in locals().keys():
torch.cuda.synchronize()
print('load time:%.2f'%(time.time()-start_time))
if not warmup:
self.model.module.progress = float(self.model.total_steps) /\
self.model.final_steps
self.select_loss_indicator(i)
self.update_root_indicator(i)
self.update_body_indicator(i)
self.update_shape_indicator(i)
self.update_cvf_indicator(i)
# rtk_all = self.model.module.compute_rts()
# self.model.module.rtk_all = rtk_all.clone()
#
# # change near-far plane for all views
# if self.model.module.progress>=opts.nf_reset:
# rtk_all = rtk_all.detach().cpu().numpy()
# valid_rts = self.model.module.latest_vars['idk'].astype(bool)
# self.model.module.latest_vars['rtk'][valid_rts,:3] = rtk_all[valid_rts]
# self.model.module.near_far.data = get_near_far(
# self.model.module.near_far.data,
# self.model.module.latest_vars)
#
# self.optimizer.zero_grad()
total_loss,aux_out = self.model(batch)
total_loss = total_loss/self.accu_steps
if opts.debug:
if 'start_time' in locals().keys():
torch.cuda.synchronize()
print('forward time:%.2f'%(time.time()-start_time))
total_loss.mean().backward()
if opts.debug:
if 'start_time' in locals().keys():
torch.cuda.synchronize()
print('forward back time:%.2f'%(time.time()-start_time))
if (i+1)%self.accu_steps == 0:
self.clip_grad(aux_out)
self.optimizer.step()
self.scheduler.step()
self.optimizer.zero_grad()
if aux_out['nerf_root_rts_g']>1*opts.clip_scale and \
self.model.total_steps>200*self.accu_steps:
latest_path = '%s/params_latest.pth'%(self.save_dir)
self.load_network(latest_path, is_eval=False, rm_prefix=False)
for i,param_group in enumerate(self.optimizer.param_groups):
aux_out['lr_%02d'%i] = param_group['lr']
self.model.module.total_steps += 1
self.model.module.counter_frz_rebone -= 1./self.model.final_steps
aux_out['counter_frz_rebone'] = self.model.module.counter_frz_rebone
if opts.local_rank==0:
self.save_logs(log, aux_out, self.model.module.total_steps,
epoch)
if opts.debug:
if 'start_time' in locals().keys():
torch.cuda.synchronize()
print('total step time:%.2f'%(time.time()-start_time))
torch.cuda.synchronize()
start_time = time.time()
def update_cvf_indicator(self, i):
"""
whether to update canonical volume features
0: update all
1: freeze
"""
opts = self.opts
# during kp reprojection optimization
if (opts.freeze_proj and self.model.module.progress >= opts.proj_start and \
self.model.module.progress < (opts.proj_start+opts.proj_end)):
self.model.module.cvf_update = 1
else:
self.model.module.cvf_update = 0
# freeze shape after rebone
if self.model.module.counter_frz_rebone > 0:
self.model.module.cvf_update = 1
if opts.freeze_cvf:
self.model.module.cvf_update = 1
def update_shape_indicator(self, i):
"""
whether to update shape
0: update all
1: freeze shape
"""
opts = self.opts
# incremental optimization
# or during kp reprojection optimization
if (opts.model_path!='' and \
self.model.module.progress < opts.warmup_steps)\
or (opts.freeze_proj and self.model.module.progress >= opts.proj_start and \
self.model.module.progress <(opts.proj_start + opts.proj_end)):
self.model.module.shape_update = 1
else:
self.model.module.shape_update = 0
# freeze shape after rebone
if self.model.module.counter_frz_rebone > 0:
self.model.module.shape_update = 1
if opts.freeze_shape:
self.model.module.shape_update = 1
def update_root_indicator(self, i):
"""
whether to update root pose
1: update
0: freeze
"""
opts = self.opts
if (opts.freeze_proj and \
opts.root_stab and \
self.model.module.progress >=(opts.frzroot_start) and \
self.model.module.progress <=(opts.proj_start + opts.proj_end+0.01))\
: # to stabilize
self.model.module.root_update = 0
else:
self.model.module.root_update = 1
# freeze shape after rebone
if self.model.module.counter_frz_rebone > 0:
self.model.module.root_update = 0
if opts.freeze_root: # to stabilize
self.model.module.root_update = 0
def update_body_indicator(self, i):
"""
whether to update body pose
1: update
0: freeze
"""
opts = self.opts
if opts.freeze_proj and \
self.model.module.progress <=opts.frzbody_end:
self.model.module.body_update = 0
else:
self.model.module.body_update = 1
def select_loss_indicator(self, i):
"""
0: flo
1: flo/sil/rgb
"""
opts = self.opts
if not opts.root_opt or \
self.model.module.progress > (opts.warmup_steps):
self.model.module.loss_select = 1
elif i%2 == 0:
self.model.module.loss_select = 0
else:
self.model.module.loss_select = 1
#self.model.module.loss_select=1
def reset_hparams(self, epoch):
"""
reset hyper-parameters based on current geometry / cameras
"""
opts = self.opts
mesh_rest = self.model.latest_vars['mesh_rest']
# reset object bound, for feature matching
if epoch>int(self.num_epochs*(opts.bound_reset)):
if mesh_rest.vertices.shape[0]>100:
self.model.latest_vars['obj_bound'] = 1.2*np.abs(mesh_rest.vertices).max(0)
# reinit bones based on extracted surface
# only reinit for the initialization phase
if opts.lbs and opts.model_path=='' and \
(epoch==int(self.num_epochs*opts.reinit_bone_steps) or\
epoch==0 or\
epoch==int(self.num_epochs*opts.warmup_steps)//2):
reinit_bones(self.model.module, mesh_rest, opts.num_bones)
self.init_training() # add new params to optimizer
if epoch>0:
# freeze weights of root pose in the following 1% iters
self.model.module.counter_frz_rebone = 0.01
#reset error stats
self.model.module.latest_vars['fp_err'] [:]=0
self.model.module.latest_vars['flo_err'] [:]=0
self.model.module.latest_vars['sil_err'] [:]=0
self.model.module.latest_vars['flo_err_hist'][:]=0
# need to add bones back at 2nd opt
if opts.model_path!='':
self.model.module.nerf_models['bones'] = self.model.module.bones
# add nerf-skin when the shape is good
if opts.lbs and opts.nerf_skin and \
epoch==int(self.num_epochs*opts.dskin_steps):
self.model.module.nerf_models['nerf_skin'] = self.model.module.nerf_skin
self.broadcast()
def broadcast(self):
"""
broadcast variables to the other processes
"""
dist.barrier()
if self.opts.lbs:
dist.broadcast_object_list(
[self.model.module.num_bones,
self.model.module.num_bone_used,],
0)
dist.broadcast(self.model.module.bones,0)
dist.broadcast(self.model.module.nerf_body_rts[1].rgb[0].weight, 0)
dist.broadcast(self.model.module.nerf_body_rts[1].rgb[0].bias, 0)
dist.broadcast(self.model.module.near_far,0)
def clip_grad(self, aux_out):
"""
gradient clipping
"""
is_invalid_grad=False
grad_nerf_coarse=[]
grad_nerf_beta=[]
grad_nerf_feat=[]
grad_nerf_beta_feat=[]
grad_nerf_fine=[]
grad_nerf_unc=[]
grad_nerf_flowbw=[]
grad_nerf_skin=[]
grad_nerf_vis=[]
grad_nerf_root_rts=[]
grad_nerf_body_rts=[]
grad_root_code=[]
grad_pose_code=[]
grad_env_code=[]
grad_vid_code=[]
grad_bones=[]
grad_skin_aux=[]
grad_ks=[]
grad_nerf_dp=[]
grad_csenet=[]
for name,p in self.model.named_parameters():
try:
pgrad_nan = p.grad.isnan()
if pgrad_nan.sum()>0:
print(name)
is_invalid_grad=True
except: pass
if 'nerf_coarse' in name and 'beta' not in name:
grad_nerf_coarse.append(p)
elif 'nerf_coarse' in name and 'beta' in name:
grad_nerf_beta.append(p)
elif 'nerf_feat' in name and 'beta' not in name:
grad_nerf_feat.append(p)
elif 'nerf_feat' in name and 'beta' in name:
grad_nerf_beta_feat.append(p)
elif 'nerf_fine' in name:
grad_nerf_fine.append(p)
elif 'nerf_unc' in name:
grad_nerf_unc.append(p)
elif 'nerf_flowbw' in name or 'nerf_flowfw' in name:
grad_nerf_flowbw.append(p)
elif 'nerf_skin' in name:
grad_nerf_skin.append(p)
elif 'nerf_vis' in name:
grad_nerf_vis.append(p)
elif 'nerf_root_rts' in name:
grad_nerf_root_rts.append(p)
elif 'nerf_body_rts' in name:
grad_nerf_body_rts.append(p)
elif 'root_code' in name:
grad_root_code.append(p)
elif 'pose_code' in name or 'rest_pose_code' in name:
grad_pose_code.append(p)
elif 'env_code' in name:
grad_env_code.append(p)
elif 'vid_code' in name:
grad_vid_code.append(p)
elif 'module.bones' == name:
grad_bones.append(p)
elif 'module.skin_aux' == name:
grad_skin_aux.append(p)
elif 'module.ks_param' == name:
grad_ks.append(p)
elif 'nerf_dp' in name:
grad_nerf_dp.append(p)
elif 'csenet' in name:
grad_csenet.append(p)
else: continue
# freeze root pose when using re-projection loss only
if self.model.module.root_update == 0:
self.zero_grad_list(grad_root_code)
self.zero_grad_list(grad_nerf_root_rts)
if self.model.module.body_update == 0:
self.zero_grad_list(grad_pose_code)
self.zero_grad_list(grad_nerf_body_rts)
if self.opts.freeze_body_mlp:
self.zero_grad_list(grad_nerf_body_rts)
if self.model.module.shape_update == 1:
self.zero_grad_list(grad_nerf_coarse)
self.zero_grad_list(grad_nerf_beta)
self.zero_grad_list(grad_nerf_vis)
#TODO add skinning
self.zero_grad_list(grad_bones)
self.zero_grad_list(grad_nerf_skin)
self.zero_grad_list(grad_skin_aux)
if self.model.module.cvf_update == 1:
self.zero_grad_list(grad_nerf_feat)
self.zero_grad_list(grad_nerf_beta_feat)
self.zero_grad_list(grad_csenet)
if self.opts.freeze_coarse:
# freeze shape
# this includes nerf_coarse and, optionally, nerf_skin
grad_coarse_mlp = []
grad_coarse_mlp += self.find_nerf_coarse(\
self.model.module.nerf_coarse)
grad_coarse_mlp += self.find_nerf_coarse(\
self.model.module.nerf_skin)
grad_coarse_mlp += self.find_nerf_coarse(\
self.model.module.nerf_feat)
self.zero_grad_list(grad_coarse_mlp)
#self.zero_grad_list(grad_nerf_coarse) # freeze shape
# freeze skinning
self.zero_grad_list(grad_bones)
self.zero_grad_list(grad_skin_aux)
#self.zero_grad_list(grad_nerf_skin) # freeze fine shape
## freeze pose mlp
#self.zero_grad_list(grad_nerf_body_rts)
# add vis
self.zero_grad_list(grad_nerf_vis)
#print(self.model.module.nerf_coarse.xyz_encoding_1[0].weight[0,:])
clip_scale=self.opts.clip_scale
#TODO don't clip root pose
aux_out['nerf_coarse_g'] = clip_grad_norm_(grad_nerf_coarse, 1*clip_scale)
aux_out['nerf_beta_g'] = clip_grad_norm_(grad_nerf_beta, 1*clip_scale)
aux_out['nerf_feat_g'] = clip_grad_norm_(grad_nerf_feat, .1*clip_scale)
aux_out['nerf_beta_feat_g']= clip_grad_norm_(grad_nerf_beta_feat,.1*clip_scale)
aux_out['nerf_fine_g'] = clip_grad_norm_(grad_nerf_fine, .1*clip_scale)
aux_out['nerf_unc_g'] = clip_grad_norm_(grad_nerf_unc, .1*clip_scale)
aux_out['nerf_flowbw_g'] = clip_grad_norm_(grad_nerf_flowbw, .1*clip_scale)
aux_out['nerf_skin_g'] = clip_grad_norm_(grad_nerf_skin, .1*clip_scale)
aux_out['nerf_vis_g'] = clip_grad_norm_(grad_nerf_vis, .1*clip_scale)
aux_out['nerf_root_rts_g'] = clip_grad_norm_(grad_nerf_root_rts,100*clip_scale)
aux_out['nerf_body_rts_g'] = clip_grad_norm_(grad_nerf_body_rts,100*clip_scale)
aux_out['root_code_g']= clip_grad_norm_(grad_root_code, .1*clip_scale)
aux_out['pose_code_g']= clip_grad_norm_(grad_pose_code, 100*clip_scale)
aux_out['env_code_g'] = clip_grad_norm_(grad_env_code, .1*clip_scale)
aux_out['vid_code_g'] = clip_grad_norm_(grad_vid_code, .1*clip_scale)
aux_out['bones_g'] = clip_grad_norm_(grad_bones, 1*clip_scale)
aux_out['skin_aux_g'] = clip_grad_norm_(grad_skin_aux, .1*clip_scale)
aux_out['ks_g'] = clip_grad_norm_(grad_ks, .1*clip_scale)
aux_out['nerf_dp_g'] = clip_grad_norm_(grad_nerf_dp, .1*clip_scale)
aux_out['csenet_g'] = clip_grad_norm_(grad_csenet, .1*clip_scale)
#if aux_out['nerf_root_rts_g']>10:
# is_invalid_grad = True
if is_invalid_grad:
self.zero_grad_list(self.model.parameters())
@staticmethod
def find_nerf_coarse(nerf_model):
"""
zero grad for coarse component connected to inputs,
and return intermediate params
"""
param_list = []
input_layers=[0]+nerf_model.skips
input_wt_names = []
for layer in input_layers:
input_wt_names.append(f"xyz_encoding_{layer+1}.0.weight")
for name,p in nerf_model.named_parameters():
if name in input_wt_names:
# get the weights according to the coarse pose code
# 63 = 3 + 60
# 60 = (num_freqs, 2, 3)
out_dim = p.shape[0]
pos_dim = nerf_model.in_channels_xyz-nerf_model.in_channels_code
# TODO
num_coarse = 8 # out of 10
#num_coarse = 10 # out of 10
#num_coarse = 1 # out of 10
# p.grad[:,:3] = 0 # xyz
# p.grad[:,3:pos_dim].view(out_dim,-1,6)[:,:num_coarse] = 0 # xyz-coarse
p.grad[:,pos_dim:] = 0 # others
else:
param_list.append(p)
return param_list
@staticmethod
def render_vid(model, batch):
opts=model.opts
model.set_input(batch)
rtk = model.rtk
kaug=model.kaug.clone()
embedid=model.embedid
rendered, _ = model.nerf_render(rtk, kaug, embedid, ndepth=opts.ndepth)
if 'xyz_camera_vis' in rendered.keys(): del rendered['xyz_camera_vis']
if 'xyz_canonical_vis' in rendered.keys(): del rendered['xyz_canonical_vis']
if 'pts_exp_vis' in rendered.keys(): del rendered['pts_exp_vis']
if 'pts_pred_vis' in rendered.keys(): del rendered['pts_pred_vis']
rendered_first = {}
for k,v in rendered.items():
if v.dim()>0:
bs=v.shape[0]
rendered_first[k] = v[:bs//2] # remove loss term
return rendered_first
@staticmethod
def extract_mesh(model,chunk,grid_size,
#threshold = -0.005,
threshold = -0.002,
#threshold = 0.,
embedid=None,
mesh_dict_in=None):
opts = model.opts
mesh_dict = {}
if model.near_far is not None:
bound = model.latest_vars['obj_bound']
else: bound=1.5*np.asarray([1,1,1])
if mesh_dict_in is None:
ptx = np.linspace(-bound[0], bound[0], grid_size).astype(np.float32)
pty = np.linspace(-bound[1], bound[1], grid_size).astype(np.float32)
ptz = np.linspace(-bound[2], bound[2], grid_size).astype(np.float32)
query_yxz = np.stack(np.meshgrid(pty, ptx, ptz), -1) # (y,x,z)
#pts = np.linspace(-bound, bound, grid_size).astype(np.float32)
#query_yxz = np.stack(np.meshgrid(pts, pts, pts), -1) # (y,x,z)
query_yxz = torch.Tensor(query_yxz).to(model.device).view(-1, 3)
query_xyz = torch.cat([query_yxz[:,1:2], query_yxz[:,0:1], query_yxz[:,2:3]],-1)
query_dir = torch.zeros_like(query_xyz)
bs_pts = query_xyz.shape[0]
out_chunks = []
for i in range(0, bs_pts, chunk):
query_xyz_chunk = query_xyz[i:i+chunk]
query_dir_chunk = query_dir[i:i+chunk]
# backward warping
if embedid is not None and not opts.queryfw:
query_xyz_chunk, mesh_dict = warp_bw(opts, model, mesh_dict,
query_xyz_chunk, embedid)
if opts.symm_shape:
#TODO set to x-symmetric
query_xyz_chunk[...,0] = query_xyz_chunk[...,0].abs()
xyz_embedded = model.embedding_xyz(query_xyz_chunk) # (N, embed_xyz_channels)
out_chunks += [model.nerf_coarse(xyz_embedded, sigma_only=True)]
vol_o = torch.cat(out_chunks, 0)
vol_o = vol_o.view(grid_size, grid_size, grid_size)
#vol_o = F.softplus(vol_o)
if not opts.full_mesh:
#TODO set density of non-observable points to small value
if model.latest_vars['idk'].sum()>0:
vis_chunks = []
for i in range(0, bs_pts, chunk):
query_xyz_chunk = query_xyz[i:i+chunk]
if opts.nerf_vis:
# this leaves no room for hallucination and is not what we want
xyz_embedded = model.embedding_xyz(query_xyz_chunk) # (N, embed_xyz_channels)
vis_chunk_nerf = model.nerf_vis(xyz_embedded)
vis_chunk = vis_chunk_nerf[...,0].sigmoid()
else:
#TODO deprecated!
vis_chunk = compute_point_visibility(query_xyz_chunk.cpu(),
model.latest_vars, model.device)[None]
vis_chunks += [vis_chunk]
vol_visi = torch.cat(vis_chunks, 0)
vol_visi = vol_visi.view(grid_size, grid_size, grid_size)
vol_o[vol_visi<0.5] = -1
## save color of sampled points
#cmap = cm.get_cmap('cool')
##pts_col = cmap(vol_visi.float().view(-1).cpu())
#pts_col = cmap(vol_o.sigmoid().view(-1).cpu())
#mesh = trimesh.Trimesh(query_xyz.view(-1,3).cpu(), vertex_colors=pts_col)
#mesh.export('0.obj')
#pdb.set_trace()
print('fraction occupied:', (vol_o > threshold).float().mean())
vertices, triangles = mcubes.marching_cubes(vol_o.cpu().numpy(), threshold)
vertices = (vertices - grid_size/2)/grid_size*2*bound[None, :]
mesh = trimesh.Trimesh(vertices, triangles)
# mesh post-processing
if len(mesh.vertices)>0:
if opts.use_cc:
# keep the largest mesh
mesh = [i for i in mesh.split(only_watertight=False)]
mesh = sorted(mesh, key=lambda x:x.vertices.shape[0])
mesh = mesh[-1]
# assign color based on canonical location
vis = mesh.vertices
try:
model.module.vis_min = vis.min(0)[None]
model.module.vis_len = vis.max(0)[None] - vis.min(0)[None]
except: # test time
model.vis_min = vis.min(0)[None]
model.vis_len = vis.max(0)[None] - vis.min(0)[None]
vis = vis - model.vis_min
vis = vis / model.vis_len
if not opts.ce_color:
vis = get_vertex_colors(model, mesh, frame_idx=0)
mesh.visual.vertex_colors[:,:3] = vis*255
# forward warping
if embedid is not None and opts.queryfw:
mesh = mesh_dict_in['mesh'].copy()
vertices = mesh.vertices
vertices, mesh_dict = warp_fw(opts, model, mesh_dict,
vertices, embedid)
mesh.vertices = vertices
mesh_dict['mesh'] = mesh
return mesh_dict
def save_logs(self, log, aux_output, total_steps, epoch):
for k,v in aux_output.items():
self.add_scalar(log, k, aux_output,total_steps)
def add_image_grid(self, rendered_seq, log, epoch):
for k,v in rendered_seq.items():
grid_img = image_grid(rendered_seq[k],3,3)
if k=='depth_rnd':scale=True
elif k=='occ':scale=True
elif k=='unc_pred':scale=True
elif k=='proj_err':scale=True
elif k=='feat_err':scale=True
else: scale=False
self.add_image(log, k, grid_img, epoch, scale=scale)
def add_image(self, log,tag,timg,step,scale=True):
"""
timg: (H, W, C) image tensor
"""
if self.isflow(tag):
timg = timg.detach().cpu().numpy()
timg = flow_to_image(timg)
elif scale:
timg = (timg-timg.min())/(timg.max()-timg.min())
else:
timg = torch.clamp(timg, 0,1)
if len(timg.shape)==2:
formats='HW'
elif timg.shape[0]==3:
formats='CHW'
print('error'); pdb.set_trace()
else:
formats='HWC'
log.add_image(tag,timg,step,dataformats=formats)
@staticmethod
def add_scalar(log,tag,data,step):
if tag in data.keys():
log.add_scalar(tag, data[tag], step)
@staticmethod
def del_key(states, key):
if key in states.keys():
del states[key]
@staticmethod
def isflow(tag):
flolist = ['flo_coarse', 'fdp_coarse', 'flo', 'fdp', 'flo_at_samp']
if tag in flolist:
return True
else:
return False
@staticmethod
def zero_grad_list(paramlist):
"""
Clears the gradients of all optimized :class:`torch.Tensor`
"""
for p in paramlist:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
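# Editor's note: a minimal, self-contained sketch (the _demo_grad_utils helper is
# not part of banmo) of the two gradient utilities used in this trainer.
# clip_grad_norm_ returns the total norm *before* clipping, which is why its
# return value is logged into aux_out above; zero_grad_list simply detaches and
# zeroes the grads of an arbitrary list of tensors, mirroring the staticmethod.
def _demo_grad_utils():
    import torch
    from torch.nn.utils import clip_grad_norm_
    w = torch.nn.Parameter(torch.randn(8, 8))
    (w ** 2).sum().backward()
    pre_clip_norm = clip_grad_norm_([w], max_norm=0.1)  # value that would be logged
    assert w.grad.norm() <= 0.1 + 1e-6                  # grad rescaled in place
    # zero_grad_list pattern
    w.grad.detach_()
    w.grad.zero_()
    assert w.grad.abs().sum() == 0
    return float(pre_clip_norm)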
|
banmo-main
|
nnutils/train_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
# adopted from nerf-pl
import numpy as np
import pdb
import torch
import torch.nn.functional as F
from pytorch3d import transforms
from nnutils.geom_utils import lbs, Kmatinv, mat2K, pinhole_cam, obj_to_cam,\
vec_to_sim3, rtmat_invert, rot_angle, mlp_skinning,\
bone_transform, skinning, vrender_flo, \
gauss_mlp_skinning, diff_flo
from nnutils.loss_utils import elastic_loss, visibility_loss, feat_match_loss,\
kp_reproj_loss, compute_pts_exp, kp_reproj, evaluate_mlp
def render_rays(models,
embeddings,
rays,
N_samples=64,
use_disp=False,
perturb=0,
noise_std=1,
chunk=1024*32,
obj_bound=None,
use_fine=False,
img_size=None,
progress=None,
opts=None,
render_vis=False,
):
"""
Render rays by computing the output of @model applied on @rays
Inputs:
models: dict of NeRF models (coarse and optional fine/auxiliary heads) defined in nerf.py
embeddings: dict of embedding models for position ('xyz') and direction ('dir') defined in nerf.py
rays: dict with ray origins, directions and near/far depth bounds, each of shape (N_rays, ...)
N_samples: number of coarse samples per ray
use_disp: whether to sample in disparity space (inverse depth)
perturb: factor to perturb the sampling position on the ray (for coarse model only)
noise_std: factor to perturb the model's prediction of sigma
chunk: the chunk size in batched inference
Outputs:
result: dictionary containing final rgb and depth maps for coarse and fine models
"""
if use_fine: N_samples = N_samples//2 # use half samples to importance sample
# Extract models from lists
embedding_xyz = embeddings['xyz']
embedding_dir = embeddings['dir']
# Decompose the inputs
rays_o = rays['rays_o']
rays_d = rays['rays_d'] # both (N_rays, 3)
near = rays['near']
far = rays['far'] # both (N_rays, 1)
N_rays = rays_d.shape[0]
# Embed direction
rays_d_norm = rays_d / rays_d.norm(2,-1)[:,None]
dir_embedded = embedding_dir(rays_d_norm) # (N_rays, embed_dir_channels)
# Sample depth points
z_steps = torch.linspace(0, 1, N_samples, device=rays_d.device) # (N_samples)
if not use_disp: # use linear sampling in depth space
z_vals = near * (1-z_steps) + far * z_steps
else: # use linear sampling in disparity space
z_vals = 1/(1/near * (1-z_steps) + 1/far * z_steps)
z_vals = z_vals.expand(N_rays, N_samples)
if perturb > 0: # perturb sampling depths (z_vals)
z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:]) # (N_rays, N_samples-1) interval mid points
# get intervals between samples
upper = torch.cat([z_vals_mid, z_vals[: ,-1:]], -1)
lower = torch.cat([z_vals[: ,:1], z_vals_mid], -1)
perturb_rand = perturb * torch.rand(z_vals.shape, device=rays_d.device)
z_vals = lower + (upper - lower) * perturb_rand
# zvals are not optimized
# produce points in the root body space
xyz_sampled = rays_o.unsqueeze(1) + \
rays_d.unsqueeze(1) * z_vals.unsqueeze(2) # (N_rays, N_samples, 3)
if use_fine: # sample points for fine model
# output:
# loss: 'img_coarse', 'sil_coarse', 'feat_err', 'proj_err'
# 'vis_loss', 'flo/fdp_coarse', 'flo/fdp_valid',
# not loss: 'depth_rnd', 'pts_pred', 'pts_exp'
with torch.no_grad():
_, weights_coarse = inference_deform(xyz_sampled, rays, models,
chunk, N_samples,
N_rays, embedding_xyz, rays_d, noise_std,
obj_bound, dir_embedded, z_vals,
img_size, progress,opts,fine_iter=False)
# reset N_importance
N_importance = N_samples
z_vals_mid = 0.5 * (z_vals[: ,:-1] + z_vals[: ,1:])
z_vals_ = sample_pdf(z_vals_mid, weights_coarse[:, 1:-1],
N_importance, det=(perturb==0)).detach()
# detach so that grad doesn't propagate to weights_coarse from here
z_vals, _ = torch.sort(torch.cat([z_vals, z_vals_], -1), -1)
xyz_sampled = rays_o.unsqueeze(1) + \
rays_d.unsqueeze(1) * z_vals.unsqueeze(2)
N_samples = N_samples + N_importance # get back to original # of samples
result, _ = inference_deform(xyz_sampled, rays, models,
chunk, N_samples,
N_rays, embedding_xyz, rays_d, noise_std,
obj_bound, dir_embedded, z_vals,
img_size, progress,opts,render_vis=render_vis)
return result
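# Editor's note: a small, self-contained sketch (the _demo_depth_sampling helper
# is not part of banmo; near=0.5 and far=5.0 are arbitrary) of the two depth
# sampling schemes used in render_rays above. Linear-in-depth places samples
# uniformly between near and far; linear-in-disparity (inverse depth) places more
# samples close to the camera.
def _demo_depth_sampling(N_samples=8):
    import torch
    near, far = torch.tensor(0.5), torch.tensor(5.0)
    z_steps = torch.linspace(0, 1, N_samples)
    z_depth = near * (1 - z_steps) + far * z_steps                 # linear in depth
    z_disp = 1 / (1 / near * (1 - z_steps) + 1 / far * z_steps)    # linear in disparity
    return z_depth, z_disp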
def inference(models, embedding_xyz, xyz_, dir_, dir_embedded, z_vals,
N_rays, N_samples,chunk, noise_std,
env_code=None, weights_only=False, clip_bound = None, vis_pred=None):
"""
Helper function that performs model inference.
Inputs:
models: dict of NeRF models ('coarse', plus optional heads such as 'nerf_feat')
embedding_xyz: embedding module for xyz
xyz_: (N_rays, N_samples_, 3) sampled positions
N_samples_ is the number of sampled points in each ray;
= N_samples for coarse model
= N_samples+N_importance for fine model
dir_: (N_rays, 3) ray directions
dir_embedded: (N_rays, embed_dir_channels) embedded directions
z_vals: (N_rays, N_samples_) depths of the sampled positions
weights_only: do inference on sigma only or not
Outputs:
rgb_final: (N_rays, 3) the final rgb image
depth_final: (N_rays) depth map
weights: (N_rays, N_samples_): weights of each sample
"""
nerf_sdf = models['coarse']
N_samples_ = xyz_.shape[1]
# Embed directions
xyz_ = xyz_.view(-1, 3) # (N_rays*N_samples_, 3)
if not weights_only:
dir_embedded = torch.repeat_interleave(dir_embedded, repeats=N_samples_, dim=0)
# (N_rays*N_samples_, embed_dir_channels)
# Perform model inference to get rgb and raw sigma
chunk_size=4096
B = xyz_.shape[0]
xyz_input = xyz_.view(N_rays,N_samples,3)
out = evaluate_mlp(nerf_sdf, xyz_input,
embed_xyz = embedding_xyz,
dir_embedded = dir_embedded.view(N_rays,N_samples,-1),
code=env_code,
chunk=chunk_size, sigma_only=weights_only).view(B,-1)
rgbsigma = out.view(N_rays, N_samples_, 4)
rgbs = rgbsigma[..., :3] # (N_rays, N_samples_, 3)
sigmas = rgbsigma[..., 3] # (N_rays, N_samples_)
if 'nerf_feat' in models.keys():
nerf_feat = models['nerf_feat']
feat = evaluate_mlp(nerf_feat, xyz_input,
embed_xyz = embedding_xyz,
chunk=chunk_size).view(N_rays,N_samples_,-1)
else:
feat = torch.zeros_like(rgbs)
# Convert these values using volume rendering (Section 4)
deltas = z_vals[:, 1:] - z_vals[:, :-1] # (N_rays, N_samples_-1)
# a hacky way to ensure the probabilities sum to 1,
# although the probability of the last bin does not correspond to its actual value
delta_inf = 1e10 * torch.ones_like(deltas[:, :1]) # (N_rays, 1) the last delta is infinity
deltas = torch.cat([deltas, delta_inf], -1) # (N_rays, N_samples_)
# Multiply each distance by the norm of its corresponding direction ray
# to convert to real world distance (accounts for non-unit directions).
deltas = deltas * torch.norm(dir_.unsqueeze(1), dim=-1)
noise = torch.randn(sigmas.shape, device=sigmas.device) * noise_std
# compute alpha by the formula (3)
sigmas = sigmas+noise
#sigmas = F.softplus(sigmas)
#sigmas = torch.relu(sigmas)
ibetas = 1/(nerf_sdf.beta.abs()+1e-9)
#ibetas = 100
sdf = -sigmas
sigmas = (0.5 + 0.5 * sdf.sign() * torch.expm1(-sdf.abs() * ibetas)) # 0-1
# alternative:
#sigmas = F.sigmoid(-sdf*ibetas)
sigmas = sigmas * ibetas
alphas = 1-torch.exp(-deltas*sigmas) # (N_rays, N_samples_), p_i
#set out-of-bound and nonvisible alphas to zero
if clip_bound is not None:
clip_bound = torch.Tensor(clip_bound).to(xyz_.device)[None,None]
oob = (xyz_.abs()>clip_bound).sum(-1).view(N_rays,N_samples)>0
alphas[oob]=0
if vis_pred is not None:
alphas[vis_pred<0.5] = 0
alphas_shifted = \
torch.cat([torch.ones_like(alphas[:, :1]), 1-alphas+1e-10], -1) # [1, a1, a2, ...]
alpha_prod = torch.cumprod(alphas_shifted, -1)[:, :-1]
weights = alphas * alpha_prod # (N_rays, N_samples_)
weights_sum = weights.sum(1) # (N_rays), the accumulated opacity along the rays
# equals "1 - (1-a1)(1-a2)...(1-an)" mathematically
visibility = alpha_prod.detach() # 1 q_0 q_j-1
# compute final weighted outputs
rgb_final = torch.sum(weights.unsqueeze(-1)*rgbs, -2) # (N_rays, 3)
feat_final = torch.sum(weights.unsqueeze(-1)*feat, -2) # (N_rays, 3)
depth_final = torch.sum(weights*z_vals, -1) # (N_rays)
return rgb_final, feat_final, depth_final, weights, visibility
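# Editor's note: a minimal, standalone sketch (the _demo_alpha_compositing helper
# is not part of banmo) of the alpha-compositing step used in inference() above,
# with plain NeRF-style sigmas and without the SDF-to-density conversion:
# weights_i = alpha_i * prod_{j<i}(1 - alpha_j), and the rendered color and depth
# are weight-averaged over the samples along each ray.
def _demo_alpha_compositing(N_rays=2, N_samples=5):
    import torch
    z_vals = torch.linspace(0.5, 5.0, N_samples).expand(N_rays, N_samples)
    sigmas = torch.rand(N_rays, N_samples) * 5
    rgbs = torch.rand(N_rays, N_samples, 3)
    deltas = torch.cat([z_vals[:, 1:] - z_vals[:, :-1],
                        1e10 * torch.ones_like(z_vals[:, :1])], -1)
    alphas = 1 - torch.exp(-deltas * sigmas)
    alphas_shifted = torch.cat([torch.ones_like(alphas[:, :1]), 1 - alphas + 1e-10], -1)
    weights = alphas * torch.cumprod(alphas_shifted, -1)[:, :-1]
    rgb = (weights.unsqueeze(-1) * rgbs).sum(-2)   # (N_rays, 3)
    depth = (weights * z_vals).sum(-1)             # (N_rays,)
    return rgb, depth, weights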
def inference_deform(xyz_coarse_sampled, rays, models, chunk, N_samples,
N_rays, embedding_xyz, rays_d, noise_std,
obj_bound, dir_embedded, z_vals,
img_size, progress,opts, fine_iter=True,
render_vis=False):
"""
fine_iter: whether to render loss-related terms
render_vis: used for novel view synthesis
"""
is_training = models['coarse'].training
xys = rays['xys']
# root space point correspondence in t2
if opts.dist_corresp:
xyz_coarse_target = xyz_coarse_sampled.clone()
xyz_coarse_dentrg = xyz_coarse_sampled.clone()
xyz_coarse_frame = xyz_coarse_sampled.clone()
# free deform
if 'flowbw' in models.keys():
model_flowbw = models['flowbw']
model_flowfw = models['flowfw']
time_embedded = rays['time_embedded'][:,None]
xyz_coarse_embedded = embedding_xyz(xyz_coarse_sampled)
flow_bw = evaluate_mlp(model_flowbw, xyz_coarse_embedded,
chunk=chunk//N_samples, xyz=xyz_coarse_sampled, code=time_embedded)
xyz_coarse_sampled=xyz_coarse_sampled + flow_bw
if fine_iter:
# cycle loss (in the joint canonical space)
xyz_coarse_embedded = embedding_xyz(xyz_coarse_sampled)
flow_fw = evaluate_mlp(model_flowfw, xyz_coarse_embedded,
chunk=chunk//N_samples, xyz=xyz_coarse_sampled,code=time_embedded)
frame_cyc_dis = (flow_bw+flow_fw).norm(2,-1)
# rigidity loss
frame_disp3d = flow_fw.norm(2,-1)
if "time_embedded_target" in rays.keys():
time_embedded_target = rays['time_embedded_target'][:,None]
flow_fw = evaluate_mlp(model_flowfw, xyz_coarse_embedded,
chunk=chunk//N_samples, xyz=xyz_coarse_sampled,code=time_embedded_target)
xyz_coarse_target=xyz_coarse_sampled + flow_fw
if "time_embedded_dentrg" in rays.keys():
time_embedded_dentrg = rays['time_embedded_dentrg'][:,None]
flow_fw = evaluate_mlp(model_flowfw, xyz_coarse_embedded,
chunk=chunk//N_samples, xyz=xyz_coarse_sampled,code=time_embedded_dentrg)
xyz_coarse_dentrg=xyz_coarse_sampled + flow_fw
elif 'bones' in models.keys():
bones_rst = models['bones_rst']
bone_rts_fw = rays['bone_rts']
skin_aux = models['skin_aux']
rest_pose_code = models['rest_pose_code']
rest_pose_code = rest_pose_code(torch.Tensor([0]).long().to(bones_rst.device))
if 'nerf_skin' in models.keys():
# compute delta skinning weights of bs, N, B
nerf_skin = models['nerf_skin']
else:
nerf_skin = None
time_embedded = rays['time_embedded'][:,None]
# coords after deform
bones_dfm = bone_transform(bones_rst, bone_rts_fw, is_vec=True)
skin_backward = gauss_mlp_skinning(xyz_coarse_sampled, embedding_xyz,
bones_dfm, time_embedded, nerf_skin, skin_aux=skin_aux)
# backward skinning
xyz_coarse_sampled, bones_dfm = lbs(bones_rst,
bone_rts_fw,
skin_backward,
xyz_coarse_sampled,
)
if fine_iter:
#if opts.dist_corresp:
skin_forward = gauss_mlp_skinning(xyz_coarse_sampled, embedding_xyz,
bones_rst,rest_pose_code, nerf_skin, skin_aux=skin_aux)
# cycle loss (in the joint canonical space)
xyz_coarse_frame_cyc,_ = lbs(bones_rst, bone_rts_fw,
skin_forward, xyz_coarse_sampled, backward=False)
frame_cyc_dis = (xyz_coarse_frame - xyz_coarse_frame_cyc).norm(2,-1)
# rigidity loss (not used as optimization objective)
num_bone = bones_rst.shape[0]
bone_fw_reshape = bone_rts_fw.view(-1,num_bone,12)
bone_trn = bone_fw_reshape[:,:,9:12]
bone_rot = bone_fw_reshape[:,:,0:9].view(-1,num_bone,3,3)
frame_rigloss = bone_trn.pow(2).sum(-1)+rot_angle(bone_rot)
if opts.dist_corresp and 'bone_rts_target' in rays.keys():
bone_rts_target = rays['bone_rts_target']
xyz_coarse_target,_ = lbs(bones_rst, bone_rts_target,
skin_forward, xyz_coarse_sampled,backward=False)
if opts.dist_corresp and 'bone_rts_dentrg' in rays.keys():
bone_rts_dentrg = rays['bone_rts_dentrg']
xyz_coarse_dentrg,_ = lbs(bones_rst, bone_rts_dentrg,
skin_forward, xyz_coarse_sampled,backward=False)
# nerf shape/rgb
model_coarse = models['coarse']
if 'env_code' in rays.keys():
env_code = rays['env_code']
else:
env_code = None
# set out of bounds weights to zero
if render_vis:
clip_bound = obj_bound
xyz_embedded = embedding_xyz(xyz_coarse_sampled)
vis_pred = evaluate_mlp(models['nerf_vis'],
xyz_embedded, chunk=chunk)[...,0].sigmoid()
else:
clip_bound = None
vis_pred = None
if opts.symm_shape:
##TODO set to x-symmetric here
symm_ratio = 0.5
xyz_x = xyz_coarse_sampled[...,:1].clone()
symm_mask = torch.rand_like(xyz_x) < symm_ratio
xyz_x[symm_mask] = -xyz_x[symm_mask]
xyz_input = torch.cat([xyz_x, xyz_coarse_sampled[...,1:3]],-1)
else:
xyz_input = xyz_coarse_sampled
rgb_coarse, feat_rnd, depth_rnd, weights_coarse, vis_coarse = \
inference(models, embedding_xyz, xyz_input, rays_d,
dir_embedded, z_vals, N_rays, N_samples, chunk, noise_std,
weights_only=False, env_code=env_code,
clip_bound=clip_bound, vis_pred=vis_pred)
sil_coarse = weights_coarse[:,:-1].sum(1)
result = {'img_coarse': rgb_coarse,
'depth_rnd': depth_rnd,
'sil_coarse': sil_coarse,
}
# render visibility scores
if render_vis:
result['vis_pred'] = (vis_pred * weights_coarse).sum(-1)
if fine_iter:
if opts.use_corresp:
# for flow rendering
pts_exp = compute_pts_exp(weights_coarse, xyz_coarse_sampled)
pts_target = kp_reproj(pts_exp, models, embedding_xyz, rays,
to_target=True) # N,1,2
# viser feature matching
if 'feats_at_samp' in rays.keys():
feats_at_samp = rays['feats_at_samp']
nerf_feat = models['nerf_feat']
xyz_coarse_sampled_feat = xyz_coarse_sampled
weights_coarse_feat = weights_coarse
pts_pred, pts_exp, feat_err = feat_match_loss(nerf_feat, embedding_xyz,
feats_at_samp, xyz_coarse_sampled_feat, weights_coarse_feat,
obj_bound, is_training=is_training)
# 3d-2d projection
proj_err = kp_reproj_loss(pts_pred, xys, models,
embedding_xyz, rays)
proj_err = proj_err/img_size * 2
result['pts_pred'] = pts_pred
result['pts_exp'] = pts_exp
result['feat_err'] = feat_err # will be used as loss
result['proj_err'] = proj_err # will be used as loss
if opts.dist_corresp and 'rtk_vec_target' in rays.keys():
# compute correspondence: root space to target view space
# RT: root space to camera space
rtk_vec_target = rays['rtk_vec_target']
Rmat = rtk_vec_target[:,0:9].view(N_rays,1,3,3)
Tmat = rtk_vec_target[:,9:12].view(N_rays,1,3)
Kinv = rtk_vec_target[:,12:21].view(N_rays,1,3,3)
K = mat2K(Kmatinv(Kinv))
xyz_coarse_target = obj_to_cam(xyz_coarse_target, Rmat, Tmat)
xyz_coarse_target = pinhole_cam(xyz_coarse_target,K)
if opts.dist_corresp and 'rtk_vec_dentrg' in rays.keys():
# compute correspondence: root space to dentrg view space
# RT: root space to camera space
rtk_vec_dentrg = rays['rtk_vec_dentrg']
Rmat = rtk_vec_dentrg[:,0:9].view(N_rays,1,3,3)
Tmat = rtk_vec_dentrg[:,9:12].view(N_rays,1,3)
Kinv = rtk_vec_dentrg[:,12:21].view(N_rays,1,3,3)
K = mat2K(Kmatinv(Kinv))
xyz_coarse_dentrg = obj_to_cam(xyz_coarse_dentrg, Rmat, Tmat)
xyz_coarse_dentrg = pinhole_cam(xyz_coarse_dentrg,K)
# raw 3d points for visualization
result['xyz_camera_vis'] = xyz_coarse_frame
if 'flowbw' in models.keys() or 'bones' in models.keys():
result['xyz_canonical_vis'] = xyz_coarse_sampled
if 'feats_at_samp' in rays.keys():
result['pts_exp_vis'] = pts_exp
result['pts_pred_vis'] = pts_pred
if 'flowbw' in models.keys() or 'bones' in models.keys():
# cycle loss (in the joint canonical space)
#if opts.dist_corresp:
result['frame_cyc_dis'] = (frame_cyc_dis * weights_coarse.detach()).sum(-1)
#else:
# pts_exp_reg = pts_exp[:,None].detach()
# skin_forward = gauss_mlp_skinning(pts_exp_reg, embedding_xyz,
# bones_rst,rest_pose_code, nerf_skin, skin_aux=skin_aux)
# pts_exp_fw,_ = lbs(bones_rst, bone_rts_fw,
# skin_forward, pts_exp_reg, backward=False)
# skin_backward = gauss_mlp_skinning(pts_exp_fw, embedding_xyz,
# bones_dfm, time_embedded, nerf_skin, skin_aux=skin_aux)
# pts_exp_fwbw,_ = lbs(bones_rst, bone_rts_fw,
# skin_backward,pts_exp_fw)
# frame_cyc_dis = (pts_exp_fwbw - pts_exp_reg).norm(2,-1)
# result['frame_cyc_dis'] = sil_coarse.detach() * frame_cyc_dis[...,-1]
if 'flowbw' in models.keys():
result['frame_rigloss'] = (frame_disp3d * weights_coarse.detach()).sum(-1)
# only evaluated when gradients are enabled
if xyz_coarse_frame.requires_grad:
# elastic energy
result['elastic_loss'] = elastic_loss(model_flowbw, embedding_xyz,
xyz_coarse_frame, time_embedded)
else:
result['frame_rigloss'] = (frame_rigloss).mean(-1)
### script to plot sigmas/weights
#from matplotlib import pyplot as plt
#plt.ioff()
#sil_rays = weights_coarse[rays['sil_at_samp'][:,0]>0]
#plt.plot(sil_rays[::1000].T.cpu().numpy(),'*-')
#plt.savefig('tmp/probs.png')
#plt.cla()
if is_training and 'nerf_vis' in models.keys():
result['vis_loss'] = visibility_loss(models['nerf_vis'], embedding_xyz,
xyz_coarse_sampled, vis_coarse, obj_bound, chunk)
# render flow
if 'rtk_vec_target' in rays.keys():
if opts.dist_corresp:
flo_coarse, flo_valid = vrender_flo(weights_coarse, xyz_coarse_target,
xys, img_size)
else:
flo_coarse = diff_flo(pts_target, xys, img_size)
flo_valid = torch.ones_like(flo_coarse[...,:1])
result['flo_coarse'] = flo_coarse
result['flo_valid'] = flo_valid
if 'rtk_vec_dentrg' in rays.keys():
if opts.dist_corresp:
fdp_coarse, fdp_valid = vrender_flo(weights_coarse,
xyz_coarse_dentrg, xys, img_size)
else:
fdp_coarse = diff_flo(pts_dentrg, xys, img_size)
fdp_valid = torch.ones_like(fdp_coarse[...,:1])
result['fdp_coarse'] = fdp_coarse
result['fdp_valid'] = fdp_valid
if 'nerf_unc' in models.keys():
# xys: bs,nsample,2
# t: bs
nerf_unc = models['nerf_unc']
ts = rays['ts']
vid_code = rays['vid_code']
# change according to K
xysn = rays['xysn']
xyt = torch.cat([xysn, ts],-1)
xyt_embedded = embedding_xyz(xyt)
xyt_code = torch.cat([xyt_embedded, vid_code],-1)
unc_pred = nerf_unc(xyt_code)
#TODO add activation function
#unc_pred = F.softplus(unc_pred)
result['unc_pred'] = unc_pred
if 'img_at_samp' in rays.keys():
# compute other losses
img_at_samp = rays['img_at_samp']
sil_at_samp = rays['sil_at_samp']
vis_at_samp = rays['vis_at_samp']
flo_at_samp = rays['flo_at_samp']
cfd_at_samp = rays['cfd_at_samp']
# img loss
img_loss_samp = (rgb_coarse - img_at_samp).pow(2).mean(-1)[...,None]
# sil loss, weight sil loss based on # points
if is_training and sil_at_samp.sum()>0 and (1-sil_at_samp).sum()>0:
pos_wt = vis_at_samp.sum()/ sil_at_samp[vis_at_samp>0].sum()
neg_wt = vis_at_samp.sum()/(1-sil_at_samp[vis_at_samp>0]).sum()
sil_balance_wt = 0.5*pos_wt*sil_at_samp + 0.5*neg_wt*(1-sil_at_samp)
else: sil_balance_wt = 1
sil_loss_samp = (sil_coarse[...,None] - sil_at_samp).pow(2) * sil_balance_wt
sil_loss_samp = sil_loss_samp * vis_at_samp
# flo loss, confidence weighting: 30x normalized distance - 0.1x pixel error
flo_loss_samp = (flo_coarse - flo_at_samp).pow(2).sum(-1)
# hard-threshold cycle error
sil_at_samp_flo = (sil_at_samp>0)\
& (flo_valid==1)
sil_at_samp_flo[cfd_at_samp==0] = False
if sil_at_samp_flo.sum()>0:
cfd_at_samp = cfd_at_samp / cfd_at_samp[sil_at_samp_flo].mean()
flo_loss_samp = flo_loss_samp[...,None] * cfd_at_samp
result['img_at_samp'] = img_at_samp
result['sil_at_samp'] = sil_at_samp
result['vis_at_samp'] = vis_at_samp
result['sil_at_samp_flo'] = sil_at_samp_flo
result['flo_at_samp'] = flo_at_samp
result['img_loss_samp'] = img_loss_samp
result['sil_loss_samp'] = sil_loss_samp
result['flo_loss_samp'] = flo_loss_samp
# exclude error outside mask
result['img_loss_samp']*=sil_at_samp
result['flo_loss_samp']*=sil_at_samp
if 'feats_at_samp' in rays.keys():
# feat loss
feats_at_samp=rays['feats_at_samp']
feat_rnd = F.normalize(feat_rnd, 2,-1)
frnd_loss_samp = (feat_rnd - feats_at_samp).pow(2).mean(-1)
result['frnd_loss_samp'] = frnd_loss_samp * sil_at_samp[...,0]
return result, weights_coarse
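# Editor's note: a toy, self-contained sketch (the _demo_cycle_consistency helper
# is not part of banmo) of the cycle-consistency term computed in
# inference_deform() above. Points warped backward into the canonical space and
# then forward again should land where they started; the per-sample distance is
# weighted by the (detached) rendering weights before summation.
def _demo_cycle_consistency():
    import torch
    xyz_frame = torch.randn(4, 16, 3)                        # camera/frame-space samples
    flow_bw = 0.05 * torch.randn_like(xyz_frame)             # frame -> canonical
    xyz_canonical = xyz_frame + flow_bw
    flow_fw = -flow_bw + 0.01 * torch.randn_like(flow_bw)    # canonical -> frame (imperfect)
    xyz_cycle = xyz_canonical + flow_fw
    frame_cyc_dis = (xyz_frame - xyz_cycle).norm(2, -1)      # (4, 16)
    weights = torch.softmax(torch.rand(4, 16), -1)           # stand-in rendering weights
    loss = (frame_cyc_dis * weights.detach()).sum(-1).mean()
    return loss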
def sample_pdf(bins, weights, N_importance, det=False, eps=1e-5):
"""
Sample @N_importance samples from @bins with distribution defined by @weights.
Inputs:
bins: (N_rays, N_samples_+1) where N_samples_ is "the number of coarse samples per ray - 2"
weights: (N_rays, N_samples_)
N_importance: the number of samples to draw from the distribution
det: deterministic or not
eps: a small number to prevent division by zero
Outputs:
samples: (N_rays, N_importance) the sampled depth values
"""
N_rays, N_samples_ = weights.shape
weights = weights + eps # prevent division by zero (don't do inplace op!)
pdf = weights / torch.sum(weights, -1, keepdim=True) # (N_rays, N_samples_)
cdf = torch.cumsum(pdf, -1) # (N_rays, N_samples), cumulative distribution function
cdf = torch.cat([torch.zeros_like(cdf[: ,:1]), cdf], -1) # (N_rays, N_samples_+1)
# padded to 0~1 inclusive
if det:
u = torch.linspace(0, 1, N_importance, device=bins.device)
u = u.expand(N_rays, N_importance)
else:
u = torch.rand(N_rays, N_importance, device=bins.device)
u = u.contiguous()
inds = torch.searchsorted(cdf, u, right=True)
below = torch.clamp_min(inds-1, 0)
above = torch.clamp_max(inds, N_samples_)
inds_sampled = torch.stack([below, above], -1).view(N_rays, 2*N_importance)
cdf_g = torch.gather(cdf, 1, inds_sampled).view(N_rays, N_importance, 2)
bins_g = torch.gather(bins, 1, inds_sampled).view(N_rays, N_importance, 2)
denom = cdf_g[...,1]-cdf_g[...,0]
denom[denom<eps] = 1 # denom equals 0 means a bin has weight 0, in which case it will not be sampled
# anyway, therefore any value for it is fine (set to 1 here)
samples = bins_g[...,0] + (u-cdf_g[...,0])/denom * (bins_g[...,1]-bins_g[...,0])
return samples
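# Editor's note: a short usage sketch (the _demo_sample_pdf helper is not part of
# banmo) of sample_pdf above. Given coarse weights peaked around one bin, the
# importance samples drawn here concentrate near that bin, which is how the fine
# pass refines the coarse pass in render_rays.
def _demo_sample_pdf():
    import torch
    N_rays, N_coarse = 3, 8
    z_vals = torch.linspace(0.5, 5.0, N_coarse).expand(N_rays, N_coarse)
    z_mid = 0.5 * (z_vals[:, :-1] + z_vals[:, 1:])           # (N_rays, N_coarse-1)
    weights = torch.zeros(N_rays, N_coarse)
    weights[:, 4] = 1.0                                       # pretend the surface is at bin 4
    z_fine = sample_pdf(z_mid, weights[:, 1:-1], N_importance=16, det=True)
    return z_fine                                             # clustered near z_vals[:, 4]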
|
banmo-main
|
nnutils/rendering.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import numpy as np
import pdb
import torch
from torch import nn
import torch.nn.functional as F
import torchvision
from pytorch3d import transforms
import trimesh
from nnutils.geom_utils import fid_reindex
class Embedding(nn.Module):
def __init__(self, in_channels, N_freqs, logscale=True, alpha=None):
"""
adapted from https://github.com/kwea123/nerf_pl/blob/master/models/nerf.py
Defines a function that embeds x to (x, sin(2^k x), cos(2^k x), ...)
in_channels: number of input channels (3 for both xyz and direction)
"""
super(Embedding, self).__init__()
self.N_freqs = N_freqs
self.in_channels = in_channels
self.funcs = [torch.sin, torch.cos]
self.nfuncs = len(self.funcs)
self.out_channels = in_channels*(len(self.funcs)*N_freqs+1)
if alpha is None:
self.alpha = self.N_freqs
else: self.alpha = alpha
if logscale:
self.freq_bands = 2**torch.linspace(0, N_freqs-1, N_freqs)
else:
self.freq_bands = torch.linspace(1, 2**(N_freqs-1), N_freqs)
def forward(self, x):
"""
Embeds x to (x, sin(2^k x), cos(2^k x), ...)
Different from the paper, "x" is also in the output
See https://github.com/bmild/nerf/issues/12
Inputs:
x: (B, self.in_channels)
Outputs:
out: (B, self.out_channels)
"""
# sine/cosine (Fourier) features
if self.N_freqs>0:
shape = x.shape
bs = shape[0]
input_dim = shape[-1]
output_dim = input_dim*(1+self.N_freqs*self.nfuncs)
out_shape = shape[:-1] + ((output_dim),)
device = x.device
x = x.view(-1,input_dim)
out = []
for freq in self.freq_bands:
for func in self.funcs:
out += [func(freq*x)]
out = torch.cat(out, -1)
## Apply the window w = 0.5*( 1+cos(pi + pi clip(alpha-j)) )
out = out.view(-1, self.N_freqs, self.nfuncs, input_dim)
window = self.alpha - torch.arange(self.N_freqs).to(device)
window = torch.clamp(window, 0.0, 1.0)
window = 0.5 * (1 + torch.cos(np.pi * window + np.pi))
window = window.view(1,-1, 1, 1)
out = window * out
out = out.view(-1,self.N_freqs*self.nfuncs*input_dim)
out = torch.cat([x, out],-1)
out = out.view(out_shape)
else: out = x
return out
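# Editor's note: a short usage sketch (the _demo_embedding helper is not part of
# banmo) of the Embedding module above. With N_freqs=10 the xyz embedding maps 3
# input channels to 3*(1 + 2*10) = 63 channels, matching in_channels_xyz=63 used
# by the NeRF MLP below; alpha controls the coarse-to-fine frequency window.
def _demo_embedding():
    import torch
    embed_xyz = Embedding(in_channels=3, N_freqs=10, alpha=10)
    xyz = torch.rand(5, 3)
    out = embed_xyz(xyz)                       # (5, 63): [xyz, sin/cos at 10 frequencies]
    assert embed_xyz.out_channels == 63
    assert out.shape == (5, 63)
    return out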
class NeRF(nn.Module):
def __init__(self,
D=8, W=256,
in_channels_xyz=63, in_channels_dir=27,
out_channels=3,
skips=[4], raw_feat=False, init_beta=1./100,
activation=nn.ReLU(True), in_channels_code=0):
"""
adapted from https://github.com/kwea123/nerf_pl/blob/master/models/nerf.py
D: number of layers for density (sigma) encoder
W: number of hidden units in each layer
in_channels_xyz: number of input channels for xyz (3+3*10*2=63 by default)
in_channels_dir: number of input channels for direction (3+3*4*2=27 by default)
skips: add skip connection in the Dth layer
in_channels_code: only used for nerf_skin,
"""
super(NeRF, self).__init__()
self.D = D
self.W = W
self.in_channels_xyz = in_channels_xyz
self.in_channels_dir = in_channels_dir
self.in_channels_code = in_channels_code
self.skips = skips
self.use_xyz = False
# xyz encoding layers
self.weights_reg = []
for i in range(D):
if i == 0:
layer = nn.Linear(in_channels_xyz, W)
self.weights_reg.append(f"xyz_encoding_{i+1}")
elif i in skips:
layer = nn.Linear(W+in_channels_xyz, W)
self.weights_reg.append(f"xyz_encoding_{i+1}")
else:
layer = nn.Linear(W, W)
layer = nn.Sequential(layer, activation)
setattr(self, f"xyz_encoding_{i+1}", layer)
self.xyz_encoding_final = nn.Linear(W, W)
# direction encoding layers
self.dir_encoding = nn.Sequential(
nn.Linear(W+in_channels_dir, W//2),
activation)
# output layers
self.sigma = nn.Linear(W, 1)
self.rgb = nn.Sequential(
nn.Linear(W//2, out_channels),
)
self.raw_feat = raw_feat
self.beta = torch.Tensor([init_beta]) # logbeta
self.beta = nn.Parameter(self.beta)
# for m in self.modules():
# if isinstance(m, nn.Linear):
# if hasattr(m.weight,'data'):
# nn.init.xavier_uniform_(m.weight)
def forward(self, x ,xyz=None, sigma_only=False):
"""
Encodes input (xyz+dir) to rgb+sigma (not ready to render yet).
For rendering this ray, please see rendering.py
Inputs:
x: (B, self.in_channels_xyz(+self.in_channels_dir))
the embedded vector of position and direction
sigma_only: whether to infer sigma only. If True,
x is of shape (B, self.in_channels_xyz)
raw_feat: does not apply sigmoid
Outputs:
if sigma_ony:
sigma: (B, 1) sigma
else:
out: (B, 4), rgb and sigma
"""
if not sigma_only:
input_xyz, input_dir = \
torch.split(x, [self.in_channels_xyz, self.in_channels_dir], dim=-1)
else:
input_xyz, input_dir = \
torch.split(x, [self.in_channels_xyz, 0], dim=-1)
xyz_ = input_xyz
for i in range(self.D):
if i in self.skips:
xyz_ = torch.cat([input_xyz, xyz_], -1)
xyz_ = getattr(self, f"xyz_encoding_{i+1}")(xyz_)
sigma = self.sigma(xyz_)
if sigma_only:
return sigma
xyz_encoding_final = self.xyz_encoding_final(xyz_)
dir_encoding_input = torch.cat([xyz_encoding_final, input_dir], -1)
dir_encoding = self.dir_encoding(dir_encoding_input)
rgb = self.rgb(dir_encoding)
if self.raw_feat:
out = rgb
else:
rgb = rgb.sigmoid()
out = torch.cat([rgb, sigma], -1)
return out
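# Editor's note: a minimal usage sketch (the _demo_nerf_forward helper is not
# part of banmo) tying the Embedding and NeRF modules above together: positions
# and directions are positionally encoded, concatenated, and mapped to
# (rgb, sigma). With the default raw_feat=False, rgb is sigmoid-bounded while
# sigma is left raw.
def _demo_nerf_forward():
    import torch
    embed_xyz = Embedding(3, 10)   # 63 channels
    embed_dir = Embedding(3, 4)    # 27 channels
    nerf = NeRF(in_channels_xyz=63, in_channels_dir=27)
    xyz = torch.rand(8, 3)
    viewdir = torch.nn.functional.normalize(torch.randn(8, 3), dim=-1)
    x = torch.cat([embed_xyz(xyz), embed_dir(viewdir)], -1)   # (8, 90)
    out = nerf(x)                                             # (8, 4): rgb in [0,1], raw sigma
    sigma = nerf(embed_xyz(xyz), sigma_only=True)             # (8, 1)
    return out, sigma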
class Transhead(NeRF):
"""
translation head
"""
def __init__(self, **kwargs):
super(Transhead, self).__init__(**kwargs)
def forward(self, x, xyz=None,sigma_only=False):
flow = super(Transhead, self).forward(x, sigma_only=sigma_only)
flow = flow*0.1
return flow
class SE3head(NeRF):
"""
modify the output to be rigid transforms per point
modified from Nerfies
"""
def __init__(self, **kwargs):
super(SE3head, self).__init__(**kwargs)
self.use_xyz=True
def forward(self, x, xyz=None,sigma_only=False):
x = super(SE3head, self).forward(x, sigma_only=sigma_only)
x = x.view(-1,9)
rotation, pivot, translation = x.split([3,3,3],-1)
pivot = pivot*0.1
translation = translation*0.1
shape = xyz.shape
warped_points = xyz.view(-1,3).clone()
warped_points = warped_points + pivot
rotmat = transforms.so3_exponential_map(rotation)
warped_points = rotmat.matmul(warped_points[...,None])[...,0]
warped_points = warped_points - pivot
warped_points = warped_points + translation
flow = warped_points.view(shape) - xyz
return flow
class RTHead(NeRF):
"""
modify the output to be rigid transforms
"""
def __init__(self, use_quat, **kwargs):
super(RTHead, self).__init__(**kwargs)
# use quaternion when estimating full rotation
# use exponential map when estimating delta rotation
self.use_quat=use_quat
if self.use_quat: self.num_output=7
else: self.num_output=6
for m in self.modules():
if isinstance(m, nn.Linear):
if hasattr(m.bias,'data'):
m.bias.data.zero_()
def forward(self, x):
# output: NxBx(9 rotation + 3 translation)
x = super(RTHead, self).forward(x)
bs = x.shape[0]
rts = x.view(-1,self.num_output) # bs B,x
B = rts.shape[0]//bs
tmat= rts[:,0:3] *0.1
if self.use_quat:
rquat=rts[:,3:7]
rquat=F.normalize(rquat,2,-1)
rmat=transforms.quaternion_to_matrix(rquat)
else:
rot=rts[:,3:6]
rmat = transforms.so3_exponential_map(rot)
rmat = rmat.view(-1,9)
rts = torch.cat([rmat,tmat],-1)
rts = rts.view(bs,1,-1)
return rts
class FrameCode(nn.Module):
"""
frame index and video index to code
"""
def __init__(self, num_freq, embedding_dim, vid_offset, scale=1):
super(FrameCode, self).__init__()
self.vid_offset = vid_offset
self.num_vids = len(vid_offset)-1
# compute maximum frequency: e.g., a 64-127 frame video => 10 frequencies
max_ts = (self.vid_offset[1:] - self.vid_offset[:-1]).max()
self.num_freq = 2*int(np.log2(max_ts))-2
# self.num_freq = num_freq
self.fourier_embed = Embedding(1,num_freq,alpha=num_freq)
self.basis_mlp = nn.Linear(self.num_vids*self.fourier_embed.out_channels,
embedding_dim)
self.scale = scale # input scale factor
def forward(self, fid):
"""
fid->code: N->N,embedding_dim
"""
bs = fid.shape[0]
vid, tid = fid_reindex(fid, self.num_vids, self.vid_offset)
tid = tid*self.scale
tid = tid.view(bs,1)
vid = vid.view(bs,1)
coeff = self.fourier_embed(tid) # N, n_channels
vid = F.one_hot(vid, num_classes=self.num_vids) # N, 1, num_vids
# mask the Fourier coefficients with the one-hot video id (zeros for the other videos)
coeff = coeff[...,None] * vid # N, n_channels, num_vids
coeff = coeff.view(bs, -1)
code = self.basis_mlp(coeff)
return code
class RTExplicit(nn.Module):
"""
index rigid transforms from a dictionary
"""
def __init__(self, max_t, delta=False, rand=True):
super(RTExplicit, self).__init__()
self.max_t = max_t
self.delta = delta
# initialize rotation
trans = torch.zeros(max_t, 3)
if delta:
rot = torch.zeros(max_t, 3)
else:
if rand:
rot = torch.rand(max_t, 4) * 2 - 1
else:
rot = torch.zeros(max_t, 4)
rot[:,0] = 1
se3 = torch.cat([trans, rot],-1)
self.se3 = nn.Parameter(se3)
self.num_output = se3.shape[-1]
def forward(self, x):
# output: NxBx(9 rotation + 3 translation)
bs = x.shape[0]
x = self.se3[x] # bs B,x
rts = x.view(-1,self.num_output)
B = rts.shape[0]//bs
tmat= rts[:,0:3] *0.1
if self.delta:
rot=rts[:,3:6]
rmat = transforms.so3_exponential_map(rot)
else:
rquat=rts[:,3:7]
rquat=F.normalize(rquat,2,-1)
rmat=transforms.quaternion_to_matrix(rquat)
rmat = rmat.view(-1,9)
rts = torch.cat([rmat,tmat],-1)
rts = rts.view(bs,1,-1)
return rts
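# Editor's note: a brief usage sketch (the _demo_rt_explicit helper is not part
# of banmo) of RTExplicit above: each frame index looks up a learnable
# [3x3 rotation | translation] row; with rand=False the table starts at identity
# rotations and zero translations (translations are further scaled by 0.1 in
# forward()).
def _demo_rt_explicit():
    import torch
    rt_table = RTExplicit(max_t=5, delta=False, rand=False)
    frame_idx = torch.tensor([0, 3])
    rts = rt_table(frame_idx)              # (2, 1, 12)
    rmat = rts[:, 0, :9].view(-1, 3, 3)    # identity rotations at init
    tmat = rts[:, 0, 9:12]                 # zero translations at init
    return rmat, tmat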
class RTExpMLP(nn.Module):
"""
explicit per-frame rigid transforms refined by an MLP-predicted delta
"""
def __init__(self, max_t, num_freqs, t_embed_dim, data_offset, delta=False):
super(RTExpMLP, self).__init__()
#self.root_code = nn.Embedding(max_t, t_embed_dim)
self.root_code = FrameCode(num_freqs, t_embed_dim, data_offset, scale=0.1)
self.base_rt = RTExplicit(max_t, delta=delta,rand=False)
#self.base_rt = RTHead(use_quat=True,
# D=2, W=64,
# in_channels_xyz=t_embed_dim,in_channels_dir=0,
# out_channels=7, raw_feat=True)
#self.base_rt = nn.Sequential(self.root_code, self.base_rt)
self.mlp_rt = RTHead(use_quat=False,
in_channels_xyz=t_embed_dim,in_channels_dir=0,
out_channels=6, raw_feat=True)
self.delta_rt = nn.Sequential(self.root_code, self.mlp_rt)
def forward(self, x):
# output: NxBx(9 rotation + 3 translation)
base_rts = self.base_rt(x)
delt_rts = self.delta_rt(x)
# magnify gradient by 10x
base_rts = base_rts * 10 - (base_rts*9).detach()
rmat = base_rts[:,0,:9].view(-1,3,3)
tmat = base_rts[:,0,9:12]
delt_rmat = delt_rts[:,0,:9].view(-1,3,3)
delt_tmat = delt_rts[:,0,9:12]
tmat = tmat + rmat.matmul(delt_tmat[...,None])[...,0]
rmat = rmat.matmul(delt_rmat)
rmat = rmat.view(-1,9)
rts = torch.cat([rmat,tmat],-1)
rts = rts.view(-1,1,12)
return rts
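# Editor's note: a tiny, standalone demonstration (the _demo_grad_magnify helper
# is not part of banmo) of the "magnify gradient by 10x" trick used in
# RTExpMLP.forward above: y = x*10 - (x*9).detach() has the same value as x in
# the forward pass, but its gradient w.r.t. x is scaled by 10, effectively
# raising the learning rate of the explicit base transforms relative to the MLP
# delta.
def _demo_grad_magnify():
    import torch
    x = torch.tensor([2.0], requires_grad=True)
    y = x * 10 - (x * 9).detach()
    assert torch.allclose(y, x)        # same forward value
    y.sum().backward()
    assert x.grad.item() == 10.0       # gradient scaled by 10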
class ScoreHead(NeRF):
"""
output rotation scores over a HEALPix grid of candidate rotations
"""
def __init__(self, recursion_level, **kwargs):
super(ScoreHead, self).__init__(**kwargs)
grid= generate_healpix_grid(recursion_level=recursion_level)
self.register_buffer('grid', grid)
self.num_scores = self.grid.shape[0]
def forward(self, x):
# output: NxBx(9 rotation + 3 translation)
x = super(ScoreHead, self).forward(x)
bs = x.shape[0]
x = x.view(-1,self.num_scores+3) # bs B,x
# do not use tmat since it is not trained
tmat = x[:,0:3]*0.
scores=x[:,3:]
if self.training:
return scores, self.grid
else:
scores = scores.view(bs,-1,1)
rmat = self.grid[None].repeat(bs,1,1,1)
tmat = tmat[:,None].repeat(1,self.num_scores,1)
rmat = rmat.view(bs,-1,9)
rts = torch.cat([scores,rmat, tmat],-1)
rts = rts.view(bs,self.num_scores,-1)
return rts
class NeRFUnc(NeRF):
"""
nerf uncertainty
"""
def __init__(self, **kwargs):
super(NeRFUnc, self).__init__(**kwargs)
def forward(self, x, xyz=None,sigma_only=False):
unc = super(NeRFUnc, self).forward(x, sigma_only=sigma_only)
return unc
class ResNetConv(nn.Module):
"""
adapted from https://github.com/shubhtuls/factored3d/blob/master/nnutils/net_blocks.py
"""
def __init__(self, in_channels):
super(ResNetConv, self).__init__()
self.resnet = torchvision.models.resnet18(pretrained=True)
if in_channels!=3:
self.resnet.conv1 = nn.Conv2d(in_channels, 64, kernel_size=(7, 7),
stride=(2, 2), padding=(3, 3), bias=False)
self.resnet.fc=None
def forward(self, x):
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x = self.resnet.layer1(x)
x = self.resnet.layer2(x)
x = self.resnet.layer3(x)
x = self.resnet.layer4(x)
return x
class Encoder(nn.Module):
"""
adapted from https://github.com/shubhtuls/factored3d/blob/master/nnutils/net_blocks.py
Current:
ResNet-18 backbone (x32 spatial dim reduction),
followed by a 3x3 conv down to 128 channels and a 4x4 max-pool;
the result is flattened into the output feature vector.
"""
def __init__(self, input_shape, in_channels=3,out_channels=128, batch_norm=True):
super(Encoder, self).__init__()
self.resnet_conv = ResNetConv(in_channels=in_channels)
self.conv1 = conv2d(batch_norm, 512, 128, stride=1, kernel_size=3)
#net_init(self.conv1)
def forward(self, img):
feat = self.resnet_conv.forward(img) # 512,4,4
feat = self.conv1(feat) # 128,4,4
feat = F.max_pool2d(feat, 4, 4)
feat = feat.view(img.size(0), -1)
return feat
## 2D convolution layers
def conv2d(batch_norm, in_planes, out_planes, kernel_size=3, stride=1):
"""
adapted from https://github.com/shubhtuls/factored3d/blob/master/nnutils/net_blocks.py
"""
if batch_norm:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True),
nn.BatchNorm2d(out_planes),
nn.LeakyReLU(0.2,inplace=True)
)
else:
return nn.Sequential(
nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=(kernel_size-1)//2, bias=True),
nn.LeakyReLU(0.2,inplace=True)
)
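# Editor's note: a short usage sketch (the _demo_conv2d_block helper is not part
# of banmo) of the conv2d block above, as used by Encoder: a stride-1 3x3 conv
# from the 512-channel ResNet features down to 128 channels, optionally with
# BatchNorm + LeakyReLU.
def _demo_conv2d_block():
    import torch
    block = conv2d(batch_norm=True, in_planes=512, out_planes=128, kernel_size=3, stride=1)
    feat = torch.randn(2, 512, 4, 4)
    out = block(feat)                  # (2, 128, 4, 4)
    assert out.shape == (2, 128, 4, 4)
    return out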
def grab_xyz_weights(nerf_model, clone=False):
"""
collect the input-layer xyz weights of the NeRF MLP (optionally cloned)
and return them as a list, e.g. to regularize the coarse positional-encoding response
"""
param_list = []
input_layers=[0]+nerf_model.skips
input_wt_names = []
for layer in input_layers:
input_wt_names.append(f"xyz_encoding_{layer+1}.0.weight")
for name,p in nerf_model.named_parameters():
if name in input_wt_names:
# equivalent, since the weights after pos_dim do not change
if clone:
param_list.append(p.detach().clone())
else:
param_list.append(p)
## get the weights according to coarse posec
## 63 = 3 + 60
## 60 = (num_freqs, 2, 3)
#out_dim = p.shape[0]
#pos_dim = nerf_model.in_channels_xyz-nerf_model.in_channels_code
#param_list.append(p[:,:pos_dim]) #
return param_list
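# Editor's note: a hedged sketch (the _demo_xyz_weight_drift helper is not part
# of banmo, and this is not the repo's actual compute_xyz_wt_loss) of how the
# parameter lists returned by grab_xyz_weights can be used: clone the input-layer
# xyz weights once (clone=True), then penalize how far the live weights drift
# from that snapshot during fine-tuning.
def _demo_xyz_weight_drift(nerf_model):
    frozen = grab_xyz_weights(nerf_model, clone=True)
    live = grab_xyz_weights(nerf_model, clone=False)
    drift = sum((p - q).pow(2).mean() for p, q in zip(live, frozen))
    return drift  # zero right after cloning, grows as the weights change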
|
banmo-main
|
nnutils/nerf.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from collections import defaultdict
import os
import os.path as osp
import pickle
import sys
sys.path.insert(0, 'third_party')
import cv2, numpy as np, time, torch, torchvision
import torch.nn as nn
from torch.autograd import Variable
import torch.nn.functional as F
import trimesh, pytorch3d, pytorch3d.loss, pdb
from pytorch3d import transforms
import configparser
from nnutils.nerf import Embedding, NeRF, RTHead, SE3head, RTExplicit, Encoder,\
ScoreHead, Transhead, NeRFUnc, \
grab_xyz_weights, FrameCode, RTExpMLP
from nnutils.geom_utils import K2mat, mat2K, Kmatinv, K2inv, raycast, sample_xy,\
chunk_rays, generate_bones,\
canonical2ndc, obj_to_cam, vec_to_sim3, \
near_far_to_bound, compute_flow_geodist, \
compute_flow_cse, fb_flow_check, pinhole_cam, \
render_color, mask_aug, bbox_dp2rnd, resample_dp, \
vrender_flo, get_near_far, array2tensor, rot_angle, \
rtk_invert, rtk_compose, bone_transform, correct_bones,\
correct_rest_pose, fid_reindex
from nnutils.rendering import render_rays
from nnutils.loss_utils import eikonal_loss, rtk_loss, \
feat_match_loss, kp_reproj_loss, grad_update_bone, \
loss_filter, loss_filter_line, compute_xyz_wt_loss,\
compute_root_sm_2nd_loss, shape_init_loss
from utils.io import draw_pts
# distributed data parallel
flags.DEFINE_integer('local_rank', 0, 'for distributed training')
flags.DEFINE_integer('ngpu', 1, 'number of gpus to use')
# data io
flags.DEFINE_integer('accu_steps', 1, 'how many steps to do gradient accumulation')
flags.DEFINE_string('seqname', 'syn-spot-40', 'name of the sequence')
flags.DEFINE_string('logname', 'exp_name', 'Experiment Name')
flags.DEFINE_string('checkpoint_dir', 'logdir/', 'Root directory for output files')
flags.DEFINE_string('model_path', '', 'load model path')
flags.DEFINE_string('pose_cnn_path', '', 'path to pre-trained pose cnn')
flags.DEFINE_string('rtk_path', '', 'path to rtk files')
flags.DEFINE_bool('lineload',False,'whether to use pre-computed data per line')
flags.DEFINE_integer('n_data_workers', 1, 'Number of data loading workers')
flags.DEFINE_boolean('use_rtk_file', False, 'whether to use input rtk files')
flags.DEFINE_boolean('debug', False, 'debug mode')
# model: shape, appearance, and feature
flags.DEFINE_bool('use_human', False, 'whether to use human cse model')
flags.DEFINE_bool('symm_shape', False, 'whether to set geometry to x-symmetry')
flags.DEFINE_bool('env_code', True, 'whether to use environment code for each video')
flags.DEFINE_bool('env_fourier', True, 'whether to use fourier basis for env')
flags.DEFINE_bool('use_unc',False, 'whether to use uncertainty sampling')
flags.DEFINE_bool('nerf_vis', True, 'use visibility volume')
flags.DEFINE_bool('anneal_freq', False, 'whether to use frequency annealing')
flags.DEFINE_integer('alpha', 10, 'maximum frequency for fourier features')
flags.DEFINE_bool('use_cc', True, 'whether to use connected component for mesh')
# model: motion
flags.DEFINE_bool('lbs', True, 'use lbs for backward warping 3d flow')
flags.DEFINE_integer('num_bones', 25, 'maximum number of bones')
flags.DEFINE_bool('nerf_skin', True, 'use mlp skinning function')
flags.DEFINE_integer('t_embed_dim', 128, 'dimension of the pose code')
flags.DEFINE_bool('frame_code', True, 'whether to use frame code')
flags.DEFINE_bool('flowbw', False, 'use backward warping 3d flow')
flags.DEFINE_bool('se3_flow', False, 'whether to use se3 field for 3d flow')
# model: cameras
flags.DEFINE_bool('use_cam', False, 'whether to use pre-defined camera pose')
flags.DEFINE_string('root_basis', 'expmlp', 'which root pose basis to use {mlp, cnn, exp, expmlp}')
flags.DEFINE_bool('root_opt', True, 'whether to optimize root body poses')
flags.DEFINE_bool('ks_opt', True, 'whether to optimize camera intrinsics')
# optimization: hyperparams
flags.DEFINE_integer('num_epochs', 1000, 'Number of epochs to train')
flags.DEFINE_float('learning_rate', 5e-4, 'learning rate')
flags.DEFINE_integer('batch_size', 2, 'size of minibatches')
flags.DEFINE_integer('img_size', 512, 'image size for optimization')
flags.DEFINE_integer('nsample', 6, 'num of samples per image at optimization time')
flags.DEFINE_float('perturb', 1.0, 'factor to perturb depth sampling points')
flags.DEFINE_float('noise_std', 0., 'std dev of noise added to regularize sigma')
flags.DEFINE_float('nactive', 0.5, 'fraction of active samples per image at optimization time')
flags.DEFINE_integer('ndepth', 128, 'num of depth samples per px at optimization time')
flags.DEFINE_float('clip_scale', 100, 'grad clip scale')
flags.DEFINE_float('warmup_steps', 0.4, 'steps used to increase sil loss')
flags.DEFINE_float('reinit_bone_steps', 0.667, 'steps to initialize bones')
flags.DEFINE_float('dskin_steps', 0.8, 'steps to add delta skinning weights')
flags.DEFINE_float('init_beta', 0.1, 'initial value for transparency beta')
flags.DEFINE_bool('reset_beta', False, 'reset volsdf beta to 0.1')
flags.DEFINE_float('fine_steps', 1.1, 'by default, not using fine samples')
flags.DEFINE_float('nf_reset', 0.5, 'by default, start resetting near-far plane at 50%')
flags.DEFINE_float('bound_reset', 0.5, 'by default, start resetting bound from 50%')
flags.DEFINE_float('bound_factor', 2, 'by default, use a loose bound')
# optimization: initialization
flags.DEFINE_bool('init_ellips', False, 'whether to init shape as ellips')
flags.DEFINE_integer('warmup_pose_ep', 0, 'epochs to pre-train cnn pose predictor')
flags.DEFINE_integer('warmup_shape_ep', 0, 'epochs to pre-train nerf')
flags.DEFINE_bool('warmup_rootmlp', False, 'whether to preset base root pose (compatible with expmlp root basis only)')
flags.DEFINE_bool('unc_filter', True, 'whether to filter root poses init with low uncertainty')
# optimization: fine-tuning
flags.DEFINE_bool('keep_pose_basis', True, 'keep pose basis when loading models at train time')
flags.DEFINE_bool('freeze_coarse', False, 'whether to freeze the coarse positional-encoding weights of the MLP')
flags.DEFINE_bool('freeze_root', False, 'whether to freeze root body pose')
flags.DEFINE_bool('root_stab', True, 'whether to stabilize the root pose at fine-tuning')
flags.DEFINE_bool('freeze_cvf', False, 'whether to freeze canonical features')
flags.DEFINE_bool('freeze_shape',False, 'whether to freeze canonical shape')
flags.DEFINE_bool('freeze_proj', False, 'whether to freeze some params w/ proj loss')
flags.DEFINE_bool('freeze_body_mlp', False, 'whether to freeze body pose mlp')
flags.DEFINE_float('proj_start', 0.0, 'steps to start projection opt')
flags.DEFINE_float('frzroot_start', 0.0, 'steps to start fixing root pose')
flags.DEFINE_float('frzbody_end', 0.0, 'steps to end fixing body pose')
flags.DEFINE_float('proj_end', 0.2, 'steps to end projection opt')
# CSE fine-tuning (turned off by default)
flags.DEFINE_bool('ft_cse', False, 'whether to fine-tune cse features')
flags.DEFINE_bool('mt_cse', True, 'whether to maintain cse features')
flags.DEFINE_float('mtcse_steps', 0.0, 'only distill cse before several epochs')
flags.DEFINE_float('ftcse_steps', 0.0, 'finetune cse after several epochs')
# render / eval
flags.DEFINE_integer('render_size', 64, 'size used for eval visualizations')
flags.DEFINE_integer('frame_chunk', 20, 'chunk size to split the input frames')
flags.DEFINE_integer('chunk', 32*1024, 'chunk size to split the input to avoid OOM')
flags.DEFINE_integer('rnd_frame_chunk', 3, 'chunk size to render eval images')
flags.DEFINE_bool('queryfw', True, 'use forward warping to query deformed shape')
flags.DEFINE_float('mc_threshold', -0.002, 'marching cubes threshold')
flags.DEFINE_bool('full_mesh', False, 'extract surface without visibility check')
flags.DEFINE_bool('ce_color', True, 'assign mesh color as canonical surface mapping or radiance')
flags.DEFINE_integer('sample_grid3d', 64, 'resolution for mesh extraction from nerf')
flags.DEFINE_string('test_frames', '9', 'a list of video indices or a number of frames, e.g. {0,1,2} or 30')
# losses
flags.DEFINE_bool('use_embed', True, 'whether to use feature consistency losses')
flags.DEFINE_bool('use_proj', True, 'whether to use reprojection loss')
flags.DEFINE_bool('use_corresp', True, 'whether to render and compare correspondence')
flags.DEFINE_bool('dist_corresp', True, 'whether to render distributed corresp')
flags.DEFINE_float('total_wt', 1, 'by default, multiply total loss by 1')
flags.DEFINE_float('sil_wt', 0.1, 'weight for silhouette loss')
flags.DEFINE_float('img_wt', 0.1, 'weight for image reconstruction loss')
flags.DEFINE_float('feat_wt', 0., 'by default, multiply feat loss by 1')
flags.DEFINE_float('frnd_wt', 1., 'by default, multiply feature rendering loss by 1')
flags.DEFINE_float('proj_wt', 0.02, 'by default, multiply proj loss by 1')
flags.DEFINE_float('flow_wt', 1, 'by default, multiply flow loss by 1')
flags.DEFINE_float('cyc_wt', 1, 'by default, multiply cyc loss by 1')
flags.DEFINE_bool('rig_loss', False,'whether to use globally rigid loss')
flags.DEFINE_bool('root_sm', True, 'whether to use smooth loss for root pose')
flags.DEFINE_float('eikonal_wt', 0., 'weight of eikonal loss')
flags.DEFINE_float('bone_loc_reg', 0.1, 'use bone location regularization')
flags.DEFINE_bool('loss_flt', True, 'whether to use loss filter')
flags.DEFINE_bool('rm_novp', True,'whether to remove loss on non-overlapping pxs')
# for scripts/visualize/match.py
flags.DEFINE_string('match_frames', '0 1', 'a list of frame indices')
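# Editor's note: a hedged sketch (the _demo_read_flags helper is not one of
# banmo's actual entry points) of how these absl flags are typically consumed: an
# absl app parses argv, after which flags.FLAGS exposes every value as an
# attribute and is passed around as `opts`.
def _demo_read_flags():
    # assumes the flags have already been parsed, e.g. via absl.app.run(main)
    opts = flags.FLAGS
    return opts.num_epochs, opts.learning_rate, opts.lbs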
class banmo(nn.Module):
def __init__(self, opts, data_info):
super(banmo, self).__init__()
self.opts = opts
self.device = torch.device("cuda:%d"%opts.local_rank)
self.config = configparser.RawConfigParser()
self.config.read('configs/%s.config'%opts.seqname)
self.alpha=torch.Tensor([opts.alpha])
self.alpha=nn.Parameter(self.alpha)
self.loss_select = 1 # by default, use all losses
self.root_update = 1 # by default, update root pose
self.body_update = 1 # by default, update body pose
self.shape_update = 0 # by default, update all
self.cvf_update = 0 # by default, update all
self.progress = 0. # also reseted in optimizer
self.counter_frz_rebone = 0. # counter to freeze params for reinit bones
self.use_fine = False # by default not using fine samples
#self.ndepth_bk = opts.ndepth # original ndepth
self.root_basis = opts.root_basis
self.use_cam = opts.use_cam
self.is_warmup_pose = False # by default not warming up
self.img_size = opts.img_size # current rendering size,
# has to be consistent with the dataloader;
# eval/train use different sizes
embed_net = nn.Embedding
# multi-video mode
self.num_vid = len(self.config.sections())-1
self.data_offset = data_info['offset']
self.num_fr=self.data_offset[-1]
self.max_ts = (self.data_offset[1:] - self.data_offset[:-1]).max()
self.impath = data_info['impath']
self.latest_vars = {}
# only used in get_near_far: rtk, idk
# only used in visibility: rtk, vis, idx (deprecated)
# raw rot/trans estimated by pose net
self.latest_vars['rt_raw'] = np.zeros((self.data_offset[-1], 3,4)) # from data
# rtk raw scaled and refined
self.latest_vars['rtk'] = np.zeros((self.data_offset[-1], 4,4))
self.latest_vars['idk'] = np.zeros((self.data_offset[-1],))
self.latest_vars['mesh_rest'] = trimesh.Trimesh()
if opts.lineload:
#TODO todo, this should be idx512,-1
self.latest_vars['fp_err'] = np.zeros((self.data_offset[-1]*opts.img_size,2)) # feat, proj
self.latest_vars['flo_err'] = np.zeros((self.data_offset[-1]*opts.img_size,6))
self.latest_vars['sil_err'] = np.zeros((self.data_offset[-1]*opts.img_size,))
self.latest_vars['flo_err_hist'] = np.zeros((self.data_offset[-1]*opts.img_size,6,10))
else:
self.latest_vars['fp_err'] = np.zeros((self.data_offset[-1],2)) # feat, proj
self.latest_vars['flo_err'] = np.zeros((self.data_offset[-1],6))
self.latest_vars['sil_err'] = np.zeros((self.data_offset[-1],))
self.latest_vars['flo_err_hist'] = np.zeros((self.data_offset[-1],6,10))
# get near-far plane
self.near_far = np.zeros((self.data_offset[-1],2))
self.near_far[...,1] = 6.
self.near_far = self.near_far.astype(np.float32)
self.near_far = torch.Tensor(self.near_far).to(self.device)
self.obj_scale = float(near_far_to_bound(self.near_far)) / 0.3 # to 0.3
self.near_far = self.near_far / self.obj_scale
self.near_far_base = self.near_far.clone() # used for create_base_se3()
self.near_far = nn.Parameter(self.near_far)
# object bound
self.latest_vars['obj_bound'] = np.asarray([1.,1.,1.])
self.latest_vars['obj_bound'] *= near_far_to_bound(self.near_far)
self.vis_min=np.asarray([[0,0,0]])
self.vis_len=self.latest_vars['obj_bound']/2
# set shape/appearance model
self.num_freqs = 10
in_channels_xyz=3+3*self.num_freqs*2
in_channels_dir=27
if opts.env_code:
# add video-specific environment lighting embedding
env_code_dim = 64
if opts.env_fourier:
self.env_code = FrameCode(self.num_freqs, env_code_dim, self.data_offset, scale=1)
else:
self.env_code = embed_net(self.num_fr, env_code_dim)
else:
env_code_dim = 0
self.nerf_coarse = NeRF(in_channels_xyz=in_channels_xyz,
in_channels_dir=in_channels_dir+env_code_dim,
init_beta=opts.init_beta)
self.embedding_xyz = Embedding(3,self.num_freqs,alpha=self.alpha.data[0])
self.embedding_dir = Embedding(3,4, alpha=self.alpha.data[0])
self.embeddings = {'xyz':self.embedding_xyz, 'dir':self.embedding_dir}
self.nerf_models= {'coarse':self.nerf_coarse}
# set motion model
t_embed_dim = opts.t_embed_dim
if opts.frame_code:
self.pose_code = FrameCode(self.num_freqs, t_embed_dim, self.data_offset)
else:
self.pose_code = embed_net(self.num_fr, t_embed_dim)
if opts.flowbw:
if opts.se3_flow:
flow3d_arch = SE3head
out_channels=9
else:
flow3d_arch = Transhead
out_channels=3
self.nerf_flowbw = flow3d_arch(in_channels_xyz=in_channels_xyz+t_embed_dim,
D=5, W=128,
out_channels=out_channels,in_channels_dir=0, raw_feat=True)
self.nerf_flowfw = flow3d_arch(in_channels_xyz=in_channels_xyz+t_embed_dim,
D=5, W=128,
out_channels=out_channels,in_channels_dir=0, raw_feat=True)
self.nerf_models['flowbw'] = self.nerf_flowbw
self.nerf_models['flowfw'] = self.nerf_flowfw
elif opts.lbs:
self.num_bones = opts.num_bones
bones= generate_bones(self.num_bones, self.num_bones, 0, self.device)
self.bones = nn.Parameter(bones)
self.nerf_models['bones'] = self.bones
self.num_bone_used = self.num_bones # bones used in the model
self.nerf_body_rts = nn.Sequential(self.pose_code,
RTHead(use_quat=False,
#D=5,W=128,
in_channels_xyz=t_embed_dim,in_channels_dir=0,
out_channels=6*self.num_bones, raw_feat=True))
#TODO scale+constant parameters
skin_aux = torch.Tensor([0,self.obj_scale])
self.skin_aux = nn.Parameter(skin_aux)
self.nerf_models['skin_aux'] = self.skin_aux
if opts.nerf_skin:
self.nerf_skin = NeRF(in_channels_xyz=in_channels_xyz+t_embed_dim,
# D=5,W=128,
D=5,W=64,
in_channels_dir=0, out_channels=self.num_bones,
raw_feat=True, in_channels_code=t_embed_dim)
self.rest_pose_code = embed_net(1, t_embed_dim)
self.nerf_models['nerf_skin'] = self.nerf_skin
self.nerf_models['rest_pose_code'] = self.rest_pose_code
# set visibility nerf
if opts.nerf_vis:
self.nerf_vis = NeRF(in_channels_xyz=in_channels_xyz, D=5, W=64,
out_channels=1, in_channels_dir=0,
raw_feat=True)
self.nerf_models['nerf_vis'] = self.nerf_vis
# optimize camera
if opts.root_opt:
if self.use_cam:
use_quat=False
out_channels=6
else:
use_quat=True
out_channels=7
# train a cnn pose predictor for warmup
cnn_in_channels = 16
cnn_head = RTHead(use_quat=True, D=1,
in_channels_xyz=128,in_channels_dir=0,
out_channels=7, raw_feat=True)
self.dp_root_rts = nn.Sequential(
Encoder((112,112), in_channels=cnn_in_channels,
out_channels=128), cnn_head)
if self.root_basis == 'cnn':
self.nerf_root_rts = nn.Sequential(
Encoder((112,112), in_channels=cnn_in_channels,
out_channels=128),
RTHead(use_quat=use_quat, D=1,
in_channels_xyz=128,in_channels_dir=0,
out_channels=out_channels, raw_feat=True))
elif self.root_basis == 'exp':
self.nerf_root_rts = RTExplicit(self.num_fr, delta=self.use_cam)
elif self.root_basis == 'expmlp':
self.nerf_root_rts = RTExpMLP(self.num_fr,
self.num_freqs,t_embed_dim,self.data_offset,
delta=self.use_cam)
elif self.root_basis == 'mlp':
self.root_code = embed_net(self.num_fr, t_embed_dim)
output_head = RTHead(use_quat=use_quat,
in_channels_xyz=t_embed_dim,in_channels_dir=0,
out_channels=out_channels, raw_feat=True)
self.nerf_root_rts = nn.Sequential(self.root_code, output_head)
else: print('error'); exit()
# intrinsics
ks_list = []
for i in range(self.num_vid):
fx,fy,px,py=[float(i) for i in \
self.config.get('data_%d'%i, 'ks').split(' ')]
ks_list.append([fx,fy,px,py])
self.ks_param = torch.Tensor(ks_list).to(self.device)
if opts.ks_opt:
self.ks_param = nn.Parameter(self.ks_param)
# densepose
detbase='./third_party/detectron2/'
if opts.use_human:
canonical_mesh_name = 'smpl_27554'
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml'%(detbase)
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x/250713061/model_final_1d3314.pkl'
else:
canonical_mesh_name = 'sheep_5004'
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml'%(detbase)
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k/253498611/model_final_6d69b7.pkl'
canonical_mesh_path = 'mesh_material/%s_sph.pkl'%canonical_mesh_name
with open(canonical_mesh_path, 'rb') as f:
dp = pickle.load(f)
self.dp_verts = dp['vertices']
self.dp_faces = dp['faces'].astype(int)
self.dp_verts = torch.Tensor(self.dp_verts).cuda(self.device)
self.dp_faces = torch.Tensor(self.dp_faces).cuda(self.device).long()
self.dp_verts -= self.dp_verts.mean(0)[None]
self.dp_verts /= self.dp_verts.abs().max()
self.dp_verts_unit = self.dp_verts.clone()
self.dp_verts *= (self.near_far[:,1] - self.near_far[:,0]).mean()/2
# visualize
self.dp_vis = self.dp_verts.detach()
self.dp_vmin = self.dp_vis.min(0)[0][None]
self.dp_vis = self.dp_vis - self.dp_vmin
self.dp_vmax = self.dp_vis.max(0)[0][None]
self.dp_vis = self.dp_vis / self.dp_vmax
# save colorvis
if not os.path.isdir('tmp'): os.mkdir('tmp')
trimesh.Trimesh(self.dp_verts_unit.cpu().numpy(),
dp['faces'],
vertex_colors = self.dp_vis.cpu().numpy())\
.export('tmp/%s.obj'%canonical_mesh_name)
if opts.unc_filter:
from utils.cselib import create_cse
# load surface embedding
_, _, mesh_vertex_embeddings = create_cse(config_path,
weight_path)
self.dp_embed = mesh_vertex_embeddings[canonical_mesh_name]
# add densepose mlp
if opts.use_embed:
self.num_feat = 16
# TODO change this to D-8
self.nerf_feat = NeRF(in_channels_xyz=in_channels_xyz, D=5, W=128,
out_channels=self.num_feat,in_channels_dir=0, raw_feat=True, init_beta=1.)
self.nerf_models['nerf_feat'] = self.nerf_feat
if opts.ft_cse:
from nnutils.cse import CSENet
self.csenet = CSENet(ishuman=opts.use_human)
# add uncertainty MLP
if opts.use_unc:
# input, (x,y,t)+code, output, (1)
vid_code_dim=32 # add video-specific code
self.vid_code = embed_net(self.num_vid, vid_code_dim)
#self.nerf_unc = NeRFUnc(in_channels_xyz=in_channels_xyz, D=5, W=128,
self.nerf_unc = NeRFUnc(in_channels_xyz=in_channels_xyz, D=8, W=256,
out_channels=1,in_channels_dir=vid_code_dim, raw_feat=True, init_beta=1.)
self.nerf_models['nerf_unc'] = self.nerf_unc
if opts.warmup_pose_ep>0:
# soft renderer
import soft_renderer as sr
self.mesh_renderer = sr.SoftRenderer(image_size=256, sigma_val=1e-12,
camera_mode='look_at',perspective=False, aggr_func_rgb='hard',
light_mode='vertex', light_intensity_ambient=1.,light_intensity_directionals=0.)
self.resnet_transform = torchvision.transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
def forward_default(self, batch):
opts = self.opts
# get root poses
rtk_all = self.compute_rts()
# change near-far plane for all views
if self.progress>=opts.nf_reset:
rtk_np = rtk_all.clone().detach().cpu().numpy()
valid_rts = self.latest_vars['idk'].astype(bool)
self.latest_vars['rtk'][valid_rts,:3] = rtk_np[valid_rts]
self.near_far.data = get_near_far(
self.near_far.data,
self.latest_vars)
if opts.debug:
torch.cuda.synchronize()
start_time = time.time()
if opts.lineload:
bs = self.set_input(batch, load_line=True)
else:
bs = self.set_input(batch)
if opts.debug:
torch.cuda.synchronize()
print('set input time:%.2f'%(time.time()-start_time))
rtk = self.rtk
kaug= self.kaug
embedid=self.embedid
aux_out={}
# Render
rendered, rand_inds = self.nerf_render(rtk, kaug, embedid,
nsample=opts.nsample, ndepth=opts.ndepth)
if opts.debug:
torch.cuda.synchronize()
print('set input + render time:%.2f'%(time.time()-start_time))
# image and silhouette loss
sil_at_samp = rendered['sil_at_samp']
sil_at_samp_flo = rendered['sil_at_samp_flo']
vis_at_samp = rendered['vis_at_samp']
if opts.loss_flt:
# frame-level rejection of bad segmentations
if opts.lineload:
invalid_idx = loss_filter_line(self.latest_vars['sil_err'],
self.errid.long(),self.frameid.long(),
rendered['sil_loss_samp']*opts.sil_wt,
opts.img_size)
else:
sil_err, invalid_idx = loss_filter(self.latest_vars['sil_err'],
rendered['sil_loss_samp']*opts.sil_wt,
sil_at_samp>-1, scale_factor=10)
self.latest_vars['sil_err'][self.errid.long()] = sil_err
if self.progress > (opts.warmup_steps):
rendered['sil_loss_samp'][invalid_idx] *= 0.
if invalid_idx.sum()>0:
print('%d removed from sil'%(invalid_idx.sum()))
img_loss_samp = opts.img_wt*rendered['img_loss_samp']
if opts.loss_flt:
img_loss_samp[invalid_idx] *= 0
img_loss = img_loss_samp
if opts.rm_novp:
img_loss = img_loss * rendered['sil_coarse'].detach()
img_loss = img_loss[sil_at_samp[...,0]>0].mean() # eval on valid pts
sil_loss_samp = opts.sil_wt*rendered['sil_loss_samp']
sil_loss = sil_loss_samp[vis_at_samp>0].mean()
aux_out['sil_loss'] = sil_loss
aux_out['img_loss'] = img_loss
total_loss = img_loss
total_loss = total_loss + sil_loss
# feat rnd loss
frnd_loss_samp = opts.frnd_wt*rendered['frnd_loss_samp']
if opts.loss_flt:
frnd_loss_samp[invalid_idx] *= 0
if opts.rm_novp:
frnd_loss_samp = frnd_loss_samp * rendered['sil_coarse'].detach()
feat_rnd_loss = frnd_loss_samp[sil_at_samp[...,0]>0].mean() # eval on valid pts
aux_out['feat_rnd_loss'] = feat_rnd_loss
total_loss = total_loss + feat_rnd_loss
# flow loss
if opts.use_corresp:
flo_loss_samp = rendered['flo_loss_samp']
if opts.loss_flt:
flo_loss_samp[invalid_idx] *= 0
if opts.rm_novp:
flo_loss_samp = flo_loss_samp * rendered['sil_coarse'].detach()
# eval on valid pts
flo_loss = flo_loss_samp[sil_at_samp_flo[...,0]].mean() * 2
#flo_loss = flo_loss_samp[sil_at_samp_flo[...,0]].mean()
flo_loss = flo_loss * opts.flow_wt
# warm up by only using flow loss to optimize root pose
if self.loss_select == 0:
total_loss = total_loss*0. + flo_loss
else:
total_loss = total_loss + flo_loss
aux_out['flo_loss'] = flo_loss
# viser loss
if opts.use_embed:
feat_err_samp = rendered['feat_err']* opts.feat_wt
if opts.loss_flt:
feat_err_samp[invalid_idx] *= 0
feat_loss = feat_err_samp
if opts.rm_novp:
feat_loss = feat_loss * rendered['sil_coarse'].detach()
feat_loss = feat_loss[sil_at_samp>0].mean()
total_loss = total_loss + feat_loss
aux_out['feat_loss'] = feat_loss
aux_out['beta_feat'] = self.nerf_feat.beta.clone().detach()[0]
if opts.use_proj:
proj_err_samp = rendered['proj_err']* opts.proj_wt
if opts.loss_flt:
proj_err_samp[invalid_idx] *= 0
proj_loss = proj_err_samp[sil_at_samp>0].mean()
aux_out['proj_loss'] = proj_loss
if opts.freeze_proj:
total_loss = total_loss + proj_loss
## warm up by only using projection loss to optimize bones
warmup_weight = (self.progress - opts.proj_start)/(opts.proj_end-opts.proj_start)
warmup_weight = (warmup_weight - 0.8) * 5 # [-4,1]
warmup_weight = np.clip(warmup_weight, 0,1)
if (self.progress > opts.proj_start and \
self.progress < opts.proj_end):
total_loss = total_loss*warmup_weight + \
10*proj_loss*(1-warmup_weight)
else:
# only add it after feature volume is trained well
total_loss = total_loss + proj_loss
# regularization
if 'frame_cyc_dis' in rendered.keys():
# cycle loss
cyc_loss = rendered['frame_cyc_dis'].mean()
total_loss = total_loss + cyc_loss * opts.cyc_wt
#total_loss = total_loss + cyc_loss*0
aux_out['cyc_loss'] = cyc_loss
# globally rigid prior
rig_loss = 0.0001*rendered['frame_rigloss'].mean()
if opts.rig_loss:
total_loss = total_loss + rig_loss
else:
total_loss = total_loss + rig_loss*0
aux_out['rig_loss'] = rig_loss
# elastic energy for se3 field / translation field
if 'elastic_loss' in rendered.keys():
elastic_loss = rendered['elastic_loss'].mean() * 1e-3
total_loss = total_loss + elastic_loss
aux_out['elastic_loss'] = elastic_loss
# regularization of root poses
if opts.root_sm:
root_sm_loss = compute_root_sm_2nd_loss(rtk_all, self.data_offset)
aux_out['root_sm_loss'] = root_sm_loss
total_loss = total_loss + root_sm_loss
if opts.eikonal_wt > 0:
ekl_loss = opts.eikonal_wt * eikonal_loss(self.nerf_coarse, self.embedding_xyz,
rendered['xyz_canonical_vis'], self.latest_vars['obj_bound'])
aux_out['ekl_loss'] = ekl_loss
total_loss = total_loss + ekl_loss
# bone location regularization: pull bones away from empty space (low sdf)
if opts.lbs and opts.bone_loc_reg>0:
bones_rst = self.bones
bones_rst,_ = correct_bones(self, bones_rst)
mesh_rest = self.latest_vars['mesh_rest']
if len(mesh_rest.vertices)>100: # not a degenerate mesh
# issue #4 the following causes error on certain archs for torch110+cu113
# seems to be a conflict between geomloss and pytorch3d
# mesh_rest = pytorch3d.structures.meshes.Meshes(
# verts=torch.Tensor(mesh_rest.vertices[None]),
# faces=torch.Tensor(mesh_rest.faces[None]))
# an ugly workaround: build the mesh from lists of verts/faces instead
mesh_verts = [torch.Tensor(mesh_rest.vertices)]
mesh_faces = [torch.Tensor(mesh_rest.faces).long()]
try: # the call below occasionally fails on the first attempt; retry once
mesh_rest = pytorch3d.structures.meshes.Meshes(verts=mesh_verts, faces=mesh_faces)
except:
mesh_rest = pytorch3d.structures.meshes.Meshes(verts=mesh_verts, faces=mesh_faces)
shape_samp = pytorch3d.ops.sample_points_from_meshes(mesh_rest,
1000, return_normals=False)
shape_samp = shape_samp[0].to(self.device)
from geomloss import SamplesLoss
samploss = SamplesLoss(loss="sinkhorn", p=2, blur=.05)
bone_loc_loss = samploss(bones_rst[:,:3]*10, shape_samp*10)
bone_loc_loss = opts.bone_loc_reg*bone_loc_loss
total_loss = total_loss + bone_loc_loss
aux_out['bone_loc_loss'] = bone_loc_loss
# visibility loss
if 'vis_loss' in rendered.keys():
vis_loss = 0.01*rendered['vis_loss'].mean()
total_loss = total_loss + vis_loss
aux_out['visibility_loss'] = vis_loss
# uncertainty MLP inference
if opts.use_unc:
# add uncertainty MLP loss, loss = | |img-img_r|*sil - unc_pred |
unc_pred = rendered['unc_pred']
unc_rgb = sil_at_samp[...,0]*img_loss_samp.mean(-1)
unc_feat= (sil_at_samp*feat_err_samp)[...,0]
unc_proj= (sil_at_samp*proj_err_samp)[...,0]
unc_sil = sil_loss_samp[...,0]
#unc_accumulated = unc_feat + unc_proj
#unc_accumulated = unc_feat + unc_proj + unc_rgb*0.1
# unc_accumulated = unc_feat + unc_proj + unc_rgb
unc_accumulated = unc_rgb
# unc_accumulated = unc_rgb + unc_sil
unc_loss = (unc_accumulated.detach() - unc_pred[...,0]).pow(2)
unc_loss = unc_loss.mean()
aux_out['unc_loss'] = unc_loss
total_loss = total_loss + unc_loss
# cse feature tuning
if opts.ft_cse and opts.mt_cse:
csenet_loss = (self.csenet_feats - self.csepre_feats).pow(2).sum(1)
csenet_loss = csenet_loss[self.dp_feats_mask].mean()* 1e-5
if self.progress < opts.mtcse_steps:
total_loss = total_loss*0 + csenet_loss
else:
total_loss = total_loss + csenet_loss
aux_out['csenet_loss'] = csenet_loss
if opts.freeze_coarse:
# compute nerf xyz wt loss
shape_xyz_wt_curr = grab_xyz_weights(self.nerf_coarse)
shape_xyz_wt_loss = 100*compute_xyz_wt_loss(self.shape_xyz_wt,
shape_xyz_wt_curr)
skin_xyz_wt_curr = grab_xyz_weights(self.nerf_skin)
skin_xyz_wt_loss = 100*compute_xyz_wt_loss(self.skin_xyz_wt,
skin_xyz_wt_curr)
feat_xyz_wt_curr = grab_xyz_weights(self.nerf_feat)
feat_xyz_wt_loss = 100*compute_xyz_wt_loss(self.feat_xyz_wt,
feat_xyz_wt_curr)
aux_out['shape_xyz_wt_loss'] = shape_xyz_wt_loss
aux_out['skin_xyz_wt_loss'] = skin_xyz_wt_loss
aux_out['feat_xyz_wt_loss'] = feat_xyz_wt_loss
total_loss = total_loss + shape_xyz_wt_loss + skin_xyz_wt_loss\
+ feat_xyz_wt_loss
# save some variables
if opts.lbs:
aux_out['skin_scale'] = self.skin_aux[0].clone().detach()
aux_out['skin_const'] = self.skin_aux[1].clone().detach()
total_loss = total_loss * opts.total_wt
aux_out['total_loss'] = total_loss
aux_out['beta'] = self.nerf_coarse.beta.clone().detach()[0]
if opts.debug:
torch.cuda.synchronize()
print('set input + render + loss time:%.2f'%(time.time()-start_time))
return total_loss, aux_out
def forward_warmup_rootmlp(self, batch):
"""
the batch variable is never used here
"""
# render ground-truth data
opts = self.opts
device = self.device
# loss
aux_out={}
self.rtk = torch.zeros(self.num_fr,4,4).to(device)
self.frameid = torch.Tensor(range(self.num_fr)).to(device)
self.dataid,_ = fid_reindex(self.frameid, self.num_vid, self.data_offset)
self.convert_root_pose()
rtk_gt = torch.Tensor(self.latest_vars['rtk']).to(device)
_ = rtk_loss(self.rtk, rtk_gt, aux_out)
root_sm_loss = compute_root_sm_2nd_loss(self.rtk, self.data_offset)
total_loss = 0.1*aux_out['rot_loss'] + 0.01*root_sm_loss
aux_out['warmup_root_sm_loss'] = root_sm_loss
del aux_out['trn_loss']
return total_loss, aux_out
def forward_warmup_shape(self, batch):
"""
the batch variable is never used here
"""
# render ground-truth data
opts = self.opts
# loss
shape_factor = 0.1
aux_out={}
total_loss = shape_init_loss(self.dp_verts_unit*shape_factor,self.dp_faces, \
self.nerf_coarse, self.embedding_xyz,
bound_factor=opts.bound_factor * 1.2, use_ellips=opts.init_ellips)
aux_out['shape_init_loss'] = total_loss
return total_loss, aux_out
def forward_warmup(self, batch):
"""
the batch variable is never used here
"""
# render ground-truth data
opts = self.opts
bs_rd = 16
with torch.no_grad():
vertex_color = self.dp_embed
dp_feats_rd, rtk_raw = self.render_dp(self.dp_verts_unit,
self.dp_faces, vertex_color, self.near_far, self.device,
self.mesh_renderer, bs_rd)
aux_out={}
# predict delta se3
root_rts = self.nerf_root_rts(dp_feats_rd)
root_rmat = root_rts[:,0,:9].view(-1,3,3)
root_tmat = root_rts[:,0,9:12]
# construct base se3
rtk = torch.zeros(bs_rd, 4,4).to(self.device)
rtk[:,:3] = self.create_base_se3(bs_rd, self.device)
# compose se3
rmat = rtk[:,:3,:3]
tmat = rtk[:,:3,3]
tmat = tmat + rmat.matmul(root_tmat[...,None])[...,0]
rmat = rmat.matmul(root_rmat)
rtk[:,:3,:3] = rmat
rtk[:,:3,3] = tmat.detach() # do not train translation
# loss
total_loss = rtk_loss(rtk, rtk_raw, aux_out)
aux_out['total_loss'] = total_loss
return total_loss, aux_out
def nerf_render(self, rtk, kaug, embedid, nsample=256, ndepth=128):
opts=self.opts
# render rays
if opts.debug:
torch.cuda.synchronize()
start_time = time.time()
# 2bs,...
Rmat, Tmat, Kinv = self.prepare_ray_cams(rtk, kaug)
bs = Kinv.shape[0]
# for batch:2bs, nsample+x
# for line: 2bs*(nsample+x),1
rand_inds, rays, frameid, errid = self.sample_pxs(bs, nsample, Rmat, Tmat, Kinv,
self.dataid, self.frameid, self.frameid_sub, self.embedid,self.lineid,self.errid,
self.imgs, self.masks, self.vis2d, self.flow, self.occ, self.dp_feats)
self.frameid = frameid # only used in loss filter
self.errid = errid
if opts.debug:
torch.cuda.synchronize()
print('prepare rays time: %.2f'%(time.time()-start_time))
bs_rays = rays['bs'] * rays['nsample'] # over pixels
results=defaultdict(list)
for i in range(0, bs_rays, opts.chunk):
rays_chunk = chunk_rays(rays,i,opts.chunk)
# decide whether to use fine samples
if self.progress > opts.fine_steps:
self.use_fine = True
else:
self.use_fine = False
rendered_chunks = render_rays(self.nerf_models,
self.embeddings,
rays_chunk,
N_samples = ndepth,
use_disp=False,
perturb=opts.perturb,
noise_std=opts.noise_std,
chunk=opts.chunk, # chunk size is effective in val mode
obj_bound=self.latest_vars['obj_bound'],
use_fine=self.use_fine,
img_size=self.img_size,
progress=self.progress,
opts=opts,
)
for k, v in rendered_chunks.items():
results[k] += [v]
for k, v in results.items():
if v[0].dim()==0: # loss
v = torch.stack(v).mean()
else:
v = torch.cat(v, 0)
if self.training:
v = v.view(rays['bs'],rays['nsample'],-1)
else:
v = v.view(bs,self.img_size, self.img_size, -1)
results[k] = v
if opts.debug:
torch.cuda.synchronize()
print('rendering time: %.2f'%(time.time()-start_time))
# viser feature matching
if opts.use_embed:
results['pts_pred'] = (results['pts_pred'] - torch.Tensor(self.vis_min[None]).\
to(self.device)) / torch.Tensor(self.vis_len[None]).to(self.device)
results['pts_exp'] = (results['pts_exp'] - torch.Tensor(self.vis_min[None]).\
to(self.device)) / torch.Tensor(self.vis_len[None]).to(self.device)
results['pts_pred'] = results['pts_pred'].clamp(0,1)
results['pts_exp'] = results['pts_exp'].clamp(0,1)
if opts.debug:
torch.cuda.synchronize()
print('compute flow time: %.2f'%(time.time()-start_time))
return results, rand_inds
@staticmethod
def render_dp(dp_verts_unit, dp_faces, dp_embed, near_far, device,
mesh_renderer, bs):
"""
render a pair of (densepose feature bsx16x112x112, se3)
input is densepose surface model and near-far plane
"""
verts = dp_verts_unit
faces = dp_faces
dp_embed = dp_embed
num_verts, embed_dim = dp_embed.shape
img_size = 256
crop_size = 112
focal = 2
std_rot = 6.28 # rotation std
std_dep = 0.5 # depth std
# scale geometry and translation based on near-far plane
d_mean = near_far.mean()
verts = verts / 3 * d_mean # scale based on mean depth
dep_rand = 1 + np.random.normal(0,std_dep,bs)
dep_rand = torch.Tensor(dep_rand).to(device)
d_obj = d_mean * dep_rand
d_obj = torch.max(d_obj, 1.2*1/3 * d_mean)
# set cameras
rot_rand = np.random.normal(0,std_rot,(bs,3))
rot_rand = torch.Tensor(rot_rand).to(device)
Rmat = transforms.axis_angle_to_matrix(rot_rand)
Tmat = torch.cat([torch.zeros(bs, 2).to(device), d_obj[:,None]],-1)
K = torch.Tensor([[focal,focal,0,0]]).to(device).repeat(bs,1)
# add RTK: [R_3x3|T_3x1]
# [fx,fy,px,py], to the ndc space
Kimg = torch.Tensor([[focal*img_size/2.,focal*img_size/2.,img_size/2.,
img_size/2.]]).to(device).repeat(bs,1)
rtk = torch.zeros(bs,4,4).to(device)
rtk[:,:3,:3] = Rmat
rtk[:,:3, 3] = Tmat
rtk[:,3, :] = Kimg
# repeat mesh
verts = verts[None].repeat(bs,1,1)
faces = faces[None].repeat(bs,1,1)
dp_embed = dp_embed[None].repeat(bs,1,1)
# obj-cam transform
verts = obj_to_cam(verts, Rmat, Tmat)
# perspective projection
verts = pinhole_cam(verts, K)
# render sil+rgb
rendered = []
for i in range(0,embed_dim,3):
dp_chunk = dp_embed[...,i:i+3]
dp_chunk_size = dp_chunk.shape[-1]
if dp_chunk_size<3:
dp_chunk = torch.cat([dp_chunk,
dp_embed[...,:(3-dp_chunk_size)]],-1)
rendered_chunk = render_color(mesh_renderer, verts, faces,
dp_chunk, texture_type='vertex')
rendered_chunk = rendered_chunk[:,:3]
rendered.append(rendered_chunk)
rendered = torch.cat(rendered, 1)
rendered = rendered[:,:embed_dim]
# resize to bounding box
rendered_crops = []
for i in range(bs):
mask = rendered[i].max(0)[0]>0
mask = mask.cpu().numpy()
indices = np.where(mask>0); xid = indices[1]; yid = indices[0]
center = ( (xid.max()+xid.min())//2, (yid.max()+yid.min())//2)
length = ( int((xid.max()-xid.min())*1.//2 ),
int((yid.max()-yid.min())*1.//2 ))
left,top,w,h = [center[0]-length[0], center[1]-length[1],
length[0]*2, length[1]*2]
rendered_crop = torchvision.transforms.functional.resized_crop(\
rendered[i], top,left,h,w,(50,50))
# mask augmentation
rendered_crop = mask_aug(rendered_crop)
rendered_crops.append( rendered_crop)
#cv2.imwrite('%d.png'%i, rendered_crop.std(0).cpu().numpy()*1000)
rendered_crops = torch.stack(rendered_crops,0)
rendered_crops = F.interpolate(rendered_crops, (crop_size, crop_size),
mode='bilinear')
rendered_crops = F.normalize(rendered_crops, 2,1)
return rendered_crops, rtk
@staticmethod
def create_base_se3(bs, device):
"""
create a base se3 (identity rotation, object placed at a fixed small depth)
"""
rt = torch.zeros(bs,3,4).to(device)
rt[:,:3,:3] = torch.eye(3)[None].repeat(bs,1,1).to(device)
rt[:,:2,3] = 0.
rt[:,2,3] = 0.3
return rt
@staticmethod
def prepare_ray_cams(rtk, kaug):
"""
in: rtk, kaug
out: Rmat, Tmat, Kinv
"""
Rmat = rtk[:,:3,:3]
Tmat = rtk[:,:3,3]
Kmat = K2mat(rtk[:,3,:])
Kaug = K2inv(kaug) # p = Kaug Kmat P
Kinv = Kmatinv(Kaug.matmul(Kmat))
return Rmat, Tmat, Kinv
def sample_pxs(self, bs, nsample, Rmat, Tmat, Kinv,
dataid, frameid, frameid_sub, embedid, lineid,errid,
imgs, masks, vis2d, flow, occ, dp_feats):
"""
make sure attributes of self are not modified in this function
xys: bs, nsample, 2
rand_inds: bs, nsample
"""
opts = self.opts
Kinv_in=Kinv.clone()
dataid_in=dataid.clone()
frameid_sub_in = frameid_sub.clone()
# sample 1x points, sample 4x points for further selection
nsample_a = 4*nsample
rand_inds, xys = sample_xy(self.img_size, bs, nsample+nsample_a, self.device,
return_all= not(self.training), lineid=lineid)
if self.training and opts.use_unc and \
self.progress >= (opts.warmup_steps):
is_active=True
nsample_s = int(opts.nactive * nsample) # active
nsample = int(nsample*(1-opts.nactive)) # uniform
else:
is_active=False
if self.training:
rand_inds_a, xys_a = rand_inds[:,-nsample_a:].clone(), xys[:,-nsample_a:].clone()
rand_inds, xys = rand_inds[:,:nsample].clone(), xys[:,:nsample].clone()
if opts.lineload:
# expand frameid, Rmat,Tmat, Kinv
frameid_a= frameid[:,None].repeat(1,nsample_a)
frameid_sub_a=frameid_sub[:,None].repeat(1,nsample_a)
dataid_a= dataid[:,None].repeat(1,nsample_a)
errid_a= errid[:,None].repeat(1,nsample_a)
Rmat_a = Rmat[:,None].repeat(1,nsample_a,1,1)
Tmat_a = Tmat[:,None].repeat(1,nsample_a,1)
Kinv_a = Kinv[:,None].repeat(1,nsample_a,1,1)
# expand
frameid = frameid[:,None].repeat(1,nsample)
frameid_sub = frameid_sub[:,None].repeat(1,nsample)
dataid = dataid[:,None].repeat(1,nsample)
errid = errid[:,None].repeat(1,nsample)
Rmat = Rmat[:,None].repeat(1,nsample,1,1)
Tmat = Tmat[:,None].repeat(1,nsample,1)
Kinv = Kinv[:,None].repeat(1,nsample,1,1)
batch_map = torch.Tensor(range(bs)).to(self.device)[:,None].long()
batch_map_a = batch_map.repeat(1,nsample_a)
batch_map = batch_map.repeat(1,nsample)
# importance sampling
if is_active:
with torch.no_grad():
# run uncertainty estimation
ts = frameid_sub_in.to(self.device) / self.max_ts * 2 -1
ts = ts[:,None,None].repeat(1,nsample_a,1)
dataid_in = dataid_in.long().to(self.device)
vid_code = self.vid_code(dataid_in)[:,None].repeat(1,nsample_a,1)
# convert to normalized coords
xysn = torch.cat([xys_a, torch.ones_like(xys_a[...,:1])],2)
xysn = xysn.matmul(Kinv_in.permute(0,2,1))[...,:2]
xyt = torch.cat([xysn, ts],-1)
xyt_embedded = self.embedding_xyz(xyt)
xyt_code = torch.cat([xyt_embedded, vid_code],-1)
unc_pred = self.nerf_unc(xyt_code)[...,0]
# preprocess to format 2,bs,w
if opts.lineload:
unc_pred = unc_pred.view(2,-1)
xys = xys.view(2,-1,2)
xys_a = xys_a.view(2,-1,2)
rand_inds = rand_inds.view(2,-1)
rand_inds_a = rand_inds_a.view(2,-1)
frameid = frameid.view(2,-1)
frameid_a = frameid_a.view(2,-1)
frameid_sub = frameid_sub.view(2,-1)
frameid_sub_a = frameid_sub_a.view(2,-1)
dataid = dataid.view(2,-1)
dataid_a = dataid_a.view(2,-1)
errid = errid.view(2,-1)
errid_a = errid_a.view(2,-1)
batch_map = batch_map.view(2,-1)
batch_map_a = batch_map_a.view(2,-1)
Rmat = Rmat.view(2,-1,3,3)
Rmat_a = Rmat_a.view(2,-1,3,3)
Tmat = Tmat.view(2,-1,3)
Tmat_a = Tmat_a.view(2,-1,3)
Kinv = Kinv.view(2,-1,3,3)
Kinv_a = Kinv_a.view(2,-1,3,3)
nsample_s = nsample_s * bs//2
bs=2
# merge top nsamples
topk_samp = unc_pred.topk(nsample_s,dim=-1)[1] # bs,nsamp
# use the first imgs (in a pair) sampled index
xys_a = torch.stack( [xys_a[i][topk_samp[0]] for i in range(bs)],0)
rand_inds_a = torch.stack( [rand_inds_a[i][topk_samp[0]] for i in range(bs)],0)
frameid_a = torch.stack( [frameid_a[i][topk_samp[0]] for i in range(bs)],0)
frameid_sub_a=torch.stack( [frameid_sub_a[i][topk_samp[0]] for i in range(bs)],0)
dataid_a = torch.stack( [dataid_a[i][topk_samp[0]] for i in range(bs)],0)
errid_a = torch.stack( [errid_a[i][topk_samp[0]] for i in range(bs)],0)
batch_map_a = torch.stack( [batch_map_a[i][topk_samp[0]] for i in range(bs)],0)
Rmat_a = torch.stack( [Rmat_a[i][topk_samp[0]] for i in range(bs)],0)
Tmat_a = torch.stack( [Tmat_a[i][topk_samp[0]] for i in range(bs)],0)
Kinv_a = torch.stack( [Kinv_a[i][topk_samp[0]] for i in range(bs)],0)
xys = torch.cat([xys,xys_a],1)
rand_inds = torch.cat([rand_inds,rand_inds_a],1)
frameid = torch.cat([frameid,frameid_a],1)
frameid_sub=torch.cat([frameid_sub,frameid_sub_a],1)
dataid = torch.cat([dataid,dataid_a],1)
errid = torch.cat([errid,errid_a],1)
batch_map = torch.cat([batch_map,batch_map_a],1)
Rmat = torch.cat([Rmat,Rmat_a],1)
Tmat = torch.cat([Tmat,Tmat_a],1)
Kinv = torch.cat([Kinv,Kinv_a],1)
else:
topk_samp = unc_pred.topk(nsample_s,dim=-1)[1] # bs,nsamp
xys_a = torch.stack( [xys_a[i][topk_samp[i]] for i in range(bs)],0)
rand_inds_a = torch.stack([rand_inds_a[i][topk_samp[i]] for i in range(bs)],0)
xys = torch.cat([xys,xys_a],1)
rand_inds = torch.cat([rand_inds,rand_inds_a],1)
# for line: reshape to 2*bs, 1,...
if self.training and opts.lineload:
frameid = frameid.view(-1)
frameid_sub = frameid_sub.view(-1)
dataid = dataid.view(-1)
errid = errid.view(-1)
batch_map = batch_map.view(-1)
xys = xys.view(-1,1,2)
rand_inds = rand_inds.view(-1,1)
Rmat = Rmat.view(-1,3,3)
Tmat = Tmat.view(-1,3)
Kinv = Kinv.view(-1,3,3)
near_far = self.near_far[frameid.long()]
rays = raycast(xys, Rmat, Tmat, Kinv, near_far)
# need to reshape dataid, frameid_sub, embedid #TODO embedid equiv to frameid
self.update_rays(rays, bs>1, dataid, frameid_sub, frameid, xys, Kinv)
if 'bones' in self.nerf_models.keys():
# update delta rts fw
self.update_delta_rts(rays)
# for line: 2bs*nsamp,1
# for batch:2bs,nsamp
#TODO reshape imgs, masks, etc.
if self.training and opts.lineload:
self.obs_to_rays_line(rays, rand_inds, imgs, masks, vis2d, flow, occ,
dp_feats, batch_map)
else:
self.obs_to_rays(rays, rand_inds, imgs, masks, vis2d, flow, occ, dp_feats)
# TODO visualize samples
#pdb.set_trace()
#self.imgs_samp = []
#for i in range(bs):
# self.imgs_samp.append(draw_pts(self.imgs[i], xys_a[i]))
#self.imgs_samp = torch.stack(self.imgs_samp,0)
return rand_inds, rays, frameid, errid
def obs_to_rays_line(self, rays, rand_inds, imgs, masks, vis2d,
flow, occ, dp_feats,batch_map):
"""
convert imgs, masks, flow, occ, dp_feats to rays
batch_map: maps each sampled pixel back to its original batch index
rand_inds: bs,
"""
opts = self.opts
rays['img_at_samp']=torch.gather(imgs[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,3,1))[:,None][...,0]
rays['sil_at_samp']=torch.gather(masks[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,1,1))[:,None][...,0]
rays['vis_at_samp']=torch.gather(vis2d[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,1,1))[:,None][...,0]
rays['flo_at_samp']=torch.gather(flow[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,2,1))[:,None][...,0]
rays['cfd_at_samp']=torch.gather(occ[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,1,1))[:,None][...,0]
if opts.use_embed:
rays['feats_at_samp']=torch.gather(dp_feats[batch_map][...,0], 2,
rand_inds[:,None].repeat(1,16,1))[:,None][...,0]
def obs_to_rays(self, rays, rand_inds, imgs, masks, vis2d,
flow, occ, dp_feats):
"""
convert imgs, masks, flow, occ, dp_feats to rays
"""
opts = self.opts
bs = imgs.shape[0]
rays['img_at_samp'] = torch.stack([imgs[i].view(3,-1).T[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,3
rays['sil_at_samp'] = torch.stack([masks[i].view(-1,1)[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,1
rays['vis_at_samp'] = torch.stack([vis2d[i].view(-1,1)[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,1
rays['flo_at_samp'] = torch.stack([flow[i].view(2,-1).T[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,2
rays['cfd_at_samp'] = torch.stack([occ[i].view(-1,1)[rand_inds[i]]\
for i in range(bs)],0) # bs,ns,1
if opts.use_embed:
feats_at_samp = [dp_feats[i].view(16,-1).T\
[rand_inds[i].long()] for i in range(bs)]
feats_at_samp = torch.stack(feats_at_samp,0) # bs,ns,num_feat
rays['feats_at_samp'] = feats_at_samp
def update_delta_rts(self, rays):
"""
change bone_rts_fw to delta fw
"""
opts = self.opts
bones_rst, bone_rts_rst = correct_bones(self, self.nerf_models['bones'])
self.nerf_models['bones_rst']=bones_rst
# delta rts
rays['bone_rts'] = correct_rest_pose(opts, rays['bone_rts'], bone_rts_rst)
if 'bone_rts_target' in rays.keys():
rays['bone_rts_target'] = correct_rest_pose(opts,
rays['bone_rts_target'], bone_rts_rst)
if 'bone_rts_dentrg' in rays.keys():
rays['bone_rts_dentrg'] = correct_rest_pose(opts,
rays['bone_rts_dentrg'], bone_rts_rst)
def update_rays(self, rays, is_pair, dataid, frameid_sub, embedid, xys, Kinv):
"""
"""
opts = self.opts
# append target frame rtk
embedid = embedid.long().to(self.device)
if is_pair:
rtk_vec = rays['rtk_vec'] # bs, N, 21
rtk_vec_target = rtk_vec.view(2,-1).flip(0)
rays['rtk_vec_target'] = rtk_vec_target.reshape(rays['rtk_vec'].shape)
embedid_target = embedid.view(2,-1).flip(0).reshape(-1)
if opts.flowbw:
time_embedded_target = self.pose_code(embedid_target)[:,None]
rays['time_embedded_target'] = time_embedded_target.repeat(1,
rays['nsample'],1)
elif opts.lbs and self.num_bone_used>0:
bone_rts_target = self.nerf_body_rts(embedid_target)
rays['bone_rts_target'] = bone_rts_target.repeat(1,rays['nsample'],1)
# pass time-dependent inputs
time_embedded = self.pose_code(embedid)[:,None]
rays['time_embedded'] = time_embedded.repeat(1,rays['nsample'],1)
if opts.lbs and self.num_bone_used>0:
bone_rts = self.nerf_body_rts(embedid)
rays['bone_rts'] = bone_rts.repeat(1,rays['nsample'],1)
if opts.env_code:
rays['env_code'] = self.env_code(embedid)[:,None]
rays['env_code'] = rays['env_code'].repeat(1,rays['nsample'],1)
#rays['env_code'] = self.env_code(dataid.long().to(self.device))
#rays['env_code'] = rays['env_code'][:,None].repeat(1,rays['nsample'],1)
if opts.use_unc:
ts = frameid_sub.to(self.device) / self.max_ts * 2 -1
ts = ts[:,None,None].repeat(1,rays['nsample'],1)
rays['ts'] = ts
dataid = dataid.long().to(self.device)
vid_code = self.vid_code(dataid)[:,None].repeat(1,rays['nsample'],1)
rays['vid_code'] = vid_code
xysn = torch.cat([xys, torch.ones_like(xys[...,:1])],2)
xysn = xysn.matmul(Kinv.permute(0,2,1))[...,:2]
rays['xysn'] = xysn
def convert_line_input(self, batch):
device = self.device
opts = self.opts
# convert to float
for k,v in batch.items():
batch[k] = batch[k].float()
bs=batch['dataid'].shape[0]
self.imgs = batch['img'] .view(bs,2,3, -1).permute(1,0,2,3).reshape(bs*2,3, -1,1).to(device)
self.masks = batch['mask'] .view(bs,2,1, -1).permute(1,0,2,3).reshape(bs*2,1, -1,1).to(device)
self.vis2d = batch['vis2d'] .view(bs,2,1, -1).permute(1,0,2,3).reshape(bs*2,1, -1,1).to(device)
self.flow = batch['flow'] .view(bs,2,2, -1).permute(1,0,2,3).reshape(bs*2,2, -1,1).to(device)
self.occ = batch['occ'] .view(bs,2,1, -1).permute(1,0,2,3).reshape(bs*2,1, -1,1).to(device)
self.dps = batch['dp'] .view(bs,2,1, -1).permute(1,0,2,3).reshape(bs*2,1, -1,1).to(device)
self.dp_feats = batch['dp_feat_rsmp'].view(bs,2,16,-1).permute(1,0,2,3).reshape(bs*2,16,-1,1).to(device)
self.dp_feats = F.normalize(self.dp_feats, 2,1)
self.rtk = batch['rtk'] .view(bs,-1,4,4).permute(1,0,2,3).reshape(-1,4,4) .to(device)
self.kaug = batch['kaug'] .view(bs,-1,4).permute(1,0,2).reshape(-1,4) .to(device)
self.frameid = batch['frameid'] .view(bs,-1).permute(1,0).reshape(-1).cpu()
self.dataid = batch['dataid'] .view(bs,-1).permute(1,0).reshape(-1).cpu()
self.lineid = batch['lineid'] .view(bs,-1).permute(1,0).reshape(-1).to(device)
self.frameid_sub = self.frameid.clone() # id within a video
self.embedid = self.frameid + self.data_offset[self.dataid.long()]
self.frameid = self.frameid + self.data_offset[self.dataid.long()]
self.errid = self.frameid*opts.img_size + self.lineid.cpu() # for err filter
self.rt_raw = self.rtk.clone()[:,:3]
# process silhouette
self.masks = (self.masks*self.vis2d)>0
self.masks = self.masks.float()
def convert_batch_input(self, batch):
device = self.device
opts = self.opts
if batch['img'].dim()==4:
bs,_,h,w = batch['img'].shape
else:
bs,_,_,h,w = batch['img'].shape
# convert to float
for k,v in batch.items():
batch[k] = batch[k].float()
img_tensor = batch['img'].view(bs,-1,3,h,w).permute(1,0,2,3,4).reshape(-1,3,h,w)
input_img_tensor = img_tensor.clone()
for b in range(input_img_tensor.size(0)):
input_img_tensor[b] = self.resnet_transform(input_img_tensor[b])
self.input_imgs = input_img_tensor.to(device)
self.imgs = img_tensor.to(device)
self.masks = batch['mask'] .view(bs,-1,h,w).permute(1,0,2,3).reshape(-1,h,w) .to(device)
self.vis2d = batch['vis2d'] .view(bs,-1,h,w).permute(1,0,2,3).reshape(-1,h,w) .to(device)
self.dps = batch['dp'] .view(bs,-1,h,w).permute(1,0,2,3).reshape(-1,h,w) .to(device)
dpfd = 16
dpfs = 112
self.dp_feats = batch['dp_feat'] .view(bs,-1,dpfd,dpfs,dpfs).permute(1,0,2,3,4).reshape(-1,dpfd,dpfs,dpfs).to(device)
self.dp_bbox = batch['dp_bbox'] .view(bs,-1,4).permute(1,0,2).reshape(-1,4) .to(device)
if opts.use_embed and opts.ft_cse and (not self.is_warmup_pose):
self.dp_feats_mask = self.dp_feats.abs().sum(1)>0
self.csepre_feats = self.dp_feats.clone()
# unnormalized features
self.csenet_feats, self.dps = self.csenet(self.imgs, self.masks)
# for visualization
self.dps = self.dps * self.dp_feats_mask.float()
if self.progress > opts.ftcse_steps:
self.dp_feats = self.csenet_feats
else:
self.dp_feats = self.csenet_feats.detach()
self.dp_feats = F.normalize(self.dp_feats, 2,1)
self.rtk = batch['rtk'] .view(bs,-1,4,4).permute(1,0,2,3).reshape(-1,4,4) .to(device)
self.kaug = batch['kaug'] .view(bs,-1,4).permute(1,0,2).reshape(-1,4) .to(device)
self.frameid = batch['frameid'] .view(bs,-1).permute(1,0).reshape(-1).cpu()
self.dataid = batch['dataid'] .view(bs,-1).permute(1,0).reshape(-1).cpu()
self.frameid_sub = self.frameid.clone() # id within a video
self.embedid = self.frameid + self.data_offset[self.dataid.long()]
self.frameid = self.frameid + self.data_offset[self.dataid.long()]
self.errid = self.frameid # for err filter
self.rt_raw = self.rtk.clone()[:,:3]
# process silhouette
self.masks = (self.masks*self.vis2d)>0
self.masks = self.masks.float()
self.flow = batch['flow'].view(bs,-1,2,h,w).permute(1,0,2,3,4).reshape(-1,2,h,w).to(device)
self.occ = batch['occ'].view(bs,-1,h,w).permute(1,0,2,3).reshape(-1,h,w) .to(device)
self.lineid = None
def convert_root_pose(self):
"""
assumes self.{rtk, frameid, dp_feats, dps, masks, kaug} are set
produces the refined self.rtk
"""
opts = self.opts
bs = self.rtk.shape[0]
device = self.device
# scale initial poses
if self.use_cam:
self.rtk[:,:3,3] = self.rtk[:,:3,3] / self.obj_scale
else:
self.rtk[:,:3] = self.create_base_se3(bs, device)
# compute delta pose
if self.opts.root_opt:
if self.root_basis == 'cnn':
frame_code = self.dp_feats
elif self.root_basis == 'mlp' or self.root_basis == 'exp'\
or self.root_basis == 'expmlp':
frame_code = self.frameid.long().to(device)
else: print('error'); exit()
root_rts = self.nerf_root_rts(frame_code)
self.rtk = self.refine_rt(self.rtk, root_rts)
self.rtk[:,3,:] = self.ks_param[self.dataid.long()] #TODO kmat
@staticmethod
def refine_rt(rt_raw, root_rts):
"""
input: rt_raw representing the initial root poses (after scaling)
input: root_rts representing delta se3
output: current estimate of rtks for all frames
"""
rt_raw = rt_raw.clone()
root_rmat = root_rts[:,0,:9].view(-1,3,3)
root_tmat = root_rts[:,0,9:12]
rmat = rt_raw[:,:3,:3].clone()
tmat = rt_raw[:,:3,3].clone()
tmat = tmat + rmat.matmul(root_tmat[...,None])[...,0]
rmat = rmat.matmul(root_rmat)
rt_raw[:,:3,:3] = rmat
rt_raw[:,:3,3] = tmat
return rt_raw
def compute_rts(self):
"""
Assumptions
- use_cam
- use mlp or exp root pose
input: rt_raw representing the initial root poses
output: current estimate of rtks for all frames
"""
device = self.device
opts = self.opts
frameid = torch.Tensor(range(self.num_fr)).to(device).long()
if self.use_cam:
# scale initial poses
rt_raw = torch.Tensor(self.latest_vars['rt_raw']).to(device)
rt_raw[:,:3,3] = rt_raw[:,:3,3] / self.obj_scale
else:
rt_raw = self.create_base_se3(self.num_fr, device)
# compute mlp rts
if opts.root_opt:
if self.root_basis == 'mlp' or self.root_basis == 'exp'\
or self.root_basis == 'expmlp':
root_rts = self.nerf_root_rts(frameid)
else: print('error'); exit()
rt_raw = self.refine_rt(rt_raw, root_rts)
return rt_raw
def save_latest_vars(self):
"""
in: self.
{rtk, kaug, frameid, vis2d}
out: self.
{latest_vars}
these are only used in get_near_far_plane and compute_visibility
"""
rtk = self.rtk.clone().detach()
Kmat = K2mat(rtk[:,3])
Kaug = K2inv(self.kaug) # p = Kaug Kmat P
rtk[:,3] = mat2K(Kaug.matmul(Kmat))
# TODO don't want to save k at eval time (due to different intrinsics)
self.latest_vars['rtk'][self.frameid.long()] = rtk.cpu().numpy()
self.latest_vars['rt_raw'][self.frameid.long()] = self.rt_raw.cpu().numpy()
self.latest_vars['idk'][self.frameid.long()] = 1
def set_input(self, batch, load_line=False):
device = self.device
opts = self.opts
if load_line:
self.convert_line_input(batch)
else:
self.convert_batch_input(batch)
bs = self.imgs.shape[0]
self.convert_root_pose()
self.save_latest_vars()
if opts.lineload and self.training:
self.dp_feats = self.dp_feats
else:
self.dp_feats = resample_dp(self.dp_feats,
self.dp_bbox, self.kaug, self.img_size)
if self.training and self.opts.anneal_freq:
alpha = self.num_freqs * \
self.progress / (opts.warmup_steps)
#if alpha>self.alpha.data[0]:
self.alpha.data[0] = min(max(6, alpha),self.num_freqs) # alpha from 6 to 10
self.embedding_xyz.alpha = self.alpha.data[0]
self.embedding_dir.alpha = self.alpha.data[0]
return bs
|
banmo-main
|
nnutils/banmo.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import cv2, pdb, os, sys, numpy as np, torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
curr_dir = os.path.abspath(os.getcwd())
sys.path.insert(0, curr_dir)
detbase = './third_party/detectron2/'
sys.path.insert(0, '%s/projects/DensePose/' % detbase)
from detectron2.structures import Boxes
from detectron2.config import get_cfg
from detectron2.modeling import build_model
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.structures import Boxes
from densepose import add_densepose_config
from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
from utils.cselib import create_cse, run_cse
class CSENet(nn.Module):
def __init__(self, ishuman):
super(CSENet, self).__init__()
if ishuman:
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml' % detbase
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x/250713061/model_final_1d3314.pkl'
self.mesh_name = 'smpl_27554'
else:
config_path = '%s/projects/DensePose/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml' % detbase
weight_path = 'https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k/253498611/model_final_6d69b7.pkl'
self.mesh_name = 'sheep_5004'
self.net, self.embedder, self.mesh_vertex_embeddings = create_cse(config_path, weight_path)
def forward(self, img, msk):
bs = img.shape[0]
h = img.shape[2]
device = img.device
img = img * 255
img = torch.flip(img, [1])
pad = h
img = F.pad(img, (pad, pad, pad, pad))
msk = F.pad(msk, (pad, pad, pad, pad))
img = F.interpolate(img, (384, 384),mode='bilinear')
msk = F.interpolate(msk[:,None], (384, 384),mode='nearest')[:,0]
bboxes = []
for i in range(bs):
indices = torch.where(msk[i]>0);
xid = indices[1]; yid = indices[0]
bbox = [xid.min(), yid.min(),
xid.max(), yid.max()]
bbox = torch.Tensor([bbox]).to(device)
bbox = Boxes(bbox)
bboxes.append(bbox)
#dps = []
#feats = []
#for i in range(bs):
# img_sub = img[i].permute(1, 2, 0).cpu().numpy()
# msk_sub = msk[i].cpu().numpy()
# # put into a bigger image: out size 112/512
# dp, img_bgr, feat, feat_norm, bbox = run_cse((self.net), (self.embedder), (self.mesh_vertex_embeddings),
# img_sub,
# msk_sub,
# mesh_name=(self.mesh_name))
# pdb.set_trace()
# dp = torch.Tensor(dp).to(device)
# feat = torch.Tensor(feat).to(device)
# dps.append(dp)
# feats.append(feat)
#dps = torch.stack(dps, 0)
#feats = torch.stack(feats, 0)
#pdb.set_trace()
self.net.eval()
with torch.no_grad():
img = torch.stack([(x - self.net.pixel_mean) / self.net.pixel_std\
for x in img])
features = self.net.backbone(img)
features = [features[f] for f in self.net.roi_heads.in_features]
features = [self.net.roi_heads.decoder(features)]
features_dp = self.net.roi_heads.densepose_pooler(features, bboxes).detach()
densepose_head_outputs = self.net.roi_heads.densepose_head(features_dp)
densepose_predictor_outputs = self.net.roi_heads.densepose_predictor(densepose_head_outputs)
feats = densepose_predictor_outputs.embedding # (xxx,112,112)
with torch.no_grad():
dps = []
for i in range(bs):
assign_mat = squared_euclidean_distance_matrix(feats[i].view(16,-1).T,
self.mesh_vertex_embeddings[self.mesh_name])
dp = assign_mat.argmin(dim=1).view(112,112)
dps.append(dp)
dps = torch.stack(dps,0)
return feats, dps
|
banmo-main
|
nnutils/cse.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import torch
def image_grid(img, row, col):
"""
img: N,h,w,x
collage: h*row, w*col, x
"""
bs,h,w,c=img.shape
device = img.device
collage = torch.zeros(h*row, w*col, c).to(device)
for i in range(row):
for j in range(col):
collage[i*h:(i+1)*h,j*w:(j+1)*w] = img[i*col+j]
return collage
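# Illustrative usage sketch added for this dump (not part of the original
# banmo file): tiles a small random batch into a 2x2 collage. Shapes and
# values are hypothetical.
def _example_image_grid():
    imgs = torch.rand(4, 8, 8, 3)        # N,h,w,c
    collage = image_grid(imgs, 2, 2)     # -> h*2, w*2, c = 16,16,3
    return collage.shape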
|
banmo-main
|
nnutils/vis_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import pdb
import time
import cv2
import numpy as np
import trimesh
from pytorch3d import transforms
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.spatial.transform import Rotation as R
import sys
sys.path.insert(0, 'third_party')
from ext_utils.flowlib import warp_flow, cat_imgflo
def evaluate_mlp(model, xyz_embedded, embed_xyz=None, dir_embedded=None,
chunk=32*1024,
xyz=None,
code=None, sigma_only=False):
"""
embed_xyz: embedding function
chunk: number of points processed per iteration (the point-level chunk size divided by the number of bins)
"""
B,nbins,_ = xyz_embedded.shape
out_chunks = []
for i in range(0, B, chunk):
embedded = xyz_embedded[i:i+chunk]
if embed_xyz is not None:
embedded = embed_xyz(embedded)
if dir_embedded is not None:
embedded = torch.cat([embedded,
dir_embedded[i:i+chunk]], -1)
if code is not None:
code_chunk = code[i:i+chunk]
if code_chunk.dim() == 2:
code_chunk = code_chunk[:,None]
code_chunk = code_chunk.repeat(1,nbins,1)
embedded = torch.cat([embedded,code_chunk], -1)
if xyz is not None:
xyz_chunk = xyz[i:i+chunk]
else: xyz_chunk = None
out_chunks += [model(embedded, sigma_only=sigma_only, xyz=xyz_chunk)]
out = torch.cat(out_chunks, 0)
return out
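# Illustrative sketch (an addition, not original banmo code): evaluate_mlp
# expects a model whose forward takes (embedded, sigma_only=..., xyz=...).
# ToyMLP below is a hypothetical stand-in with that interface, used only to
# show the chunked call pattern on pre-embedded inputs.
def _example_evaluate_mlp():
    class ToyMLP(nn.Module):
        def __init__(self, in_dim=6, out_dim=4):
            super().__init__()
            self.fc = nn.Linear(in_dim, out_dim)
        def forward(self, x, sigma_only=False, xyz=None):
            out = self.fc(x)
            return out[..., :1] if sigma_only else out
    model = ToyMLP()
    xyz_embedded = torch.rand(10, 16, 6)          # B, nbins, embed_dim
    out = evaluate_mlp(model, xyz_embedded, chunk=64)
    return out.shape                              # (10, 16, 4)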
def bone_transform(bones_in, rts, is_vec=False):
"""
bones_in: 1,B,10 - B gaussian ellipsoids of bone coordinates
rts: ...,B,3,4 - B rigid transforms
rts are applied to bone coordinate transforms (left multiply)
is_vec: whether rts are stored as r1...9,t1...3 vector form
"""
B = bones_in.shape[-2]
bones = bones_in.view(-1,B,10).clone()
if is_vec:
rts = rts.view(-1,B,12)
else:
rts = rts.view(-1,B,3,4)
bs = rts.shape[0]
center = bones[:,:,:3]
orient = bones[:,:,3:7] # real first
scale = bones[:,:,7:10]
if is_vec:
Rmat = rts[:,:,:9].view(-1,B,3,3)
Tmat = rts[:,:,9:12].view(-1,B,3,1)
else:
Rmat = rts[:,:,:3,:3]
Tmat = rts[:,:,:3,3:4]
# move bone coordinates (left multiply)
center = Rmat.matmul(center[...,None])[...,0]+Tmat[...,0]
Rquat = transforms.matrix_to_quaternion(Rmat)
orient = transforms.quaternion_multiply(Rquat, orient)
scale = scale.repeat(bs,1,1)
bones = torch.cat([center,orient,scale],-1)
return bones
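# Usage sketch (added, hypothetical values): applying identity rigid
# transforms to a set of rest bones should leave them unchanged.
def _example_bone_transform():
    B = 2
    bones = torch.zeros(1, B, 10)
    bones[..., 3] = 1.                            # identity quaternion (real first)
    rts = torch.eye(4)[:3].repeat(1, B, 1, 1)     # B identity rigid transforms
    moved = bone_transform(bones, rts)
    return torch.allclose(moved, bones, atol=1e-6)    # True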
def rtmat_invert(Rmat, Tmat):
"""
Rmat: ...,3,3 - rotations
Tmat: ...,3 - translations
"""
rts = torch.cat([Rmat, Tmat[...,None]],-1)
rts_i = rts_invert(rts)
Rmat_i = rts_i[...,:3,:3] # bs, B, 3,3
Tmat_i = rts_i[...,:3,3]
return Rmat_i, Tmat_i
def rtk_invert(rtk_in, B):
"""
rtk_in: ... (rot 1...9, trans 1...3)
"""
rtk_shape = rtk_in.shape
rtk_in = rtk_in.view(-1,B,12)# B,12
rmat=rtk_in[:,:,:9]
rmat=rmat.view(-1,B,3,3)
tmat= rtk_in[:,:,9:12]
rts_fw = torch.cat([rmat,tmat[...,None]],-1)
rts_fw = rts_fw.view(-1,B,3,4)
rts_bw = rts_invert(rts_fw)
rvec = rts_bw[...,:3,:3].reshape(-1,9)
tvec = rts_bw[...,:3,3] .reshape(-1,3)
rtk = torch.cat([rvec,tvec],-1).view(rtk_shape)
return rtk
def rts_invert(rts_in):
"""
rts: ...,3,4 - B rigid transforms
"""
rts = rts_in.view(-1,3,4).clone()
Rmat = rts[:,:3,:3] # bs, B, 3,3
Tmat = rts[:,:3,3:]
Rmat_i=Rmat.permute(0,2,1)
Tmat_i=-Rmat_i.matmul(Tmat)
rts_i = torch.cat([Rmat_i, Tmat_i],-1)
rts_i = rts_i.view(rts_in.shape)
return rts_i
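# Usage sketch (added, hypothetical values): a random rigid transform composed
# with its inverse from rts_invert should give the identity rotation.
def _example_rts_invert():
    rot = transforms.axis_angle_to_matrix(torch.rand(1, 3))
    trans = torch.rand(1, 3, 1)
    rts = torch.cat([rot, trans], -1)             # 1,3,4
    rts_i = rts_invert(rts)
    composed = rot.matmul(rts_i[:, :3, :3])
    return torch.allclose(composed, torch.eye(3)[None], atol=1e-5)   # True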
def rtk_to_4x4(rtk):
"""
rtk: ...,12
"""
device = rtk.device
bs = rtk.shape[0]
zero_one = torch.Tensor([[0,0,0,1]]).to(device).repeat(bs,1)
rmat=rtk[:,:9]
rmat=rmat.view(-1,3,3)
tmat=rtk[:,9:12]
rts = torch.cat([rmat,tmat[...,None]],-1)
rts = torch.cat([rts,zero_one[:,None]],1)
return rts
def rtk_compose(rtk1, rtk2):
"""
rtk ...
"""
rtk_shape = rtk1.shape
rtk1 = rtk1.view(-1,12)# ...,12
rtk2 = rtk2.view(-1,12)# ...,12
rts1 = rtk_to_4x4(rtk1)
rts2 = rtk_to_4x4(rtk2)
rts = rts1.matmul(rts2)
rvec = rts[...,:3,:3].reshape(-1,9)
tvec = rts[...,:3,3].reshape(-1,3)
rtk = torch.cat([rvec,tvec],-1).view(rtk_shape)
return rtk
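# Usage sketch (added, hypothetical values): composing a vectorized transform
# with its rtk_invert counterpart should recover identity rotations.
def _example_rtk_compose():
    B = 2
    rot = transforms.axis_angle_to_matrix(torch.rand(B, 3)).reshape(-1, 9)
    trans = torch.rand(B, 3)
    rtk = torch.cat([rot, trans], -1)             # B,12
    ident = rtk_compose(rtk, rtk_invert(rtk, B))
    eye_vec = torch.eye(3).reshape(-1, 9).repeat(B, 1)
    return torch.allclose(ident[:, :9], eye_vec, atol=1e-5)          # True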
def vec_to_sim3(vec):
"""
vec: ...,10
center: ...,3
orient: ...,3,3
scale: ...,3
"""
center = vec[...,:3]
orient = vec[...,3:7] # real first
orient = F.normalize(orient, 2,-1)
orient = transforms.quaternion_to_matrix(orient) # real first
scale = vec[...,7:10].exp()
return center, orient, scale
def gauss_mlp_skinning(xyz, embedding_xyz, bones,
pose_code, nerf_skin, skin_aux=None):
"""
xyz: N_rays, ndepth, 3
bones: ... nbones, 10
pose_code: ...,1, nchannel
"""
N_rays = xyz.shape[0]
#TODO hacky way to make code compatible with noqueryfw
if pose_code.dim() == 2 and pose_code.shape[0]!=N_rays:
pose_code = pose_code[None].repeat(N_rays, 1,1)
xyz_embedded = embedding_xyz(xyz)
dskin = mlp_skinning(nerf_skin, pose_code, xyz_embedded)
skin = skinning(bones, xyz, dskin, skin_aux=skin_aux) # bs, N, B
return skin
def mlp_skinning(mlp, code, pts_embed):
"""
code: bs, D - N D-dimensional pose code
pts_embed: bs,N,x - N point positional embeddings
dskin: bs,N,B - delta skinning matrix
"""
if mlp is None:
dskin = None
else:
dskin = evaluate_mlp(mlp, pts_embed, code=code, chunk=8*1024)
return dskin
def axis_rotate(orient, mdis):
bs,N,B,_,_ = mdis.shape
mdis = (orient * mdis.view(bs,N,B,1,3)).sum(4)[...,None] # faster
#mdis = orient.matmul(mdis) # bs,N,B,3,1 # slower
return mdis
def skinning_chunk(bones, pts, dskin=None, skin_aux=None):
#def skinning(bones, pts, dskin=None, skin_aux=None):
"""
bone: bs,B,10 - B gaussian ellipsoids
pts: bs,N,3 - N 3d points, usually N=num points per ray, b~=2034
skin: bs,N,B - skinning matrix
"""
device = pts.device
log_scale= skin_aux[0]
w_const = skin_aux[1]
bs,N,_ = pts.shape
B = bones.shape[-2]
if bones.dim()==2: bones = bones[None].repeat(bs,1,1)
bones = bones.view(-1,B,10)
center, orient, scale = vec_to_sim3(bones)
orient = orient.permute(0,1,3,2) # transpose R
# mahalanobis distance [(p-v)^TR^T]S[R(p-v)]
# transform a vector to the local coordinate
mdis = center.view(bs,1,B,3) - pts.view(bs,N,1,3) # bs,N,B,3
mdis = axis_rotate(orient.view(bs,1,B,3,3), mdis[...,None])
mdis = mdis[...,0]
mdis = scale.view(bs,1,B,3) * mdis.pow(2)
# log_scale (being optimized) controls the temperature of the skinning weight softmax
# multiply 1000 to make the weights more concentrated initially
inv_temperature = 1000 * log_scale.exp()
mdis = (-inv_temperature * mdis.sum(3)) # bs,N,B
if dskin is not None:
mdis = mdis+dskin
skin = mdis.softmax(2)
return skin
def skinning(bones, pts, dskin=None, skin_aux=None):
"""
bone: ...,B,10 - B gaussian ellipsoids
pts: bs,N,3 - N 3d points
skin: bs,N,B - skinning matrix
"""
chunk=4096
bs,N,_ = pts.shape
B = bones.shape[-2]
if bones.dim()==2: bones = bones[None].repeat(bs,1,1)
bones = bones.view(-1,B,10)
skin = []
for i in range(0,bs,chunk):
if dskin is None:
dskin_chunk = None
else:
dskin_chunk = dskin[i:i+chunk]
skin_chunk = skinning_chunk(bones[i:i+chunk], pts[i:i+chunk], \
dskin=dskin_chunk, skin_aux=skin_aux)
skin.append( skin_chunk )
skin = torch.cat(skin,0)
return skin
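# Usage sketch (added, hypothetical shapes): compute soft skinning weights of
# random points against B rest bones placed at the origin. skin_aux packs
# [log_scale, w_const] as read by skinning_chunk.
def _example_skinning():
    bs, N, B = 2, 64, 3
    bones = torch.zeros(B, 10)
    bones[:, 3] = 1.                              # identity orientations
    pts = torch.rand(bs, N, 3)
    skin_aux = torch.tensor([0., 1.])             # log_scale=0, w_const=1
    skin = skinning(bones, pts, skin_aux=skin_aux)
    return skin.shape                             # (bs, N, B); rows sum to 1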
def blend_skinning_chunk(bones, rts, skin, pts):
#def blend_skinning(bones, rts, skin, pts):
"""
bone: bs,B,10 - B gaussian ellipsoids
rts: bs,B,3,4 - B rigid transforms, applied to bone coordinates (points attached to bones in world coords)
pts: bs,N,3 - N 3d points
skin: bs,N,B - skinning matrix
apply rts to bone coordinates, while computing blending globally
"""
B = rts.shape[-3]
N = pts.shape[-2]
pts = pts.view(-1,N,3)
rts = rts.view(-1,B,3,4)
Rmat = rts[:,:,:3,:3] # bs, B, 3,3
Tmat = rts[:,:,:3,3]
device = Tmat.device
## convert from bone to root transforms
#bones = bones.view(-1,B,10)
#bs = Rmat.shape[0]
#center = bones[:,:,:3]
#orient = bones[:,:,3:7] # real first
#orient = F.normalize(orient, 2,-1)
#orient = transforms.quaternion_to_matrix(orient) # real first
#gmat = torch.eye(4)[None,None].repeat(bs, B, 1, 1).to(device)
#
## root to bone
#gmat_r2b = gmat.clone()
#gmat_r2b[:,:,:3,:3] = orient.permute(0,1,3,2)
#gmat_r2b[:,:,:3,3] = -orient.permute(0,1,3,2).matmul(center[...,None])[...,0]
## bone to root
#gmat_b2r = gmat.clone()
#gmat_b2r[:,:,:3,:3] = orient
#gmat_b2r[:,:,:3,3] = center
## bone to bone
#gmat_b = gmat.clone()
#gmat_b[:,:,:3,:3] = Rmat
#gmat_b[:,:,:3,3] = Tmat
#gmat = gmat_b2r.matmul(gmat_b.matmul(gmat_r2b))
#Rmat = gmat[:,:,:3,:3]
#Tmat = gmat[:,:,:3,3]
# Gi=sum(wbGb), V=RV+T
Rmat_w = (skin[...,None,None] * Rmat[:,None]).sum(2) # bs,N,B,3
Tmat_w = (skin[...,None] * Tmat[:,None]).sum(2) # bs,N,B,3
pts = Rmat_w.matmul(pts[...,None]) + Tmat_w[...,None]
pts = pts[...,0]
return pts
def blend_skinning(bones, rts, skin, pts):
"""
bone: bs,B,10 - B gaussian ellipsoids
rts: bs,B,3,4 - B rigid transforms, applied to bone coordinates
pts: bs,N,3 - N 3d points
skin: bs,N,B - skinning matrix
apply rts to bone coordinates, while computing blending globally
"""
chunk=4096
B = rts.shape[-3]
N = pts.shape[-2]
bones = bones.view(-1,B,10)
pts = pts.view(-1,N,3)
rts = rts.view(-1,B,3,4)
bs = pts.shape[0]
pts_out = []
for i in range(0,bs,chunk):
pts_chunk = blend_skinning_chunk(bones[i:i+chunk], rts[i:i+chunk],
skin[i:i+chunk], pts[i:i+chunk])
pts_out.append(pts_chunk)
pts = torch.cat(pts_out,0)
return pts
def lbs(bones, rts_fw, skin, xyz_in, backward=True):
"""
bones: bs,B,10 - B gaussian ellipsoids indicating rest bone coordinates
rts_fw: bs,B,12 - B rigid transforms, applied to the rest bones
xyz_in: bs,N,3 - N 3d points after transforms in the root coordinates
"""
B = bones.shape[-2]
N = xyz_in.shape[-2]
bs = rts_fw.shape[0]
bones = bones.view(-1,B,10)
xyz_in = xyz_in.view(-1,N,3)
rts_fw = rts_fw.view(-1,B,12)# B,12
rmat=rts_fw[:,:,:9]
rmat=rmat.view(bs,B,3,3)
tmat= rts_fw[:,:,9:12]
rts_fw = torch.cat([rmat,tmat[...,None]],-1)
rts_fw = rts_fw.view(-1,B,3,4)
if backward:
bones_dfm = bone_transform(bones, rts_fw) # bone coordinates after deform
rts_bw = rts_invert(rts_fw)
xyz = blend_skinning(bones_dfm, rts_bw, skin, xyz_in)
else:
xyz = blend_skinning(bones.repeat(bs,1,1), rts_fw, skin, xyz_in)
bones_dfm = bone_transform(bones, rts_fw) # bone coordinates after deform
return xyz, bones_dfm
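# Usage sketch (added, hypothetical values): with identity forward bone
# transforms, linear blend skinning should return the input points unchanged.
def _example_lbs():
    bs, N, B = 1, 16, 2
    bones = torch.zeros(B, 10)
    bones[:, 3] = 1.                              # identity bone orientations
    xyz = torch.rand(bs, N, 3)
    skin = torch.full((bs, N, B), 1. / B)         # uniform skinning weights
    rts_fw = torch.zeros(bs, B, 12)
    rts_fw[..., :9] = torch.eye(3).reshape(9)     # identity rotations, zero translations
    xyz_out, bones_dfm = lbs(bones, rts_fw, skin, xyz)
    return torch.allclose(xyz_out, xyz, atol=1e-5)    # True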
def obj_to_cam(in_verts, Rmat, Tmat):
"""
verts: ...,N,3
Rmat: ...,3,3
Tmat: ...,3
"""
verts = in_verts.clone()
if verts.dim()==2: verts=verts[None]
verts = verts.view(-1,verts.shape[1],3)
Rmat = Rmat.view(-1,3,3).permute(0,2,1) # left multiply
Tmat = Tmat.view(-1,1,3)
verts = verts.matmul(Rmat) + Tmat
verts = verts.reshape(in_verts.shape)
return verts
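# Usage sketch (added, hypothetical values): an identity rotation plus a pure
# z-translation should simply shift the depth of every vertex.
def _example_obj_to_cam():
    verts = torch.rand(1, 32, 3)
    Rmat = torch.eye(3)[None]
    Tmat = torch.tensor([[0., 0., 2.]])
    verts_cam = obj_to_cam(verts, Rmat, Tmat)
    return torch.allclose(verts_cam[..., 2], verts[..., 2] + 2.)      # True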
def obj2cam_np(pts, Rmat, Tmat):
"""
a wrapper for numpy array
pts: ..., 3
Rmat: 1,3,3
Tmat: 1,3
"""
pts_shape = pts.shape
pts = torch.Tensor(pts).cuda().reshape(1,-1,3)
pts = obj_to_cam(pts, Rmat,Tmat)
return pts.view(pts_shape).cpu().numpy()
def K2mat(K):
"""
K: ...,4
"""
K = K.view(-1,4)
device = K.device
bs = K.shape[0]
Kmat = torch.zeros(bs, 3, 3, device=device)
Kmat[:,0,0] = K[:,0]
Kmat[:,1,1] = K[:,1]
Kmat[:,0,2] = K[:,2]
Kmat[:,1,2] = K[:,3]
Kmat[:,2,2] = 1
return Kmat
def mat2K(Kmat):
"""
Kmat: ...,3,3
"""
shape=Kmat.shape[:-2]
Kmat = Kmat.view(-1,3,3)
device = Kmat.device
bs = Kmat.shape[0]
K = torch.zeros(bs, 4, device=device)
K[:,0] = Kmat[:,0,0]
K[:,1] = Kmat[:,1,1]
K[:,2] = Kmat[:,0,2]
K[:,3] = Kmat[:,1,2]
K = K.view(shape+(4,))
return K
def Kmatinv(Kmat):
"""
Kmat: ...,3,3
"""
K = mat2K(Kmat)
Kmatinv = K2inv(K)
Kmatinv = Kmatinv.view(Kmat.shape)
return Kmatinv
def K2inv(K):
"""
K: ...,4
"""
K = K.view(-1,4)
device = K.device
bs = K.shape[0]
Kmat = torch.zeros(bs, 3, 3, device=device)
Kmat[:,0,0] = 1./K[:,0]
Kmat[:,1,1] = 1./K[:,1]
Kmat[:,0,2] = -K[:,2]/K[:,0]
Kmat[:,1,2] = -K[:,3]/K[:,1]
Kmat[:,2,2] = 1
return Kmat
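# Usage sketch (added, hypothetical intrinsics): K2inv builds the inverse of
# the matrix produced by K2mat, so their product is the identity.
def _example_K2inv():
    K = torch.tensor([[500., 500., 256., 256.]])  # fx, fy, px, py
    prod = K2inv(K).matmul(K2mat(K))
    return torch.allclose(prod, torch.eye(3)[None], atol=1e-4)        # True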
def pinhole_cam(in_verts, K):
"""
in_verts: ...,N,3
K: ...,4
verts: ...,N,3 in (x,y,Z)
"""
verts = in_verts.clone()
verts = verts.view(-1,verts.shape[1],3)
K = K.view(-1,4)
Kmat = K2mat(K)
Kmat = Kmat.permute(0,2,1)
verts = verts.matmul(Kmat)
verts_z = verts[:,:,2:3]
verts_xy = verts[:,:,:2] / (1e-6+verts_z) # deal with neg z
verts = torch.cat([verts_xy,verts_z],-1)
verts = verts.reshape(in_verts.shape)
return verts
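# Usage sketch (added, hypothetical values): project two camera-space points
# with a toy intrinsics vector; x,y are divided by depth, z is kept.
def _example_pinhole_cam():
    verts = torch.tensor([[[0., 0., 2.], [1., 1., 2.]]])   # 1,N,3
    K = torch.tensor([[2., 2., 0., 0.]])                   # fx, fy, px, py
    return pinhole_cam(verts, K)    # approx [[0,0,2],[1,1,2]]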
def render_color(renderer, in_verts, faces, colors, texture_type='vertex'):
"""
verts in ndc
in_verts: ...,N,3/4
faces: ...,N,3
rendered: ...,4,...
"""
import soft_renderer as sr
verts = in_verts.clone()
verts = verts.view(-1,verts.shape[-2],3)
faces = faces.view(-1,faces.shape[-2],3)
if texture_type=='vertex': colors = colors.view(-1,colors.shape[-2],3)
elif texture_type=='surface': colors = colors.view(-1,colors.shape[1],colors.shape[2],3)
device=verts.device
offset = torch.Tensor( renderer.transform.transformer._eye).to(device)[np.newaxis,np.newaxis]
verts_pre = verts[:,:,:3]-offset
verts_pre[:,:,1] = -1*verts_pre[:,:,1] # pre-flip
rendered = renderer.render_mesh(sr.Mesh(verts_pre,faces,textures=colors,texture_type=texture_type))
return rendered
def render_flow(renderer, verts, faces, verts_n):
"""
rasterization
verts in ndc
verts: ...,N,3/4
verts_n: ...,N,3/4
faces: ...,N,3
"""
verts = verts.view(-1,verts.shape[1],3)
verts_n = verts_n.view(-1,verts_n.shape[1],3)
faces = faces.view(-1,faces.shape[1],3)
device=verts.device
rendered_ndc_n = render_color(renderer, verts, faces, verts_n)
_,_,h,w = rendered_ndc_n.shape
rendered_sil = rendered_ndc_n[:,-1]
ndc = np.meshgrid(range(w), range(h))
ndc = torch.Tensor(ndc).to(device)[None]
ndc[:,0] = ndc[:,0]*2 / (w-1) - 1
ndc[:,1] = ndc[:,1]*2 / (h-1) - 1
flow = rendered_ndc_n[:,:2] - ndc
flow = flow.permute(0,2,3,1) # x,h,w,2
flow = torch.cat([flow, rendered_sil[...,None]],-1)
flow[rendered_sil<1]=0.
flow[...,-1]=0. # discard the last channel
return flow
def force_type(varlist):
for i in range(len(varlist)):
varlist[i] = varlist[i].type(varlist[0].dtype)
return varlist
def tensor2array(tdict):
adict={}
for k,v in tdict.items():
adict[k] = v.detach().cpu().numpy()
return adict
def array2tensor(adict, device='cpu'):
tdict={}
for k,v in adict.items():
try:
tdict[k] = torch.Tensor(v)
if device != 'cpu': tdict[k] = tdict[k].to(device)
except: pass # trimesh object
return tdict
def raycast(xys, Rmat, Tmat, Kinv, near_far):
"""
assuming xys and Rmat have same num of bs
xys: bs, N, 3
Rmat:bs, ...,3,3
Tmat:bs, ...,3, camera to root coord transform
Kinv:bs, ...,3,3
near_far:bs,2
"""
Rmat, Tmat, Kinv, xys = force_type([Rmat, Tmat, Kinv, xys])
Rmat = Rmat.view(-1,3,3)
Tmat = Tmat.view(-1,1,3)
Kinv = Kinv.view(-1,3,3)
bs,nsample,_ = xys.shape
device = Rmat.device
xy1s = torch.cat([xys, torch.ones_like(xys[:,:,:1])],2)
xyz3d = xy1s.matmul(Kinv.permute(0,2,1))
ray_directions = xyz3d.matmul(Rmat) # transpose -> right multiply
ray_origins = -Tmat.matmul(Rmat) # transpose -> right multiply
if near_far is not None:
znear= (torch.ones(bs,nsample,1).to(device) * near_far[:,0,None,None])
zfar = (torch.ones(bs,nsample,1).to(device) * near_far[:,1,None,None])
else:
lbound, ubound=[-1.5,1.5]
znear= Tmat[:,:,-1:].repeat(1,nsample,1)+lbound
zfar = Tmat[:,:,-1:].repeat(1,nsample,1)+ubound
znear[znear<1e-5]=1e-5
ray_origins = ray_origins.repeat(1,nsample,1)
rmat_vec = Rmat.reshape(-1,1,9)
tmat_vec = Tmat.reshape(-1,1,3)
kinv_vec = Kinv.reshape(-1,1,9)
rtk_vec = torch.cat([rmat_vec, tmat_vec, kinv_vec],-1) # x,21
rtk_vec = rtk_vec.repeat(1,nsample,1)
rays={'rays_o': ray_origins,
'rays_d': ray_directions,
'near': znear,
'far': zfar,
'rtk_vec': rtk_vec,
'xys': xys,
'nsample': nsample,
'bs': bs,
}
return rays
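# Usage sketch (added, hypothetical camera): cast rays through a handful of
# pixels with an identity camera pose and a fixed near/far range.
def _example_raycast():
    bs, ns = 1, 8
    xys = torch.rand(bs, ns, 2) * 64
    Rmat = torch.eye(3)[None]
    Tmat = torch.zeros(bs, 1, 3)
    Kinv = K2inv(torch.tensor([[100., 100., 32., 32.]]))
    near_far = torch.tensor([[0.5, 2.0]])
    rays = raycast(xys, Rmat, Tmat, Kinv, near_far)
    return rays['rays_o'].shape, rays['rays_d'].shape      # (1,8,3), (1,8,3)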
def sample_xy(img_size, bs, nsample, device, return_all=False, lineid=None):
"""
rand_inds: bs, ns
xys: bs, ns, 2
"""
xygrid = np.meshgrid(range(img_size), range(img_size)) # w,h->hxw
xygrid = torch.Tensor(xygrid).to(device) # (x,y)
xygrid = xygrid.permute(1,2,0).reshape(1,-1,2) # 1,..., 2
if return_all:
xygrid = xygrid.repeat(bs,1,1) # bs,..., 2
nsample = xygrid.shape[1]
rand_inds=torch.Tensor(range(nsample))
rand_inds=rand_inds[None].repeat(bs,1)
xys = xygrid
else:
if lineid is None:
probs = torch.ones(img_size**2).to(device) # 512*512 vs 128*64
rand_inds = torch.multinomial(probs, bs*nsample, replacement=False)
rand_inds = rand_inds.view(bs,nsample)
xys = torch.stack([xygrid[0][rand_inds[i]] for i in range(bs)],0) # bs,ns,2
else:
probs = torch.ones(img_size).to(device) # 512*512 vs 128*64
rand_inds = torch.multinomial(probs, bs*nsample, replacement=True)
rand_inds = rand_inds.view(bs,nsample)
xys = torch.stack([xygrid[0][rand_inds[i]] for i in range(bs)],0) # bs,ns,2
xys[...,1] = xys[...,1] + lineid[:,None]
rand_inds = rand_inds.long()
return rand_inds, xys
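# Usage sketch (added, hypothetical sizes): draw 128 random pixel coordinates
# per batch element from a 64x64 image without replacement.
def _example_sample_xy():
    rand_inds, xys = sample_xy(img_size=64, bs=2, nsample=128,
                               device='cpu', return_all=False)
    return rand_inds.shape, xys.shape                      # (2,128), (2,128,2)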
def chunk_rays(rays,start,delta):
"""
rays: a dictionary
"""
rays_chunk = {}
for k,v in rays.items():
if torch.is_tensor(v):
v = v.view(-1, v.shape[-1])
rays_chunk[k] = v[start:start+delta]
return rays_chunk
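# Usage sketch (added, hypothetical shapes): chunk_rays flattens every tensor
# in the ray dict to (bs*nsample, -1) and slices out one chunk; non-tensor
# entries are dropped.
def _example_chunk_rays():
    rays = {'rays_o': torch.rand(2, 8, 3), 'near': torch.rand(2, 8, 1), 'bs': 2}
    chunk = chunk_rays(rays, start=0, delta=4)
    return chunk['rays_o'].shape                           # (4, 3)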
def generate_bones(num_bones_x, num_bones, bound, device):
"""
num_bones_x: bones along one direction
bones: num_bones,10 (center 3, orientation quaternion 4, log-scale 3)
"""
center = torch.linspace(-bound, bound, num_bones_x).to(device)
center =torch.meshgrid(center, center, center)
center = torch.stack(center,0).permute(1,2,3,0).reshape(-1,3)
center = center[:num_bones]
orient = torch.Tensor([[1,0,0,0]]).to(device)
orient = orient.repeat(num_bones,1)
scale = torch.zeros(num_bones,3).to(device)
bones = torch.cat([center, orient, scale],-1)
return bones
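# Usage sketch (added, hypothetical arguments): lay out up to 3**3 bones on a
# regular grid inside [-0.5, 0.5]^3 and keep the first 25.
def _example_generate_bones():
    bones = generate_bones(num_bones_x=3, num_bones=25, bound=0.5, device='cpu')
    return bones.shape     # (25, 10): center(3) + quaternion(4) + log-scale(3)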
def reinit_bones(model, mesh, num_bones):
"""
update the data of bones and nerf_body_rts[1].rgb without add new parameters
num_bones: number of bones on the surface
mesh: trimesh
warning: ddp does not support adding/deleting parameters after construction
"""
#TODO find another way to add/delete bones
from kmeans_pytorch import kmeans
device = model.device
points = torch.Tensor(mesh.vertices).to(device)
rthead = model.nerf_body_rts[1].rgb
# reinit
num_in = rthead[0].weight.shape[1]
rthead = nn.Sequential(nn.Linear(num_in, 6*num_bones)).to(device)
torch.nn.init.xavier_uniform_(rthead[0].weight, gain=0.5)
torch.nn.init.zeros_(rthead[0].bias)
if points.shape[0]<100:
bound = model.latest_vars['obj_bound']
bound = torch.Tensor(bound)[None]
center = torch.rand(num_bones, 3) * bound*2 - bound
else:
_, center = kmeans(X=points, num_clusters=num_bones, iter_limit=100,
tqdm_flag=False, distance='euclidean', device=device)
center=center.to(device)
orient = torch.Tensor([[1,0,0,0]]).to(device)
orient = orient.repeat(num_bones,1)
scale = torch.zeros(num_bones,3).to(device)
bones = torch.cat([center, orient, scale],-1)
model.num_bones = num_bones
num_output = model.nerf_body_rts[1].num_output
bias_reinit = rthead[0].bias.data
weight_reinit=rthead[0].weight.data
model.nerf_body_rts[1].rgb[0].bias.data[:num_bones*num_output] = bias_reinit
model.nerf_body_rts[1].rgb[0].weight.data[:num_bones*num_output] = weight_reinit
bones,_ = correct_bones(model, bones, inverse=True)
model.bones.data[:num_bones] = bones
model.nerf_models['bones'] = model.bones
return
def correct_bones(model, bones_rst, inverse=False):
# bones=>bones_rst
bones_rst = bones_rst.clone()
rest_pose_code = model.rest_pose_code
rest_pose_code = rest_pose_code(torch.Tensor([0]).long().to(model.device))
rts_head = model.nerf_body_rts[1]
bone_rts_rst = rts_head(rest_pose_code)[0] # 1,B*12
if inverse:
bone_rts_rst = rtk_invert(bone_rts_rst, model.opts.num_bones)
bones_rst = bone_transform(bones_rst, bone_rts_rst, is_vec=True)[0]
return bones_rst, bone_rts_rst
def correct_rest_pose(opts, bone_rts_fw, bone_rts_rst):
# delta rts
bone_rts_fw = bone_rts_fw.clone()
rts_shape = bone_rts_fw.shape
bone_rts_rst_inv = rtk_invert(bone_rts_rst, opts.num_bones)
bone_rts_rst_inv = bone_rts_rst_inv.repeat(rts_shape[0],rts_shape[1],1)
bone_rts_fw = rtk_compose(bone_rts_rst_inv, bone_rts_fw)
return bone_rts_fw
def warp_bw(opts, model, rt_dict, query_xyz_chunk, embedid):
"""
only used in mesh extraction
embedid: embedding id
"""
chunk = query_xyz_chunk.shape[0]
query_time = torch.ones(chunk,1).to(model.device)*embedid
query_time = query_time.long()
if opts.flowbw:
# flowbw
xyz_embedded = model.embedding_xyz(query_xyz_chunk)
time_embedded = model.pose_code(query_time)[:,0]
xyztime_embedded = torch.cat([xyz_embedded, time_embedded],1)
flowbw_chunk = model.nerf_flowbw(xyztime_embedded, xyz=query_xyz_chunk)
query_xyz_chunk += flowbw_chunk
elif opts.lbs:
# backward skinning
bones_rst = model.bones
bone_rts_fw = model.nerf_body_rts(query_time)
# update bones
bones_rst, bone_rts_rst = correct_bones(model, bones_rst)
bone_rts_fw = correct_rest_pose(opts, bone_rts_fw, bone_rts_rst)
query_xyz_chunk = query_xyz_chunk[:,None]
if opts.nerf_skin:
nerf_skin = model.nerf_skin
else:
nerf_skin = None
time_embedded = model.pose_code(query_time)
bones_dfm = bone_transform(bones_rst, bone_rts_fw, is_vec=True)
skin_backward = gauss_mlp_skinning(query_xyz_chunk, model.embedding_xyz,
bones_dfm, time_embedded, nerf_skin, skin_aux=model.skin_aux )
query_xyz_chunk,bones_dfm = lbs(bones_rst,
bone_rts_fw,
skin_backward,
query_xyz_chunk)
query_xyz_chunk = query_xyz_chunk[:,0]
rt_dict['bones'] = bones_dfm
return query_xyz_chunk, rt_dict
def warp_fw(opts, model, rt_dict, vertices, embedid):
"""
only used in mesh extraction
"""
num_pts = vertices.shape[0]
query_time = torch.ones(num_pts,1).long().to(model.device)*embedid
pts_can=torch.Tensor(vertices).to(model.device)
if opts.flowbw:
# forward flow
pts_can_embedded = model.embedding_xyz(pts_can)
time_embedded = model.pose_code(query_time)[:,0]
ptstime_embedded = torch.cat([pts_can_embedded, time_embedded],1)
pts_dfm = pts_can + model.nerf_flowfw(ptstime_embedded, xyz=pts_can)
elif opts.lbs:
# forward skinning
pts_can = pts_can[:,None]
bones_rst = model.bones
bone_rts_fw = model.nerf_body_rts(query_time)
bones_rst, bone_rts_rst = correct_bones(model, bones_rst)
bone_rts_fw = correct_rest_pose(opts, bone_rts_fw, bone_rts_rst)
if opts.nerf_skin:
nerf_skin = model.nerf_skin
else:
nerf_skin = None
rest_pose_code = model.rest_pose_code
rest_pose_code = rest_pose_code(torch.Tensor([0]).long().to(bones_rst.device))
skin_forward = gauss_mlp_skinning(pts_can, model.embedding_xyz, bones_rst,
rest_pose_code, nerf_skin, skin_aux=model.skin_aux)
pts_dfm,bones_dfm = lbs(bones_rst, bone_rts_fw, skin_forward,
pts_can,backward=False)
pts_dfm = pts_dfm[:,0]
rt_dict['bones'] = bones_dfm
vertices = pts_dfm.cpu().numpy()
return vertices, rt_dict
def canonical2ndc(model, dp_canonical_pts, rtk, kaug, embedid):
"""
dp_canonical_pts: 5004,3, pts in the canonical space of each video
dp_px: bs, 5004, 3
"""
Rmat = rtk[:,:3,:3]
Tmat = rtk[:,:3,3]
Kmat = K2mat(rtk[:,3,:])
Kaug = K2inv(kaug) # p = Kaug Kmat P
Kinv = Kmatinv(Kaug.matmul(Kmat))
K = mat2K(Kmatinv(Kinv))
bs = Kinv.shape[0]
npts = dp_canonical_pts.shape[0]
# projection
dp_canonical_pts = dp_canonical_pts[None]
if model.opts.flowbw:
time_embedded = model.pose_code(embedid)
time_embedded = time_embedded.repeat(1,npts, 1)
dp_canonical_embedded = model.embedding_xyz(dp_canonical_pts)[None]
dp_canonical_embedded = dp_canonical_embedded.repeat(bs,1,1)
dp_canonical_embedded = torch.cat([dp_canonical_embedded, time_embedded], -1)
dp_deformed_flo = model.nerf_flowfw(dp_canonical_embedded, xyz=dp_canonical_pts)
dp_deformed_pts = dp_canonical_pts + dp_deformed_flo
else:
dp_deformed_pts = dp_canonical_pts.repeat(bs,1,1)
dp_cam_pts = obj_to_cam(dp_deformed_pts, Rmat, Tmat)
dp_px = pinhole_cam(dp_cam_pts,K)
return dp_px
def get_near_far(near_far, vars_np, tol_fac=1.2, pts=None):
"""
pts: point coordinate N,3
near_far: near and far plane M,2
rtk: object to camera transform, M,4,4
idk: indicator of observed or not, M
tol_fac: tolerance factor
"""
if pts is None:
#pts = vars_np['mesh_rest'].vertices
# turn points to bounding box
pts = trimesh.bounds.corners(vars_np['mesh_rest'].bounds)
device = near_far.device
rtk = torch.Tensor(vars_np['rtk']).to(device)
idk = torch.Tensor(vars_np['idk']).to(device)
pts = pts_to_view(pts, rtk, device)
pmax = pts[...,-1].max(-1)[0]
pmin = pts[...,-1].min(-1)[0]
delta = (pmax - pmin)*(tol_fac-1)
near= pmin-delta
far = pmax+delta
near_far[idk==1,0] = torch.clamp(near[idk==1], min=1e-3)
near_far[idk==1,1] = torch.clamp( far[idk==1], min=1e-3)
return near_far
def pts_to_view(pts, rtk, device):
"""
object to camera coordinates
pts: point coordinate N,3
rtk: object to camera transform, M,4,4
idk: indicator of observed or not, M
"""
M = rtk.shape[0]
out_pts = []
chunk=100
for i in range(0,M,chunk):
rtk_sub = rtk[i:i+chunk]
pts_sub = torch.Tensor(np.tile(pts[None],
(len(rtk_sub),1,1))).to(device) # M,N,3
pts_sub = obj_to_cam(pts_sub, rtk_sub[:,:3,:3],
rtk_sub[:,:3,3])
pts_sub = pinhole_cam(pts_sub, rtk_sub[:,3])
out_pts.append(pts_sub)
out_pts = torch.cat(out_pts, 0)
return out_pts
def compute_point_visibility(pts, vars_np, device):
"""
pts: point coordinate N,3
rtk: object to camera transform, M,4,4
idk: indicator of observed or not, M
**deprecated**: K in vars_tensor['rtk'] may not be consistent
"""
vars_tensor = array2tensor(vars_np, device=device)
rtk = vars_tensor['rtk']
idk = vars_tensor['idk']
vis = vars_tensor['vis']
pts = pts_to_view(pts, rtk, device) # T, N, 3
h,w = vis.shape[1:]
vis = vis[:,None]
xy = pts[:,None,:,:2]
xy[...,0] = xy[...,0]/w*2 - 1
xy[...,1] = xy[...,1]/h*2 - 1
# grab the visibility value in the mask and sum over frames
vis = F.grid_sample(vis, xy)[:,0,0]
vis = (idk[:,None]*vis).sum(0)
vis = (vis>0).float() # at least seen in one view
return vis
def near_far_to_bound(near_far):
"""
near_far: T, 2 on cuda
bound: float
this can only be used for a single video (and for approximation)
"""
bound=(near_far[:,1]-near_far[:,0]).mean() / 2
bound = bound.detach().cpu().numpy()
return bound
def rot_angle(mat):
"""
rotation angle of rotation matrix
rmat: ..., 3,3
"""
eps=1e-4
cos = ( mat[...,0,0] + mat[...,1,1] + mat[...,2,2] - 1 )/2
cos = cos.clamp(-1+eps,1-eps)
angle = torch.acos(cos)
return angle
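# Hedged sketch (illustrative): for a rotation by angle t about any axis, trace(R) = 1 + 2cos(t),
# so the function above recovers t = arccos((trace - 1) / 2); checked on a 90-degree z-rotation.
def _example_rot_angle():
    import math
    import torch
    c, s = math.cos(math.pi / 2), math.sin(math.pi / 2)
    Rz = torch.tensor([[c, -s, 0.], [s, c, 0.], [0., 0., 1.]])
    assert torch.isclose(rot_angle(Rz), torch.tensor(math.pi / 2), atol=1e-3)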
def match2coords(match, w_rszd):
tar_coord = torch.cat([match[:,None]%w_rszd, match[:,None]//w_rszd],-1)
tar_coord = tar_coord.float()
return tar_coord
def match2flo(match, w_rszd, img_size, warp_r, warp_t, device):
ref_coord = sample_xy(w_rszd, 1, 0, device, return_all=True)[1].view(-1,2)
ref_coord = ref_coord.matmul(warp_r[:2,:2]) + warp_r[None,:2,2]
tar_coord = match2coords(match, w_rszd)
tar_coord = tar_coord.matmul(warp_t[:2,:2]) + warp_t[None,:2,2]
flo_dp = (tar_coord - ref_coord) / img_size * 2 # [-2,2]
flo_dp = flo_dp.view(w_rszd, w_rszd, 2)
flo_dp = flo_dp.permute(2,0,1)
xygrid = sample_xy(w_rszd, 1, 0, device, return_all=True)[1] # scale to img_size
xygrid = xygrid * float(img_size/w_rszd)
warp_r_inv = Kmatinv(warp_r)
xygrid = xygrid.matmul(warp_r_inv[:2,:2]) + warp_r_inv[None,:2,2]
xygrid = xygrid / w_rszd * 2 - 1
flo_dp = F.grid_sample(flo_dp[None], xygrid.view(1,w_rszd,w_rszd,2))[0]
return flo_dp
def compute_flow_cse(cse_a,cse_b, warp_a, warp_b, img_size):
"""
compute the flow between two frames under cse feature matching
assuming the two feature images have the same, square spatial dimensions
cse: 16,h,w, feature image
flo_dp: 2,h,w
"""
_,_,w_rszd = cse_a.shape
hw_rszd = w_rszd*w_rszd
device = cse_a.device
cost = (cse_b[:,None,None] * cse_a[...,None,None]).sum(0)
_,match_a = cost.view(hw_rszd, hw_rszd).max(1)
_,match_b = cost.view(hw_rszd, hw_rszd).max(0)
flo_a = match2flo(match_a, w_rszd, img_size, warp_a, warp_b, device)
flo_b = match2flo(match_b, w_rszd, img_size, warp_b, warp_a, device)
return flo_a, flo_b
def compute_flow_geodist(dp_refr,dp_targ, geodists):
"""
compute the flow between two frames under geodesic distance matching
dps: h,w, canonical surface mapping index
geodists N,N, distance matrix
flo_dp: 2,h,w
"""
h_rszd,w_rszd = dp_refr.shape
hw_rszd = h_rszd*w_rszd
device = dp_refr.device
chunk = 1024
# match: hw**2
match = torch.zeros(hw_rszd).to(device)
for i in range(0,hw_rszd,chunk):
chunk_size = len(dp_refr.view(-1,1)[i:i+chunk] )
dp_refr_sub = dp_refr.view(-1,1)[i:i+chunk].repeat(1,hw_rszd).view(-1,1)
dp_targ_sub = dp_targ.view(1,-1) .repeat(chunk_size,1).view(-1,1)
match_sub = geodists[dp_refr_sub, dp_targ_sub]
dis_geo_sub,match_sub = match_sub.view(-1, hw_rszd).min(1)
#match_sub[dis_geo_sub>0.1] = 0
match[i:i+chunk] = match_sub
# cx,cy
tar_coord = match2coords(match, w_rszd)
ref_coord = sample_xy(w_rszd, 1, 0, device, return_all=True)[1].view(-1,2)
ref_coord = ref_coord.view(h_rszd, w_rszd, 2)
tar_coord = tar_coord.view(h_rszd, w_rszd, 2)
flo_dp = (tar_coord - ref_coord) / w_rszd * 2 # [-2,2]
match = match.view(h_rszd, w_rszd)
flo_dp[match==0] = 0
flo_dp = flo_dp.permute(2,0,1)
return flo_dp
def compute_flow_geodist_old(dp_refr,dp_targ, geodists):
"""
compute the flow between two frames under geodesic distance matching
dps: h,w, canonical surface mapping index
geodists N,N, distance matrix
flo_dp: 2,h,w
"""
h_rszd,w_rszd = dp_refr.shape
hw_rszd = h_rszd*w_rszd
device = dp_refr.device
dp_refr = dp_refr.view(-1,1).repeat(1,hw_rszd).view(-1,1)
dp_targ = dp_targ.view(1,-1).repeat(hw_rszd,1).view(-1,1)
match = geodists[dp_refr, dp_targ]
dis_geo,match = match.view(hw_rszd, hw_rszd).min(1)
#match[dis_geo>0.1] = 0
# cx,cy
tar_coord = match2coords(match, w_rszd)
ref_coord = sample_xy(w_rszd, 1, 0, device, return_all=True)[1].view(-1,2)
ref_coord = ref_coord.view(h_rszd, w_rszd, 2)
tar_coord = tar_coord.view(h_rszd, w_rszd, 2)
flo_dp = (tar_coord - ref_coord) / w_rszd * 2 # [-2,2]
match = match.view(h_rszd, w_rszd)
flo_dp[match==0] = 0
flo_dp = flo_dp.permute(2,0,1)
return flo_dp
def fb_flow_check(flo_refr, flo_targ, img_refr, img_targ, dp_thrd,
save_path=None):
"""
apply forward backward consistency check on flow fields
flo_refr: 2,h,w forward flow
flo_targ: 2,h,w backward flow
fberr: h,w forward backward error
"""
h_rszd, w_rszd = flo_refr.shape[1:]
# clean up flow
flo_refr = flo_refr.permute(1,2,0).cpu().numpy()
flo_targ = flo_targ.permute(1,2,0).cpu().numpy()
flo_refr_mask = np.linalg.norm(flo_refr,2,-1)>0 # this also removes 0 flows
flo_targ_mask = np.linalg.norm(flo_targ,2,-1)>0
flo_refr_px = flo_refr * w_rszd / 2
flo_targ_px = flo_targ * w_rszd / 2
#fb check
x0,y0 =np.meshgrid(range(w_rszd),range(h_rszd))
hp0 = np.stack([x0,y0],-1) # screen coord
flo_fb = warp_flow(hp0 + flo_targ_px, flo_refr_px) - hp0
flo_fb = 2*flo_fb/w_rszd
fberr_fw = np.linalg.norm(flo_fb, 2,-1)
fberr_fw[~flo_refr_mask] = 0
flo_bf = warp_flow(hp0 + flo_refr_px, flo_targ_px) - hp0
flo_bf = 2*flo_bf/w_rszd
fberr_bw = np.linalg.norm(flo_bf, 2,-1)
fberr_bw[~flo_targ_mask] = 0
if save_path is not None:
# vis
thrd_vis = 0.01
img_refr = F.interpolate(img_refr, (h_rszd, w_rszd), mode='bilinear')[0]
img_refr = img_refr.permute(1,2,0).cpu().numpy()[:,:,::-1]
img_targ = F.interpolate(img_targ, (h_rszd, w_rszd), mode='bilinear')[0]
img_targ = img_targ.permute(1,2,0).cpu().numpy()[:,:,::-1]
flo_refr[:,:,0] = (flo_refr[:,:,0] + 2)/2
flo_targ[:,:,0] = (flo_targ[:,:,0] - 2)/2
flo_refr[fberr_fw>thrd_vis]=0.
flo_targ[fberr_bw>thrd_vis]=0.
flo_refr[~flo_refr_mask]=0.
flo_targ[~flo_targ_mask]=0.
img = np.concatenate([img_refr, img_targ], 1)
flo = np.concatenate([flo_refr, flo_targ], 1)
imgflo = cat_imgflo(img, flo)
imgcnf = np.concatenate([fberr_fw, fberr_bw],1)
imgcnf = np.clip(imgcnf, 0, dp_thrd)*(255/dp_thrd)
imgcnf = np.repeat(imgcnf[...,None],3,-1)
imgcnf = cv2.resize(imgcnf, imgflo.shape[::-1][1:])
imgflo_cnf = np.concatenate([imgflo, imgcnf],0)
cv2.imwrite(save_path, imgflo_cnf)
return fberr_fw, fberr_bw
def mask_aug(rendered):
lb = 0.1; ub = 0.3
_,h,w=rendered.shape
if np.random.binomial(1,0.5):
sx = int(np.random.uniform(lb*w,ub*w))
sy = int(np.random.uniform(lb*h,ub*h))
cx = int(np.random.uniform(sx,w-sx))
cy = int(np.random.uniform(sy,h-sy))
feat_mean = rendered.mean(-1).mean(-1)[:,None,None]
rendered[:,cx-sx:cx+sx,cy-sy:cy+sy] = feat_mean
return rendered
def process_so3_seq(rtk_seq, vis=False, smooth=True):
"""
rtk_seq: bs, N, 29; the first 13 entries per hypothesis are
{scores x1, rotations x9, translations x3}, followed by the flattened 4x4 raw rtk
"""
from utils.io import draw_cams
scores =rtk_seq[...,0]
bs,N = scores.shape
rmat = rtk_seq[...,1:10]
tmat = rtk_seq[:,0,10:13]
rtk_raw = rtk_seq[:,0,13:29].reshape((-1,4,4))
distribution = torch.Tensor(scores).softmax(1)
entropy = (-distribution.log() * distribution).sum(1)
if vis:
# draw distribution
obj_scale = 3
cam_space = obj_scale * 0.2
tmat_raw = np.tile(rtk_raw[:,None,:3,3], (1,N,1))
scale_factor = obj_scale/tmat_raw[...,-1].mean()
tmat_raw *= scale_factor
tmat_raw = tmat_raw.reshape((bs,12,-1,3))
tmat_raw[...,-1] += np.linspace(-cam_space, cam_space,12)[None,:,None]
tmat_raw = tmat_raw.reshape((bs,-1,3))
# bs, tiltxae
all_rts = np.concatenate([rmat, tmat_raw],-1)
all_rts = np.transpose(all_rts.reshape(bs,N,4,3), [0,1,3,2])
for i in range(bs):
top_idx = scores[i].argsort()[-30:]
top_rt = all_rts[i][top_idx]
top_score = scores[i][top_idx]
top_score = (top_score - top_score.min())/(top_score.max()-top_score.min())
mesh = draw_cams(top_rt, color_list = top_score)
mesh.export('tmp/%d.obj'%(i))
if smooth:
# graph cut scores, bsxN
import pydensecrf.densecrf as dcrf
from pydensecrf.utils import unary_from_softmax
graph = dcrf.DenseCRF2D(bs, 1, N) # width, height, nlabels
unary = unary_from_softmax(distribution.numpy().T.copy())
graph.setUnaryEnergy(unary)
grid = rmat[0].reshape((N,3,3))
drot = np.matmul(grid[None], np.transpose(grid[:,None], (0,1,3,2)))
drot = rot_angle(torch.Tensor(drot))
compat = (-2*(drot).pow(2)).exp()*10
compat = compat.numpy()
graph.addPairwiseGaussian(sxy=10, compat=compat)
Q = graph.inference(100)
scores = np.asarray(Q).T
# argmax
idx_max = scores.argmax(-1)
rmat = rmat[0][idx_max]
rmat = rmat.reshape((-1,9))
rts = np.concatenate([rmat, tmat],-1)
rts = rts.reshape((bs,1,-1))
# post-process se3
root_rmat = rts[:,0,:9].reshape((-1,3,3))
root_tmat = rts[:,0,9:12]
rmat = rtk_raw[:,:3,:3]
tmat = rtk_raw[:,:3,3]
tmat = tmat + np.matmul(rmat, root_tmat[...,None])[...,0]
rmat = np.matmul(rmat, root_rmat)
rtk_raw[:,:3,:3] = rmat
rtk_raw[:,:3,3] = tmat
if vis:
# draw again
pdb.set_trace()
rtk_vis = rtk_raw.copy()
rtk_vis[:,:3,3] *= scale_factor
mesh = draw_cams(rtk_vis)
mesh.export('tmp/final.obj')
return rtk_raw
def align_sim3(rootlist_a, rootlist_b, is_inlier=None, err_valid=None):
"""
nx4x4 matrices
is_inlier: n
"""
# ta = np.matmul(-np.transpose(rootlist_a[:,:3,:3],[0,2,1]),
# rootlist_a[:,:3,3:4])
# ta = ta[...,0].T
# tb = np.matmul(-np.transpose(rootlist_b[:,:3,:3],[0,2,1]),
# rootlist_b[:,:3,3:4])
# tb = tb[...,0].T
# dso3,dtrn,dscale=umeyama_alignment(tb, ta,with_scale=False)
#
# dscale = np.linalg.norm(rootlist_a[0,:3,3],2,-1) /\
# np.linalg.norm(rootlist_b[0,:3,3],2,-1)
# rootlist_b[:,:3,:3] = np.matmul(rootlist_b[:,:3,:3], dso3.T[None])
# rootlist_b[:,:3,3:4] = rootlist_b[:,:3,3:4] - \
# np.matmul(rootlist_b[:,:3,:3], dtrn[None,:,None])
dso3 = np.matmul(np.transpose(rootlist_b[:,:3,:3],(0,2,1)),
rootlist_a[:,:3,:3])
dscale = np.linalg.norm(rootlist_a[:,:3,3],2,-1)/\
np.linalg.norm(rootlist_b[:,:3,3],2,-1)
# select inliers to fit
if is_inlier is not None:
if is_inlier.sum() == 0:
is_inlier[np.argmin(err_valid)] = True
dso3 = dso3[is_inlier]
dscale = dscale[is_inlier]
dso3 = R.from_matrix(dso3).mean().as_matrix()
rootlist_b[:,:3,:3] = np.matmul(rootlist_b[:,:3,:3], dso3[None])
dscale = dscale.mean()
rootlist_b[:,:3,3] = rootlist_b[:,:3,3] * dscale
so3_err = np.matmul(rootlist_a[:,:3,:3],
np.transpose(rootlist_b[:,:3,:3],[0,2,1]))
so3_err = rot_angle(torch.Tensor(so3_err))
so3_err = so3_err / np.pi*180
so3_err_max = so3_err.max()
so3_err_mean = so3_err.mean()
so3_err_med = np.median(so3_err)
so3_err_std = np.asarray(so3_err.std())
print(so3_err)
print('max so3 error (deg): %.1f'%(so3_err_max))
print('med so3 error (deg): %.1f'%(so3_err_med))
print('mean so3 error (deg): %.1f'%(so3_err_mean))
print('std so3 error (deg): %.1f'%(so3_err_std))
return rootlist_b
def align_sfm_sim3(aux_seq, datasets):
from utils.io import draw_cams, load_root
for dataset in datasets:
seqname = dataset.imglist[0].split('/')[-2]
# only process dataset with rtk_path input
if dataset.has_prior_cam:
root_dir = dataset.rtklist[0][:-9]
root_sfm = load_root(root_dir, 0)[:-1] # excluding the last
# split predicted root into multiple sequences
seq_idx = [seqname == i.split('/')[-2] for i in aux_seq['impath']]
root_pred = aux_seq['rtk'][seq_idx]
is_inlier = aux_seq['is_valid'][seq_idx]
err_valid = aux_seq['err_valid'][seq_idx]
# only use certain ones to match
#pdb.set_trace()
#mesh = draw_cams(root_sfm, color='gray')
#mesh.export('0.obj')
# pre-align the center according to cat mask
root_sfm = visual_hull_align(root_sfm,
aux_seq['kaug'][seq_idx],
aux_seq['masks'][seq_idx])
root_sfm = align_sim3(root_pred, root_sfm,
is_inlier=is_inlier, err_valid=err_valid)
# only modify rotation
#root_pred[:,:3,:3] = root_sfm[:,:3,:3]
root_pred = root_sfm
aux_seq['rtk'][seq_idx] = root_pred
aux_seq['is_valid'][seq_idx] = True
else:
print('not aligning %s, no rtk path in config file'%seqname)
def visual_hull_align(rtk, kaug, masks):
"""
input: array
output: array
"""
rtk = torch.Tensor(rtk)
kaug = torch.Tensor(kaug)
masks = torch.Tensor(masks)
num_view,h,w = masks.shape
grid_size = 64
if rtk.shape[0]!=num_view:
print('rtk size mismatch: %d vs %d'%(rtk.shape[0], num_view))
rtk = rtk[:num_view]
rmat = rtk[:,:3,:3]
tmat = rtk[:,:3,3:]
Kmat = K2mat(rtk[:,3])
Kaug = K2inv(kaug) # p = Kaug Kmat P
kmat = mat2K(Kaug.matmul(Kmat))
rmatc = rmat.permute((0,2,1))
tmatc = -rmatc.matmul(tmat)
bound = tmatc.norm(2,-1).mean()
pts = np.linspace(-bound, bound, grid_size).astype(np.float32)
query_yxz = np.stack(np.meshgrid(pts, pts, pts), -1) # (y,x,z)
query_yxz = torch.Tensor(query_yxz).view(-1, 3)
query_xyz = torch.cat([query_yxz[:,1:2], query_yxz[:,0:1], query_yxz[:,2:3]],-1)
score_xyz = []
chunk = 1000
for i in range(0,len(query_xyz),chunk):
query_xyz_chunk = query_xyz[None, i:i+chunk].repeat(num_view, 1,1)
query_xyz_chunk = obj_to_cam(query_xyz_chunk, rmat, tmat)
query_xyz_chunk = pinhole_cam(query_xyz_chunk, kmat)
query_xy = query_xyz_chunk[...,:2]
query_xy[...,0] = query_xy[...,0]/w*2-1
query_xy[...,1] = query_xy[...,1]/h*2-1
# sum over time
score = F.grid_sample(masks[:,None], query_xy[:,None])[:,0,0]
score = score.sum(0)
score_xyz.append(score)
# align the center
score_xyz = torch.cat(score_xyz)
center = query_xyz[score_xyz>0.8*num_view]
print('%d points used to align center'% (len(center)) )
center = center.mean(0)
tmatc = tmatc - center[None,:,None]
tmat = np.matmul(-rmat, tmatc)
rtk[:,:3,3:] = tmat
return rtk
def ood_check_cse(dp_feats, dp_embed, dp_idx):
"""
dp_feats: bs,16,h,w
dp_idx: bs, h,w
dp_embed: N,16
valid_list bs
"""
bs,_,h,w = dp_feats.shape
N,_ = dp_embed.shape
device = dp_feats.device
dp_idx = F.interpolate(dp_idx.float()[None], (h,w), mode='nearest').long()[0]
## dot product
#pdb.set_trace()
#err_list = []
#err_threshold = 0.05
#for i in range(bs):
# err = 1- (dp_embed[dp_idx[i]]*dp_feats[i].permute(1,2,0)).sum(-1)
# err_list.append(err)
# fb check
err_list = []
err_threshold = 12
# TODO no fb check
#err_threshold = 100
for i in range(bs):
# use chunk
chunk = 5000
max_idx = torch.zeros(N).to(device)
for j in range(0,N,chunk):
costmap = (dp_embed.view(N,16,1)[j:j+chunk]*\
dp_feats[i].view(1,16,h*w)).sum(-2)
max_idx[j:j+chunk] = costmap.argmax(-1) # N
rpj_idx = max_idx[dp_idx[i]]
rpj_coord = torch.stack([rpj_idx % w, rpj_idx//w],-1)
ref_coord = sample_xy(w, 1, 0, device, return_all=True)[1].view(h,w,2)
err = (rpj_coord - ref_coord).norm(2,-1)
err_list.append(err)
valid_list = []
error_list = []
for i in range(bs):
err = err_list[i]
mean_error = err[dp_idx[i]!=0].mean()
is_valid = mean_error < err_threshold
error_list.append( mean_error)
valid_list.append( is_valid )
#cv2.imwrite('tmp/%05d.png'%i, (err/mean_error).cpu().numpy()*100)
#print(i); print(mean_error)
error_list = torch.stack(error_list,0)
valid_list = torch.stack(valid_list,0)
return valid_list, error_list
def bbox_dp2rnd(bbox, kaug):
"""
bbox: bs, 4
kaug: bs, 4
cropab2: bs, 3,3, transformation from dp bbox to rendered bbox coords
"""
cropa2im = torch.cat([(bbox[:,2:] - bbox[:,:2]) / 112.,
bbox[:,:2]],-1)
cropa2im = K2mat(cropa2im)
im2cropb = K2inv(kaug)
cropa2b = im2cropb.matmul(cropa2im)
return cropa2b
def resample_dp(dp_feats, dp_bbox, kaug, target_size):
"""
dp_feats: bs, 16, h,w
dp_bbox: bs, 4
kaug: bs, 4
"""
# if dp_bbox are all zeros, just do the resizing
if dp_bbox.abs().sum()==0:
dp_feats_rsmp = F.interpolate(dp_feats, (target_size, target_size),
mode='bilinear')
else:
dp_size = dp_feats.shape[-1]
device = dp_feats.device
dp2rnd = bbox_dp2rnd(dp_bbox, kaug)
rnd2dp = Kmatinv(dp2rnd)
xygrid = sample_xy(target_size, 1, 0, device, return_all=True)[1]
xygrid = xygrid.matmul(rnd2dp[:,:2,:2]) + rnd2dp[:,None,:2,2]
xygrid = xygrid / dp_size * 2 - 1
dp_feats_rsmp = F.grid_sample(dp_feats, xygrid.view(-1,target_size,target_size,2))
return dp_feats_rsmp
def vrender_flo(weights_coarse, xyz_coarse_target, xys, img_size):
"""
weights_coarse: ..., ndepth
xyz_coarse_target: ..., ndepth, 3
flo_coarse: ..., 2
flo_valid: ..., 1
"""
# render flow
weights_coarse = weights_coarse.clone()
xyz_coarse_target = xyz_coarse_target.clone()
# bs, nsamp, -1, x
weights_shape = weights_coarse.shape
xyz_coarse_target = xyz_coarse_target.view(weights_shape+(3,))
xy_coarse_target = xyz_coarse_target[...,:2]
# deal with negative z
invalid_ind = torch.logical_or(xyz_coarse_target[...,-1]<1e-5,
xy_coarse_target.norm(2,-1).abs()>2*img_size)
weights_coarse[invalid_ind] = 0.
xy_coarse_target[invalid_ind] = 0.
# renormalize
weights_coarse = weights_coarse/(1e-9+weights_coarse.sum(-1)[...,None])
# candidate motion vector
xys_unsq = xys.view(weights_shape[:-1]+(1,2))
flo_coarse = xy_coarse_target - xys_unsq
flo_coarse = weights_coarse[...,None] * flo_coarse
flo_coarse = flo_coarse.sum(-2)
## candidate target point
#xys_unsq = xys.view(weights_shape[:-1]+(2,))
#xy_coarse_target = weights_coarse[...,None] * xy_coarse_target
#xy_coarse_target = xy_coarse_target.sum(-2)
#flo_coarse = xy_coarse_target - xys_unsq
flo_coarse = flo_coarse/img_size * 2
flo_valid = (invalid_ind.sum(-1)==0).float()[...,None]
return flo_coarse, flo_valid
def diff_flo(pts_target, xys, img_size):
"""
pts_target: ..., 1, 2
xys: ..., 2
flo_coarse: ..., 2
flo_valid: ..., 1
"""
# candidate motion vector
pts_target = pts_target.view(xys.shape)
flo_coarse = pts_target - xys
flo_coarse = flo_coarse/img_size * 2
return flo_coarse
def fid_reindex(fid, num_vids, vid_offset):
"""
re-index absolute frame ids {0, ..., N} into a per-video id and a relative frame id
fid: N, absolute frame id
vid: N, video id
tid: N, relative frame id (normalized)
"""
tid = torch.zeros_like(fid).float()
vid = torch.zeros_like(fid)
max_ts = (vid_offset[1:] - vid_offset[:-1]).max()
for i in range(num_vids):
assign = torch.logical_and(fid>=vid_offset[i],
fid<vid_offset[i+1])
vid[assign] = i
tid[assign] = fid[assign].float() - vid_offset[i]
doffset = vid_offset[i+1] - vid_offset[i]
tid[assign] = (tid[assign] - doffset/2)/max_ts*2
#tid[assign] = 2*(tid[assign] / doffset)-1
#tid[assign] = (tid[assign] - doffset/2)/1000.
return vid, tid
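# Hedged usage sketch (illustrative offsets): two videos of lengths 3 and 5; absolute frame
# ids map to (video id, relative id), with the relative id roughly normalized to [-1, 1].
def _example_fid_reindex():
    import torch
    fid = torch.tensor([0, 2, 3, 4, 7])
    vid_offset = torch.tensor([0, 3, 8])
    vid, tid = fid_reindex(fid, num_vids=2, vid_offset=vid_offset)
    assert vid.tolist() == [0, 0, 1, 1, 1]
    assert tid.abs().max() <= 1.0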
|
banmo-main
|
nnutils/geom_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
import pdb
import trimesh
import cv2
import numpy as np
import torch
from nnutils.geom_utils import rot_angle, mat2K, Kmatinv, obj_to_cam, \
pinhole_cam, lbs, gauss_mlp_skinning, evaluate_mlp
import torch.nn.functional as F
def nerf_gradient(mlp, embed, pts, use_xyz=False,code=None, sigma_only=False):
"""
gradient of mlp params wrt pts
"""
pts.requires_grad_(True)
pts_embedded = embed(pts)
if use_xyz: xyz=pts
else: xyz=None
y = evaluate_mlp(mlp, pts_embedded, chunk=pts.shape[0],
xyz=xyz,code=code,sigma_only=sigma_only)
sdf = -y
ibetas = 1/(mlp.beta.abs()+1e-9)
sigmas = (0.5 + 0.5 * sdf.sign() * torch.expm1(-sdf.abs() * ibetas))
# get gradient for each size-1 output
gradients = []
for i in range(y.shape[-1]):
y_sub = y [...,i:i+1]
d_output = torch.ones_like(y_sub, requires_grad=False, device=y.device)
gradient = torch.autograd.grad(
outputs=y_sub,
inputs=pts,
grad_outputs=d_output,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
gradients.append( gradient[...,None] )
gradients = torch.cat(gradients,-1) # ...,input-dim, output-dim
return gradients, sigmas
def eikonal_loss(mlp, embed, pts, bound):
"""
pts: X* backward warped points
"""
# make it more efficient
bs = pts.shape[0]
sample_size = 1000
if bs>sample_size:
probs = torch.ones(bs)
rand_inds = torch.multinomial(probs, sample_size, replacement=False)
pts = pts[rand_inds]
pts = pts.view(-1,3).detach()
nsample = pts.shape[0]
device = next(mlp.parameters()).device
bound = torch.Tensor(bound)[None].to(device)
inbound_idx = ((bound - pts.abs()) > 0).sum(-1) == 3
pts = pts[inbound_idx]
pts = pts[None]
g,sigmas_unit = nerf_gradient(mlp, embed, pts, sigma_only=True)
g = g[...,0]
grad_norm = g.norm(2, dim=-1)
eikonal_loss = (grad_norm - 1) ** 2
eikonal_loss = eikonal_loss.mean()
return eikonal_loss
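# Hedged sketch (standalone, illustrative): the eikonal penalty above drives |grad f| toward 1,
# which an exact signed distance field satisfies; for f(x) = |x| - r, autograd returns
# unit-norm gradients and the penalty is ~0.
def _example_eikonal_on_exact_sdf():
    import torch
    pts = torch.randn(100, 3).requires_grad_(True)
    sdf = pts.norm(2, dim=-1) - 0.5                 # signed distance to a sphere of radius 0.5
    grad = torch.autograd.grad(sdf.sum(), pts)[0]   # rows are x / |x|, each with unit norm
    penalty = (grad.norm(2, dim=-1) - 1).pow(2).mean()
    assert penalty < 1e-8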
def elastic_loss(mlp, embed, xyz, time_embedded):
xyz = xyz.detach().clone()
time_embedded = time_embedded.detach().clone()
g,_ = nerf_gradient(mlp, embed, xyz, use_xyz=mlp.use_xyz,code=time_embedded)
jacobian = g+torch.eye(3)[None,None].to(g.device)
sign, log_svals = jacobian.slogdet()
log_svals = log_svals.clone()
log_svals[sign<=0] = 0.
elastic_loss = log_svals**2
return elastic_loss
def bone_density_loss(mlp, embed, bones):
pts = bones[:,:3]
pts_embedded = embed(pts)
y = evaluate_mlp(mlp, pts_embedded, pts.shape[0], sigma_only=True)
# assumed fix (the original returned the function object itself): penalize bone
# centers with positive sdf (= -y), i.e. centers that fall outside the surface
bone_density_loss = F.relu(-y).mean()
return bone_density_loss
def visibility_loss(mlp, embed, xyz_pos, w_pos, bound, chunk):
"""
w_pos: num_points x num_samples, visibility weights returned from nerf
bound: scalar, used to sample negative samples
"""
device = next(mlp.parameters()).device
xyz_pos = xyz_pos.detach().clone()
w_pos = w_pos.detach().clone()
# negative examples
nsample = w_pos.shape[0]*w_pos.shape[1]
bound = torch.Tensor(bound)[None,None]
xyz_neg = torch.rand(1,nsample,3)*2*bound-bound
xyz_neg = xyz_neg.to(device)
xyz_neg_embedded = embed(xyz_neg)
vis_neg_pred = evaluate_mlp(mlp, xyz_neg_embedded, chunk=chunk)[...,0]
vis_loss_neg = -F.logsigmoid(-vis_neg_pred).sum()*0.1/nsample
# positive examples
xyz_pos_embedded = embed(xyz_pos)
vis_pos_pred = evaluate_mlp(mlp, xyz_pos_embedded, chunk=chunk)[...,0]
vis_loss_pos = -(F.logsigmoid(vis_pos_pred) * w_pos).sum()/nsample
vis_loss = vis_loss_pos + vis_loss_neg
return vis_loss
def rtk_loss(rtk, rtk_raw, aux_out):
rot_pred = rtk[:,:3,:3]
rot_gt = rtk_raw[:,:3,:3]
rot_loss = rot_angle(rot_pred.matmul(rot_gt.permute(0,2,1))).mean()
rot_loss = 0.01*rot_loss
trn_pred = rtk[:,:3,3]
trn_gt = rtk_raw[:,:3,3]
trn_loss = (trn_pred - trn_gt).pow(2).sum(-1).mean()
total_loss = rot_loss + trn_loss
aux_out['rot_loss'] = rot_loss
aux_out['trn_loss'] = trn_loss
return total_loss
def compute_pts_exp(pts_prob, pts):
"""
pts: ..., ndepth, 3
pts_prob: ..., ndepth
"""
ndepth = pts_prob.shape[-1]
pts_prob = pts_prob.clone()
pts_prob = pts_prob.view(-1, ndepth,1)
pts_prob = pts_prob/(1e-9+pts_prob.sum(1)[:,None])
pts_exp = (pts * pts_prob).sum(1)
return pts_exp
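# Hedged usage sketch (illustrative numbers): with all probability mass on the second depth
# sample, the expected 3D point returned above equals that sample.
def _example_compute_pts_exp():
    import torch
    pts = torch.tensor([[[0., 0., 0.], [1., 2., 3.]]])   # 1 ray, 2 depth samples, xyz
    pts_prob = torch.tensor([[0., 1.]])
    pts_exp = compute_pts_exp(pts_prob, pts)
    assert torch.allclose(pts_exp, torch.tensor([[1., 2., 3.]]))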
def feat_match_loss(nerf_feat, embedding_xyz, feats, pts, pts_prob, bound,
is_training=True):
"""
feats: ..., num_feat
pts: ..., ndepth, 3
pts_prob: ..., ndepth
loss: ..., 1
"""
pts = pts.clone()
base_shape = feats.shape[:-1] # bs, ns
nfeat = feats.shape[-1]
ndepth = pts_prob.shape[-1]
feats= feats.view(-1, nfeat)
pts = pts.view(-1, ndepth,3)
# part1: compute expected pts
pts_exp = compute_pts_exp(pts_prob, pts)
## part2: matching
pts_pred = feat_match(nerf_feat, embedding_xyz, feats,
bound,grid_size=20,is_training=is_training)
# part3: compute loss
feat_err = (pts_pred - pts_exp).norm(2,-1) # n,ndepth
# rearrange outputs
pts_pred = pts_pred.view(base_shape+(3,))
pts_exp = pts_exp .view(base_shape+(3,))
feat_err = feat_err .view(base_shape+(1,))
return pts_pred, pts_exp, feat_err
def kp_reproj_loss(pts_pred, xys, models, embedding_xyz, rays):
"""
pts_pred, ...,3
xys, ...,2
out, ...,1 same as pts_pred
gcc loss is only used to update root/body pose and skinning weights
"""
xys = xys.view(-1,1,2)
xy_reproj = kp_reproj(pts_pred, models, embedding_xyz, rays)
proj_err = (xys - xy_reproj[...,:2]).norm(2,-1)
proj_err = proj_err.view(pts_pred.shape[:-1]+(1,))
return proj_err
def kp_reproj(pts_pred, models, embedding_xyz, rays, to_target=False):
"""
pts_pred, ...,3
out, ...,1,3 same as pts_pred
to_target whether reproject to target frame
"""
N = pts_pred.view(-1,3).shape[0]
xyz_coarse_sampled = pts_pred.view(-1,1,3)
# detach grad since reproj-loss would not benefit feature learning
# (due to ambiguity)
#xyz_coarse_sampled = xyz_coarse_sampled.detach()
# TODO wrap flowbw and lbs into the same module
# TODO include loss for flowbw
if to_target: rtk_vec = rays['rtk_vec_target']
else: rtk_vec = rays['rtk_vec']
rtk_vec = rtk_vec.view(N,-1) # bs, ns, 21
if 'bones' in models.keys():
if to_target: bone_rts_fw = rays['bone_rts_target']
else: bone_rts_fw = rays['bone_rts']
bone_rts_fw = bone_rts_fw.view(N,-1) # bs, ns,-1
if 'nerf_skin' in models.keys():
nerf_skin = models['nerf_skin']
else: nerf_skin = None
bones = models['bones_rst']
skin_aux = models['skin_aux']
rest_pose_code = models['rest_pose_code']
rest_pose_code = rest_pose_code(torch.Tensor([0]).long().to(bones.device))
skin_forward = gauss_mlp_skinning(xyz_coarse_sampled, embedding_xyz, bones,
rest_pose_code, nerf_skin, skin_aux=skin_aux)
xyz_coarse_sampled,_ = lbs(bones, bone_rts_fw,
skin_forward, xyz_coarse_sampled, backward=False)
Rmat = rtk_vec[:,0:9] .view(N,1,3,3)
Tmat = rtk_vec[:,9:12] .view(N,1,3)
Kinv = rtk_vec[:,12:21].view(N,1,3,3)
K = mat2K(Kmatinv(Kinv))
xyz_coarse_sampled = obj_to_cam( xyz_coarse_sampled, Rmat, Tmat)
xyz_coarse_sampled = pinhole_cam(xyz_coarse_sampled,K)
xy_coarse_sampled = xyz_coarse_sampled[...,:2]
return xy_coarse_sampled
def feat_match(nerf_feat, embedding_xyz, feats, bound,
grid_size=20,is_training=True, init_pts=None, rt_entropy=False):
"""
feats: -1, num_feat
"""
if is_training:
chunk_pts = 8*1024
else:
chunk_pts = 1024
chunk_pix = 4096
nsample,_ = feats.shape
device = feats.device
feats = F.normalize(feats,2,-1)
# sample model on a regular 3d grid, and correlate with feature, nkxkxk
#p1d = np.linspace(-bound, bound, grid_size).astype(np.float32)
#query_yxz = np.stack(np.meshgrid(p1d, p1d, p1d), -1) # (y,x,z)
pxd = np.linspace(-bound[0], bound[0], grid_size).astype(np.float32)
pyd = np.linspace(-bound[1], bound[1], grid_size).astype(np.float32)
pzd = np.linspace(-bound[2], bound[2], grid_size).astype(np.float32)
query_yxz = np.stack(np.meshgrid(pyd, pxd, pzd), -1) # (y,x,z)
query_yxz = torch.Tensor(query_yxz).to(device).view(-1, 3)
query_xyz = torch.cat([query_yxz[:,1:2], query_yxz[:,0:1], query_yxz[:,2:3]],-1)
if init_pts is not None:
query_xyz = query_xyz[None] + init_pts[:,None]
else:
# N x Ns x 3
query_xyz = query_xyz[None]
# inject some noise at training time
if is_training and init_pts is None:
bound = torch.Tensor(bound)[None,None].to(device)
query_xyz = query_xyz + torch.randn_like(query_xyz) * bound * 0.05
cost_vol = []
for i in range(0,grid_size**3,chunk_pts):
if init_pts is None:
query_xyz_chunk = query_xyz[0,i:i+chunk_pts]
xyz_embedded = embedding_xyz(query_xyz_chunk)[:,None] # (N,1,...)
vol_feat_subchunk = evaluate_mlp(nerf_feat, xyz_embedded)[:,0] # (chunk, num_feat)
# normalize vol feat
vol_feat_subchunk = F.normalize(vol_feat_subchunk,2,-1)[None]
cost_chunk = []
for j in range(0,nsample,chunk_pix):
feats_chunk = feats[j:j+chunk_pix] # (chunk pix, num_feat)
if init_pts is not None:
# only query 3d grid according to each px when they are diff
# vol feature
query_xyz_chunk = query_xyz[j:j+chunk_pix,i:i+chunk_pts].clone()
xyz_embedded = embedding_xyz(query_xyz_chunk)
vol_feat_subchunk = evaluate_mlp(nerf_feat, xyz_embedded)
# normalize vol feat
vol_feat_subchunk = F.normalize(vol_feat_subchunk,2,-1)
# cpix, cpts
# distance metric
cost_subchunk = (vol_feat_subchunk * \
feats_chunk[:,None]).sum(-1) * (nerf_feat.beta.abs()+1e-9)
cost_chunk.append(cost_subchunk)
cost_chunk = torch.cat(cost_chunk,0) # (nsample, cpts)
cost_vol.append(cost_chunk)
cost_vol = torch.cat(cost_vol,-1) # (nsample, k**3)
prob_vol = cost_vol.softmax(-1)
# regress to the true location, n,3
if not is_training: torch.cuda.empty_cache()
# n, ns, 1 * n, ns, 3
pts_pred = (prob_vol[...,None] * query_xyz).sum(1)
if rt_entropy:
# compute normalized entropy
match_unc = (-prob_vol * prob_vol.clamp(1e-9,1-1e-9).log()).sum(1)[:,None]
match_unc = match_unc/np.log(grid_size**3)
return pts_pred, match_unc
else:
return pts_pred
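# Hedged sketch (standalone, illustrative) of the soft-argmax step used above: correlate a
# query feature with features placed at candidate locations, softmax the scores, and return
# the probability-weighted location; the temperature and sizes are assumptions for the demo.
def _example_soft_argmax():
    import torch
    torch.manual_seed(0)
    grid_xyz = torch.randn(1000, 3)                              # candidate 3D locations
    grid_feat = torch.nn.functional.normalize(torch.randn(1000, 16), 2, -1)
    query = grid_feat[42:43]                                     # query matches entry 42
    prob = (grid_feat * query).sum(-1).mul(100).softmax(0)       # sharp softmax over scores
    pt_pred = (prob[:, None] * grid_xyz).sum(0)                  # expected location
    assert (pt_pred - grid_xyz[42]).norm() < 0.5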
def grad_update_bone(bones,embedding_xyz, nerf_vis, learning_rate):
"""
#TODO need to update bones locally
"""
device = bones.device
bones_data = bones.data.detach()
bones_data.requires_grad_(True)
bone_xyz_embed = embedding_xyz(bones_data[:,None,:3])
sdf_at_bone = evaluate_mlp(nerf_vis, bone_xyz_embed)
bone_loc_loss = F.relu(-sdf_at_bone).mean()
# compute gradient wrt bones
d_output = torch.ones_like(bone_loc_loss, requires_grad=False, device=device)
gradient = torch.autograd.grad(
outputs=bone_loc_loss,
inputs=bones_data,
grad_outputs=d_output,
create_graph=True,
retain_graph=True,
only_inputs=True)[0]
bones.data = bones.data-gradient*learning_rate
return bone_loc_loss
def loss_filter_line(sil_err, errid, frameid, sil_loss_samp, img_size, scale_factor=10):
"""
sil_err: Tx512
errid: N
"""
sil_loss_samp = sil_loss_samp.detach().cpu().numpy().reshape(-1)
sil_err[errid] = sil_loss_samp
sil_err = sil_err.reshape(-1,img_size)
sil_err = sil_err.sum(-1) / (1e-9+(sil_err>0).astype(float).sum(-1))
sil_err_med = np.median(sil_err[sil_err>0])
invalid_frame = sil_err > sil_err_med*scale_factor
invalid_idx = invalid_frame[frameid]
sil_err[:] = 0
return invalid_idx
def loss_filter(g_floerr, flo_loss_samp, sil_at_samp_flo, scale_factor=10):
"""
g_floerr: T,
flo_loss_samp: bs,N,1
sil_at_samp_flo:bs,N,1
"""
bs = sil_at_samp_flo.shape[0]
# find the historical median
g_floerr = g_floerr[g_floerr>0]
# tb updated as history value
#flo_err = []
#for i in range(bs):
# flo_err_sub =flo_loss_samp[i][sil_at_samp_flo[i]]
# if len(flo_err_sub) >0:
# #flo_err_sub = flo_err_sub.median().detach().cpu().numpy()
# flo_err_sub = flo_err_sub.mean().detach().cpu().numpy()
# else:
# flo_err_sub = 0
# flo_err.append(flo_err_sub)
#flo_err = np.stack(flo_err)
# vectorized version but uses mean to update
flo_err = (flo_loss_samp * sil_at_samp_flo).sum(1) /\
(1e-9+sil_at_samp_flo.sum(1)) # bs, N, 1
flo_err = flo_err.detach().cpu().numpy()[...,0]
# find invalid idx
invalid_idx = flo_err > np.median(g_floerr)*scale_factor
return flo_err, invalid_idx
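# Hedged sketch (standalone, illustrative): the filters above flag frames whose per-frame
# error exceeds scale_factor times a median error; a toy numeric example of that rule.
def _example_median_outlier_rule():
    import numpy as np
    per_frame_err = np.array([0.10, 0.12, 0.11, 5.0])
    invalid = per_frame_err > np.median(per_frame_err) * 10
    assert invalid.tolist() == [False, False, False, True]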
def compute_xyz_wt_loss(gt_list, curr_list):
loss = []
for i in range(len(gt_list)):
loss.append( (gt_list[i].detach() - curr_list[i]).pow(2).mean() )
loss = torch.stack(loss).mean()
return loss
def compute_root_sm_2nd_loss(rtk_all, data_offset):
"""
2nd order loss
"""
rot_sm_loss = []
trn_sm_loss = []
for didx in range(len(data_offset)-1):
stt_idx = data_offset[didx]
end_idx = data_offset[didx+1]
stt_rtk = rtk_all[stt_idx:end_idx-2]
mid_rtk = rtk_all[stt_idx+1:end_idx-1]
end_rtk = rtk_all[stt_idx+2:end_idx]
rot_sub1 = stt_rtk[:,:3,:3].matmul(mid_rtk[:,:3,:3].permute(0,2,1))
rot_sub2 = mid_rtk[:,:3,:3].matmul(end_rtk[:,:3,:3].permute(0,2,1))
trn_sub1 = stt_rtk[:,:3,3] - mid_rtk[:,:3,3]
trn_sub2 = mid_rtk[:,:3,3] - end_rtk[:,:3,3]
rot_sm_sub = rot_sub1.matmul(rot_sub2.permute(0,2,1))
trn_sm_sub = trn_sub1 - trn_sub2
rot_sm_loss.append(rot_sm_sub)
trn_sm_loss.append(trn_sm_sub)
rot_sm_loss = torch.cat(rot_sm_loss,0)
rot_sm_loss = rot_angle(rot_sm_loss).mean()*1e-1
trn_sm_loss = torch.cat(trn_sm_loss,0)
trn_sm_loss = trn_sm_loss.norm(2,-1).mean()
root_sm_loss = rot_sm_loss + trn_sm_loss
root_sm_loss = root_sm_loss * 0.1
return root_sm_loss
def compute_root_sm_loss(rtk_all, data_offset):
rot_sm_loss = []
trans_sm_loss = []
for didx in range(len(data_offset)-1):
stt_idx = data_offset[didx]
end_idx = data_offset[didx+1]
rot_sm_sub = rtk_all[stt_idx:end_idx-1,:3,:3].matmul(
rtk_all[stt_idx+1:end_idx,:3,:3].permute(0,2,1))
trans_sm_sub = rtk_all[stt_idx:end_idx-1,:3,3] - \
rtk_all[stt_idx+1:end_idx,:3,3]
rot_sm_loss.append(rot_sm_sub)
trans_sm_loss.append(trans_sm_sub)
rot_sm_loss = torch.cat(rot_sm_loss,0)
rot_sm_loss = rot_angle(rot_sm_loss).mean()*1e-3
trans_sm_loss = torch.cat(trans_sm_loss,0)
trans_sm_loss = trans_sm_loss.norm(2,-1).mean()*0.1
root_sm_loss = rot_sm_loss + trans_sm_loss
return root_sm_loss
def shape_init_loss(pts, faces, mlp, embed, bound_factor, use_ellips=True):
# compute sdf loss wrt to a mesh
# construct mesh
mesh = trimesh.Trimesh(pts.cpu(), faces=faces.cpu())
device = next(mlp.parameters()).device
# Sample points
nsample =10000
obj_bound = pts.abs().max(0)[0][None,None]
bound = obj_bound * bound_factor
pts_samp = torch.rand(1,nsample,3).to(device)*2*bound-bound
# outside: positive
if use_ellips:
# signed distance to a ellipsoid
dis = (pts_samp/obj_bound).pow(2).sum(2).view(-1)
dis = torch.sqrt(dis)
dis = dis - 1
dis = dis * obj_bound.mean()
else:
# signed distance to a sphere
dis = (pts_samp).pow(2).sum(2).view(-1)
dis = torch.sqrt(dis)
dis = dis - obj_bound.min()
# compute sdf
pts_embedded = embed(pts_samp)
y = evaluate_mlp(mlp, pts_embedded, chunk=pts_samp.shape[0],
xyz=None,code=None,sigma_only=True)
sdf = -y.view(-1) # positive: outside
shape_loss = (sdf - dis).pow(2).mean()
return shape_loss
|
banmo-main
|
nnutils/loss_utils.py
|
"""
MIT License
Copyright (c) 2019 ThibaultGROUEIX
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import torch
def fscore(dist1, dist2, threshold=0.001):
"""
Calculates the F-score between two point clouds with the corresponding threshold value.
:param dist1: Batch, N-Points
:param dist2: Batch, N-Points
:param th: float
:return: fscore, precision, recall
"""
# NB: in this repo, dist1 and dist2 are squared pointcloud Euclidean distances, so you should adapt the threshold accordingly.
precision_1 = torch.mean((dist1 < threshold).float(), dim=1)
precision_2 = torch.mean((dist2 < threshold).float(), dim=1)
fscore = 2 * precision_1 * precision_2 / (precision_1 + precision_2)
fscore[torch.isnan(fscore)] = 0
return fscore, precision_1, precision_2
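# Hedged usage sketch (illustrative): dist1/dist2 are squared nearest-neighbour distances
# (see the note above), so two identical clouds yield precision, recall and F-score of 1.
def _example_fscore():
    import torch
    dist = torch.zeros(1, 100)      # batch of 1, 100 points, all squared distances 0
    f, p1, p2 = fscore(dist, dist, threshold=0.001)
    assert float(f) == 1.0 and float(p1) == 1.0 and float(p2) == 1.0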
|
banmo-main
|
third_party/fscore.py
|
# MIT license
# Copyright (c) 2019 LI RUOTENG
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# modified from https://github.com/liruoteng/OpticalFlowToolkit
import png
import numpy as np
import matplotlib.colors as cl
import matplotlib.pyplot as plt
from PIL import Image
import cv2
import pdb
UNKNOWN_FLOW_THRESH = 1e7
SMALLFLOW = 0.0
LARGEFLOW = 1e8
def warp_flow(img, flow, normed=False):
h, w = flow.shape[:2]
flow = flow.copy().astype(np.float32)
if normed:
flow[:,:,0] = flow[:,:,0] * w / 2.
flow[:,:,1] = flow[:,:,1] * h / 2.
flow[:,:,0] += np.arange(w)
flow[:,:,1] += np.arange(h)[:,np.newaxis]
res = cv2.remap(img, flow, None, cv2.INTER_LINEAR)
return res
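# Hedged usage sketch (illustrative): remapping with an all-zero flow field reproduces the
# input image, since every pixel samples its own (integer) location.
def _example_warp_flow_identity():
    import numpy as np
    img = np.random.rand(8, 8, 3).astype(np.float32)
    flow = np.zeros((8, 8, 2), dtype=np.float32)
    assert np.allclose(warp_flow(img, flow), img)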
def cat_imgflo(img, flo):
"""
img in (0,1)
flo in normalized coordinate
"""
img = img.copy() * 255
h,w = img.shape[:2]
flo = flo.copy()
flo[:,:,0] = flo[:,:,0] * 0.5 * w
flo[:,:,1] = flo[:,:,1] * 0.5 * h
imgflo = point_vec(img, flo)
return imgflo
def point_vec(img,flow,skip=10):
skip=10
maxsize=500.
extendfac=1.
#resize_factor = 2
resize_factor = max(1,max(maxsize/img.shape[0], maxsize/img.shape[1]))
dispimg = cv2.resize(img.copy(), None,fx=resize_factor,fy=resize_factor)
flow = cv2.resize(flow.copy(), None, fx=resize_factor, fy=resize_factor,interpolation=cv2.INTER_NEAREST) * resize_factor
meshgrid = np.meshgrid(range(dispimg.shape[1]),range(dispimg.shape[0]))
colorflow = flow_to_image(flow).astype(int)
for i in range(dispimg.shape[1]): # x
for j in range(dispimg.shape[0]): # y
if flow.shape[-1]==3 and flow[j,i,2] != 1: continue
if j%skip!=0 or i%skip!=0: continue
xend = int((meshgrid[0][j,i]+extendfac*flow[j,i,0]))
yend = int((meshgrid[1][j,i]+extendfac*flow[j,i,1]))
leng = np.linalg.norm(flow[j,i,:2]*extendfac)
if leng<1:continue
dispimg = cv2.arrowedLine(dispimg, (meshgrid[0][j,i],meshgrid[1][j,i]),\
(xend,yend),
(int(colorflow[j,i,2]),int(colorflow[j,i,1]),int(colorflow[j,i,0])),1,tipLength=4/leng,line_type=cv2.LINE_AA)
return dispimg
def flow_to_image(flow):
"""
Convert flow into middlebury color code image
:param flow: optical flow map
:return: optical flow image in middlebury color
"""
u = flow[:, :, 0]
v = flow[:, :, 1]
maxu = -999.
maxv = -999.
minu = 999.
minv = 999.
idxUnknow = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
u[idxUnknow] = 0
v[idxUnknow] = 0
maxu = max(maxu, np.max(u))
minu = min(minu, np.min(u))
maxv = max(maxv, np.max(v))
minv = min(minv, np.min(v))
rad = np.sqrt(u ** 2 + v ** 2)
maxrad = max(-1, np.max(rad))
u = u/(maxrad + np.finfo(float).eps)
v = v/(maxrad + np.finfo(float).eps)
img = compute_color(u, v)
idx = np.repeat(idxUnknow[:, :, np.newaxis], 3, axis=2)
img[idx] = 0
return np.uint8(img)
def compute_color(u, v):
"""
compute optical flow color map
:param u: optical flow horizontal map
:param v: optical flow vertical map
:return: optical flow in color code
"""
[h, w] = u.shape
img = np.zeros([h, w, 3])
nanIdx = np.isnan(u) | np.isnan(v)
u[nanIdx] = 0
v[nanIdx] = 0
colorwheel = make_color_wheel()
ncols = np.size(colorwheel, 0)
rad = np.sqrt(u**2+v**2)
a = np.arctan2(-v, -u) / np.pi
fk = (a+1) / 2 * (ncols - 1) + 1
k0 = np.floor(fk).astype(int)
k1 = k0 + 1
k1[k1 == ncols+1] = 1
f = fk - k0
for i in range(0, np.size(colorwheel,1)):
tmp = colorwheel[:, i]
col0 = tmp[k0-1] / 255
col1 = tmp[k1-1] / 255
col = (1-f) * col0 + f * col1
idx = rad <= 1
col[idx] = 1-rad[idx]*(1-col[idx])
notidx = np.logical_not(idx)
col[notidx] *= 0.75
img[:, :, i] = np.uint8(np.floor(255 * col*(1-nanIdx)))
return img
def make_color_wheel():
"""
Generate color wheel according Middlebury color code
:return: Color wheel
"""
RY = 15
YG = 6
GC = 4
CB = 11
BM = 13
MR = 6
ncols = RY + YG + GC + CB + BM + MR
colorwheel = np.zeros([ncols, 3])
col = 0
# RY
colorwheel[0:RY, 0] = 255
colorwheel[0:RY, 1] = np.transpose(np.floor(255*np.arange(0, RY) / RY))
col += RY
# YG
colorwheel[col:col+YG, 0] = 255 - np.transpose(np.floor(255*np.arange(0, YG) / YG))
colorwheel[col:col+YG, 1] = 255
col += YG
# GC
colorwheel[col:col+GC, 1] = 255
colorwheel[col:col+GC, 2] = np.transpose(np.floor(255*np.arange(0, GC) / GC))
col += GC
# CB
colorwheel[col:col+CB, 1] = 255 - np.transpose(np.floor(255*np.arange(0, CB) / CB))
colorwheel[col:col+CB, 2] = 255
col += CB
# BM
colorwheel[col:col+BM, 2] = 255
colorwheel[col:col+BM, 0] = np.transpose(np.floor(255*np.arange(0, BM) / BM))
col += BM
# MR
colorwheel[col:col+MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
colorwheel[col:col+MR, 0] = 255
return colorwheel
|
banmo-main
|
third_party/ext_utils/flowlib.py
|
# MIT License
#
# Copyright (c) 2019 Carnegie Mellon University
# Copyright (c) 2021 Google LLC
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import math
import png
import struct
import array
import numpy as np
import cv2
import pdb
import sys
import re
from io import *
def readPFM(file):
file = open(file, 'rb')
color = None
width = None
height = None
scale = None
endian = None
header = file.readline().rstrip()
if (sys.version[0]) == '3':
header = header.decode('utf-8')
if header == 'PF':
color = True
elif header == 'Pf':
color = False
else:
raise Exception('Not a PFM file.')
if (sys.version[0]) == '3':
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline().decode('utf-8'))
else:
dim_match = re.match(r'^(\d+)\s(\d+)\s$', file.readline())
if dim_match:
width, height = map(int, dim_match.groups())
else:
raise Exception('Malformed PFM header.')
if (sys.version[0]) == '3':
scale = float(file.readline().rstrip().decode('utf-8'))
else:
scale = float(file.readline().rstrip())
if scale < 0: # little-endian
endian = '<'
scale = -scale
else:
endian = '>' # big-endian
data = np.fromfile(file, endian + 'f')
shape = (height, width, 3) if color else (height, width)
data = np.reshape(data, shape)
data = np.flipud(data)
return data, scale
def write_pfm(path, image, scale=1):
"""Write pfm file.
Args:
path (str): path to file
image (array): data
scale (int, optional): Scale. Defaults to 1.
"""
# cast H x W x 2 inputs to H x W x 3 by appending a zero channel
if image.ndim == 3 and image.shape[-1]==2:
image = np.concatenate([image, np.zeros(image.shape[:2] + (1,))],-1)
image = image.astype(np.float32)
with open(path, "wb") as file:
color = None
if image.dtype.name != "float32":
raise Exception("Image dtype must be float32.")
image = np.flipud(image)
if len(image.shape) == 3 and image.shape[2] == 3: # color image
color = True
elif (
len(image.shape) == 2 or len(image.shape) == 3 and image.shape[2] == 1
): # greyscale
color = False
else:
raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.")
file.write("PF\n".encode() if color else "Pf\n".encode())
file.write("%d %d\n".encode() % (image.shape[1], image.shape[0]))
endian = image.dtype.byteorder
if endian == "<" or endian == "=" and sys.byteorder == "little":
scale = -scale
file.write("%f\n".encode() % scale)
image.tofile(file)
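# Hedged usage sketch (illustrative): round-trip a small float32 image through
# write_pfm / readPFM via a temporary file; the path is an assumption for the demo.
def _example_pfm_roundtrip():
    import os
    import tempfile
    import numpy as np
    data = np.random.rand(4, 5, 3).astype(np.float32)
    path = os.path.join(tempfile.mkdtemp(), 'roundtrip.pfm')
    write_pfm(path, data)
    restored, scale = readPFM(path)
    assert np.allclose(restored, data)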
|
banmo-main
|
third_party/ext_utils/util_flow.py
|
from setuptools import setup, find_packages
from torch.utils.cpp_extension import BuildExtension, CUDAExtension
CUDA_FLAGS = []
gencodes = [
'-gencode', 'arch=compute_52,code=sm_52',
'-gencode', 'arch=compute_60,code=sm_60',
'-gencode', 'arch=compute_61,code=sm_61',
'-gencode', 'arch=compute_70,code=sm_70',
'-gencode', 'arch=compute_75,code=sm_75',
'-gencode', 'arch=compute_75,code=compute_75',]
extra_compile_flags = {'cxx': [], 'nvcc': []}
extra_compile_flags['nvcc'] += gencodes
ext_modules=[
CUDAExtension('soft_renderer.cuda.load_textures', [
'soft_renderer/cuda/load_textures_cuda.cpp',
'soft_renderer/cuda/load_textures_cuda_kernel.cu',
],extra_compile_args=extra_compile_flags,),
CUDAExtension('soft_renderer.cuda.create_texture_image', [
'soft_renderer/cuda/create_texture_image_cuda.cpp',
'soft_renderer/cuda/create_texture_image_cuda_kernel.cu',
],extra_compile_args=extra_compile_flags,),
CUDAExtension('soft_renderer.cuda.soft_rasterize', [
'soft_renderer/cuda/soft_rasterize_cuda.cpp',
'soft_renderer/cuda/soft_rasterize_cuda_kernel.cu',
],extra_compile_args=extra_compile_flags,),
CUDAExtension('soft_renderer.cuda.voxelization', [
'soft_renderer/cuda/voxelization_cuda.cpp',
'soft_renderer/cuda/voxelization_cuda_kernel.cu',
],extra_compile_args=extra_compile_flags,),
]
INSTALL_REQUIREMENTS = ['numpy', 'torch', 'torchvision', 'scikit-image', 'tqdm', 'imageio']
setup(
description='PyTorch implementation of "Soft Rasterizer"',
author='Shichen Liu',
author_email='liushichen95@gmail.com',
license='MIT License',
version='1.0.0',
name='soft_renderer',
packages=['soft_renderer', 'soft_renderer.cuda', 'soft_renderer.functional'],
install_requires=INSTALL_REQUIREMENTS,
ext_modules=ext_modules,
cmdclass = {'build_ext': BuildExtension}
)
|
banmo-main
|
third_party/softras/setup.py
|
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy
import soft_renderer as sr
class Renderer(nn.Module):
def __init__(self, image_size=256, background_color=[0,0,0], near=1, far=100,
anti_aliasing=True, fill_back=True, eps=1e-6,
camera_mode='projection',
P=None, dist_coeffs=None, orig_size=512,
perspective=True, viewing_angle=30, viewing_scale=1.0,
eye=None, camera_direction=[0,0,1],
light_mode='surface',
light_intensity_ambient=0.5, light_color_ambient=[1,1,1],
light_intensity_directionals=0.5, light_color_directionals=[1,1,1],
light_directions=[0,1,0]):
super(Renderer, self).__init__()
# light
self.lighting = sr.Lighting(light_mode,
light_intensity_ambient, light_color_ambient,
light_intensity_directionals, light_color_directionals,
light_directions)
# camera
self.transform = sr.Transform(camera_mode,
P, dist_coeffs, orig_size,
perspective, viewing_angle, viewing_scale,
eye, camera_direction)
# rasterization
self.rasterizer = sr.Rasterizer(image_size, background_color, near, far,
anti_aliasing, fill_back, eps)
def forward(self, mesh, mode=None):
mesh = self.lighting(mesh)
mesh = self.transform(mesh)
return self.rasterizer(mesh, mode)
class SoftRenderer(nn.Module):
def __init__(self, image_size=256, background_color=[0,0,0], near=1, far=100,
anti_aliasing=False, fill_back=True, eps=1e-3,
sigma_val=1e-5, dist_func='euclidean', dist_eps=1e-4,
gamma_val=1e-4, aggr_func_rgb='softmax', aggr_func_alpha='prod',
texture_type='surface',
camera_mode='projection',
P=None, dist_coeffs=None, orig_size=512,
perspective=True, viewing_angle=30, viewing_scale=1.0,
eye=None, camera_direction=[0,0,1],
light_mode='surface',
light_intensity_ambient=0.5, light_color_ambient=[1,1,1],
light_intensity_directionals=0.5, light_color_directionals=[1,1,1],
light_directions=[0,1,0]):
super(SoftRenderer, self).__init__()
# light
self.lighting = sr.Lighting(light_mode,
light_intensity_ambient, light_color_ambient,
light_intensity_directionals, light_color_directionals,
light_directions)
# camera
self.transform = sr.Transform(camera_mode,
P, dist_coeffs, orig_size,
perspective, viewing_angle, viewing_scale,
eye, camera_direction)
# rasterization
self.rasterizer = sr.SoftRasterizer(image_size, background_color, near, far,
anti_aliasing, fill_back, eps,
sigma_val, dist_func, dist_eps,
gamma_val, aggr_func_rgb, aggr_func_alpha,
texture_type)
def set_sigma(self, sigma):
self.rasterizer.sigma_val = sigma
def set_gamma(self, gamma):
self.rasterizer.gamma_val = gamma
def set_texture_mode(self, mode):
assert mode in ['vertex', 'surface'], 'Mode only supports surface and vertex'
self.lighting.light_mode = mode
self.rasterizer.texture_type = mode
def render_mesh(self, mesh, mode=None):
self.set_texture_mode(mesh.texture_type)
mesh = self.lighting(mesh)
mesh = self.transform(mesh)
return self.rasterizer(mesh, mode)
def forward(self, vertices, faces, textures=None, mode=None, texture_type='surface'):
mesh = sr.Mesh(vertices, faces, textures=textures, texture_type=texture_type)
return self.render_mesh(mesh, mode)
|
banmo-main
|
third_party/softras/soft_renderer/renderer.py
|
from . import functional
from .mesh import Mesh
from .renderer import Renderer, SoftRenderer
from .transform import Projection, LookAt, Look, Transform
from .lighting import AmbientLighting, DirectionalLighting, Lighting
from .rasterizer import SoftRasterizer
from .losses import LaplacianLoss, FlattenLoss
__version__ = '1.0.0'
|
banmo-main
|
third_party/softras/soft_renderer/__init__.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import soft_renderer.functional as srf
class Mesh(object):
'''
A simple class for creating and manipulating triangle mesh objects
'''
def __init__(self, vertices, faces, textures=None, texture_res=1, texture_type='surface'):
'''
vertices, faces and textures (if not None) are expected to be Tensor objects
'''
self._vertices = vertices
self._faces = faces
if isinstance(self._vertices, np.ndarray):
self._vertices = torch.from_numpy(self._vertices).float().cuda()
if isinstance(self._faces, np.ndarray):
self._faces = torch.from_numpy(self._faces).int().cuda()
if self._vertices.ndimension() == 2:
self._vertices = self._vertices[None, :, :]
if self._faces.ndimension() == 2:
self._faces = self._faces[None, :, :]
self.device = self._vertices.device
self.texture_type = texture_type
self.batch_size = self._vertices.shape[0]
self.num_vertices = self._vertices.shape[1]
self.num_faces = self._faces.shape[1]
self._face_vertices = None
self._face_vertices_update = True
self._surface_normals = None
self._surface_normals_update = True
self._vertex_normals = None
self._vertex_normals_update = True
self._fill_back = False
# create textures
if textures is None:
if texture_type == 'surface':
self._textures = torch.ones(self.batch_size, self.num_faces, texture_res**2, 3,
dtype=torch.float32).to(self.device)
self.texture_res = texture_res
elif texture_type == 'vertex':
self._textures = torch.ones(self.batch_size, self.num_vertices, 3,
dtype=torch.float32).to(self.device)
self.texture_res = 1
else:
if isinstance(textures, np.ndarray):
textures = torch.from_numpy(textures).float().cuda()
if textures.ndimension() == 3 and texture_type == 'surface':
textures = textures[None, :, :, :]
if textures.ndimension() == 2 and texture_type == 'vertex':
textures = textures[None, :, :]
self._textures = textures
self.texture_res = int(np.sqrt(self._textures.shape[2]))
self._origin_vertices = self._vertices
self._origin_faces = self._faces
self._origin_textures = self._textures
@property
def faces(self):
return self._faces
@faces.setter
def faces(self, faces):
# need check tensor
self._faces = faces
self.num_faces = self._faces.shape[1]
self._face_vertices_update = True
self._surface_normals_update = True
self._vertex_normals_update = True
@property
def vertices(self):
return self._vertices
@vertices.setter
def vertices(self, vertices):
# need check tensor
self._vertices = vertices
self.num_vertices = self._vertices.shape[1]
self._face_vertices_update = True
self._surface_normals_update = True
self._vertex_normals_update = True
@property
def textures(self):
return self._textures
@textures.setter
def textures(self, textures):
# need check tensor
self._textures = textures
@property
def face_vertices(self):
if self._face_vertices_update:
self._face_vertices = srf.face_vertices(self.vertices, self.faces)
self._face_vertices_update = False
return self._face_vertices
@property
def surface_normals(self):
if self._surface_normals_update:
v10 = self.face_vertices[:, :, 0] - self.face_vertices[:, :, 1]
v12 = self.face_vertices[:, :, 2] - self.face_vertices[:, :, 1]
self._surface_normals = F.normalize(torch.cross(v12, v10), p=2, dim=2, eps=1e-6)
self._surface_normals_update = False
return self._surface_normals
@property
def vertex_normals(self):
if self._vertex_normals_update:
self._vertex_normals = srf.vertex_normals(self.vertices, self.faces)
self._vertex_normals_update = False
return self._vertex_normals
@property
def face_textures(self):
if self.texture_type in ['surface']:
return self.textures
elif self.texture_type in ['vertex']:
return srf.face_vertices(self.textures, self.faces)
else:
raise ValueError('texture type not applicable')
def fill_back_(self):
if not self._fill_back:
self.faces = torch.cat((self.faces, self.faces[:, :, [2, 1, 0]]), dim=1)
self.textures = torch.cat((self.textures, self.textures), dim=1)
self._fill_back = True
def reset_(self):
self.vertices = self._origin_vertices
self.faces = self._origin_faces
self.textures = self._origin_textures
self._fill_back = False
@classmethod
def from_obj(cls, filename_obj, normalization=False, load_texture=False, texture_res=1, texture_type='surface'):
'''
Create a Mesh object from a .obj file
'''
if load_texture:
vertices, faces, textures = srf.load_obj(filename_obj,
normalization=normalization,
texture_res=texture_res,
load_texture=True,
texture_type=texture_type)
else:
vertices, faces = srf.load_obj(filename_obj,
normalization=normalization,
texture_res=texture_res,
load_texture=False)
textures = None
return cls(vertices, faces, textures, texture_res, texture_type)
def save_obj(self, filename_obj, save_texture=False, texture_res_out=16):
if self.batch_size != 1:
raise ValueError('Cannot save a Mesh whose batch size is not 1')
if save_texture:
srf.save_obj(filename_obj, self.vertices[0], self.faces[0],
textures=self.textures[0],
texture_res=texture_res_out, texture_type=self.texture_type)
else:
srf.save_obj(filename_obj, self.vertices[0], self.faces[0], textures=None)
def voxelize(self, voxel_size=32):
face_vertices_norm = self.face_vertices * voxel_size / (voxel_size - 1) + 0.5
return srf.voxelization(face_vertices_norm, voxel_size, False)
|
banmo-main
|
third_party/softras/soft_renderer/mesh.py
|
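A minimal sketch of the Mesh container above, assuming the package's CUDA extensions are built and a GPU is present (the constructor moves numpy inputs onto CUDA).
import numpy as np
from soft_renderer import Mesh

# a single triangle; numpy inputs are converted to CUDA tensors by the constructor
vertices = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=np.float32)
faces = np.array([[0, 1, 2]], dtype=np.int32)
mesh = Mesh(vertices, faces, texture_type='vertex')

print(mesh.batch_size, mesh.num_vertices, mesh.num_faces)   # 1 3 1
print(mesh.face_vertices.shape)     # torch.Size([1, 1, 3, 3]), gathered lazily and cached
print(mesh.surface_normals.shape)   # torch.Size([1, 1, 3]), recomputed after vertex/face edits
mesh.reset_()                       # restore the tensors passed at construction time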
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import soft_renderer.functional as srf
class AmbientLighting(nn.Module):
def __init__(self, light_intensity=0.5, light_color=(1,1,1)):
super(AmbientLighting, self).__init__()
self.light_intensity = light_intensity
self.light_color = light_color
def forward(self, light):
return srf.ambient_lighting(light, self.light_intensity, self.light_color)
class DirectionalLighting(nn.Module):
def __init__(self, light_intensity=0.5, light_color=(1,1,1), light_direction=(0,1,0)):
super(DirectionalLighting, self).__init__()
self.light_intensity = light_intensity
self.light_color = light_color
self.light_direction = light_direction
def forward(self, light, normals):
return srf.directional_lighting(light, normals,
self.light_intensity, self.light_color,
self.light_direction)
class Lighting(nn.Module):
def __init__(self, light_mode='surface',
intensity_ambient=0.5, color_ambient=[1,1,1],
intensity_directionals=0.5, color_directionals=[1,1,1],
directions=[0,1,0]):
super(Lighting, self).__init__()
if light_mode not in ['surface', 'vertex']:
raise ValueError('Lighting mode only supports surface and vertex')
self.light_mode = light_mode
self.ambient = AmbientLighting(intensity_ambient, color_ambient)
self.directionals = nn.ModuleList([DirectionalLighting(intensity_directionals,
color_directionals,
directions)])
def forward(self, mesh):
if self.light_mode == 'surface':
light = torch.zeros_like(mesh.faces, dtype=torch.float32).to(mesh.device)
light = light.contiguous()
light = self.ambient(light)
for directional in self.directionals:
light = directional(light, mesh.surface_normals)
mesh.textures = mesh.textures * light[:, :, None, :]
elif self.light_mode == 'vertex':
light = torch.zeros_like(mesh.vertices, dtype=torch.float32).to(mesh.device)
light = light.contiguous()
light = self.ambient(light)
for directional in self.directionals:
light = directional(light, mesh.vertex_normals)
mesh.textures = mesh.textures * light
return mesh
|
banmo-main
|
third_party/softras/soft_renderer/lighting.py
|
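A sketch of the Lighting module above applied to a vertex-textured mesh (CUDA build assumed, as before); note that light_mode must match the mesh's texture type.
import numpy as np
import soft_renderer as sr

vertices = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]], dtype=np.float32)
faces = np.array([[0, 1, 2]], dtype=np.int32)
mesh = sr.Mesh(vertices, faces, texture_type='vertex')

# ambient term plus a single directional light pointing along +y
lighting = sr.Lighting(light_mode='vertex',
                       intensity_ambient=0.4, color_ambient=[1, 1, 1],
                       intensity_directionals=0.6, directions=[0, 1, 0])
mesh = lighting(mesh)   # scales mesh.textures by the accumulated per-vertex light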
import math
import numpy as np
import torch
import torch.nn as nn
import soft_renderer.functional as srf
class Projection(nn.Module):
def __init__(self, P, dist_coeffs=None, orig_size=512):
super(Projection, self).__init__()
self.P = P
self.dist_coeffs = dist_coeffs
self.orig_size = orig_size
if isinstance(self.P, np.ndarray):
self.P = torch.from_numpy(self.P).cuda()
if self.P is None or self.P.ndimension() != 3 or self.P.shape[1] != 3 or self.P.shape[2] != 4:
raise ValueError('You need to provide a valid (batch_size)x3x4 projection matrix')
if dist_coeffs is None:
self.dist_coeffs = torch.cuda.FloatTensor([[0., 0., 0., 0., 0.]]).repeat(self.P.shape[0], 1)
def forward(self, vertices):
vertices = srf.projection(vertices, self.P, self.dist_coeffs, self.orig_size)
return vertices
class LookAt(nn.Module):
def __init__(self, perspective=True, viewing_angle=30, viewing_scale=1.0, eye=None):
super(LookAt, self).__init__()
self.perspective = perspective
self.viewing_angle = viewing_angle
self.viewing_scale = viewing_scale
self._eye = eye
if self._eye is None:
self._eye = [0, 0, -(1. / math.tan(math.radians(self.viewing_angle)) + 1)]
def forward(self, vertices):
vertices = srf.look_at(vertices, self._eye)
# perspective transformation
if self.perspective:
vertices = srf.perspective(vertices, angle=self.viewing_angle)
else:
vertices = srf.orthogonal(vertices, scale=self.viewing_scale)
return vertices
class Look(nn.Module):
def __init__(self, camera_direction=[0,0,1], perspective=True, viewing_angle=30, viewing_scale=1.0, eye=None):
super(Look, self).__init__()
self.perspective = perspective
self.viewing_angle = viewing_angle
self.viewing_scale = viewing_scale
self._eye = eye
self.camera_direction = camera_direction
if self._eye is None:
self._eye = [0, 0, -(1. / math.tan(math.radians(self.viewing_angle)) + 1)]
def forward(self, vertices):
vertices = srf.look(vertices, self._eye, self.camera_direction)
# perspective transformation
if self.perspective:
vertices = srf.perspective(vertices, angle=self.viewing_angle)
else:
vertices = srf.orthogonal(vertices, scale=self.viewing_scale)
return vertices
class Transform(nn.Module):
def __init__(self, camera_mode='projection', P=None, dist_coeffs=None, orig_size=512,
perspective=True, viewing_angle=30, viewing_scale=1.0,
eye=None, camera_direction=[0,0,1]):
super(Transform, self).__init__()
self.camera_mode = camera_mode
if self.camera_mode == 'projection':
self.transformer = Projection(P, dist_coeffs, orig_size)
elif self.camera_mode == 'look':
self.transformer = Look(camera_direction, perspective, viewing_angle, viewing_scale, eye)
elif self.camera_mode == 'look_at':
self.transformer = LookAt(perspective, viewing_angle, viewing_scale, eye)
else:
raise ValueError('Camera mode has to be one of projection, look or look_at')
def forward(self, mesh):
mesh.vertices = self.transformer(mesh.vertices)
return mesh
def set_eyes_from_angles(self, distances, elevations, azimuths):
if self.camera_mode not in ['look', 'look_at']:
raise ValueError('Projection does not need to set eyes')
self.transformer._eye = srf.get_points_from_angles(distances, elevations, azimuths)
def set_eyes(self, eyes):
if self.camera_mode not in ['look', 'look_at']:
raise ValueError('Projection does not need to set eyes')
self.transformer._eye = eyes
@property
def eyes(self):
return self.transformer._eye
|
banmo-main
|
third_party/softras/soft_renderer/transform.py
|
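A sketch of the Transform module above in 'look_at' mode, placing the eye on a sphere around the object via set_eyes_from_angles (CUDA build assumed).
import torch
import soft_renderer as sr

transform = sr.Transform(camera_mode='look_at', viewing_angle=30)
transform.set_eyes_from_angles(2.732, 30.0, 45.0)   # distance, elevation, azimuth (degrees)

vertices = torch.tensor([[[-0.5, -0.5, 0.0], [0.5, -0.5, 0.0], [0.0, 0.5, 0.0]]], device='cuda')
faces = torch.tensor([[[0, 1, 2]]], dtype=torch.int32, device='cuda')
mesh = sr.Mesh(vertices, faces, texture_type='vertex')
mesh = transform(mesh)   # mesh.vertices are now in normalized camera coordinates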
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import soft_renderer.functional as srf
class SoftRasterizer(nn.Module):
def __init__(self, image_size=256, background_color=[0, 0, 0], near=1, far=100,
anti_aliasing=False, fill_back=False, eps=1e-3,
sigma_val=1e-5, dist_func='euclidean', dist_eps=1e-4,
gamma_val=1e-4, aggr_func_rgb='softmax', aggr_func_alpha='prod',
texture_type='surface'):
super(SoftRasterizer, self).__init__()
if dist_func not in ['hard', 'euclidean', 'barycentric']:
raise ValueError('Distance function only supports hard, euclidean and barycentric')
if aggr_func_rgb not in ['hard', 'softmax']:
raise ValueError('Aggregate function(rgb) only supports hard and softmax')
if aggr_func_alpha not in ['hard', 'prod', 'sum']:
raise ValueError('Aggregate function(a) only supports hard, prod and sum')
if texture_type not in ['surface', 'vertex']:
raise ValueError('Texture type only supports surface and vertex')
self.image_size = image_size
self.background_color = background_color
self.near = near
self.far = far
self.anti_aliasing = anti_aliasing
self.eps = eps
self.fill_back = fill_back
self.sigma_val = sigma_val
self.dist_func = dist_func
self.dist_eps = dist_eps
self.gamma_val = gamma_val
self.aggr_func_rgb = aggr_func_rgb
self.aggr_func_alpha = aggr_func_alpha
self.texture_type = texture_type
def forward(self, mesh, mode=None):
image_size = self.image_size * (2 if self.anti_aliasing else 1)
images = srf.soft_rasterize(mesh.face_vertices, mesh.face_textures, image_size,
self.background_color, self.near, self.far,
self.fill_back, self.eps,
self.sigma_val, self.dist_func, self.dist_eps,
self.gamma_val, self.aggr_func_rgb, self.aggr_func_alpha,
self.texture_type)
if self.anti_aliasing:
images = F.avg_pool2d(images, kernel_size=2, stride=2)
return images
|
banmo-main
|
third_party/softras/soft_renderer/rasterizer.py
|
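The Lighting, Transform and SoftRasterizer modules compose in the same order that SoftRenderer.render_mesh uses; a hedged end-to-end sketch (CUDA extensions and a GPU assumed):
import torch
import soft_renderer as sr

vertices = torch.tensor([[[-0.5, -0.5, 0.0], [0.5, -0.5, 0.0], [0.0, 0.5, 0.0]]], device='cuda')
faces = torch.tensor([[[0, 1, 2]]], dtype=torch.int32, device='cuda')
mesh = sr.Mesh(vertices, faces, texture_type='vertex')

mesh = sr.Lighting(light_mode='vertex')(mesh)
mesh = sr.Transform(camera_mode='look_at')(mesh)
rasterizer = sr.SoftRasterizer(image_size=64, sigma_val=1e-4, gamma_val=1e-4,
                               texture_type='vertex', anti_aliasing=True)
images = rasterizer(mesh)   # rendered at 128x128, average-pooled back to [1, 4, 64, 64]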
import torch
import torch.nn as nn
import numpy as np
class LaplacianLoss(nn.Module):
def __init__(self, vertex, faces, average=False):
super(LaplacianLoss, self).__init__()
self.nv = vertex.size(0)
self.nf = faces.size(0)
self.average = average
laplacian = np.zeros([self.nv, self.nv]).astype(np.float32)
laplacian[faces[:, 0], faces[:, 1]] = -1
laplacian[faces[:, 1], faces[:, 0]] = -1
laplacian[faces[:, 1], faces[:, 2]] = -1
laplacian[faces[:, 2], faces[:, 1]] = -1
laplacian[faces[:, 2], faces[:, 0]] = -1
laplacian[faces[:, 0], faces[:, 2]] = -1
r, c = np.diag_indices(laplacian.shape[0])
laplacian[r, c] = -laplacian.sum(1)
for i in range(self.nv):
laplacian[i, :] /= laplacian[i, i]
self.register_buffer('laplacian', torch.from_numpy(laplacian))
def forward(self, x):
batch_size = x.size(0)
x = torch.matmul(self.laplacian, x)
dims = tuple(range(x.ndimension())[1:])
x = x.pow(2).sum(dims)
if self.average:
return x.sum() / batch_size
else:
return x
class FlattenLoss(nn.Module):
def __init__(self, faces, average=False):
super(FlattenLoss, self).__init__()
self.nf = faces.size(0)
self.average = average
faces = faces.detach().cpu().numpy()
vertices = list(set([tuple(v) for v in np.sort(np.concatenate((faces[:, 0:2], faces[:, 1:3]), axis=0))]))
v0s = np.array([v[0] for v in vertices], 'int32')
v1s = np.array([v[1] for v in vertices], 'int32')
v2s = []
v3s = []
for v0, v1 in zip(v0s, v1s):
count = 0
for face in faces:
if v0 in face and v1 in face:
v = np.copy(face)
v = v[v != v0]
v = v[v != v1]
if count == 0:
v2s.append(int(v[0]))
count += 1
else:
v3s.append(int(v[0]))
v2s = np.array(v2s, 'int32')
v3s = np.array(v3s, 'int32')
self.register_buffer('v0s', torch.from_numpy(v0s).long())
self.register_buffer('v1s', torch.from_numpy(v1s).long())
self.register_buffer('v2s', torch.from_numpy(v2s).long())
self.register_buffer('v3s', torch.from_numpy(v3s).long())
def forward(self, vertices, eps=1e-6):
# make v0s, v1s, v2s, v3s
batch_size = vertices.size(0)
v0s = vertices[:, self.v0s, :]
v1s = vertices[:, self.v1s, :]
v2s = vertices[:, self.v2s, :]
v3s = vertices[:, self.v3s, :]
a1 = v1s - v0s
b1 = v2s - v0s
a1l2 = a1.pow(2).sum(-1)
b1l2 = b1.pow(2).sum(-1)
a1l1 = (a1l2 + eps).sqrt()
b1l1 = (b1l2 + eps).sqrt()
ab1 = (a1 * b1).sum(-1)
cos1 = ab1 / (a1l1 * b1l1 + eps)
sin1 = (1 - cos1.pow(2) + eps).sqrt()
c1 = a1 * (ab1 / (a1l2 + eps))[:, :, None]
cb1 = b1 - c1
cb1l1 = b1l1 * sin1
a2 = v1s - v0s
b2 = v3s - v0s
a2l2 = a2.pow(2).sum(-1)
b2l2 = b2.pow(2).sum(-1)
a2l1 = (a2l2 + eps).sqrt()
b2l1 = (b2l2 + eps).sqrt()
ab2 = (a2 * b2).sum(-1)
cos2 = ab2 / (a2l1 * b2l1 + eps)
sin2 = (1 - cos2.pow(2) + eps).sqrt()
c2 = a2 * (ab2 / (a2l2 + eps))[:, :, None]
cb2 = b2 - c2
cb2l1 = b2l1 * sin2
cos = (cb1 * cb2).sum(-1) / (cb1l1 * cb2l1 + eps)
dims = tuple(range(cos.ndimension())[1:])
loss = (cos + 1).pow(2).sum(dims)
if self.average:
return loss.sum() / batch_size
else:
return loss
|
banmo-main
|
third_party/softras/soft_renderer/losses.py
|
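A small sketch of the two regularizers above on a closed tetrahedron; both are built from a fixed template topology and then applied to batched vertices. The math itself runs on CPU tensors, though importing the package still assumes its CUDA extensions are built.
import torch
from soft_renderer.losses import LaplacianLoss, FlattenLoss

# closed tetrahedron template: 4 vertices, 4 faces
vertices = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
faces = torch.tensor([[0, 1, 2], [0, 1, 3], [0, 2, 3], [1, 2, 3]])

laplacian_loss = LaplacianLoss(vertices, faces, average=True)
flatten_loss = FlattenLoss(faces, average=True)

batch = vertices[None].clone().requires_grad_()      # [1, 4, 3] deformable copy
loss = laplacian_loss(batch) + 0.1 * flatten_loss(batch)
loss.backward()                                      # gradients land in batch.grad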
banmo-main
|
third_party/softras/soft_renderer/cuda/__init__.py
|
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
import soft_renderer.cuda.voxelization as voxelization_cuda
def voxelize_sub1(faces, size, dim):
bs = faces.size(0)
nf = faces.size(1)
if dim == 0:
faces = faces[:, :, :, [2, 1, 0]].contiguous()
elif dim == 1:
faces = faces[:, :, :, [0, 2, 1]].contiguous()
voxels = torch.zeros(bs, size, size, size).int().cuda()
return voxelization_cuda.voxelize_sub1(faces, voxels)[0].transpose(dim + 1, -1)
def voxelize_sub2(faces, size):
bs = faces.size(0)
nf = faces.size(1)
voxels = torch.zeros(bs, size, size, size).int().cuda()
return voxelization_cuda.voxelize_sub2(faces, voxels)[0]
def voxelize_sub3(faces, voxels):
bs = voxels.size(0)
vs = voxels.size(1)
visible = torch.zeros_like(voxels, dtype=torch.int32).cuda()
voxels, visible = voxelization_cuda.voxelize_sub3(faces, voxels, visible)
sum_visible = visible.sum()
while True:
voxels, visible = voxelization_cuda.voxelize_sub4(faces, voxels, visible)
if visible.sum() == sum_visible:
break
else:
sum_visible = visible.sum()
return 1 - visible
def voxelization(faces, size, normalize=False):
faces = faces.clone()
if normalize:
pass
else:
faces *= size
voxels0 = voxelize_sub1(faces, size, 0)
voxels1 = voxelize_sub1(faces, size, 1)
voxels2 = voxelize_sub1(faces, size, 2)
voxels3 = voxelize_sub2(faces, size)
voxels = voxels0 + voxels1 + voxels2 + voxels3
voxels = (voxels > 0).int()
voxels = voxelize_sub3(faces, voxels)
return voxels
|
banmo-main
|
third_party/softras/soft_renderer/functional/voxelization.py
|
import numpy as np
import torch
import torch.nn.functional as F
def look_at(vertices, eye, at=[0, 0, 0], up=[0, 1, 0]):
"""
"Look at" transformation of vertices.
"""
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
device = vertices.device
# if list or tuple convert to numpy array
if isinstance(at, list) or isinstance(at, tuple):
at = torch.tensor(at, dtype=torch.float32, device=device)
# if numpy array convert to tensor
elif isinstance(at, np.ndarray):
at = torch.from_numpy(at).to(device)
elif torch.is_tensor(at):
at = at.to(device)
if isinstance(up, list) or isinstance(up, tuple):
up = torch.tensor(up, dtype=torch.float32, device=device)
elif isinstance(up, np.ndarray):
up = torch.from_numpy(up).to(device)
elif torch.is_tensor(up):
up = up.to(device)
if isinstance(eye, list) or isinstance(eye, tuple):
eye = torch.tensor(eye, dtype=torch.float32, device=device)
elif isinstance(eye, np.ndarray):
eye = torch.from_numpy(eye).to(device)
elif torch.is_tensor(eye):
eye = eye.to(device)
batch_size = vertices.shape[0]
if eye.ndimension() == 1:
eye = eye[None, :].repeat(batch_size, 1)
if at.ndimension() == 1:
at = at[None, :].repeat(batch_size, 1)
if up.ndimension() == 1:
up = up[None, :].repeat(batch_size, 1)
# create new axes
# eps is chosen as 1e-5 to match the chainer version
z_axis = F.normalize(at - eye, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)
# create rotation matrix: [bs, 3, 3]
r = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)
# apply
# [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
if vertices.shape != eye.shape:
eye = eye[:, None, :]
vertices = vertices - eye
vertices = torch.matmul(vertices, r.transpose(1,2))
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/look_at.py
|
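A sketch of the functional look_at above: place the camera at eye, look at the origin, and express a batch of points in camera coordinates. The function itself is plain PyTorch, though importing soft_renderer.functional assumes the CUDA extensions are built.
import torch
import soft_renderer.functional as srf

points = torch.rand(2, 100, 3)            # [batch, n_points, xyz] in world coordinates
eye = [0.0, 0.0, -2.732]                  # matches the default eye used by LookAt above
cam_points = srf.look_at(points, eye)     # same shape, now relative to the camera frame
print(cam_points.shape)                   # torch.Size([2, 100, 3])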
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def directional_lighting(light, normals, light_intensity=0.5, light_color=(1,1,1),
light_direction=(0,1,0)):
# normals: [nb, :, 3]
device = light.device
if isinstance(light_color, tuple) or isinstance(light_color, list):
light_color = torch.tensor(light_color, dtype=torch.float32, device=device)
elif isinstance(light_color, np.ndarray):
light_color = torch.from_numpy(light_color).float().to(device)
if isinstance(light_direction, tuple) or isinstance(light_direction, list):
light_direction = torch.tensor(light_direction, dtype=torch.float32, device=device)
elif isinstance(light_direction, np.ndarray):
light_direction = torch.from_numpy(light_direction).float().to(device)
if light_color.ndimension() == 1:
light_color = light_color[None, :]
if light_direction.ndimension() == 1:
light_direction = light_direction[None, :] #[nb, 3]
cosine = F.relu(torch.sum(normals * light_direction, dim=2)) #[]
light += light_intensity * (light_color[:, None, :] * cosine[:, :, None])
return light #[nb, :, 3]
|
banmo-main
|
third_party/softras/soft_renderer/functional/directional_lighting.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
def ambient_lighting(light, light_intensity=0.5, light_color=(1,1,1)):
device = light.device
if isinstance(light_color, tuple) or isinstance(light_color, list):
light_color = torch.tensor(light_color, dtype=torch.float32, device=device)
elif isinstance(light_color, np.ndarray):
light_color = torch.from_numpy(light_color).float().to(device)
if light_color.ndimension() == 1:
light_color = light_color[None, :]
light += light_intensity * light_color[:, None, :]
return light #[nb, :, 3]
|
banmo-main
|
third_party/softras/soft_renderer/functional/ambient_lighting.py
|
import math
import torch
def perspective(vertices, angle=30.):
'''
Compute perspective distortion from a given angle
'''
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
device = vertices.device
angle = torch.tensor(angle / 180 * math.pi, dtype=torch.float32, device=device)
angle = angle[None]
width = torch.tan(angle)
width = width[:, None]
z = vertices[:, :, 2]
x = vertices[:, :, 0] / z / width
y = vertices[:, :, 1] / z / width
vertices = torch.stack((x,y,z), dim=2)
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/perspective.py
|
import os
import torch
import numpy as np
from skimage.io import imread
import soft_renderer.cuda.load_textures as load_textures_cuda
def load_mtl(filename_mtl):
'''
load color (Kd) and filename of textures from *.mtl
'''
texture_filenames = {}
colors = {}
material_name = ''
with open(filename_mtl) as f:
for line in f.readlines():
if len(line.split()) != 0:
if line.split()[0] == 'newmtl':
material_name = line.split()[1]
if line.split()[0] == 'map_Kd':
texture_filenames[material_name] = line.split()[1]
if line.split()[0] == 'Kd':
colors[material_name] = np.array(list(map(float, line.split()[1:4])))
return colors, texture_filenames
def load_textures(filename_obj, filename_mtl, texture_res):
# load vertices
vertices = []
with open(filename_obj) as f:
lines = f.readlines()
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'vt':
vertices.append([float(v) for v in line.split()[1:3]])
vertices = np.vstack(vertices).astype(np.float32)
# load faces for textures
faces = []
material_names = []
material_name = ''
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'f':
vs = line.split()[1:]
nv = len(vs)
if '/' in vs[0] and '//' not in vs[0]:
v0 = int(vs[0].split('/')[1])
else:
v0 = 0
for i in range(nv - 2):
if '/' in vs[i + 1] and '//' not in vs[i + 1]:
v1 = int(vs[i + 1].split('/')[1])
else:
v1 = 0
if '/' in vs[i + 2] and '//' not in vs[i + 2]:
v2 = int(vs[i + 2].split('/')[1])
else:
v2 = 0
faces.append((v0, v1, v2))
material_names.append(material_name)
if line.split()[0] == 'usemtl':
material_name = line.split()[1]
faces = np.vstack(faces).astype(np.int32) - 1
faces = vertices[faces]
faces = torch.from_numpy(faces).cuda()
faces[1 < faces] = faces[1 < faces] % 1
colors, texture_filenames = load_mtl(filename_mtl)
textures = torch.ones(faces.shape[0], texture_res**2, 3, dtype=torch.float32)
textures = textures.cuda()
#
for material_name, color in list(colors.items()):
color = torch.from_numpy(color).cuda()
for i, material_name_f in enumerate(material_names):
if material_name == material_name_f:
textures[i, :, :] = color[None, :]
for material_name, filename_texture in list(texture_filenames.items()):
filename_texture = os.path.join(os.path.dirname(filename_obj), filename_texture)
image = imread(filename_texture).astype(np.float32) / 255.
# texture image may have one channel (grey color)
if len(image.shape) == 2:
image = np.stack((image,)*3, -1)
# or has an extra alpha channel, which we ignore for now
if image.shape[2] == 4:
image = image[:, :, :3]
# pytorch does not support negative slicing for the moment
image = image[::-1, :, :]
image = torch.from_numpy(image.copy()).cuda()
is_update = (np.array(material_names) == material_name).astype(np.int32)
is_update = torch.from_numpy(is_update).cuda()
textures = load_textures_cuda.load_textures(image, faces, textures, is_update)
return textures
def load_obj(filename_obj, normalization=False, load_texture=False, texture_res=4, texture_type='surface'):
"""
Load Wavefront .obj file.
This function only supports vertices (v x x x) and faces (f x x x).
"""
assert texture_type in ['surface', 'vertex']
# load vertices
vertices = []
with open(filename_obj) as f:
lines = f.readlines()
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'v':
vertices.append([float(v) for v in line.split()[1:4]])
vertices = torch.from_numpy(np.vstack(vertices).astype(np.float32)).cuda()
# load faces
faces = []
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'f':
vs = line.split()[1:]
nv = len(vs)
v0 = int(vs[0].split('/')[0])
for i in range(nv - 2):
v1 = int(vs[i + 1].split('/')[0])
v2 = int(vs[i + 2].split('/')[0])
faces.append((v0, v1, v2))
faces = torch.from_numpy(np.vstack(faces).astype(np.int32)).cuda() - 1
# load textures
if load_texture and texture_type == 'surface':
textures = None
for line in lines:
if line.startswith('mtllib'):
filename_mtl = os.path.join(os.path.dirname(filename_obj), line.split()[1])
textures = load_textures(filename_obj, filename_mtl, texture_res)
if textures is None:
raise Exception('Failed to load textures.')
elif load_texture and texture_type == 'vertex':
textures = []
for line in lines:
if len(line.split()) == 0:
continue
if line.split()[0] == 'v':
textures.append([float(v) for v in line.split()[4:7]])
textures = torch.from_numpy(np.vstack(textures).astype(np.float32)).cuda()
# normalize into a unit cube centered zero
if normalization:
vertices -= vertices.min(0)[0][None, :]
vertices /= torch.abs(vertices).max()
vertices *= 2
vertices -= vertices.max(0)[0][None, :] / 2
if load_texture:
return vertices, faces, textures
else:
return vertices, faces
|
banmo-main
|
third_party/softras/soft_renderer/functional/load_obj.py
|
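A sketch of load_obj above; the .obj path is hypothetical, and a GPU is assumed since the loader moves everything onto CUDA.
import soft_renderer.functional as srf

# hypothetical path; geometry only
vertices, faces = srf.load_obj('data/spot.obj', normalization=True)
print(vertices.shape, faces.shape)        # [n_vertices, 3] float32, [n_faces, 3] int32

# with a surface texture atlas resolved through the mtllib referenced by the .obj
vertices, faces, textures = srf.load_obj('data/spot.obj', load_texture=True,
                                         texture_res=4, texture_type='surface')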
import torch
def face_vertices(vertices, faces):
"""
:param vertices: [batch size, number of vertices, 3]
:param faces: [batch size, number of faces, 3]
:return: [batch size, number of faces, 3, 3]
"""
assert (vertices.ndimension() == 3)
assert (faces.ndimension() == 3)
assert (vertices.shape[0] == faces.shape[0])
assert (vertices.shape[2] == 3)
assert (faces.shape[2] == 3)
bs, nv = vertices.shape[:2]
bs, nf = faces.shape[:2]
device = vertices.device
faces = faces + (torch.arange(bs, dtype=torch.int32).to(device) * nv)[:, None, None]
vertices = vertices.reshape((bs * nv, 3))
# pytorch only supports long and byte tensors for indexing
return vertices[faces.long()]
|
banmo-main
|
third_party/softras/soft_renderer/functional/face_vertices.py
|
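A shape-only sketch of face_vertices above; it is a pure gather and runs on CPU tensors (importing the package still assumes the CUDA extensions are built).
import torch
import soft_renderer.functional as srf

vertices = torch.rand(2, 5, 3)                              # [batch, n_vertices, 3]
faces = torch.randint(0, 5, (2, 7, 3), dtype=torch.int32)   # [batch, n_faces, 3]
per_face = srf.face_vertices(vertices, faces)
print(per_face.shape)                                       # torch.Size([2, 7, 3, 3])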
import numpy as np
import torch
import torch.nn.functional as F
def look(vertices, eye, direction=[0, 1, 0], up=None):
"""
"Look" transformation of vertices.
"""
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
device = vertices.device
if isinstance(direction, list) or isinstance(direction, tuple):
direction = torch.tensor(direction, dtype=torch.float32, device=device)
elif isinstance(direction, np.ndarray):
direction = torch.from_numpy(direction).to(device)
elif torch.is_tensor(direction):
direction = direction.to(device)
if isinstance(eye, list) or isinstance(eye, tuple):
eye = torch.tensor(eye, dtype=torch.float32, device=device)
elif isinstance(eye, np.ndarray):
eye = torch.from_numpy(eye).to(device)
elif torch.is_tensor(eye):
eye = eye.to(device)
if eye.ndimension() == 1:
eye = eye[None, :]
if direction.ndimension() == 1:
direction = direction[None, :]
# the default up=None would crash below; fall back to the conventional [0, 1, 0] up vector
if up is None:
up = torch.tensor([0.0, 1.0, 0.0], dtype=torch.float32, device=device)
elif not torch.is_tensor(up):
up = torch.tensor(up, dtype=torch.float32, device=device)
if up.ndimension() == 1:
up = up[None, :]
# create new axes
z_axis = F.normalize(direction, eps=1e-5)
x_axis = F.normalize(torch.cross(up, z_axis), eps=1e-5)
y_axis = F.normalize(torch.cross(z_axis, x_axis), eps=1e-5)
# create rotation matrix: [bs, 3, 3]
r = torch.cat((x_axis[:, None, :], y_axis[:, None, :], z_axis[:, None, :]), dim=1)
# apply
# [bs, nv, 3] -> [bs, nv, 3] -> [bs, nv, 3]
if vertices.shape != eye.shape:
eye = eye[:, None, :]
vertices = vertices - eye
vertices = torch.matmul(vertices, r.transpose(1,2))
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/look.py
|
from .get_points_from_angles import get_points_from_angles
from .ambient_lighting import ambient_lighting
from .directional_lighting import directional_lighting
from .load_obj import load_obj
from .look import look
from .look_at import look_at
from .perspective import perspective
from .orthogonal import orthogonal
from .projection import projection
from .soft_rasterize import soft_rasterize
from .save_obj import (save_obj, save_voxel)
from .face_vertices import face_vertices
from .vertex_normals import vertex_normals
from .voxelization import voxelization
|
banmo-main
|
third_party/softras/soft_renderer/functional/__init__.py
|
import os
import torch
from skimage.io import imsave
import soft_renderer.cuda.create_texture_image as create_texture_image_cuda
def create_texture_image(textures, texture_res=16):
num_faces = textures.shape[0]
tile_width = int((num_faces - 1.) ** 0.5) + 1
tile_height = int((num_faces - 1.) / tile_width) + 1
image = torch.ones(tile_height * texture_res, tile_width * texture_res, 3, dtype=torch.float32)
vertices = torch.zeros((num_faces, 3, 2), dtype=torch.float32) # [:, :, UV]
face_nums = torch.arange(num_faces)
column = face_nums % tile_width
row = face_nums // tile_width
vertices[:, 0, 0] = column * texture_res + texture_res / 2
vertices[:, 0, 1] = row * texture_res + 1
vertices[:, 1, 0] = column * texture_res + 1
vertices[:, 1, 1] = (row + 1) * texture_res - 1 - 1
vertices[:, 2, 0] = (column + 1) * texture_res - 1 - 1
vertices[:, 2, 1] = (row + 1) * texture_res - 1 - 1
image = image.cuda()
vertices = vertices.cuda()
textures = textures.cuda()
image = create_texture_image_cuda.create_texture_image(vertices, textures, image, 1e-5)
vertices[:, :, 0] /= (image.shape[1] - 1)
vertices[:, :, 1] /= (image.shape[0] - 1)
image = image.detach().cpu().numpy()
vertices = vertices.detach().cpu().numpy()
image = image[::-1, ::1]
return image, vertices
def save_obj(filename, vertices, faces, textures=None, texture_res=16, texture_type='surface'):
assert vertices.ndimension() == 2
assert faces.ndimension() == 2
assert texture_type in ['surface', 'vertex']
assert texture_res >= 2
if textures is not None and texture_type == 'surface':
filename_mtl = filename[:-4] + '.mtl'
filename_texture = filename[:-4] + '.png'
material_name = 'material_1'
texture_image, vertices_textures = create_texture_image(textures, texture_res)
texture_image = texture_image.clip(0, 1)
texture_image = (texture_image * 255).astype('uint8')
imsave(filename_texture, texture_image)
faces = faces.detach().cpu().numpy()
with open(filename, 'w') as f:
f.write('# %s\n' % os.path.basename(filename))
f.write('#\n')
f.write('\n')
if textures is not None and texture_type == 'surface':
f.write('mtllib %s\n\n' % os.path.basename(filename_mtl))
if textures is not None and texture_type == 'vertex':
for vertex, color in zip(vertices, textures):
f.write('v %.8f %.8f %.8f %.8f %.8f %.8f\n' % (vertex[0], vertex[1], vertex[2],
color[0], color[1], color[2]))
f.write('\n')
else:
for vertex in vertices:
f.write('v %.8f %.8f %.8f\n' % (vertex[0], vertex[1], vertex[2]))
f.write('\n')
if textures is not None and texture_type == 'surface':
for vertex in vertices_textures.reshape((-1, 2)):
f.write('vt %.8f %.8f\n' % (vertex[0], vertex[1]))
f.write('\n')
f.write('usemtl %s\n' % material_name)
for i, face in enumerate(faces):
f.write('f %d/%d %d/%d %d/%d\n' % (
face[0] + 1, 3 * i + 1, face[1] + 1, 3 * i + 2, face[2] + 1, 3 * i + 3))
f.write('\n')
else:
for face in faces:
f.write('f %d %d %d\n' % (face[0] + 1, face[1] + 1, face[2] + 1))
if textures is not None and texture_type == 'surface':
with open(filename_mtl, 'w') as f:
f.write('newmtl %s\n' % material_name)
f.write('map_Kd %s\n' % os.path.basename(filename_texture))
def save_voxel(filename, voxel):
vertices = []
for i in range(voxel.shape[0]):
for j in range(voxel.shape[1]):
for k in range(voxel.shape[2]):
if voxel[i, j, k] == 1:
vertices.append([i / voxel.shape[0], j / voxel.shape[1], k / voxel.shape[2]])
vertices = torch.tensor(vertices, dtype=torch.float32)
# faces must be a 2-D tensor to pass save_obj's assertions; write vertices only
return save_obj(filename, vertices, torch.zeros((0, 3), dtype=torch.int64))
|
banmo-main
|
third_party/softras/soft_renderer/functional/save_obj.py
|
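A sketch of save_obj above for the untextured case, which only writes 'v' and 'f' lines; the output filename is hypothetical. Importing soft_renderer.functional assumes the CUDA extensions are built.
import torch
import soft_renderer.functional as srf

vertices = torch.tensor([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
faces = torch.tensor([[0, 1, 2]])
srf.save_obj('triangle.obj', vertices, faces)   # writes a minimal Wavefront .obj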
import math
import torch
def get_points_from_angles(distance, elevation, azimuth, degrees=True):
if isinstance(distance, float) or isinstance(distance, int):
if degrees:
elevation = math.radians(elevation)
azimuth = math.radians(azimuth)
return (
distance * math.cos(elevation) * math.sin(azimuth),
distance * math.sin(elevation),
-distance * math.cos(elevation) * math.cos(azimuth))
else:
if degrees:
elevation = math.pi / 180. * elevation
azimuth = math.pi / 180. * azimuth
#
return torch.stack([
distance * torch.cos(elevation) * torch.sin(azimuth),
distance * torch.sin(elevation),
-distance * torch.cos(elevation) * torch.cos(azimuth)
]).transpose(1,0)
|
banmo-main
|
third_party/softras/soft_renderer/functional/get_points_from_angles.py
|
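A sketch of get_points_from_angles above, which converts spherical camera parameters into eye positions; scalar inputs return a plain tuple, tensor inputs return a [batch, 3] tensor.
import torch
import soft_renderer.functional as srf

eye = srf.get_points_from_angles(2.732, 30.0, 45.0)     # (x, y, z) tuple of floats

distances = torch.full((4,), 2.732)
elevations = torch.linspace(0, 60, 4)
azimuths = torch.linspace(0, 270, 4)
eyes = srf.get_points_from_angles(distances, elevations, azimuths)
print(eyes.shape)                                       # torch.Size([4, 3])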
import torch
def orthogonal(vertices, scale):
'''
Compute orthogonal projection from a given angle
To find equivalent scale to perspective projection
set scale = focal_pixel / object_depth -- to 0~H/W pixel range
= 1 / ( object_depth * tan(half_fov_angle) ) -- to -1~1 pixel range
'''
if (vertices.ndimension() != 3):
raise ValueError('vertices Tensor should have 3 dimensions')
z = vertices[:, :, 2]
x = vertices[:, :, 0] * scale
y = vertices[:, :, 1] * scale
vertices = torch.stack((x,y,z), dim=2)
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/orthogonal.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Function
import numpy as np
import soft_renderer.cuda.soft_rasterize as soft_rasterize_cuda
class SoftRasterizeFunction(Function):
@staticmethod
def forward(ctx, face_vertices, textures, image_size=256,
background_color=[0, 0, 0], near=1, far=100,
fill_back=True, eps=1e-3,
sigma_val=1e-5, dist_func='euclidean', dist_eps=1e-4,
gamma_val=1e-4, aggr_func_rgb='softmax', aggr_func_alpha='prod',
texture_type='surface'):
# face_vertices: [nb, nf, 9]
# textures: [nb, nf, 9]
func_dist_map = {'hard': 0, 'barycentric': 1, 'euclidean': 2}
func_rgb_map = {'hard': 0, 'softmax': 1}
func_alpha_map = {'hard': 0, 'sum': 1, 'prod': 2}
func_map_sample = {'surface': 0, 'vertex': 1}
ctx.image_size = image_size
ctx.background_color = background_color
ctx.near = near
ctx.far = far
ctx.eps = eps
ctx.sigma_val = sigma_val
ctx.gamma_val = gamma_val
ctx.func_dist_type = func_dist_map[dist_func]
ctx.dist_eps = np.log(1. / dist_eps - 1.)
ctx.func_rgb_type = func_rgb_map[aggr_func_rgb]
ctx.func_alpha_type = func_alpha_map[aggr_func_alpha]
ctx.texture_type = func_map_sample[texture_type]
ctx.fill_back = fill_back
face_vertices = face_vertices.clone()
textures = textures.clone()
ctx.device = face_vertices.device
ctx.batch_size, ctx.num_faces = face_vertices.shape[:2]
faces_info = torch.FloatTensor(ctx.batch_size, ctx.num_faces, 9*3).fill_(0.0).to(device=ctx.device) # [inv*9, sym*9, obt*3, 0*6]
aggrs_info = torch.FloatTensor(ctx.batch_size, 2, ctx.image_size, ctx.image_size).fill_(0.0).to(device=ctx.device)
soft_colors = torch.FloatTensor(ctx.batch_size, 4, ctx.image_size, ctx.image_size).fill_(1.0).to(device=ctx.device)
soft_colors[:, 0, :, :] *= background_color[0]
soft_colors[:, 1, :, :] *= background_color[1]
soft_colors[:, 2, :, :] *= background_color[2]
faces_info, aggrs_info, soft_colors = \
soft_rasterize_cuda.forward_soft_rasterize(face_vertices, textures,
faces_info, aggrs_info,
soft_colors,
image_size, near, far, eps,
sigma_val, ctx.func_dist_type, ctx.dist_eps,
gamma_val, ctx.func_rgb_type, ctx.func_alpha_type,
ctx.texture_type, fill_back)
ctx.save_for_backward(face_vertices, textures, soft_colors, faces_info, aggrs_info)
return soft_colors
@staticmethod
def backward(ctx, grad_soft_colors):
#print(grad_soft_colors.dtype)
face_vertices, textures, soft_colors, faces_info, aggrs_info = ctx.saved_tensors
image_size = ctx.image_size
background_color = ctx.background_color
near = ctx.near
far = ctx.far
eps = ctx.eps
sigma_val = ctx.sigma_val
dist_eps = ctx.dist_eps
gamma_val = ctx.gamma_val
func_dist_type = ctx.func_dist_type
func_rgb_type = ctx.func_rgb_type
func_alpha_type = ctx.func_alpha_type
texture_type = ctx.texture_type
fill_back = ctx.fill_back
# grad_faces = torch.zeros_like(face_vertices, dtype=torch.float32).to(ctx.device).contiguous()
# grad_textures = torch.zeros_like(textures, dtype=torch.float32).to(ctx.device).contiguous()
grad_faces = torch.zeros_like(face_vertices,dtype=torch.float32,device=ctx.device)
grad_textures = torch.zeros_like(textures,dtype=torch.float32,device=ctx.device)
grad_soft_colors = grad_soft_colors.contiguous()
grad_faces, grad_textures = \
soft_rasterize_cuda.backward_soft_rasterize(face_vertices, textures, soft_colors,
faces_info, aggrs_info,
grad_faces, grad_textures, grad_soft_colors,
image_size, near, far, eps,
sigma_val, func_dist_type, dist_eps,
gamma_val, func_rgb_type, func_alpha_type,
texture_type, fill_back)
return grad_faces, grad_textures, None, None, None, None, None, None, None, None, None, None, None, None, None
def soft_rasterize(face_vertices, textures, image_size=256,
background_color=[0, 0, 0], near=1, far=100,
fill_back=True, eps=1e-3,
sigma_val=1e-5, dist_func='euclidean', dist_eps=1e-4,
gamma_val=1e-4, aggr_func_rgb='softmax', aggr_func_alpha='prod',
texture_type='surface'):
if face_vertices.device.type == 'cpu':
raise TypeError('Rasterize module supports only cuda Tensors')
return SoftRasterizeFunction.apply(face_vertices, textures, image_size,
background_color, near, far,
fill_back, eps,
sigma_val, dist_func, dist_eps,
gamma_val, aggr_func_rgb, aggr_func_alpha,
texture_type)
|
banmo-main
|
third_party/softras/soft_renderer/functional/soft_rasterize.py
|
import torch
def projection(vertices, P, dist_coeffs, orig_size):
'''
Calculate projective transformation of vertices given a projection matrix
P: 3x4 projection matrix
dist_coeffs: vector of distortion coefficients
orig_size: original size of image captured by the camera
'''
vertices = torch.cat([vertices, torch.ones_like(vertices[:, :, None, 0])], dim=-1)
vertices = torch.bmm(vertices, P.transpose(2,1))
x, y, z = vertices[:, :, 0], vertices[:, :, 1], vertices[:, :, 2]
x_ = x / (z + 1e-5)
y_ = y / (z + 1e-5)
# Get distortion coefficients from vector
k1 = dist_coeffs[:, None, 0]
k2 = dist_coeffs[:, None, 1]
p1 = dist_coeffs[:, None, 2]
p2 = dist_coeffs[:, None, 3]
k3 = dist_coeffs[:, None, 4]
# we use x_ for x' and x__ for x'' etc.
r = torch.sqrt(x_ ** 2 + y_ ** 2)
x__ = x_*(1 + k1*(r**2) + k2*(r**4) + k3*(r**6)) + 2*p1*x_*y_ + p2*(r**2 + 2*x_**2)
y__ = y_*(1 + k1*(r**2) + k2*(r**4) + k3 *(r**6)) + p1*(r**2 + 2*y_**2) + 2*p2*x_*y_
x__ = 2 * (x__ - orig_size / 2.) / orig_size
y__ = 2 * (y__ - orig_size / 2.) / orig_size
vertices = torch.stack([x__,y__,z], dim=-1)
return vertices
|
banmo-main
|
third_party/softras/soft_renderer/functional/projection.py
|
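A sketch of the projection function above with a hypothetical pinhole camera (identity rotation, zero translation, zero distortion), mapping camera-space points to the normalized [-1, 1] image range.
import torch
import soft_renderer.functional as srf

K = torch.tensor([[[500.0, 0.0, 256.0],
                   [0.0, 500.0, 256.0],
                   [0.0, 0.0, 1.0]]])                  # [1, 3, 3] intrinsics for a 512px image
P = torch.cat([K, torch.zeros(1, 3, 1)], dim=2)        # [1, 3, 4] projection matrix
dist_coeffs = torch.zeros(1, 5)                        # k1, k2, p1, p2, k3
points = torch.tensor([[[0.0, 0.0, 2.0], [0.1, 0.1, 2.0]]])   # [1, n, 3], z > 0
projected = srf.projection(points, P, dist_coeffs, orig_size=512)
print(projected.shape)                                 # [1, 2, 3]: x, y in [-1, 1] plus depth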