| python_code | repo_name | file_path |
|---|---|---|
from setuptools import setup
from Cython.Build import cythonize
import numpy
# python setup.py build_ext --inplace
setup(
ext_modules=cythonize(
['cooc_count.pyx'],
annotate=True),
include_dirs=[numpy.get_include()]
)
| coocmap-main | fast/setup.py |
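# Hedged usage sketch (not part of coocmap): besides `python setup.py build_ext --inplace`,
# the cooc_count.pyx extension above can also be compiled on the fly with pyximport during
# development. Only the module import is shown; its functions are not assumed here.
import numpy
import pyximport

pyximport.install(setup_args={"include_dirs": numpy.get_include()}, language_level=3)
import cooc_count  # compiled from fast/cooc_count.pyx on first import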
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
import torch
import random
import numpy as np
from tqdm import tqdm
import os
import matplotlib.pyplot as plt
import sys
sys.path.append("fisher_information_loss")
import models
import dataloading
def recons_attack(model, X, y, lam, link_func):
"""
Runs the Balle et al. GLM attack https://arxiv.org/abs/2201.04845.
"""
def compute_grad(model, X, y):
return ((X @ model.theta).sigmoid() - y)[:, None] * X
n = len(y)
grad = compute_grad(model, X, y)
B1 = (grad.sum(0)[None, :] - grad)[:, 0]
denom = B1 + n * lam * model.theta[0][None]
X_hat = (grad.sum(0)[None, :] - grad + n * lam * model.theta[None, :]) / denom[:, None]
y_hat = link_func(X_hat @ model.theta) + denom
return X_hat, y_hat
def compute_correct_ratio(etas, num_bins, predictions, target):
order = etas.argsort()
bin_size = len(target) // num_bins + 1
bin_accs = []
for prediction in predictions:
prediction = np.array(prediction)
correct = (prediction == target)
        bin_accs.append([correct[order[lower:lower + bin_size]].mean()
                         for lower in range(0, len(correct), bin_size)])
return np.array(bin_accs)
parser = argparse.ArgumentParser(description="Evaluate GLM reconstruction attack.")
parser.add_argument("--data_folder", default="data/", type=str,
help="folder in which to store data")
parser.add_argument("--num_trials", default=10000, type=int,
help="Number of trials")
parser.add_argument("--lam", default=0.01, type=float,
help="regularization parameter for logistic regression")
parser.add_argument("--sigma", default=1e-5, type=float,
help="Gaussian noise parameter for output perturbation")
args = parser.parse_args()
train_data = dataloading.load_dataset(
name="mnist", split="train", normalize=False,
num_classes=2, root=args.data_folder, regression=False)
test_data = dataloading.load_dataset(
name="mnist", split="test", normalize=False,
num_classes=2, root=args.data_folder, regression=False)
train_data['features'] = torch.cat([torch.ones(len(train_data['targets']), 1), train_data['features']], 1)
test_data['features'] = torch.cat([torch.ones(len(test_data['targets']), 1), test_data['features']], 1)
model = models.get_model("logistic")
model.train(train_data, l2=args.lam, weights=None)
true_theta = model.theta.clone()
predictions = model.predict(train_data["features"])
acc = ((predictions == train_data["targets"]).float()).mean()
print(f"Training accuracy of classifier {acc.item():.3f}")
predictions = model.predict(test_data["features"])
acc = ((predictions == test_data["targets"]).float()).mean()
print(f"Test accuracy of classifier {acc.item():.3f}")
J = model.influence_jacobian(train_data)[:, :, 1:-1] / args.sigma
etas = J.pow(2).sum(1).mean(1)
X = train_data["features"]
y = train_data["targets"].float()
n, d = X.size(0), X.size(1) - 1
link_func = torch.sigmoid
X_means = torch.zeros(X.shape)
errors = torch.zeros(len(y))
with torch.no_grad():
print('Running reconstruction attack for %d trials:' % args.num_trials)
for i in tqdm(range(args.num_trials)):
model.theta = true_theta + args.sigma * torch.randn(true_theta.size())
X_hat, y_hat = recons_attack(model, X, y, args.lam, link_func)
X_means += X_hat / args.num_trials
errors += (X_hat[:, 1:] - X[:, 1:]).pow(2).sum(1) / (d * args.num_trials)
X_means = X_means[:, 1:]
# filter out examples that the attack failed on
mask = torch.logical_not(torch.isnan(errors))
etas = etas[mask]
errors = errors[mask]
_, order = etas.reciprocal().sort()
# plot MSE lower bound vs. true MSE
plt.figure(figsize=(8,5))
below_bound = etas.reciprocal() < errors
plt.scatter(etas[below_bound].reciprocal().detach(), errors[below_bound].detach(), s=10)
plt.scatter(etas[torch.logical_not(below_bound)].reciprocal().detach(), errors[torch.logical_not(below_bound)].detach(),
s=10, color='indianred')
plt.plot(np.power(10, np.arange(-5.5, 3, 0.1)), np.power(10, np.arange(-5.5, 3, 0.1)), 'k', label='Lower bound')
plt.axvline(x=1, color='k', linestyle=':')
plt.xticks(fontsize=20)
plt.xlim([1e-6, 1e4])
plt.xlabel('Predicted MSE', fontsize=20)
plt.xscale('log')
plt.yticks(fontsize=20)
plt.ylabel('Recons. attack MSE', fontsize=20)
plt.yscale('log')
plt.legend(loc='lower right', fontsize=20)
os.makedirs("figs", exist_ok=True)
plt.savefig("figs/recons_mse.pdf", bbox_inches="tight")
# plot reconstructed samples
plt.figure(figsize=(48, 6))
for i in range(8):
plt.subplot(1, 8, i+1)
plt.imshow(X[mask][order[i], 1:].clamp(0, 1).view(28, 28).detach())
plt.axis('off')
plt.savefig("figs/orig_highest8.pdf", bbox_inches="tight")
plt.figure(figsize=(48, 6))
for i in range(8):
plt.subplot(1, 8, i+1)
plt.imshow(X_means[mask][order[i]].clamp(0, 1).view(28, 28).detach())
plt.axis('off')
plt.savefig("figs/recons_highest8.pdf", bbox_inches="tight")
plt.figure(figsize=(48, 6))
for i in range(8):
plt.subplot(1, 8, i+1)
plt.imshow(X[mask][order[-i-1], 1:].clamp(0, 1).view(28, 28).detach())
plt.axis('off')
plt.savefig("figs/orig_lowest8.pdf", bbox_inches="tight")
plt.figure(figsize=(48, 6))
for i in range(8):
plt.subplot(1, 8, i+1)
plt.imshow(X_means[mask][order[-i-1]].clamp(0, 1).view(28, 28).detach())
plt.axis('off')
plt.savefig("figs/recons_lowest8.pdf", bbox_inches="tight")
| bounding_data_reconstruction-main | mnist_logistic_reconstruction.py |
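# Hedged sketch (not part of the repo): a numerical check of the stationarity identity that the
# Balle et al. attack in mnist_logistic_reconstruction.py above relies on. At the optimum of an
# L2-regularized logistic regression, sum_i (sigmoid(x_i . theta) - y_i) x_i + n * lam * theta = 0,
# so the gradient of any single example can be isolated from the released parameters and the
# remaining examples. Data, seed, and optimizer settings below are illustrative.
import torch
import torch.nn.functional as F

torch.manual_seed(0)
n, d, lam = 200, 5, 0.01
X = torch.randn(n, d)
y = (torch.rand(n) < torch.sigmoid(X @ torch.randn(d))).float()
theta = torch.zeros(d, requires_grad=True)
opt = torch.optim.LBFGS([theta], max_iter=500, line_search_fn="strong_wolfe")

def closure():
    opt.zero_grad()
    loss = F.binary_cross_entropy_with_logits(X @ theta, y) + 0.5 * lam * theta.pow(2).sum()
    loss.backward()
    return loss

opt.step(closure)
with torch.no_grad():
    per_example_grad = (torch.sigmoid(X @ theta) - y)[:, None] * X   # shape (n, d)
    residual = per_example_grad.sum(0) + n * lam * theta
    print("stationarity residual norm:", residual.norm().item())     # close to 0 at the optimum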
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import array
import gzip
import logging
import os
from os import path
import struct
import math
import urllib.request
from torchvision import datasets as torch_datasets
from torchvision import transforms
import numpy as np
import numpy.random as npr
from sklearn.decomposition import PCA
_DATA_FOLDER = "data/"
def _download(url, data_folder, filename):
"""
Download a URL to a file in the temporary data directory, if it does not
already exist.
"""
if not path.exists(data_folder):
os.makedirs(data_folder)
out_file = path.join(data_folder, filename)
if not path.isfile(out_file):
urllib.request.urlretrieve(url, out_file)
logging.info(f"Downloaded {url} to {data_folder}")
def _partial_flatten(x):
"""
Flatten all but the first dimension of an ndarray.
"""
return np.reshape(x, (x.shape[0], -1))
def _one_hot(x, k, dtype=np.float32):
"""
Create a one-hot encoding of x of size k.
"""
return np.array(x[:, None] == np.arange(k), dtype)
def mnist_raw(dataset):
"""
Download and parse the raw MNIST dataset.
"""
if dataset == "mnist":
# mirror of http://yann.lecun.com/exdb/mnist/:
base_url = "https://storage.googleapis.com/cvdf-datasets/mnist/"
elif dataset == "fmnist":
base_url = "http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/"
elif dataset == "kmnist":
base_url = "http://codh.rois.ac.jp/kmnist/dataset/kmnist/"
else:
raise RuntimeError("Unknown dataset: " + dataset)
data_folder = path.join(_DATA_FOLDER, dataset)
def parse_labels(filename):
"""
Parses labels in MNIST raw label file.
"""
with gzip.open(filename, "rb") as fh:
_ = struct.unpack(">II", fh.read(8))
return np.array(array.array("B", fh.read()), dtype=np.uint8)
    def parse_images(filename):
        """
        Parses images in an MNIST raw image file.
        """
        with gzip.open(filename, "rb") as fh:
            _, num_data, rows, cols = struct.unpack(">IIII", fh.read(16))
            return np.array(array.array("B", fh.read()), dtype=np.uint8).reshape(
                num_data, rows, cols
            )
# download all MNIST files:
for filename in [
"train-images-idx3-ubyte.gz",
"train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz",
"t10k-labels-idx1-ubyte.gz",
]:
_download(base_url + filename, data_folder, filename)
# parse all images and labels:
train_images = parse_images(path.join(data_folder, "train-images-idx3-ubyte.gz"))
train_labels = parse_labels(path.join(data_folder, "train-labels-idx1-ubyte.gz"))
test_images = parse_images(path.join(data_folder, "t10k-images-idx3-ubyte.gz"))
test_labels = parse_labels(path.join(data_folder, "t10k-labels-idx1-ubyte.gz"))
return train_images, train_labels, test_images, test_labels
def preprocess_data(train_images, train_labels, test_images, test_labels,
binary, permute_train, normalize, pca_dims):
if binary:
num_labels = 2
train_mask = np.logical_or(train_labels == 0, train_labels == 1)
test_mask = np.logical_or(test_labels == 0, test_labels == 1)
train_images, train_labels = train_images[train_mask], train_labels[train_mask]
test_images, test_labels = test_images[test_mask], test_labels[test_mask]
else:
num_labels = np.max(test_labels) + 1
train_labels = _one_hot(train_labels, num_labels)
test_labels = _one_hot(test_labels, num_labels)
if pca_dims > 0:
pca = PCA(n_components=pca_dims, svd_solver='full')
pca.fit(train_images)
train_images = pca.transform(train_images)
test_images = pca.transform(test_images)
if normalize:
train_images /= np.linalg.norm(train_images, 2, 1)[:, None]
test_images /= np.linalg.norm(test_images, 2, 1)[:, None]
# permute training data:
if permute_train:
perm = np.random.RandomState(0).permutation(train_images.shape[0])
train_images = train_images[perm]
train_labels = train_labels[perm]
return train_images, train_labels, test_images, test_labels
def mnist(dataset="mnist", binary=False, permute_train=False, normalize=False, pca_dims=0):
"""
Download, parse and process MNIST data to unit scale and one-hot labels.
"""
# obtain raw MNIST data:
train_images, train_labels, test_images, test_labels = mnist_raw(dataset)
# flatten and normalize images, create one-hot labels:
train_images = _partial_flatten(train_images) / np.float32(255.0)
test_images = _partial_flatten(test_images) / np.float32(255.0)
return preprocess_data(train_images, train_labels, test_images, test_labels,
binary, permute_train, normalize, pca_dims)
def cifar(dataset="cifar10", binary=False, permute_train=False, normalize=False, pca_dims=0):
data_folder = path.join(_DATA_FOLDER, dataset)
normalizer = transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
train_transforms = transforms.Compose([transforms.ToTensor(), normalizer])
if dataset == "cifar10":
train_set = torch_datasets.CIFAR10(root=data_folder, train=True, transform=train_transforms, download=True)
test_set = torch_datasets.CIFAR10(root=data_folder, train=False, transform=train_transforms, download=True)
elif dataset == "cifar100":
train_set = torch_datasets.CIFAR100(root=data_folder, train=True, transform=train_transforms, download=True)
test_set = torch_datasets.CIFAR100(root=data_folder, train=False, transform=train_transforms, download=True)
train_images = []
train_labels = []
for (x, y) in train_set:
train_images.append(np.rollaxis(x.numpy(), 0, 3).flatten())
train_labels.append(y)
train_images = np.stack(train_images)
train_labels = np.array(train_labels)
test_images = []
test_labels = []
for (x, y) in test_set:
test_images.append(np.rollaxis(x.numpy(), 0, 3).flatten())
test_labels.append(y)
test_images = np.stack(test_images)
test_labels = np.array(test_labels)
return preprocess_data(train_images, train_labels, test_images, test_labels,
binary, permute_train, normalize, pca_dims)
def get_datastream(images, labels, batch_size, permutation=False, last_batch=True):
"""
Returns a data stream of `images` and corresponding `labels` in batches of
size `batch_size`. Also returns the number of batches per epoch, `num_batches`.
To loop through the whole dataset in permuted order, set `permutation` to `True`.
To not return the last batch, set `last_batch` to `False`.
"""
# compute number of batches to return:
num_images = images.shape[0]
def permutation_datastream():
"""
Data stream iterator that returns randomly permuted images until eternity.
"""
while True:
perm = npr.permutation(num_images)
for i in range(num_batches):
batch_idx = perm[i * batch_size : (i + 1) * batch_size]
yield images[batch_idx], labels[batch_idx], batch_idx
def random_sampler_datastream():
"""
Data stream iterator that returns a uniformly random batch of images until eternity.
"""
while True:
batch_idx = npr.permutation(num_images)[:batch_size]
yield images[batch_idx], labels[batch_idx], batch_idx
# return iterator factory:
if permutation:
num_batches = int((math.ceil if last_batch else math.floor)(float(num_images) / float(batch_size)))
return random_sampler_datastream, num_batches
else:
num_complete_batches, leftover = divmod(num_images, batch_size)
num_batches = num_complete_batches + (last_batch and bool(leftover))
return permutation_datastream, num_batches
| bounding_data_reconstruction-main | datasets.py |
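# Hedged usage sketch (not part of the repo): how datasets.get_datastream above is typically
# consumed; array shapes and the batch size here are made up for illustration.
import numpy as np
import numpy.random as npr
from datasets import get_datastream  # assumes datasets.py above is importable

images = npr.rand(100, 784).astype(np.float32)
labels = np.eye(10, dtype=np.float32)[npr.randint(0, 10, size=100)]
stream_fn, num_batches = get_datastream(images, labels, batch_size=32)
batches = stream_fn()
for _ in range(num_batches):
    batch_images, batch_labels, batch_idx = next(batches)  # batch_idx indexes the full training set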
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import math
import torch
import numpy as np
import os
import matplotlib.pyplot as plt
import sys
sys.path.append("fisher_information_loss")
import models
import dataloading
parser = argparse.ArgumentParser(description="MNIST training with FIL.")
parser.add_argument("--data_folder", default="data/", type=str,
help="folder in which to store data")
parser.add_argument("--num_trials", default=10, type=int,
help="number of repeated trials")
parser.add_argument("--lam", default=0.01, type=float,
help="l2 regularization parameter")
parser.add_argument("--sigma", default=0.01, type=float,
help="Gaussian noise multiplier")
args = parser.parse_args()
train_data = dataloading.load_dataset(
name="mnist", split="train", normalize=False,
num_classes=2, root=args.data_folder, regression=False)
test_data = dataloading.load_dataset(
name="mnist", split="test", normalize=False,
num_classes=2, root=args.data_folder, regression=False)
n = len(train_data["targets"])
all_etas, all_epsilons, all_rdp_epsilons = [], [], []
for i in range(args.num_trials):
model = models.get_model("logistic")
model.train(train_data, l2=args.lam, weights=None)
# Renyi-DP accounting
rdp_eps = 4 / (n * args.lam * args.sigma) ** 2
# FIL accounting
J = model.influence_jacobian(train_data)[:, :, :-1] / args.sigma
etas = J.pow(2).sum(1).mean(1).sqrt()
print(f"Trial {i+1:d}: RDP epsilon = {rdp_eps:.4f}, Max FIL eta = {etas.max():.4f}")
model.theta = model.theta + args.sigma * torch.randn_like(model.theta)
all_etas.append(etas.detach().numpy())
all_rdp_epsilons.append(rdp_eps)
predictions = model.predict(train_data["features"])
acc = ((predictions == train_data["targets"]).float()).mean()
print(f"Training accuracy of classifier {acc.item():.3f}")
predictions = model.predict(test_data["features"])
acc = ((predictions == test_data["targets"]).float()).mean()
print(f"Test accuracy of classifier {acc.item():.3f}")
all_etas = np.stack(all_etas, 0)
all_rdp_epsilons = np.stack(all_rdp_epsilons, 0)
fil_bound = 1 / np.power(all_etas, 2).mean(0)
rdp_bound = 0.25 / (math.exp(all_rdp_epsilons.mean()) - 1)
plt.figure(figsize=(8,5))
_ = plt.hist(np.log10(fil_bound), bins=100, label='dFIL bound', color='silver', edgecolor='black', linewidth=0.3)
plt.axvline(x=np.log10(rdp_bound), color='k', linestyle='--', label='RDP bound')
plt.axvline(x=0, color='k', linestyle=':')
plt.xlabel('MSE lower bound', fontsize=20)
plt.ylabel('Count', fontsize=20)
plt.xticks(np.arange(-1, 11, 2), labels=['$10^{%d}$' % t for t in np.arange(-1, 11, 2)], fontsize=20)
plt.yticks(fontsize=20)
plt.legend(loc='upper left', fontsize=20)
os.makedirs("figs", exist_ok=True)
plt.savefig("figs/mnist_linear_hist.pdf", bbox_inches="tight")
| bounding_data_reconstruction-main | mnist_logistic_regression.py |
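# Hedged arithmetic sketch (not from the repo): plugs representative numbers into the two
# accounting formulas used by mnist_logistic_regression.py above. The training-set size n
# below is illustrative, not the script's actual value.
import math

n, lam, sigma = 12665, 0.01, 0.01
rdp_eps = 4 / (n * lam * sigma) ** 2            # the script's Renyi-DP accounting term
rdp_bound = 0.25 / (math.exp(rdp_eps) - 1)      # plotted as the 'RDP bound' in the script
print(f"rdp_eps = {rdp_eps:.4f}, rdp_bound = {rdp_bound:.4f}")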
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import jax
import jax.numpy as jnp
from jax.experimental import stax
DTYPE_MAPPING = {
"float32": "f32",
"float64": "f64",
"int32": "s32",
"int64": "s64",
"uint32": "u32",
"uint64": "u64",
}
def _l2_normalize(x, eps=1e-7):
return x * jax.lax.rsqrt((x ** 2).sum() + eps)
def estimate_spectral_norm(f, input_shape, seed=0, n_steps=20):
input_shape = tuple([1] + [input_shape[i] for i in range(1, len(input_shape))])
rng = jax.random.PRNGKey(seed)
u0 = jax.random.normal(rng, input_shape)
v0 = jnp.zeros_like(f(u0))
def fun(carry, _):
u, v = carry
v, f_vjp = jax.vjp(f, u)
v = _l2_normalize(v)
u, = f_vjp(v)
u = _l2_normalize(u)
return (u, v), None
(u, v), _ = jax.lax.scan(fun, (u0, v0), xs=None, length=n_steps)
return jnp.vdot(v, f(u))
def accuracy(predictions, targets):
"""
Compute accuracy of `predictions` given the associated `targets`.
"""
target_class = jnp.argmax(targets, axis=-1)
predicted_class = jnp.argmax(predictions, axis=-1)
return jnp.mean(predicted_class == target_class)
def get_model(rng, model_name, input_shape, num_labels):
"""
    Returns the model specified by `model_name`, initialized with the random number
    generator `rng` for inputs of shape `input_shape` and `num_labels` outputs.
"""
# initialize convolutional network:
if model_name == "cnn":
init_random_params, predict = stax.serial(
stax.Conv(16, (8, 8), padding="SAME", strides=(2, 2)),
stax.Gelu,
stax.AvgPool((2, 2), (1, 1)),
stax.Conv(32, (4, 4), padding="VALID", strides=(2, 2)),
stax.Gelu,
stax.AvgPool((2, 2), (1, 1)),
stax.Flatten,
stax.Dense(32),
stax.Gelu,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
elif model_name == "cnn_tanh":
init_random_params, predict = stax.serial(
stax.Conv(16, (8, 8), padding="SAME", strides=(2, 2)),
stax.Tanh,
stax.AvgPool((2, 2), (1, 1)),
stax.Conv(32, (4, 4), padding="VALID", strides=(2, 2)),
stax.Tanh,
stax.AvgPool((2, 2), (1, 1)),
stax.Flatten,
stax.Dense(32),
stax.Tanh,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
elif model_name == "cnn_cifar":
init_random_params, predict = stax.serial(
stax.Conv(32, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.Conv(32, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.AvgPool((2, 2), (2, 2)),
stax.Conv(64, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.Conv(64, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.AvgPool((2, 2), (2, 2)),
stax.Conv(128, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.Conv(128, (3, 3), padding="SAME", strides=(1, 1)),
stax.Tanh,
stax.AvgPool((2, 2), (2, 2)),
stax.Flatten,
stax.Dense(128),
stax.Tanh,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
# initialize multi-layer perceptron:
elif model_name == "mlp":
init_random_params, predict = stax.serial(
stax.Dense(256),
stax.Gelu,
stax.Dense(256),
stax.Gelu,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
elif model_name == "mlp_tanh":
init_random_params, predict = stax.serial(
stax.Dense(256),
stax.Tanh,
stax.Dense(256),
stax.Tanh,
stax.Dense(num_labels),
)
_, init_params = init_random_params(rng, input_shape)
# initialize linear model:
elif model_name == "linear":
init_random_params, predict_raw = stax.Dense(num_labels)
def predict(params, inputs):
logits = predict_raw(params, inputs)
return jnp.hstack([logits, jnp.zeros(logits.shape)])
_, init_params = init_random_params(rng, input_shape)
else:
raise ValueError(f"Unknown model: {model_name}")
# return initial model parameters and prediction function:
return init_params, predict
| bounding_data_reconstruction-main | utils.py |
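# Hedged sanity check (not part of the repo): utils.estimate_spectral_norm above runs power
# iteration through jax.vjp, so for a plain linear map it should approach the largest singular
# value of the underlying matrix. The matrix A below is arbitrary.
import jax
import jax.numpy as jnp
from utils import estimate_spectral_norm  # assumes utils.py above is importable

A = jax.random.normal(jax.random.PRNGKey(1), (16, 8))
f = lambda x: x @ A.T                                   # maps (batch, 8) -> (batch, 16)
est = estimate_spectral_norm(f, input_shape=(-1, 8), n_steps=50)
exact = jnp.linalg.svd(A, compute_uv=False)[0]
print(est, exact)                                       # the two values should roughly agree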
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import jax.numpy as jnp
import jax.random as jnr
from jax import jit, grad, vmap, nn
from jax.tree_util import tree_flatten, tree_unflatten
import math
def get_loss_func(predict):
"""
Returns the loss function for the specified `predict`ion function.
"""
@jit
def loss(params, inputs, targets):
"""
        Multi-class cross-entropy loss function for a model with parameters `params`
and the specified `inputs` and one-hot `targets`.
"""
predictions = nn.log_softmax(predict(params, inputs))
if predictions.ndim == 1:
return -jnp.sum(predictions * targets)
return -jnp.mean(jnp.sum(predictions * targets, axis=-1))
return loss
def get_grad_func(loss, norm_clip=0, soft_clip=False):
@jit
def clipped_grad(params, inputs, targets):
grads = grad(loss)(params, inputs, targets)
if norm_clip == 0:
return grads
else:
nonempty_grads, tree_def = tree_flatten(grads)
total_grad_norm = jnp.add(jnp.linalg.norm(
[jnp.linalg.norm(neg.ravel()) for neg in nonempty_grads]), 1e-7)
if soft_clip:
divisor = nn.gelu(total_grad_norm / norm_clip - 1) + 1
else:
divisor = jnp.maximum(total_grad_norm / norm_clip, 1.)
normalized_nonempty_grads = [g / divisor for g in nonempty_grads]
return tree_unflatten(tree_def, normalized_nonempty_grads)
return clipped_grad
def get_update_func(get_params, grad_func, opt_update, norm_clip=0, reshape=True):
"""
    Returns the parameter update function for the specified gradient function `grad_func`
    and optimizer update rule `opt_update`.
"""
@jit
def update(i, rng, opt_state, batch, sigma, weight_decay):
"""
Function that performs `i`-th model update using the specified `batch` on
optimizer state `opt_state`. Updates are privatized by noise addition
with variance `sigma`.
"""
# compute parameter gradient:
inputs, targets = batch
if reshape:
inputs = jnp.expand_dims(inputs, 1)
params = get_params(opt_state)
multiplier = 1 if norm_clip == 0 else norm_clip
# add noise to gradients:
grads = vmap(grad_func, in_axes=(None, 0, 0))(params, inputs, targets)
grads_flat, grads_treedef = tree_flatten(grads)
grads_flat = [g.sum(0) for g in grads_flat]
rngs = jnr.split(rng, len(grads_flat))
noisy_grads = [
(g + multiplier * sigma * jnr.normal(r, g.shape)) / len(targets)
for r, g in zip(rngs, grads_flat)
]
# weight decay
params_flat, _ = tree_flatten(params)
noisy_grads = [
g + weight_decay * param
for g, param in zip(noisy_grads, params_flat)
]
noisy_grads = tree_unflatten(grads_treedef, noisy_grads)
# perform parameter update:
return opt_update(i, noisy_grads, opt_state)
return update
| bounding_data_reconstruction-main | trainer.py |
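# Hedged illustration (not from the repo): compares the soft clipping factor used by
# trainer.get_grad_func above when soft_clip=True with ordinary hard clipping, for a few
# gradient norms and a clipping threshold of 1.0.
import jax.numpy as jnp
from jax import nn

norm_clip = 1.0
norms = jnp.array([0.1, 0.5, 1.0, 2.0, 10.0])
hard = jnp.maximum(norms / norm_clip, 1.0)       # standard per-example clipping divisor
soft = nn.gelu(norms / norm_clip - 1) + 1        # smooth variant; approaches `hard` for large norms
print(jnp.stack([norms, hard, soft], axis=1))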
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import logging
import jax
import jax.numpy as jnp
import jax.random as jnr
import hydra
from jax import grad
from jax.experimental import optimizers
from jax.tree_util import tree_flatten, tree_unflatten
import math
import accountant
import datasets
import trainer
import utils
import time
def batch_predict(predict, params, images, batch_size):
num_images = images.shape[0]
num_batches = int(math.ceil(float(num_images) / float(batch_size)))
predictions = []
for i in range(num_batches):
lower = i * batch_size
upper = min((i+1) * batch_size, num_images)
predictions.append(predict(params, images[lower:upper]))
return jnp.concatenate(predictions)
@hydra.main(config_path="configs", config_name="mnist")
def main(cfg):
# set up random number generator:
logging.info(f"Running using JAX {jax.__version__}...")
rng = jnr.PRNGKey(int(time.time()))
# create dataloader for MNIST dataset:
if cfg.dataset.startswith("cifar"):
num_channels = 3
image_size = 32
train_images, train_labels, test_images, test_labels = datasets.cifar(
dataset=cfg.dataset, binary=cfg.binary, pca_dims=cfg.pca_dims)
else:
num_channels = 1
image_size = 28
train_images, train_labels, test_images, test_labels = datasets.mnist(
dataset=cfg.dataset, binary=cfg.binary, pca_dims=cfg.pca_dims)
logging.info(f"Training set max variance: %.4f" % train_images.var(0).max())
num_samples, d = train_images.shape
num_labels = train_labels.shape[1]
if num_labels == 2:
num_labels = 1
if cfg.model.startswith("cnn"):
assert cfg.pca_dims == 0, f"Cannot use PCA with {cfg.model} model."
image_shape = (-1, image_size, image_size, num_channels)
train_images = jnp.reshape(train_images, image_shape)
test_images = jnp.reshape(test_images, image_shape)
data_stream, num_batches = datasets.get_datastream(
train_images, train_labels, cfg.batch_size
)
batches = data_stream()
# set up model:
if cfg.model.startswith("cnn"):
input_shape = (-1, image_size, image_size, num_channels)
else:
input_shape = (-1, d)
init_params, predict = utils.get_model(rng, cfg.model, input_shape, num_labels)
num_params = sum(p.size for p in tree_flatten(init_params)[0])
# create optimizer:
if cfg.optimizer == "sgd":
opt_init, opt_update, get_params = optimizers.momentum(
cfg.step_size, cfg.momentum_mass
)
elif cfg.optimizer == "adam":
opt_init, opt_update, get_params = optimizers.adam(cfg.step_size)
else:
raise ValueError(f"Unknown optimizer: {cfg.optimizer}")
opt_state = opt_init(init_params)
# get loss function and update functions:
loss = trainer.get_loss_func(predict)
grad_func = trainer.get_grad_func(loss, norm_clip=cfg.norm_clip, soft_clip=True)
update = trainer.get_update_func(
get_params, grad_func, opt_update, norm_clip=cfg.norm_clip,
reshape=cfg.model.startswith("cnn")
)
# get function that computes the Jacobian norms for privacy accounting:
gelu_approx = 1.115
fil_accountant = accountant.get_grad_jacobian_trace_func(
grad_func, get_params, reshape=cfg.model.startswith("cnn"),
label_privacy=cfg.label_privacy
)
dp_accountant = accountant.get_dp_accounting_func(cfg.batch_size, cfg.sigma / gelu_approx)
# compute subsampling factor
if cfg.sigma > 0:
eps = math.sqrt(2 * math.log(1.25 / cfg.delta)) * 2 * gelu_approx / cfg.sigma
q = float(cfg.batch_size) / num_samples
subsampling_factor = q / (q + (1-q) * math.exp(-eps))
else:
subsampling_factor = 0
logging.info(f"Subsampling factor is {subsampling_factor:.4f}")
# train the model:
logging.info(f"Training {cfg.model} model with {num_params} parameters using {cfg.optimizer}...")
etas_squared = jnp.zeros((cfg.num_epochs, train_images.shape[0]))
epsilons = jnp.zeros(cfg.num_epochs)
rdp_epsilons = jnp.zeros(cfg.num_epochs)
train_accs = jnp.zeros(cfg.num_epochs)
test_accs = jnp.zeros(cfg.num_epochs)
num_iters = 0
for epoch in range(cfg.num_epochs):
# perform full training sweep through the data:
itercount = itertools.count()
if epoch > 0:
etas_squared = etas_squared.at[epoch].set(etas_squared[epoch-1])
for batch_counter in range(num_batches):
# get next batch:
num_iters += 1
i = next(itercount)
rng = jnr.fold_in(rng, i)
images, labels, batch_idx = next(batches)
batch = (images, labels)
# update privacy loss:
if cfg.sigma > 0 and cfg.do_accounting:
etas_batch = fil_accountant(rng, opt_state, batch) / cfg.sigma / cfg.norm_clip
etas_squared = etas_squared.at[epoch, batch_idx].add(
subsampling_factor * jnp.power(etas_batch, 2), unique_indices=True
)
# perform private parameter update:
opt_state = update(i, rng, opt_state, batch, cfg.sigma, cfg.weight_decay)
# measure training and test accuracy, and average privacy loss:
params = get_params(opt_state)
spectral_norm = utils.estimate_spectral_norm(lambda x: predict(params, x), input_shape)
train_predictions = batch_predict(predict, params, train_images, cfg.batch_size)
test_predictions = batch_predict(predict, params, test_images, cfg.batch_size)
train_accuracy = utils.accuracy(train_predictions, train_labels)
test_accuracy = utils.accuracy(test_predictions, test_labels)
train_accs = train_accs.at[epoch].set(train_accuracy)
test_accs = test_accs.at[epoch].set(test_accuracy)
params, _ = tree_flatten(params)
params_norm = math.sqrt(sum([jnp.power(p, 2).sum() for p in params]))
if cfg.sigma > 0 and cfg.do_accounting:
median_eta = jnp.median(jnp.sqrt(etas_squared[epoch]))
max_eta = jnp.sqrt(etas_squared[epoch]).max()
delta = 1e-5
epsilon = dp_accountant(num_iters, len(train_labels), delta)
epsilons = epsilons.at[epoch].set(epsilon)
rdp_epsilon = dp_accountant(num_iters, len(train_labels), delta, alpha=2)
rdp_epsilons = rdp_epsilons.at[epoch].set(rdp_epsilon)
# print out progress:
logging.info(f"Epoch {epoch + 1}:")
logging.info(f" -> training accuracy = {train_accuracy:.4f}")
logging.info(f" -> test accuracy = {test_accuracy:.4f}")
logging.info(f" -> parameter norm = {params_norm:.4f}, spectral norm = {spectral_norm:.4f}")
if cfg.sigma > 0 and cfg.do_accounting:
logging.info(f" -> Median FIL privacy loss = {median_eta:.4f}")
logging.info(f" -> Max FIL privacy loss = {max_eta:.4f}")
logging.info(f" -> DP privacy loss = ({epsilon:.4f}, {delta:.2e})")
logging.info(f" -> 2-RDP privacy loss = {rdp_epsilon:.4f}")
etas = jnp.sqrt(etas_squared) if cfg.sigma > 0 and cfg.do_accounting else float("inf")
return etas, epsilons, rdp_epsilons, train_accs, test_accs
# run all the things:
if __name__ == "__main__":
main()
| bounding_data_reconstruction-main | train_classifier.py |
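# Hedged arithmetic sketch (not from the repo): evaluates the subsampling factor computed in
# train_classifier.main above for illustrative settings of sigma, delta, batch size, and
# dataset size; these numbers are placeholders, not the configs shipped with the repo.
import math

gelu_approx = 1.115
sigma, delta, batch_size, num_samples = 1.0, 1e-5, 600, 60000
eps = math.sqrt(2 * math.log(1.25 / delta)) * 2 * gelu_approx / sigma
q = batch_size / num_samples
subsampling_factor = q / (q + (1 - q) * math.exp(-eps))
print(f"eps = {eps:.3f}, q = {q:.4f}, subsampling factor = {subsampling_factor:.4f}")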
#!/usr/bin/env python3
#
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import warnings
import jax.numpy as jnp
import jax.random as jnr
from jax import jit, jvp, vjp, jacrev, vmap, nn
from jax.tree_util import tree_flatten
import trainer
from tensorflow_privacy.privacy.analysis.rdp_accountant import compute_rdp, get_privacy_spent
def get_grad_jacobian_norm_func(grad_func, get_params, method="jvp", reshape=True, label_privacy=False):
"""
Returns a function that computes norm of the Jacobian of the parameter
gradients for the specified `loss` function for an optimizer in which the
`get_params` function returns the model parameters.
"""
# assertions:
assert method in ["jvp", "full"], f"Unknown method: {method}"
@jit
def compute_power_iteration_jvp(params, w, inputs, targets):
"""
Computes a single power iteration via the JVP method. Does not include
Jacobian w.r.t. targets.
"""
# compute JVP of per-example parameter gradient Jacobian with w:
if label_privacy:
perex_grad = lambda x: vmap(grad_func, in_axes=(None, 0, 0))(
params, inputs, x
)
_, w = jvp(perex_grad, (targets,), (w,))
else:
perex_grad = lambda x: vmap(grad_func, in_axes=(None, 0, 0))(
params, x, targets
)
_, w = jvp(perex_grad, (inputs,), (w,))
# compute norm of the JVP:
w_flattened, _ = tree_flatten(w)
norms = [
jnp.power(jnp.reshape(v, (v.shape[0], -1)), 2).sum(axis=1)
for v in w_flattened
]
norms = jnp.sqrt(sum(norms) + 1e-7)
# compute VJP of per-example parameter gradient Jacobian with w:
if label_privacy:
_, f_vjp = vjp(perex_grad, targets)
else:
_, f_vjp = vjp(perex_grad, inputs)
w_out = f_vjp(w)[0]
return norms, w_out
@jit
def compute_power_iteration_full(params, w, inputs, targets):
"""
Computes a single power iteration by computing the full Jacobian and
right-multiplying it. Does not include Jacobian w.r.t. targets.
"""
# compute per-example parameter gradient Jacobian:
J = jacrev(grad_func, 1)(params, inputs, targets)
J_flattened, _ = tree_flatten(J)
# compute JVP with w:
jvp_exact = [(v * w).sum(-1) for v in J_flattened]
# compute norm of the JVP:
norms = [
jnp.power(jnp.reshape(v, (-1, v.shape[-1])), 2).sum(axis=0)
for v in jvp_exact
]
norms = jnp.sqrt(sum(norms))
# compute VJP of per-example parameter gradient Jacobian with w:
vjp_exact = [
J_flattened[i] * jnp.expand_dims(jvp_exact[i], -1)
for i in jnp.arange(len(jvp_exact))
]
w_out = sum(
[jnp.reshape(v, (-1, v.shape[-2], v.shape[-1])).sum(0) for v in vjp_exact]
)
return norms, w_out
@jit
def grad_jacobian_norm(rng, opt_state, batch, num_iters=20):
"""
Computes norm of the Jacobian of the parameter gradients. The function
performs `num_iters` power iterations.
"""
# initialize power iterates:
inputs, targets = batch
if reshape:
inputs = jnp.expand_dims(inputs, 1)
w = jnr.normal(rng, shape=(targets.shape if label_privacy else inputs.shape))
w_norm = jnp.sqrt(jnp.power(w.reshape(w.shape[0], -1), 2).sum(axis=1) + 1e-7)
w = w / jnp.expand_dims(w_norm, tuple(range(1, len(w.shape))))
# perform power iterations:
params = get_params(opt_state)
for i in jnp.arange(num_iters):
if method == "jvp":
norms, w = compute_power_iteration_jvp(params, w, inputs, targets)
elif method == "full":
norms, w = compute_power_iteration_full(params, w, inputs, targets)
w_norm = jnp.sqrt(jnp.power(w.reshape(w.shape[0], -1), 2).sum(axis=1) + 1e-7)
w = w / jnp.expand_dims(w_norm, tuple(range(1, len(w.shape))))
# set nan values to 0 because gradient is 0
norms = jnp.nan_to_num(norms)
return norms
# return the function:
return grad_jacobian_norm
def get_grad_jacobian_trace_func(grad_func, get_params, reshape=True, label_privacy=False):
"""
    Returns a function that estimates the (square root of the) trace of the squared
    Jacobian of the parameter gradients with respect to the inputs (or the targets
    when `label_privacy` is set).
"""
@jit
def grad_jacobian_trace(rng, opt_state, batch, num_iters=50):
params = get_params(opt_state)
inputs, targets = batch
if reshape:
inputs = jnp.expand_dims(inputs, 1)
if label_privacy:
flattened_shape = jnp.reshape(targets, (targets.shape[0], -1)).shape
perex_grad = lambda x: vmap(grad_func, in_axes=(None, 0, 0))(
params, inputs, x
)
else:
flattened_shape = jnp.reshape(inputs, (inputs.shape[0], -1)).shape
perex_grad = lambda x: vmap(grad_func, in_axes=(None, 0, 0))(
params, x, targets
)
num_iters = targets.shape[1] if label_privacy else num_iters
rngs = jnr.split(rng, num_iters)
trace = jnp.zeros(inputs.shape[0])
for i, g in zip(jnp.arange(num_iters), rngs):
indices = jnr.categorical(g, jnp.ones(shape=flattened_shape))
if label_privacy:
indices = i * jnp.ones(flattened_shape[0])
w = jnp.reshape(nn.one_hot(indices, flattened_shape[1]), targets.shape)
_, w = jvp(perex_grad, (targets,), (w,))
else:
indices = jnr.categorical(rng, jnp.ones(shape=flattened_shape))
w = jnp.reshape(nn.one_hot(indices, flattened_shape[1]), inputs.shape)
_, w = jvp(perex_grad, (inputs,), (w,))
# compute norm of the JVP:
w_flattened, _ = tree_flatten(w)
norms = [
jnp.power(jnp.reshape(v, (v.shape[0], -1)), 2).sum(axis=1)
for v in w_flattened
]
trace = trace + sum(norms) / num_iters
# set nan values to 0 because gradient is 0
trace = jnp.nan_to_num(trace)
return jnp.sqrt(trace + 1e-7)
# return the function:
return grad_jacobian_trace
def get_dp_accounting_func(batch_size, sigma):
"""
Returns the (eps, delta)-DP accountant if alpha=None,
or the (alpha, eps)-RDP accountant otherwise.
"""
def compute_epsilon(steps, num_examples, target_delta=1e-5, alpha=None):
if num_examples * target_delta > 1.:
warnings.warn('Your delta might be too high.')
q = batch_size / float(num_examples)
if alpha is None:
orders = list(jnp.linspace(1.1, 10.9, 99)) + list(range(11, 64))
rdp_const = compute_rdp(q, sigma, steps, orders)
eps, _, _ = get_privacy_spent(orders, rdp_const, target_delta=target_delta)
else:
eps = compute_rdp(q, sigma, steps, alpha)
return eps
return compute_epsilon
| bounding_data_reconstruction-main | accountant.py |
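# Hedged usage sketch (not part of the repo): calls accountant.get_dp_accounting_func above to
# turn a (batch size, noise multiplier, step count) triple into an (eps, delta)-DP guarantee.
# The numbers are illustrative; the function relies on tensorflow_privacy's RDP accountant.
from accountant import get_dp_accounting_func  # assumes accountant.py above is importable

compute_epsilon = get_dp_accounting_func(batch_size=600, sigma=1.0)
eps = compute_epsilon(steps=1000, num_examples=60000, target_delta=1e-5)
print(f"(eps, delta)-DP with eps = {eps:.3f} and delta = 1e-5")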
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import csv
import logging
import pickle
import numpy as np
import torch
import transformers
import src.slurm
import src.contriever
import src.utils
import src.data
import src.normalize_text
def embed_passages(args, passages, model, tokenizer):
total = 0
allids, allembeddings = [], []
batch_ids, batch_text = [], []
with torch.no_grad():
for k, p in enumerate(passages):
batch_ids.append(p["id"])
            if args.no_title or "title" not in p:
text = p["text"]
else:
text = p["title"] + " " + p["text"]
if args.lowercase:
text = text.lower()
if args.normalize_text:
text = src.normalize_text.normalize(text)
batch_text.append(text)
if len(batch_text) == args.per_gpu_batch_size or k == len(passages) - 1:
encoded_batch = tokenizer.batch_encode_plus(
batch_text,
return_tensors="pt",
max_length=args.passage_maxlength,
padding=True,
truncation=True,
)
encoded_batch = {k: v.cuda() for k, v in encoded_batch.items()}
embeddings = model(**encoded_batch)
embeddings = embeddings.cpu()
total += len(batch_ids)
allids.extend(batch_ids)
allembeddings.append(embeddings)
batch_text = []
batch_ids = []
if k % 100000 == 0 and k > 0:
print(f"Encoded passages {total}")
allembeddings = torch.cat(allembeddings, dim=0).numpy()
return allids, allembeddings
def main(args):
model, tokenizer, _ = src.contriever.load_retriever(args.model_name_or_path)
print(f"Model loaded from {args.model_name_or_path}.", flush=True)
model.eval()
model = model.cuda()
if not args.no_fp16:
model = model.half()
passages = src.data.load_passages(args.passages)
shard_size = len(passages) // args.num_shards
start_idx = args.shard_id * shard_size
end_idx = start_idx + shard_size
if args.shard_id == args.num_shards - 1:
end_idx = len(passages)
passages = passages[start_idx:end_idx]
print(f"Embedding generation for {len(passages)} passages from idx {start_idx} to {end_idx}.")
allids, allembeddings = embed_passages(args, passages, model, tokenizer)
save_file = os.path.join(args.output_dir, args.prefix + f"_{args.shard_id:02d}")
os.makedirs(args.output_dir, exist_ok=True)
print(f"Saving {len(allids)} passage embeddings to {save_file}.")
with open(save_file, mode="wb") as f:
pickle.dump((allids, allembeddings), f)
print(f"Total passages processed {len(allids)}. Written to {save_file}.")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--passages", type=str, default=None, help="Path to passages (.tsv file)")
parser.add_argument("--output_dir", type=str, default="wikipedia_embeddings", help="dir path to save embeddings")
parser.add_argument("--prefix", type=str, default="passages", help="prefix path to save embeddings")
parser.add_argument("--shard_id", type=int, default=0, help="Id of the current shard")
parser.add_argument("--num_shards", type=int, default=1, help="Total number of shards")
parser.add_argument(
"--per_gpu_batch_size", type=int, default=512, help="Batch size for the passage encoder forward pass"
)
parser.add_argument("--passage_maxlength", type=int, default=512, help="Maximum number of tokens in a passage")
parser.add_argument(
"--model_name_or_path", type=str, help="path to directory containing model weights and config file"
)
parser.add_argument("--no_fp16", action="store_true", help="inference in fp32")
parser.add_argument("--no_title", action="store_true", help="title not added to the passage body")
parser.add_argument("--lowercase", action="store_true", help="lowercase text before encoding")
parser.add_argument("--normalize_text", action="store_true", help="lowercase text before encoding")
args = parser.parse_args()
src.slurm.init_distributed_mode(args)
main(args)
| contriever-main | generate_passage_embeddings.py |
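# Hedged sketch (not part of the repo): reads back one shard written by
# generate_passage_embeddings.py above. The script pickles a (ids, embeddings) tuple per shard;
# the path below assumes the default --output_dir and --prefix and shard 0.
import pickle

with open("wikipedia_embeddings/passages_00", "rb") as f:
    ids, embeddings = pickle.load(f)
print(len(ids), embeddings.shape)  # number of passages and the shape of their embedding matrix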
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import sys
import argparse
import torch
import logging
import json
import numpy as np
import os
import src.slurm
import src.contriever
import src.beir_utils
import src.utils
import src.dist_utils
import src.contriever
logger = logging.getLogger(__name__)
def main(args):
src.slurm.init_distributed_mode(args)
src.slurm.init_signal_handler()
os.makedirs(args.output_dir, exist_ok=True)
logger = src.utils.init_logger(args)
model, tokenizer, _ = src.contriever.load_retriever(args.model_name_or_path)
model = model.cuda()
model.eval()
query_encoder = model
doc_encoder = model
logger.info("Start indexing")
metrics = src.beir_utils.evaluate_model(
query_encoder=query_encoder,
doc_encoder=doc_encoder,
tokenizer=tokenizer,
dataset=args.dataset,
batch_size=args.per_gpu_batch_size,
norm_query=args.norm_query,
norm_doc=args.norm_doc,
is_main=src.dist_utils.is_main(),
split="dev" if args.dataset == "msmarco" else "test",
score_function=args.score_function,
beir_dir=args.beir_dir,
save_results_path=args.save_results_path,
lower_case=args.lower_case,
normalize_text=args.normalize_text,
)
if src.dist_utils.is_main():
for key, value in metrics.items():
logger.info(f"{args.dataset} : {key}: {value:.1f}")
if __name__ == "__main__":
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--dataset", type=str, help="Evaluation dataset from the BEIR benchmark")
parser.add_argument("--beir_dir", type=str, default="./", help="Directory to save and load beir datasets")
parser.add_argument("--text_maxlength", type=int, default=512, help="Maximum text length")
parser.add_argument("--per_gpu_batch_size", default=128, type=int, help="Batch size per GPU/CPU for indexing.")
parser.add_argument("--output_dir", type=str, default="./my_experiment", help="Output directory")
parser.add_argument("--model_name_or_path", type=str, help="Model name or path")
parser.add_argument(
"--score_function", type=str, default="dot", help="Metric used to compute similarity between two embeddings"
)
parser.add_argument("--norm_query", action="store_true", help="Normalize query representation")
parser.add_argument("--norm_doc", action="store_true", help="Normalize document representation")
parser.add_argument("--lower_case", action="store_true", help="lowercase query and document text")
parser.add_argument(
"--normalize_text", action="store_true", help="Apply function to normalize some common characters"
)
parser.add_argument("--save_results_path", type=str, default=None, help="Path to save result object")
parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
parser.add_argument("--main_port", type=int, default=-1, help="Main port (for multi-node SLURM jobs)")
args, _ = parser.parse_known_args()
main(args)
| contriever-main | eval_beir.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import argparse
import torch
import transformers
from src.normalize_text import normalize
def save(tensor, split_path):
if not os.path.exists(os.path.dirname(split_path)):
os.makedirs(os.path.dirname(split_path))
with open(split_path, 'wb') as fout:
torch.save(tensor, fout)
def apply_tokenizer(path, tokenizer, normalize_text=False):
alltokens = []
lines = []
with open(path, "r", encoding="utf-8") as fin:
for k, line in enumerate(fin):
if normalize_text:
line = normalize(line)
lines.append(line)
if len(lines) > 1000000:
tokens = tokenizer.batch_encode_plus(lines, add_special_tokens=False)['input_ids']
tokens = [torch.tensor(x, dtype=torch.int) for x in tokens]
alltokens.extend(tokens)
lines = []
tokens = tokenizer.batch_encode_plus(lines, add_special_tokens=False)['input_ids']
tokens = [torch.tensor(x, dtype=torch.int) for x in tokens]
alltokens.extend(tokens)
alltokens = torch.cat(alltokens)
return alltokens
def tokenize_file(args):
filename = os.path.basename(args.datapath)
savepath = os.path.join(args.outdir, f"{filename}.pkl")
if os.path.exists(savepath):
if args.overwrite:
print(f"File {savepath} already exists, overwriting")
else:
print(f"File {savepath} already exists, exiting")
return
try:
tokenizer = transformers.AutoTokenizer.from_pretrained(args.tokenizer, local_files_only=True)
except:
tokenizer = transformers.AutoTokenizer.from_pretrained(args.tokenizer, local_files_only=False)
print(f"Encoding {args.datapath}...")
tokens = apply_tokenizer(args.datapath, tokenizer, normalize_text=args.normalize_text)
print(f"Saving at {savepath}...")
save(tokens, savepath)
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--datapath", type=str)
parser.add_argument("--outdir", type=str)
parser.add_argument("--tokenizer", type=str)
parser.add_argument("--overwrite", action="store_true")
parser.add_argument("--normalize_text", action="store_true")
args, _ = parser.parse_known_args()
tokenize_file(args)
| contriever-main | preprocess.py |
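# Hedged sketch (not part of the repo): loads the flat token tensor written by preprocess.py
# above. The path is illustrative; save() wraps torch.save, so torch.load reads it back as a
# single 1-D tensor of token ids.
import torch

tokens = torch.load("outdir/corpus.txt.pkl")
print(tokens.shape, tokens.dtype)  # e.g. a long 1-D tensor of dtype torch.int32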
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import argparse
import json
import logging
import glob
import numpy as np
import torch
import src.utils
from src.evaluation import calculate_matches
logger = logging.getLogger(__name__)
def validate(data, workers_num):
match_stats = calculate_matches(data, workers_num)
top_k_hits = match_stats.top_k_hits
#logger.info('Validation results: top k documents hits %s', top_k_hits)
top_k_hits = [v / len(data) for v in top_k_hits]
#logger.info('Validation results: top k documents hits accuracy %s', top_k_hits)
return top_k_hits
def main(opt):
logger = src.utils.init_logger(opt, stdout_only=True)
    datapaths = glob.glob(opt.data)
r20, r100 = [], []
for path in datapaths:
data = []
with open(path, 'r') as fin:
for line in fin:
data.append(json.loads(line))
#data = json.load(fin)
answers = [ex['answers'] for ex in data]
        top_k_hits = validate(data, opt.validation_workers)
message = f"Evaluate results from {path}:"
for k in [5, 10, 20, 100]:
if k <= len(top_k_hits):
recall = 100 * top_k_hits[k-1]
if k == 20:
r20.append(f"{recall:.1f}")
if k == 100:
r100.append(f"{recall:.1f}")
message += f' R@{k}: {recall:.1f}'
logger.info(message)
print(datapaths)
print('\t'.join(r20))
print('\t'.join(r100))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--data', required=True, type=str, default=None)
parser.add_argument('--validation_workers', type=int, default=16,
help="Number of parallel processes to validate results")
args = parser.parse_args()
main(args)
| contriever-main | evaluate_retrieved_passages.py |
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import pdb
import os
import time
import sys
import torch
from torch.utils.tensorboard import SummaryWriter
import logging
import json
import numpy as np
import torch.distributed as dist
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from src.options import Options
from src import data, beir_utils, slurm, dist_utils, utils, contriever, finetuning_data, inbatch
import train
os.environ["TOKENIZERS_PARALLELISM"] = "false"
logger = logging.getLogger(__name__)
def finetuning(opt, model, optimizer, scheduler, tokenizer, step):
run_stats = utils.WeightedAvgStats()
tb_logger = utils.init_tb_logger(opt.output_dir)
if hasattr(model, "module"):
eval_model = model.module
else:
eval_model = model
eval_model = eval_model.get_encoder()
train_dataset = finetuning_data.Dataset(
datapaths=opt.train_data,
negative_ctxs=opt.negative_ctxs,
negative_hard_ratio=opt.negative_hard_ratio,
negative_hard_min_idx=opt.negative_hard_min_idx,
normalize=opt.eval_normalize_text,
global_rank=dist_utils.get_rank(),
world_size=dist_utils.get_world_size(),
maxload=opt.maxload,
training=True,
)
collator = finetuning_data.Collator(tokenizer, passage_maxlength=opt.chunk_length)
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=opt.per_gpu_batch_size,
drop_last=True,
num_workers=opt.num_workers,
collate_fn=collator,
)
train.eval_model(opt, eval_model, None, tokenizer, tb_logger, step)
evaluate(opt, eval_model, tokenizer, tb_logger, step)
epoch = 1
model.train()
prev_ids, prev_mask = None, None
while step < opt.total_steps:
logger.info(f"Start epoch {epoch}, number of batches: {len(train_dataloader)}")
for i, batch in enumerate(train_dataloader):
batch = {key: value.cuda() if isinstance(value, torch.Tensor) else value for key, value in batch.items()}
step += 1
train_loss, iter_stats = model(**batch, stats_prefix="train")
train_loss.backward()
if opt.optim == "sam" or opt.optim == "asam":
optimizer.first_step(zero_grad=True)
sam_loss, _ = model(**batch, stats_prefix="train/sam_opt")
sam_loss.backward()
optimizer.second_step(zero_grad=True)
else:
optimizer.step()
scheduler.step()
optimizer.zero_grad()
run_stats.update(iter_stats)
if step % opt.log_freq == 0:
log = f"{step} / {opt.total_steps}"
for k, v in sorted(run_stats.average_stats.items()):
log += f" | {k}: {v:.3f}"
if tb_logger:
tb_logger.add_scalar(k, v, step)
log += f" | lr: {scheduler.get_last_lr()[0]:0.3g}"
log += f" | Memory: {torch.cuda.max_memory_allocated()//1e9} GiB"
logger.info(log)
run_stats.reset()
if step % opt.eval_freq == 0:
train.eval_model(opt, eval_model, None, tokenizer, tb_logger, step)
evaluate(opt, eval_model, tokenizer, tb_logger, step)
if step % opt.save_freq == 0 and dist_utils.get_rank() == 0:
utils.save(
eval_model,
optimizer,
scheduler,
step,
opt,
opt.output_dir,
f"step-{step}",
)
model.train()
if step >= opt.total_steps:
break
epoch += 1
def evaluate(opt, model, tokenizer, tb_logger, step):
dataset = finetuning_data.Dataset(
datapaths=opt.eval_data,
normalize=opt.eval_normalize_text,
global_rank=dist_utils.get_rank(),
world_size=dist_utils.get_world_size(),
maxload=opt.maxload,
training=False,
)
collator = finetuning_data.Collator(tokenizer, passage_maxlength=opt.chunk_length)
sampler = SequentialSampler(dataset)
dataloader = DataLoader(
dataset,
sampler=sampler,
batch_size=opt.per_gpu_batch_size,
drop_last=False,
num_workers=opt.num_workers,
collate_fn=collator,
)
model.eval()
if hasattr(model, "module"):
model = model.module
correct_samples, total_samples, total_step = 0, 0, 0
all_q, all_g, all_n = [], [], []
with torch.no_grad():
for i, batch in enumerate(dataloader):
batch = {key: value.cuda() if isinstance(value, torch.Tensor) else value for key, value in batch.items()}
all_tokens = torch.cat([batch["g_tokens"], batch["n_tokens"]], dim=0)
all_mask = torch.cat([batch["g_mask"], batch["n_mask"]], dim=0)
q_emb = model(input_ids=batch["q_tokens"], attention_mask=batch["q_mask"], normalize=opt.norm_query)
all_emb = model(input_ids=all_tokens, attention_mask=all_mask, normalize=opt.norm_doc)
g_emb, n_emb = torch.split(all_emb, [len(batch["g_tokens"]), len(batch["n_tokens"])])
all_q.append(q_emb)
all_g.append(g_emb)
all_n.append(n_emb)
all_q = torch.cat(all_q, dim=0)
all_g = torch.cat(all_g, dim=0)
all_n = torch.cat(all_n, dim=0)
labels = torch.arange(0, len(all_q), device=all_q.device, dtype=torch.long)
all_sizes = dist_utils.get_varsize(all_g)
all_g = dist_utils.varsize_gather_nograd(all_g)
all_n = dist_utils.varsize_gather_nograd(all_n)
labels = labels + sum(all_sizes[: dist_utils.get_rank()])
scores_pos = torch.einsum("id, jd->ij", all_q, all_g)
scores_neg = torch.einsum("id, jd->ij", all_q, all_n)
scores = torch.cat([scores_pos, scores_neg], dim=-1)
argmax_idx = torch.argmax(scores, dim=1)
sorted_scores, indices = torch.sort(scores, descending=True)
isrelevant = indices == labels[:, None]
rs = [r.cpu().numpy().nonzero()[0] for r in isrelevant]
mrr = np.mean([1.0 / (r[0] + 1) if r.size else 0.0 for r in rs])
acc = (argmax_idx == labels).sum() / all_q.size(0)
acc, total = dist_utils.weighted_average(acc, all_q.size(0))
mrr, _ = dist_utils.weighted_average(mrr, all_q.size(0))
acc = 100 * acc
message = []
if dist_utils.is_main():
message = [f"eval acc: {acc:.2f}%", f"eval mrr: {mrr:.3f}"]
logger.info(" | ".join(message))
if tb_logger is not None:
tb_logger.add_scalar(f"eval_acc", acc, step)
tb_logger.add_scalar(f"mrr", mrr, step)
def main():
logger.info("Start")
options = Options()
opt = options.parse()
torch.manual_seed(opt.seed)
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
directory_exists = os.path.isdir(opt.output_dir)
if dist.is_initialized():
dist.barrier()
os.makedirs(opt.output_dir, exist_ok=True)
if not directory_exists and dist_utils.is_main():
options.print_options(opt)
if dist.is_initialized():
dist.barrier()
utils.init_logger(opt)
step = 0
retriever, tokenizer, retriever_model_id = contriever.load_retriever(opt.model_path, opt.pooling, opt.random_init)
opt.retriever_model_id = retriever_model_id
model = inbatch.InBatch(opt, retriever, tokenizer)
model = model.cuda()
optimizer, scheduler = utils.set_optim(opt, model)
# if dist_utils.is_main():
# utils.save(model, optimizer, scheduler, global_step, 0., opt, opt.output_dir, f"step-{0}")
logger.info(utils.get_parameters(model))
for name, module in model.named_modules():
if isinstance(module, torch.nn.Dropout):
module.p = opt.dropout
if torch.distributed.is_initialized():
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[opt.local_rank],
output_device=opt.local_rank,
find_unused_parameters=False,
)
logger.info("Start training")
finetuning(opt, model, optimizer, scheduler, tokenizer, step)
if __name__ == "__main__":
main()
| contriever-main | finetuning.py |
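# Hedged worked example (not from the repo): mirrors the MRR computation in finetuning.evaluate
# above on a tiny made-up score matrix (2 queries x 3 candidates, gold candidate index equal to
# the query index).
import numpy as np
import torch

scores = torch.tensor([[0.9, 0.1, 0.3],
                       [0.8, 0.2, 0.7]])
labels = torch.arange(2)
sorted_scores, indices = torch.sort(scores, descending=True)
isrelevant = indices == labels[:, None]
rs = [r.cpu().numpy().nonzero()[0] for r in isrelevant]
mrr = np.mean([1.0 / (r[0] + 1) if r.size else 0.0 for r in rs])
print(mrr)  # query 0 ranks its gold first (RR=1), query 1 ranks it third (RR=1/3) -> 2/3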
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import time
import sys
import torch
import logging
import json
import numpy as np
import random
import pickle
import torch.distributed as dist
from torch.utils.data import DataLoader, RandomSampler
from src.options import Options
from src import data, beir_utils, slurm, dist_utils, utils
from src import moco, inbatch
logger = logging.getLogger(__name__)
def train(opt, model, optimizer, scheduler, step):
run_stats = utils.WeightedAvgStats()
tb_logger = utils.init_tb_logger(opt.output_dir)
logger.info("Data loading")
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
tokenizer = model.module.tokenizer
else:
tokenizer = model.tokenizer
collator = data.Collator(opt=opt)
train_dataset = data.load_data(opt, tokenizer)
logger.warning(f"Data loading finished for rank {dist_utils.get_rank()}")
train_sampler = RandomSampler(train_dataset)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=opt.per_gpu_batch_size,
drop_last=True,
num_workers=opt.num_workers,
collate_fn=collator,
)
epoch = 1
model.train()
while step < opt.total_steps:
train_dataset.generate_offset()
logger.info(f"Start epoch {epoch}")
for i, batch in enumerate(train_dataloader):
step += 1
batch = {key: value.cuda() if isinstance(value, torch.Tensor) else value for key, value in batch.items()}
train_loss, iter_stats = model(**batch, stats_prefix="train")
train_loss.backward()
optimizer.step()
scheduler.step()
model.zero_grad()
run_stats.update(iter_stats)
if step % opt.log_freq == 0:
log = f"{step} / {opt.total_steps}"
for k, v in sorted(run_stats.average_stats.items()):
log += f" | {k}: {v:.3f}"
if tb_logger:
tb_logger.add_scalar(k, v, step)
log += f" | lr: {scheduler.get_last_lr()[0]:0.3g}"
log += f" | Memory: {torch.cuda.max_memory_allocated()//1e9} GiB"
logger.info(log)
run_stats.reset()
if step % opt.eval_freq == 0:
if isinstance(model, torch.nn.parallel.DistributedDataParallel):
encoder = model.module.get_encoder()
else:
encoder = model.get_encoder()
eval_model(
opt, query_encoder=encoder, doc_encoder=encoder, tokenizer=tokenizer, tb_logger=tb_logger, step=step
)
if dist_utils.is_main():
utils.save(model, optimizer, scheduler, step, opt, opt.output_dir, f"lastlog")
model.train()
if dist_utils.is_main() and step % opt.save_freq == 0:
utils.save(model, optimizer, scheduler, step, opt, opt.output_dir, f"step-{step}")
if step > opt.total_steps:
break
epoch += 1
def eval_model(opt, query_encoder, doc_encoder, tokenizer, tb_logger, step):
for datasetname in opt.eval_datasets:
metrics = beir_utils.evaluate_model(
query_encoder,
doc_encoder,
tokenizer,
dataset=datasetname,
batch_size=opt.per_gpu_eval_batch_size,
norm_doc=opt.norm_doc,
norm_query=opt.norm_query,
beir_dir=opt.eval_datasets_dir,
score_function=opt.score_function,
lower_case=opt.lower_case,
normalize_text=opt.eval_normalize_text,
)
message = []
if dist_utils.is_main():
for metric in ["NDCG@10", "Recall@10", "Recall@100"]:
message.append(f"{datasetname}/{metric}: {metrics[metric]:.2f}")
if tb_logger is not None:
tb_logger.add_scalar(f"{datasetname}/{metric}", metrics[metric], step)
logger.info(" | ".join(message))
if __name__ == "__main__":
logger.info("Start")
options = Options()
opt = options.parse()
torch.manual_seed(opt.seed)
slurm.init_distributed_mode(opt)
slurm.init_signal_handler()
directory_exists = os.path.isdir(opt.output_dir)
if dist.is_initialized():
dist.barrier()
os.makedirs(opt.output_dir, exist_ok=True)
if not directory_exists and dist_utils.is_main():
options.print_options(opt)
if dist.is_initialized():
dist.barrier()
utils.init_logger(opt)
os.environ["TOKENIZERS_PARALLELISM"] = "false"
if opt.contrastive_mode == "moco":
model_class = moco.MoCo
elif opt.contrastive_mode == "inbatch":
model_class = inbatch.InBatch
else:
raise ValueError(f"contrastive mode: {opt.contrastive_mode} not recognised")
if not directory_exists and opt.model_path == "none":
model = model_class(opt)
model = model.cuda()
optimizer, scheduler = utils.set_optim(opt, model)
step = 0
elif directory_exists:
model_path = os.path.join(opt.output_dir, "checkpoint", "latest")
model, optimizer, scheduler, opt_checkpoint, step = utils.load(
model_class,
model_path,
opt,
reset_params=False,
)
logger.info(f"Model loaded from {opt.output_dir}")
else:
model, optimizer, scheduler, opt_checkpoint, step = utils.load(
model_class,
opt.model_path,
opt,
reset_params=False if opt.continue_training else True,
)
if not opt.continue_training:
step = 0
logger.info(f"Model loaded from {opt.model_path}")
logger.info(utils.get_parameters(model))
if dist.is_initialized():
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[opt.local_rank],
output_device=opt.local_rank,
find_unused_parameters=False,
)
dist.barrier()
logger.info("Start training")
train(opt, model, optimizer, scheduler, step)
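# Example launch (illustrative flags and paths only; see Options in src/options.py
# for the full argument list and defaults):
#   python train.py --output_dir ./checkpoint/run1 --train_data ./encoded_data \
#       --contrastive_mode moco --total_steps 1000 --per_gpu_batch_size 64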
|
contriever-main
|
train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import argparse
import csv
import json
import logging
import pickle
import time
import glob
from pathlib import Path
import numpy as np
import torch
import transformers
import src.index
import src.contriever
import src.utils
import src.slurm
import src.data
from src.evaluation import calculate_matches
import src.normalize_text
os.environ["TOKENIZERS_PARALLELISM"] = "true"
def embed_queries(args, queries, model, tokenizer):
model.eval()
embeddings, batch_question = [], []
with torch.no_grad():
for k, q in enumerate(queries):
if args.lowercase:
q = q.lower()
if args.normalize_text:
q = src.normalize_text.normalize(q)
batch_question.append(q)
if len(batch_question) == args.per_gpu_batch_size or k == len(queries) - 1:
encoded_batch = tokenizer.batch_encode_plus(
batch_question,
return_tensors="pt",
max_length=args.question_maxlength,
padding=True,
truncation=True,
)
encoded_batch = {k: v.cuda() for k, v in encoded_batch.items()}
output = model(**encoded_batch)
embeddings.append(output.cpu())
batch_question = []
embeddings = torch.cat(embeddings, dim=0)
print(f"Questions embeddings shape: {embeddings.size()}")
return embeddings.numpy()
def index_encoded_data(index, embedding_files, indexing_batch_size):
allids = []
allembeddings = np.array([])
for i, file_path in enumerate(embedding_files):
print(f"Loading file {file_path}")
with open(file_path, "rb") as fin:
ids, embeddings = pickle.load(fin)
allembeddings = np.vstack((allembeddings, embeddings)) if allembeddings.size else embeddings
allids.extend(ids)
while allembeddings.shape[0] > indexing_batch_size:
allembeddings, allids = add_embeddings(index, allembeddings, allids, indexing_batch_size)
while allembeddings.shape[0] > 0:
allembeddings, allids = add_embeddings(index, allembeddings, allids, indexing_batch_size)
print("Data indexing completed.")
def add_embeddings(index, embeddings, ids, indexing_batch_size):
end_idx = min(indexing_batch_size, embeddings.shape[0])
ids_toadd = ids[:end_idx]
embeddings_toadd = embeddings[:end_idx]
ids = ids[end_idx:]
embeddings = embeddings[end_idx:]
index.index_data(ids_toadd, embeddings_toadd)
return embeddings, ids
def validate(data, workers_num):
match_stats = calculate_matches(data, workers_num)
top_k_hits = match_stats.top_k_hits
print("Validation results: top k documents hits %s", top_k_hits)
top_k_hits = [v / len(data) for v in top_k_hits]
message = ""
for k in [5, 10, 20, 100]:
if k <= len(top_k_hits):
message += f"R@{k}: {top_k_hits[k-1]} "
print(message)
return match_stats.questions_doc_hits
def add_passages(data, passages, top_passages_and_scores):
# add passages to original data
merged_data = []
assert len(data) == len(top_passages_and_scores)
for i, d in enumerate(data):
results_and_scores = top_passages_and_scores[i]
docs = [passages[doc_id] for doc_id in results_and_scores[0]]
scores = [str(score) for score in results_and_scores[1]]
ctxs_num = len(docs)
d["ctxs"] = [
{
"id": results_and_scores[0][c],
"title": docs[c]["title"],
"text": docs[c]["text"],
"score": scores[c],
}
for c in range(ctxs_num)
]
def add_hasanswer(data, hasanswer):
# add hasanswer to data
for i, ex in enumerate(data):
for k, d in enumerate(ex["ctxs"]):
d["hasanswer"] = hasanswer[i][k]
def load_data(data_path):
if data_path.endswith(".json"):
with open(data_path, "r") as fin:
data = json.load(fin)
elif data_path.endswith(".jsonl"):
data = []
with open(data_path, "r") as fin:
for k, example in enumerate(fin):
example = json.loads(example)
data.append(example)
return data
def main(args):
print(f"Loading model from: {args.model_name_or_path}")
model, tokenizer, _ = src.contriever.load_retriever(args.model_name_or_path)
model.eval()
model = model.cuda()
if not args.no_fp16:
model = model.half()
index = src.index.Indexer(args.projection_size, args.n_subquantizers, args.n_bits)
# index all passages
input_paths = glob.glob(args.passages_embeddings)
input_paths = sorted(input_paths)
embeddings_dir = os.path.dirname(input_paths[0])
index_path = os.path.join(embeddings_dir, "index.faiss")
if args.save_or_load_index and os.path.exists(index_path):
index.deserialize_from(embeddings_dir)
else:
print(f"Indexing passages from files {input_paths}")
start_time_indexing = time.time()
index_encoded_data(index, input_paths, args.indexing_batch_size)
print(f"Indexing time: {time.time()-start_time_indexing:.1f} s.")
if args.save_or_load_index:
index.serialize(embeddings_dir)
# load passages
passages = src.data.load_passages(args.passages)
passage_id_map = {x["id"]: x for x in passages}
data_paths = glob.glob(args.data)
alldata = []
for path in data_paths:
data = load_data(path)
output_path = os.path.join(args.output_dir, os.path.basename(path))
queries = [ex["question"] for ex in data]
questions_embedding = embed_queries(args, queries, model, tokenizer)
# get top k results
start_time_retrieval = time.time()
top_ids_and_scores = index.search_knn(questions_embedding, args.n_docs)
print(f"Search time: {time.time()-start_time_retrieval:.1f} s.")
add_passages(data, passage_id_map, top_ids_and_scores)
hasanswer = validate(data, args.validation_workers)
add_hasanswer(data, hasanswer)
os.makedirs(os.path.dirname(output_path), exist_ok=True)
with open(output_path, "w") as fout:
for ex in data:
json.dump(ex, fout, ensure_ascii=False)
fout.write("\n")
print(f"Saved results to {output_path}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--data",
required=True,
type=str,
default=None,
help=".json file containing question and answers, similar format to reader data",
)
parser.add_argument("--passages", type=str, default=None, help="Path to passages (.tsv file)")
parser.add_argument("--passages_embeddings", type=str, default=None, help="Glob path to encoded passages")
parser.add_argument(
"--output_dir", type=str, default=None, help="Results are written to outputdir with data suffix"
)
parser.add_argument("--n_docs", type=int, default=100, help="Number of documents to retrieve per questions")
parser.add_argument(
"--validation_workers", type=int, default=32, help="Number of parallel processes to validate results"
)
parser.add_argument("--per_gpu_batch_size", type=int, default=64, help="Batch size for question encoding")
parser.add_argument(
"--save_or_load_index", action="store_true", help="If enabled, save index and load index if it exists"
)
parser.add_argument(
"--model_name_or_path", type=str, help="path to directory containing model weights and config file"
)
parser.add_argument("--no_fp16", action="store_true", help="inference in fp32")
parser.add_argument("--question_maxlength", type=int, default=512, help="Maximum number of tokens in a question")
parser.add_argument(
"--indexing_batch_size", type=int, default=1000000, help="Batch size of the number of passages indexed"
)
parser.add_argument("--projection_size", type=int, default=768)
parser.add_argument(
"--n_subquantizers",
type=int,
default=0,
help="Number of subquantizer used for vector quantization, if 0 flat index is used",
)
parser.add_argument("--n_bits", type=int, default=8, help="Number of bits per subquantizer")
parser.add_argument("--lang", nargs="+")
parser.add_argument("--dataset", type=str, default="none")
parser.add_argument("--lowercase", action="store_true", help="lowercase text before encoding")
parser.add_argument("--normalize_text", action="store_true", help="normalize text")
args = parser.parse_args()
src.slurm.init_distributed_mode(args)
main(args)
|
contriever-main
|
passage_retrieval.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
import os
import csv
import json
def convert2beir(data_path, output_path):
splits = ['test', 'dev', 'train']
queries_path = os.path.join(output_path, "queries.jsonl")
corpus_path = os.path.join(output_path, "corpus.jsonl")
os.makedirs(os.path.dirname(corpus_path), exist_ok=True)
queries = []
with open(queries_path, "w", encoding="utf-8") as fout:
with open(os.path.join(data_path, f"topic.tsv"), "r", encoding="utf-8") as fin:
reader = csv.reader(fin, delimiter="\t")
for x in reader:
qdict = {
"_id": x[0],
"text": x[1]
}
json.dump(qdict, fout, ensure_ascii=False)
fout.write('\n')
with open(os.path.join(data_path, "collection", "docs.jsonl"), "r") as fin:
with open(corpus_path, "w", encoding="utf-8") as fout:
for line in fin:
x = json.loads(line)
x["_id"] = x["id"]
x["text"] = x["contents"]
x["title"] = ""
del x["id"]
del x["contents"]
json.dump(x, fout, ensure_ascii=False)
fout.write('\n')
for split in splits:
qrels_path = os.path.join(output_path, "qrels", f"{split}.tsv")
os.makedirs(os.path.dirname(qrels_path), exist_ok=True)
with open(os.path.join(data_path, f"qrels.{split}.txt"), "r", encoding="utf-8") as fin:
with open(qrels_path, "w", encoding="utf-8") as fout:
writer = csv.writer(fout, delimiter='\t')
writer.writerow(["query-id", "corpus-id", "score"])
for line in fin:
line = line.strip()
el = line.split()
qid = el[0]
i = el[2]
s = el[3]
writer.writerow([qid, i, s])
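# Example invocation (illustrative paths; expects Mr. TyDi-style topic.tsv,
# collection/docs.jsonl and qrels.{split}.txt files under the input directory):
#   python data_scripts/convertmrtydi2beir.py <mrtydi_language_dir> <beir_output_dir>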
if __name__ == '__main__':
convert2beir(sys.argv[1], sys.argv[2])
|
contriever-main
|
data_scripts/convertmrtydi2beir.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import sys
import os
import json
from collections import defaultdict
def preprocess_xmkqa(input_path, output_dir):
os.makedirs(output_dir, exist_ok=True)
mkqa = []
with open(input_path, 'r') as fin:
for line in fin:
ex = json.loads(line)
mkqa.append(ex)
mkqadict = {ex['example_id']:ex for ex in mkqa}
langs = ['en', 'ar', 'fi', 'ja', 'ko', 'ru', 'es', 'sv', 'he', 'th', \
'da', 'de', 'fr', 'it', 'nl', 'pl', 'pt', 'hu', 'vi', 'ms', \
'km', 'no', 'tr', 'zh_cn', 'zh_hk', 'zh_tw']
langdata = defaultdict(list)
for ex in mkqa:
answers = []
for a in ex['answers']['en']:
flag = False
if not (a['type'] == 'unanswerable' or a['type'] == 'binary' or a['type'] == 'long_answer'):
flag = True
answers.extend(a.get("aliases", []))
answers.append(a.get("text"))
if flag:
for lang in langs:
langex = {
'id': ex['example_id'],
'lang': lang,
'question': ex['queries'][lang], #question in specific languages
'answers': answers #english answers
}
langdata[lang].append(langex)
for lang, data in langdata.items():
with open(os.path.join(output_dir, f'{lang}.jsonl'), 'w') as fout:
for ex in data:
json.dump(ex, fout, ensure_ascii=False)
fout.write('\n')
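# Example invocation (illustrative paths; expects the MKQA jsonl release as input
# and writes one <lang>.jsonl file per language to the output directory):
#   python data_scripts/preprocess_xmkqa.py mkqa.jsonl <output_dir>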
if __name__ == '__main__':
preprocess_xmkqa(sys.argv[1], sys.argv[2])
|
contriever-main
|
data_scripts/preprocess_xmkqa.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import torch
import transformers
from transformers import BertModel, XLMRobertaModel
from src import utils
class Contriever(BertModel):
def __init__(self, config, pooling="average", **kwargs):
super().__init__(config, add_pooling_layer=False)
if not hasattr(config, "pooling"):
self.config.pooling = pooling
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
normalize=False,
):
model_output = super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
last_hidden = model_output["last_hidden_state"]
last_hidden = last_hidden.masked_fill(~attention_mask[..., None].bool(), 0.0)
if self.config.pooling == "average":
emb = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
elif self.config.pooling == "cls":
emb = last_hidden[:, 0]
if normalize:
emb = torch.nn.functional.normalize(emb, dim=-1)
return emb
class XLMRetriever(XLMRobertaModel):
def __init__(self, config, pooling="average", **kwargs):
super().__init__(config, add_pooling_layer=False)
if not hasattr(config, "pooling"):
self.config.pooling = pooling
def forward(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
encoder_hidden_states=None,
encoder_attention_mask=None,
output_attentions=None,
output_hidden_states=None,
normalize=False,
):
model_output = super().forward(
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
encoder_hidden_states=encoder_hidden_states,
encoder_attention_mask=encoder_attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
)
last_hidden = model_output["last_hidden_state"]
last_hidden = last_hidden.masked_fill(~attention_mask[..., None].bool(), 0.0)
if self.config.pooling == "average":
emb = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]
elif self.config.pooling == "cls":
emb = last_hidden[:, 0]
if normalize:
emb = torch.nn.functional.normalize(emb, dim=-1)
return emb
def load_retriever(model_path, pooling="average", random_init=False):
    # check whether a local checkpoint exists at model_path
path = os.path.join(model_path, "checkpoint.pth")
if os.path.exists(path):
pretrained_dict = torch.load(path, map_location="cpu")
opt = pretrained_dict["opt"]
if hasattr(opt, "retriever_model_id"):
retriever_model_id = opt.retriever_model_id
else:
# retriever_model_id = "bert-base-uncased"
retriever_model_id = "bert-base-multilingual-cased"
tokenizer = utils.load_hf(transformers.AutoTokenizer, retriever_model_id)
cfg = utils.load_hf(transformers.AutoConfig, retriever_model_id)
if "xlm" in retriever_model_id:
model_class = XLMRetriever
else:
model_class = Contriever
retriever = model_class(cfg)
pretrained_dict = pretrained_dict["model"]
if any("encoder_q." in key for key in pretrained_dict.keys()): # test if model is defined with moco class
pretrained_dict = {k.replace("encoder_q.", ""): v for k, v in pretrained_dict.items() if "encoder_q." in k}
elif any("encoder." in key for key in pretrained_dict.keys()): # test if model is defined with inbatch class
pretrained_dict = {k.replace("encoder.", ""): v for k, v in pretrained_dict.items() if "encoder." in k}
retriever.load_state_dict(pretrained_dict, strict=False)
else:
retriever_model_id = model_path
if "xlm" in retriever_model_id:
model_class = XLMRetriever
else:
model_class = Contriever
cfg = utils.load_hf(transformers.AutoConfig, model_path)
tokenizer = utils.load_hf(transformers.AutoTokenizer, model_path)
retriever = utils.load_hf(model_class, model_path)
return retriever, tokenizer, retriever_model_id
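# Usage sketch (added for illustration): embed a query with a pretrained checkpoint.
# The model identifier below is an assumption; any local checkpoint directory or
# HuggingFace model id compatible with load_retriever should work the same way.
def _example_embed_query(model_name_or_path="facebook/contriever"):
    retriever, tokenizer, _ = load_retriever(model_name_or_path)
    inputs = tokenizer(["Where was Marie Curie born?"], padding=True, truncation=True, return_tensors="pt")
    embedding = retriever(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])
    return embedding  # shape: (1, hidden_size)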
|
contriever-main
|
src/contriever.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import argparse
import os
class Options:
def __init__(self):
self.parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
self.initialize()
def initialize(self):
# basic parameters
self.parser.add_argument(
"--output_dir", type=str, default="./checkpoint/my_experiments", help="models are saved here"
)
self.parser.add_argument(
"--train_data",
nargs="+",
default=[],
help="Data used for training, passed as a list of directories splitted into tensor files.",
)
self.parser.add_argument(
"--eval_data",
nargs="+",
default=[],
help="Data used for evaluation during finetuning, this option is not used during contrastive pre-training.",
)
self.parser.add_argument(
"--eval_datasets", nargs="+", default=[], help="List of datasets used for evaluation, in BEIR format"
)
self.parser.add_argument(
"--eval_datasets_dir", type=str, default="./", help="Directory where eval datasets are stored"
)
self.parser.add_argument("--model_path", type=str, default="none", help="path for retraining")
self.parser.add_argument("--continue_training", action="store_true")
self.parser.add_argument("--num_workers", type=int, default=5)
self.parser.add_argument("--chunk_length", type=int, default=256)
self.parser.add_argument("--loading_mode", type=str, default="split")
self.parser.add_argument("--lower_case", action="store_true", help="perform evaluation after lowercasing")
self.parser.add_argument(
"--sampling_coefficient",
type=float,
default=0.0,
help="coefficient used for sampling between different datasets during training, \
by default sampling is uniform over datasets",
)
self.parser.add_argument("--augmentation", type=str, default="none")
self.parser.add_argument("--prob_augmentation", type=float, default=0.0)
self.parser.add_argument("--dropout", type=float, default=0.1)
self.parser.add_argument("--rho", type=float, default=0.05)
self.parser.add_argument("--contrastive_mode", type=str, default="moco")
self.parser.add_argument("--queue_size", type=int, default=65536)
self.parser.add_argument("--temperature", type=float, default=1.0)
self.parser.add_argument("--momentum", type=float, default=0.999)
self.parser.add_argument("--moco_train_mode_encoder_k", action="store_true")
self.parser.add_argument("--eval_normalize_text", action="store_true")
self.parser.add_argument("--norm_query", action="store_true")
self.parser.add_argument("--norm_doc", action="store_true")
self.parser.add_argument("--projection_size", type=int, default=768)
self.parser.add_argument("--ratio_min", type=float, default=0.1)
self.parser.add_argument("--ratio_max", type=float, default=0.5)
self.parser.add_argument("--score_function", type=str, default="dot")
self.parser.add_argument("--retriever_model_id", type=str, default="bert-base-uncased")
self.parser.add_argument("--pooling", type=str, default="average")
self.parser.add_argument("--random_init", action="store_true", help="init model with random weights")
# dataset parameters
self.parser.add_argument("--per_gpu_batch_size", default=64, type=int, help="Batch size per GPU for training.")
self.parser.add_argument(
"--per_gpu_eval_batch_size", default=256, type=int, help="Batch size per GPU for evaluation."
)
self.parser.add_argument("--total_steps", type=int, default=1000)
self.parser.add_argument("--warmup_steps", type=int, default=-1)
self.parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
self.parser.add_argument("--main_port", type=int, default=10001, help="Master port (for multi-node SLURM jobs)")
self.parser.add_argument("--seed", type=int, default=0, help="random seed for initialization")
# training parameters
self.parser.add_argument("--optim", type=str, default="adamw")
self.parser.add_argument("--scheduler", type=str, default="linear")
self.parser.add_argument("--lr", type=float, default=1e-4, help="learning rate")
self.parser.add_argument(
"--lr_min_ratio",
type=float,
default=0.0,
help="minimum learning rate at the end of the optimization schedule as a ratio of the learning rate",
)
self.parser.add_argument("--weight_decay", type=float, default=0.01, help="learning rate")
self.parser.add_argument("--beta1", type=float, default=0.9, help="beta1")
self.parser.add_argument("--beta2", type=float, default=0.98, help="beta2")
self.parser.add_argument("--eps", type=float, default=1e-6, help="eps")
self.parser.add_argument(
"--log_freq", type=int, default=100, help="log train stats every <log_freq> steps during training"
)
self.parser.add_argument(
"--eval_freq", type=int, default=500, help="evaluate model every <eval_freq> steps during training"
)
self.parser.add_argument("--save_freq", type=int, default=50000)
self.parser.add_argument("--maxload", type=int, default=None)
self.parser.add_argument("--label_smoothing", type=float, default=0.0)
# finetuning options
self.parser.add_argument("--negative_ctxs", type=int, default=1)
self.parser.add_argument("--negative_hard_min_idx", type=int, default=0)
self.parser.add_argument("--negative_hard_ratio", type=float, default=0.0)
def print_options(self, opt):
message = ""
for k, v in sorted(vars(opt).items()):
comment = ""
default = self.parser.get_default(k)
if v != default:
comment = f"\t[default: %s]" % str(default)
message += f"{str(k):>40}: {str(v):<40}{comment}\n"
print(message, flush=True)
        model_dir = os.path.join(opt.output_dir, "models")
        if not os.path.exists(model_dir):
            os.makedirs(model_dir)
file_name = os.path.join(opt.output_dir, "opt.txt")
with open(file_name, "wt") as opt_file:
opt_file.write(message)
opt_file.write("\n")
def parse(self):
opt, _ = self.parser.parse_known_args()
# opt = self.parser.parse_args()
return opt
|
contriever-main
|
src/options.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import numpy as np
import math
import random
import transformers
import logging
import torch.distributed as dist
from src import contriever, dist_utils, utils
logger = logging.getLogger(__name__)
class InBatch(nn.Module):
def __init__(self, opt, retriever=None, tokenizer=None):
super(InBatch, self).__init__()
self.opt = opt
self.norm_doc = opt.norm_doc
self.norm_query = opt.norm_query
self.label_smoothing = opt.label_smoothing
if retriever is None or tokenizer is None:
retriever, tokenizer = self._load_retriever(
opt.retriever_model_id, pooling=opt.pooling, random_init=opt.random_init
)
self.tokenizer = tokenizer
self.encoder = retriever
def _load_retriever(self, model_id, pooling, random_init):
cfg = utils.load_hf(transformers.AutoConfig, model_id)
tokenizer = utils.load_hf(transformers.AutoTokenizer, model_id)
if "xlm" in model_id:
model_class = contriever.XLMRetriever
else:
model_class = contriever.Contriever
if random_init:
retriever = model_class(cfg)
else:
retriever = utils.load_hf(model_class, model_id)
if "bert-" in model_id:
if tokenizer.bos_token_id is None:
tokenizer.bos_token = "[CLS]"
if tokenizer.eos_token_id is None:
tokenizer.eos_token = "[SEP]"
retriever.config.pooling = pooling
return retriever, tokenizer
def get_encoder(self):
return self.encoder
def forward(self, q_tokens, q_mask, k_tokens, k_mask, stats_prefix="", iter_stats={}, **kwargs):
bsz = len(q_tokens)
labels = torch.arange(0, bsz, dtype=torch.long, device=q_tokens.device)
qemb = self.encoder(input_ids=q_tokens, attention_mask=q_mask, normalize=self.norm_query)
kemb = self.encoder(input_ids=k_tokens, attention_mask=k_mask, normalize=self.norm_doc)
gather_fn = dist_utils.gather
gather_kemb = gather_fn(kemb)
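        # Keys from all ranks are concatenated, so the positive for query i on this
        # rank sits at column i + rank * local_batch_size of the score matrix.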
labels = labels + dist_utils.get_rank() * len(kemb)
scores = torch.einsum("id, jd->ij", qemb / self.opt.temperature, gather_kemb)
loss = torch.nn.functional.cross_entropy(scores, labels, label_smoothing=self.label_smoothing)
# log stats
if len(stats_prefix) > 0:
stats_prefix = stats_prefix + "/"
iter_stats[f"{stats_prefix}loss"] = (loss.item(), bsz)
predicted_idx = torch.argmax(scores, dim=-1)
accuracy = 100 * (predicted_idx == labels).float().mean()
stdq = torch.std(qemb, dim=0).mean().item()
stdk = torch.std(kemb, dim=0).mean().item()
iter_stats[f"{stats_prefix}accuracy"] = (accuracy, bsz)
iter_stats[f"{stats_prefix}stdq"] = (stdq, bsz)
iter_stats[f"{stats_prefix}stdk"] = (stdk, bsz)
return loss, iter_stats
|
contriever-main
|
src/inbatch.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import pickle
from typing import List, Tuple
import faiss
import numpy as np
from tqdm import tqdm
class Indexer(object):
def __init__(self, vector_sz, n_subquantizers=0, n_bits=8):
if n_subquantizers > 0:
self.index = faiss.IndexPQ(vector_sz, n_subquantizers, n_bits, faiss.METRIC_INNER_PRODUCT)
else:
self.index = faiss.IndexFlatIP(vector_sz)
#self.index_id_to_db_id = np.empty((0), dtype=np.int64)
self.index_id_to_db_id = []
def index_data(self, ids, embeddings):
self._update_id_mapping(ids)
embeddings = embeddings.astype('float32')
if not self.index.is_trained:
self.index.train(embeddings)
self.index.add(embeddings)
print(f'Total data indexed {len(self.index_id_to_db_id)}')
    def search_knn(self, query_vectors: np.ndarray, top_docs: int, index_batch_size: int = 2048) -> List[Tuple[List[object], List[float]]]:
query_vectors = query_vectors.astype('float32')
result = []
nbatch = (len(query_vectors)-1) // index_batch_size + 1
for k in tqdm(range(nbatch)):
start_idx = k*index_batch_size
end_idx = min((k+1)*index_batch_size, len(query_vectors))
q = query_vectors[start_idx: end_idx]
scores, indexes = self.index.search(q, top_docs)
# convert to external ids
db_ids = [[str(self.index_id_to_db_id[i]) for i in query_top_idxs] for query_top_idxs in indexes]
result.extend([(db_ids[i], scores[i]) for i in range(len(db_ids))])
return result
def serialize(self, dir_path):
index_file = os.path.join(dir_path, 'index.faiss')
meta_file = os.path.join(dir_path, 'index_meta.faiss')
print(f'Serializing index to {index_file}, meta data to {meta_file}')
faiss.write_index(self.index, index_file)
with open(meta_file, mode='wb') as f:
pickle.dump(self.index_id_to_db_id, f)
def deserialize_from(self, dir_path):
index_file = os.path.join(dir_path, 'index.faiss')
meta_file = os.path.join(dir_path, 'index_meta.faiss')
print(f'Loading index from {index_file}, meta data from {meta_file}')
self.index = faiss.read_index(index_file)
        print('Loaded index of type %s and size %d' % (type(self.index), self.index.ntotal))
with open(meta_file, "rb") as reader:
self.index_id_to_db_id = pickle.load(reader)
assert len(
self.index_id_to_db_id) == self.index.ntotal, 'Deserialized index_id_to_db_id should match faiss index size'
def _update_id_mapping(self, db_ids: List):
#new_ids = np.array(db_ids, dtype=np.int64)
#self.index_id_to_db_id = np.concatenate((self.index_id_to_db_id, new_ids), axis=0)
self.index_id_to_db_id.extend(db_ids)
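# Usage sketch (added for illustration, not part of the original file): index a few
# random vectors with a flat inner-product index and run a nearest-neighbour query.
def _example_indexer_usage(vector_sz=768, n_vectors=10):
    index = Indexer(vector_sz)
    ids = list(range(n_vectors))
    embeddings = np.random.rand(n_vectors, vector_sz).astype('float32')
    index.index_data(ids, embeddings)
    queries = np.random.rand(2, vector_sz).astype('float32')
    return index.search_knn(queries, top_docs=3)  # list of (db_ids, scores) per query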
|
contriever-main
|
src/index.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
from logging import getLogger
import os
import sys
import torch
import socket
import signal
import subprocess
logger = getLogger()
def sig_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
prod_id = int(os.environ['SLURM_PROCID'])
logger.warning("Host: %s - Global rank: %i" % (socket.gethostname(), prod_id))
if prod_id == 0:
logger.warning("Requeuing job " + os.environ['SLURM_JOB_ID'])
os.system('scontrol requeue ' + os.environ['SLURM_JOB_ID'])
else:
logger.warning("Not the main process, no need to requeue.")
sys.exit(-1)
def term_handler(signum, frame):
logger.warning("Signal handler called with signal " + str(signum))
logger.warning("Bypassing SIGTERM.")
def init_signal_handler():
"""
Handle signals sent by SLURM for time limit / pre-emption.
"""
signal.signal(signal.SIGUSR1, sig_handler)
signal.signal(signal.SIGTERM, term_handler)
def init_distributed_mode(params):
"""
Handle single and multi-GPU / multi-node / SLURM jobs.
Initialize the following variables:
- local_rank
- global_rank
- world_size
"""
is_slurm_job = 'SLURM_JOB_ID' in os.environ and not 'WORLD_SIZE' in os.environ
has_local_rank = hasattr(params, 'local_rank')
# SLURM job without torch.distributed.launch
if is_slurm_job and has_local_rank:
assert params.local_rank == -1 # on the cluster, this is handled by SLURM
# local rank on the current node / global rank
params.local_rank = int(os.environ['SLURM_LOCALID'])
params.global_rank = int(os.environ['SLURM_PROCID'])
params.world_size = int(os.environ['SLURM_NTASKS'])
# define master address and master port
hostnames = subprocess.check_output(['scontrol', 'show', 'hostnames', os.environ['SLURM_JOB_NODELIST']])
params.main_addr = hostnames.split()[0].decode('utf-8')
assert 10001 <= params.main_port <= 20000 or params.world_size == 1
# set environment variables for 'env://'
os.environ['MASTER_ADDR'] = params.main_addr
os.environ['MASTER_PORT'] = str(params.main_port)
os.environ['WORLD_SIZE'] = str(params.world_size)
os.environ['RANK'] = str(params.global_rank)
is_distributed = True
# multi-GPU job (local or multi-node) - jobs started with torch.distributed.launch
elif has_local_rank and params.local_rank != -1:
assert params.main_port == -1
# read environment variables
params.global_rank = int(os.environ['RANK'])
params.world_size = int(os.environ['WORLD_SIZE'])
is_distributed = True
# local job (single GPU)
else:
params.local_rank = 0
params.global_rank = 0
params.world_size = 1
is_distributed = False
# set GPU device
torch.cuda.set_device(params.local_rank)
# initialize multi-GPU
if is_distributed:
# http://pytorch.apachecn.org/en/0.3.0/distributed.html#environment-variable-initialization
# 'env://' will read these environment variables:
# MASTER_PORT - required; has to be a free port on machine with rank 0
# MASTER_ADDR - required (except for rank 0); address of rank 0 node
# WORLD_SIZE - required; can be set either here, or in a call to init function
# RANK - required; can be set either here, or in a call to init function
#print("Initializing PyTorch distributed ...")
torch.distributed.init_process_group(
init_method='env://',
backend='nccl',
#world_size=params.world_size,
#rank=params.global_rank,
)
|
contriever-main
|
src/slurm.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import collections
import logging
import regex
import string
import unicodedata
from functools import partial
from multiprocessing import Pool as ProcessPool
from typing import Tuple, List, Dict
import numpy as np
"""
Evaluation code from DPR: https://github.com/facebookresearch/DPR
"""
class SimpleTokenizer(object):
ALPHA_NUM = r'[\p{L}\p{N}\p{M}]+'
NON_WS = r'[^\p{Z}\p{C}]'
def __init__(self):
"""
Args:
annotators: None or empty set (only tokenizes).
"""
self._regexp = regex.compile(
'(%s)|(%s)' % (self.ALPHA_NUM, self.NON_WS),
flags=regex.IGNORECASE + regex.UNICODE + regex.MULTILINE
)
def tokenize(self, text, uncased=False):
matches = [m for m in self._regexp.finditer(text)]
if uncased:
tokens = [m.group().lower() for m in matches]
else:
tokens = [m.group() for m in matches]
return tokens
logger = logging.getLogger(__name__)
QAMatchStats = collections.namedtuple('QAMatchStats', ['top_k_hits', 'questions_doc_hits'])
def calculate_matches(data: List, workers_num: int):
"""
Evaluates answers presence in the set of documents. This function is supposed to be used with a large collection of
documents and results. It internally forks multiple sub-processes for evaluation and then merges results
:param all_docs: dictionary of the entire documents database. doc_id -> (doc_text, title)
:param answers: list of answers's list. One list per question
:param closest_docs: document ids of the top results along with their scores
:param workers_num: amount of parallel threads to process data
:param match_type: type of answer matching. Refer to has_answer code for available options
:return: matching information tuple.
top_k_hits - a list where the index is the amount of top documents retrieved and the value is the total amount of
valid matches across an entire dataset.
questions_doc_hits - more detailed info with answer matches for every question and every retrieved document
"""
logger.info('Matching answers in top docs...')
tokenizer = SimpleTokenizer()
get_score_partial = partial(check_answer, tokenizer=tokenizer)
processes = ProcessPool(processes=workers_num)
scores = processes.map(get_score_partial, data)
logger.info('Per question validation results len=%d', len(scores))
n_docs = len(data[0]['ctxs'])
top_k_hits = [0] * n_docs
for question_hits in scores:
best_hit = next((i for i, x in enumerate(question_hits) if x), None)
if best_hit is not None:
top_k_hits[best_hit:] = [v + 1 for v in top_k_hits[best_hit:]]
return QAMatchStats(top_k_hits, scores)
def check_answer(example, tokenizer) -> List[bool]:
"""Search through all the top docs to see if they have any of the answers."""
answers = example['answers']
ctxs = example['ctxs']
hits = []
for i, doc in enumerate(ctxs):
text = doc['text']
if text is None: # cannot find the document for some reason
logger.warning("no doc in db")
hits.append(False)
continue
hits.append(has_answer(answers, text, tokenizer))
return hits
def has_answer(answers, text, tokenizer) -> bool:
"""Check if a document contains an answer string."""
text = _normalize(text)
text = tokenizer.tokenize(text, uncased=True)
for answer in answers:
answer = _normalize(answer)
answer = tokenizer.tokenize(answer, uncased=True)
for i in range(0, len(text) - len(answer) + 1):
if answer == text[i: i + len(answer)]:
return True
return False
#################################################
######## READER EVALUATION ########
#################################################
def _normalize(text):
return unicodedata.normalize('NFD', text)
#Normalization and score functions from SQuAD evaluation script https://worksheets.codalab.org/rest/bundles/0x6b567e1cf2e041ec80d7098f031c5c9e/contents/blob/
def normalize_answer(s):
def remove_articles(text):
return regex.sub(r'\b(a|an|the)\b', ' ', text)
def white_space_fix(text):
return ' '.join(text.split())
def remove_punc(text):
exclude = set(string.punctuation)
return ''.join(ch for ch in text if ch not in exclude)
def lower(text):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(s))))
def em(prediction, ground_truth):
return normalize_answer(prediction) == normalize_answer(ground_truth)
def f1(prediction, ground_truth):
prediction_tokens = normalize_answer(prediction).split()
ground_truth_tokens = normalize_answer(ground_truth).split()
    common = collections.Counter(prediction_tokens) & collections.Counter(ground_truth_tokens)
num_same = sum(common.values())
if num_same == 0:
return 0
precision = 1.0 * num_same / len(prediction_tokens)
recall = 1.0 * num_same / len(ground_truth_tokens)
f1 = (2 * precision * recall) / (precision + recall)
return f1
def f1_score(prediction, ground_truths):
return max([f1(prediction, gt) for gt in ground_truths])
def exact_match_score(prediction, ground_truths):
return max([em(prediction, gt) for gt in ground_truths])
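# Worked example (illustrative): f1("the cat sat", "cat sat down") normalizes both
# strings (lowercase, strip articles and punctuation), giving tokens ['cat', 'sat']
# vs ['cat', 'sat', 'down']; precision = 2/2, recall = 2/3, so f1 = 0.8.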
####################################################
######## RETRIEVER EVALUATION ########
####################################################
def eval_batch(scores, inversions, avg_topk, idx_topk):
for k, s in enumerate(scores):
s = s.cpu().numpy()
sorted_idx = np.argsort(-s)
score(sorted_idx, inversions, avg_topk, idx_topk)
def count_inversions(arr):
inv_count = 0
lenarr = len(arr)
for i in range(lenarr):
for j in range(i + 1, lenarr):
if (arr[i] > arr[j]):
inv_count += 1
return inv_count
def score(x, inversions, avg_topk, idx_topk):
x = np.array(x)
inversions.append(count_inversions(x))
for k in avg_topk:
# ratio of passages in the predicted top-k that are
# also in the topk given by gold score
avg_pred_topk = (x[:k]<k).mean()
avg_topk[k].append(avg_pred_topk)
for k in idx_topk:
below_k = (x<k)
# number of passages required to obtain all passages from gold top-k
idx_gold_topk = len(x) - np.argmax(below_k[::-1])
idx_topk[k].append(idx_gold_topk)
|
contriever-main
|
src/evaluation.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import random
import json
import sys
import numpy as np
from src import normalize_text
class Dataset(torch.utils.data.Dataset):
def __init__(
self,
datapaths,
negative_ctxs=1,
negative_hard_ratio=0.0,
negative_hard_min_idx=0,
training=False,
global_rank=-1,
world_size=-1,
maxload=None,
normalize=False,
):
self.negative_ctxs = negative_ctxs
self.negative_hard_ratio = negative_hard_ratio
self.negative_hard_min_idx = negative_hard_min_idx
self.training = training
        self.normalize_fn = normalize_text.normalize if normalize else lambda x: x
self._load_data(datapaths, global_rank, world_size, maxload)
def __len__(self):
return len(self.data)
def __getitem__(self, index):
example = self.data[index]
question = example["question"]
if self.training:
gold = random.choice(example["positive_ctxs"])
n_hard_negatives, n_random_negatives = self.sample_n_hard_negatives(example)
negatives = []
if n_random_negatives > 0:
random_negatives = random.sample(example["negative_ctxs"], n_random_negatives)
negatives += random_negatives
if n_hard_negatives > 0:
hard_negatives = random.sample(
example["hard_negative_ctxs"][self.negative_hard_min_idx :], n_hard_negatives
)
negatives += hard_negatives
else:
gold = example["positive_ctxs"][0]
nidx = 0
if "negative_ctxs" in example:
negatives = [example["negative_ctxs"][nidx]]
else:
negatives = []
gold = gold["title"] + " " + gold["text"] if "title" in gold and len(gold["title"]) > 0 else gold["text"]
negatives = [
n["title"] + " " + n["text"] if ("title" in n and len(n["title"]) > 0) else n["text"] for n in negatives
]
example = {
"query": self.normalize_fn(question),
"gold": self.normalize_fn(gold),
"negatives": [self.normalize_fn(n) for n in negatives],
}
return example
def _load_data(self, datapaths, global_rank, world_size, maxload):
counter = 0
self.data = []
for path in datapaths:
path = str(path)
if path.endswith(".jsonl"):
file_data, counter = self._load_data_jsonl(path, global_rank, world_size, counter, maxload)
elif path.endswith(".json"):
file_data, counter = self._load_data_json(path, global_rank, world_size, counter, maxload)
self.data.extend(file_data)
if maxload is not None and maxload > 0 and counter >= maxload:
break
def _load_data_json(self, path, global_rank, world_size, counter, maxload=None):
examples = []
with open(path, "r") as fin:
data = json.load(fin)
for example in data:
counter += 1
if global_rank > -1 and not counter % world_size == global_rank:
continue
examples.append(example)
if maxload is not None and maxload > 0 and counter == maxload:
break
return examples, counter
def _load_data_jsonl(self, path, global_rank, world_size, counter, maxload=None):
examples = []
with open(path, "r") as fin:
for line in fin:
counter += 1
if global_rank > -1 and not counter % world_size == global_rank:
continue
example = json.loads(line)
examples.append(example)
if maxload is not None and maxload > 0 and counter == maxload:
break
return examples, counter
def sample_n_hard_negatives(self, ex):
if "hard_negative_ctxs" in ex:
n_hard_negatives = sum([random.random() < self.negative_hard_ratio for _ in range(self.negative_ctxs)])
n_hard_negatives = min(n_hard_negatives, len(ex["hard_negative_ctxs"][self.negative_hard_min_idx :]))
else:
n_hard_negatives = 0
n_random_negatives = self.negative_ctxs - n_hard_negatives
if "negative_ctxs" in ex:
n_random_negatives = min(n_random_negatives, len(ex["negative_ctxs"]))
else:
n_random_negatives = 0
return n_hard_negatives, n_random_negatives
class Collator(object):
def __init__(self, tokenizer, passage_maxlength=200):
self.tokenizer = tokenizer
self.passage_maxlength = passage_maxlength
def __call__(self, batch):
queries = [ex["query"] for ex in batch]
golds = [ex["gold"] for ex in batch]
negs = [item for ex in batch for item in ex["negatives"]]
allpassages = golds + negs
qout = self.tokenizer.batch_encode_plus(
queries,
max_length=self.passage_maxlength,
truncation=True,
padding=True,
add_special_tokens=True,
return_tensors="pt",
)
kout = self.tokenizer.batch_encode_plus(
allpassages,
max_length=self.passage_maxlength,
truncation=True,
padding=True,
add_special_tokens=True,
return_tensors="pt",
)
q_tokens, q_mask = qout["input_ids"], qout["attention_mask"].bool()
k_tokens, k_mask = kout["input_ids"], kout["attention_mask"].bool()
g_tokens, g_mask = k_tokens[: len(golds)], k_mask[: len(golds)]
n_tokens, n_mask = k_tokens[len(golds) :], k_mask[len(golds) :]
batch = {
"q_tokens": q_tokens,
"q_mask": q_mask,
"k_tokens": k_tokens,
"k_mask": k_mask,
"g_tokens": g_tokens,
"g_mask": g_mask,
"n_tokens": n_tokens,
"n_mask": n_mask,
}
return batch
|
contriever-main
|
src/finetuning_data.py
|
contriever-main
|
src/__init__.py
|
|
"""
adapted from chemdataextractor.text.normalize
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tools for normalizing text.
https://github.com/mcs07/ChemDataExtractor
:copyright: Copyright 2016 by Matt Swain.
:license: MIT
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
'Software'), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
#: Control characters.
CONTROLS = {
'\u0001', '\u0002', '\u0003', '\u0004', '\u0005', '\u0006', '\u0007', '\u0008', '\u000e', '\u000f', '\u0011',
'\u0012', '\u0013', '\u0014', '\u0015', '\u0016', '\u0017', '\u0018', '\u0019', '\u001a', '\u001b',
}
# There are further control characters, but they are instead replaced with a space by unicode normalization
# '\u0009', '\u000a', '\u000b', '\u000c', '\u000d', '\u001c', '\u001d', '\u001e', '\u001f'
#: Hyphen and dash characters.
HYPHENS = {
'-', # \u002d Hyphen-minus
'‐', # \u2010 Hyphen
'‑', # \u2011 Non-breaking hyphen
'⁃', # \u2043 Hyphen bullet
'‒', # \u2012 figure dash
'–', # \u2013 en dash
'—', # \u2014 em dash
'―', # \u2015 horizontal bar
}
#: Minus characters.
MINUSES = {
'-', # \u002d Hyphen-minus
'−', # \u2212 Minus
'-', # \uff0d Full-width Hyphen-minus
'⁻', # \u207b Superscript minus
}
#: Plus characters.
PLUSES = {
'+', # \u002b Plus
'+', # \uff0b Full-width Plus
'⁺', # \u207a Superscript plus
}
#: Slash characters.
SLASHES = {
'/', # \u002f Solidus
'⁄', # \u2044 Fraction slash
'∕', # \u2215 Division slash
}
#: Tilde characters.
TILDES = {
'~', # \u007e Tilde
'˜', # \u02dc Small tilde
'⁓', # \u2053 Swung dash
'∼', # \u223c Tilde operator #in mbert vocab
'∽', # \u223d Reversed tilde
'∿', # \u223f Sine wave
'〜', # \u301c Wave dash #in mbert vocab
'~', # \uff5e Full-width tilde #in mbert vocab
}
#: Apostrophe characters.
APOSTROPHES = {
"'", # \u0027
'’', # \u2019
'՚', # \u055a
'Ꞌ', # \ua78b
'ꞌ', # \ua78c
''', # \uff07
}
#: Single quote characters.
SINGLE_QUOTES = {
"'", # \u0027
'‘', # \u2018
'’', # \u2019
'‚', # \u201a
'‛', # \u201b
}
#: Double quote characters.
DOUBLE_QUOTES = {
'"', # \u0022
'“', # \u201c
'”', # \u201d
'„', # \u201e
'‟', # \u201f
}
#: Accent characters.
ACCENTS = {
'`', # \u0060
'´', # \u00b4
}
#: Prime characters.
PRIMES = {
'′', # \u2032
'″', # \u2033
'‴', # \u2034
'‵', # \u2035
'‶', # \u2036
'‷', # \u2037
'⁗', # \u2057
}
#: Quote characters, including apostrophes, single quotes, double quotes, accents and primes.
QUOTES = APOSTROPHES | SINGLE_QUOTES | DOUBLE_QUOTES | ACCENTS | PRIMES
def normalize(text):
for control in CONTROLS:
text = text.replace(control, '')
text = text.replace('\u000b', ' ').replace('\u000c', ' ').replace(u'\u0085', ' ')
for hyphen in HYPHENS | MINUSES:
text = text.replace(hyphen, '-')
text = text.replace('\u00ad', '')
for double_quote in DOUBLE_QUOTES:
text = text.replace(double_quote, '"') # \u0022
for single_quote in (SINGLE_QUOTES | APOSTROPHES | ACCENTS):
text = text.replace(single_quote, "'") # \u0027
text = text.replace('′', "'") # \u2032 prime
text = text.replace('‵', "'") # \u2035 reversed prime
text = text.replace('″', "''") # \u2033 double prime
text = text.replace('‶', "''") # \u2036 reversed double prime
text = text.replace('‴', "'''") # \u2034 triple prime
text = text.replace('‷', "'''") # \u2037 reversed triple prime
text = text.replace('⁗', "''''") # \u2057 quadruple prime
text = text.replace('…', '...').replace(' . . . ', ' ... ') # \u2026
for slash in SLASHES:
text = text.replace(slash, '/')
#for tilde in TILDES:
# text = text.replace(tilde, '~')
return text
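# Illustrative example: typographic punctuation is mapped to plain ASCII,
#   normalize('“Hello” – it’s fine…')  ->  '"Hello" - it\'s fine...'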
|
contriever-main
|
src/normalize_text.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import sys
import logging
import torch
import errno
import math
from typing import Union, Tuple, List, Dict
from collections import defaultdict
from src import dist_utils
Number = Union[float, int]
logger = logging.getLogger(__name__)
def init_logger(args, stdout_only=False):
if torch.distributed.is_initialized():
torch.distributed.barrier()
stdout_handler = logging.StreamHandler(sys.stdout)
handlers = [stdout_handler]
if not stdout_only:
file_handler = logging.FileHandler(filename=os.path.join(args.output_dir, "run.log"))
handlers.append(file_handler)
logging.basicConfig(
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO if dist_utils.is_main() else logging.WARN,
format="[%(asctime)s] {%(filename)s:%(lineno)d} %(levelname)s - %(message)s",
handlers=handlers,
)
return logger
def symlink_force(target, link_name):
try:
os.symlink(target, link_name)
except OSError as e:
if e.errno == errno.EEXIST:
os.remove(link_name)
os.symlink(target, link_name)
else:
raise e
def save(model, optimizer, scheduler, step, opt, dir_path, name):
model_to_save = model.module if hasattr(model, "module") else model
path = os.path.join(dir_path, "checkpoint")
epoch_path = os.path.join(path, name) # "step-%s" % step)
os.makedirs(epoch_path, exist_ok=True)
cp = os.path.join(path, "latest")
fp = os.path.join(epoch_path, "checkpoint.pth")
checkpoint = {
"step": step,
"model": model_to_save.state_dict(),
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
"opt": opt,
}
torch.save(checkpoint, fp)
symlink_force(epoch_path, cp)
if not name == "lastlog":
logger.info(f"Saving model to {epoch_path}")
def load(model_class, dir_path, opt, reset_params=False):
epoch_path = os.path.realpath(dir_path)
checkpoint_path = os.path.join(epoch_path, "checkpoint.pth")
logger.info(f"loading checkpoint {checkpoint_path}")
checkpoint = torch.load(checkpoint_path, map_location="cpu")
opt_checkpoint = checkpoint["opt"]
state_dict = checkpoint["model"]
model = model_class(opt_checkpoint)
model.load_state_dict(state_dict, strict=True)
model = model.cuda()
step = checkpoint["step"]
if not reset_params:
optimizer, scheduler = set_optim(opt_checkpoint, model)
scheduler.load_state_dict(checkpoint["scheduler"])
optimizer.load_state_dict(checkpoint["optimizer"])
else:
optimizer, scheduler = set_optim(opt, model)
return model, optimizer, scheduler, opt_checkpoint, step
############ OPTIM
class WarmupLinearScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(WarmupLinearScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup:
return (1 - self.ratio) * step / float(max(1, self.warmup))
return max(
0.0,
1.0 + (self.ratio - 1) * (step - self.warmup) / float(max(1.0, self.total - self.warmup)),
)
class CosineScheduler(torch.optim.lr_scheduler.LambdaLR):
def __init__(self, optimizer, warmup, total, ratio=0.1, last_epoch=-1):
self.warmup = warmup
self.total = total
self.ratio = ratio
super(CosineScheduler, self).__init__(optimizer, self.lr_lambda, last_epoch=last_epoch)
def lr_lambda(self, step):
if step < self.warmup:
return float(step) / self.warmup
s = float(step - self.warmup) / (self.total - self.warmup)
return self.ratio + (1.0 - self.ratio) * math.cos(0.5 * math.pi * s)
def set_optim(opt, model):
if opt.optim == "adamw":
optimizer = torch.optim.AdamW(
model.parameters(), lr=opt.lr, betas=(opt.beta1, opt.beta2), eps=opt.eps, weight_decay=opt.weight_decay
)
else:
raise NotImplementedError("optimizer class not implemented")
scheduler_args = {
"warmup": opt.warmup_steps,
"total": opt.total_steps,
"ratio": opt.lr_min_ratio,
}
if opt.scheduler == "linear":
scheduler_class = WarmupLinearScheduler
elif opt.scheduler == "cosine":
scheduler_class = CosineScheduler
else:
raise ValueError
scheduler = scheduler_class(optimizer, **scheduler_args)
return optimizer, scheduler
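# Usage sketch (illustrative only): set_optim expects an options object exposing
# optim, lr, beta1, beta2, eps, weight_decay, scheduler, warmup_steps, total_steps
# and lr_min_ratio; a plain argparse.Namespace with those fields is enough.
def _example_set_optim():
    import argparse
    dummy_opt = argparse.Namespace(
        optim="adamw", lr=1e-4, beta1=0.9, beta2=0.98, eps=1e-6, weight_decay=0.01,
        scheduler="linear", warmup_steps=100, total_steps=1000, lr_min_ratio=0.0,
    )
    model = torch.nn.Linear(4, 4)
    return set_optim(dummy_opt, model)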
def get_parameters(net, verbose=False):
num_params = 0
for param in net.parameters():
num_params += param.numel()
message = "[Network] Total number of parameters : %.6f M" % (num_params / 1e6)
return message
class WeightedAvgStats:
"""provides an average over a bunch of stats"""
def __init__(self):
self.raw_stats: Dict[str, float] = defaultdict(float)
self.total_weights: Dict[str, float] = defaultdict(float)
def update(self, vals: Dict[str, Tuple[Number, Number]]) -> None:
for key, (value, weight) in vals.items():
self.raw_stats[key] += value * weight
self.total_weights[key] += weight
@property
def stats(self) -> Dict[str, float]:
return {x: self.raw_stats[x] / self.total_weights[x] for x in self.raw_stats.keys()}
@property
def tuple_stats(self) -> Dict[str, Tuple[float, float]]:
return {x: (self.raw_stats[x] / self.total_weights[x], self.total_weights[x]) for x in self.raw_stats.keys()}
def reset(self) -> None:
self.raw_stats = defaultdict(float)
self.total_weights = defaultdict(float)
@property
def average_stats(self) -> Dict[str, float]:
keys = sorted(self.raw_stats.keys())
if torch.distributed.is_initialized():
torch.distributed.broadcast_object_list(keys, src=0)
global_dict = {}
for k in keys:
if not k in self.total_weights:
v = 0.0
else:
v = self.raw_stats[k] / self.total_weights[k]
v, _ = dist_utils.weighted_average(v, self.total_weights[k])
global_dict[k] = v
return global_dict
def load_hf(object_class, model_name):
try:
obj = object_class.from_pretrained(model_name, local_files_only=True)
    except Exception:
obj = object_class.from_pretrained(model_name, local_files_only=False)
return obj
def init_tb_logger(output_dir):
try:
from torch.utils import tensorboard
if dist_utils.is_main():
tb_logger = tensorboard.SummaryWriter(output_dir)
else:
tb_logger = None
    except Exception:
logger.warning("Tensorboard is not available.")
tb_logger = None
return tb_logger
|
contriever-main
|
src/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.nn as nn
import logging
import copy
import transformers
from src import contriever, dist_utils, utils
logger = logging.getLogger(__name__)
class MoCo(nn.Module):
def __init__(self, opt):
super(MoCo, self).__init__()
self.queue_size = opt.queue_size
self.momentum = opt.momentum
self.temperature = opt.temperature
self.label_smoothing = opt.label_smoothing
self.norm_doc = opt.norm_doc
self.norm_query = opt.norm_query
self.moco_train_mode_encoder_k = opt.moco_train_mode_encoder_k # apply the encoder on keys in train mode
retriever, tokenizer = self._load_retriever(
opt.retriever_model_id, pooling=opt.pooling, random_init=opt.random_init
)
self.tokenizer = tokenizer
self.encoder_q = retriever
self.encoder_k = copy.deepcopy(retriever)
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data.copy_(param_q.data)
param_k.requires_grad = False
# create the queue
self.register_buffer("queue", torch.randn(opt.projection_size, self.queue_size))
self.queue = nn.functional.normalize(self.queue, dim=0)
self.register_buffer("queue_ptr", torch.zeros(1, dtype=torch.long))
def _load_retriever(self, model_id, pooling, random_init):
cfg = utils.load_hf(transformers.AutoConfig, model_id)
tokenizer = utils.load_hf(transformers.AutoTokenizer, model_id)
if "xlm" in model_id:
model_class = contriever.XLMRetriever
else:
model_class = contriever.Contriever
if random_init:
retriever = model_class(cfg)
else:
retriever = utils.load_hf(model_class, model_id)
if "bert-" in model_id:
if tokenizer.bos_token_id is None:
tokenizer.bos_token = "[CLS]"
if tokenizer.eos_token_id is None:
tokenizer.eos_token = "[SEP]"
retriever.config.pooling = pooling
return retriever, tokenizer
def get_encoder(self, return_encoder_k=False):
if return_encoder_k:
return self.encoder_k
else:
return self.encoder_q
def _momentum_update_key_encoder(self):
"""
Update of the key encoder
"""
for param_q, param_k in zip(self.encoder_q.parameters(), self.encoder_k.parameters()):
param_k.data = param_k.data * self.momentum + param_q.data * (1.0 - self.momentum)
@torch.no_grad()
def _dequeue_and_enqueue(self, keys):
# gather keys before updating queue
keys = dist_utils.gather_nograd(keys.contiguous())
batch_size = keys.shape[0]
ptr = int(self.queue_ptr)
assert self.queue_size % batch_size == 0, f"{batch_size}, {self.queue_size}" # for simplicity
# replace the keys at ptr (dequeue and enqueue)
self.queue[:, ptr : ptr + batch_size] = keys.T
ptr = (ptr + batch_size) % self.queue_size # move pointer
self.queue_ptr[0] = ptr
def _compute_logits(self, q, k):
l_pos = torch.einsum("nc,nc->n", [q, k]).unsqueeze(-1)
l_neg = torch.einsum("nc,ck->nk", [q, self.queue.clone().detach()])
logits = torch.cat([l_pos, l_neg], dim=1)
return logits
def forward(self, q_tokens, q_mask, k_tokens, k_mask, stats_prefix="", iter_stats={}, **kwargs):
bsz = q_tokens.size(0)
q = self.encoder_q(input_ids=q_tokens, attention_mask=q_mask, normalize=self.norm_query)
# compute key features
with torch.no_grad(): # no gradient to keys
self._momentum_update_key_encoder() # update the key encoder
if not self.encoder_k.training and not self.moco_train_mode_encoder_k:
self.encoder_k.eval()
k = self.encoder_k(input_ids=k_tokens, attention_mask=k_mask, normalize=self.norm_doc)
logits = self._compute_logits(q, k) / self.temperature
# labels: positive key indicators
labels = torch.zeros(bsz, dtype=torch.long).cuda()
loss = torch.nn.functional.cross_entropy(logits, labels, label_smoothing=self.label_smoothing)
self._dequeue_and_enqueue(k)
# log stats
if len(stats_prefix) > 0:
stats_prefix = stats_prefix + "/"
iter_stats[f"{stats_prefix}loss"] = (loss.item(), bsz)
predicted_idx = torch.argmax(logits, dim=-1)
accuracy = 100 * (predicted_idx == labels).float().mean()
stdq = torch.std(q, dim=0).mean().item()
stdk = torch.std(k, dim=0).mean().item()
iter_stats[f"{stats_prefix}accuracy"] = (accuracy, bsz)
iter_stats[f"{stats_prefix}stdq"] = (stdq, bsz)
iter_stats[f"{stats_prefix}stdk"] = (stdk, bsz)
return loss, iter_stats
|
contriever-main
|
src/moco.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import glob
import torch
import random
import json
import csv
import numpy as np
import numpy.random
import logging
from collections import defaultdict
import torch.distributed as dist
from src import dist_utils
logger = logging.getLogger(__name__)
def load_data(opt, tokenizer):
datasets = {}
for path in opt.train_data:
data = load_dataset(path, opt.loading_mode)
if data is not None:
datasets[path] = Dataset(data, opt.chunk_length, tokenizer, opt)
dataset = MultiDataset(datasets)
dataset.set_prob(coeff=opt.sampling_coefficient)
return dataset
def load_dataset(data_path, loading_mode):
files = glob.glob(os.path.join(data_path, "*.p*"))
files.sort()
tensors = []
if loading_mode == "split":
files_split = list(np.array_split(files, dist_utils.get_world_size()))[dist_utils.get_rank()]
for filepath in files_split:
try:
tensors.append(torch.load(filepath, map_location="cpu"))
except:
logger.warning(f"Unable to load file {filepath}")
elif loading_mode == "full":
for fin in files:
tensors.append(torch.load(fin, map_location="cpu"))
elif loading_mode == "single":
tensors.append(torch.load(files[0], map_location="cpu"))
if len(tensors) == 0:
return None
tensor = torch.cat(tensors)
return tensor
class MultiDataset(torch.utils.data.Dataset):
def __init__(self, datasets):
self.datasets = datasets
self.prob = [1 / len(self.datasets) for _ in self.datasets]
self.dataset_ids = list(self.datasets.keys())
def __len__(self):
return sum([len(dataset) for dataset in self.datasets.values()])
def __getitem__(self, index):
dataset_idx = numpy.random.choice(range(len(self.prob)), 1, p=self.prob)[0]
did = self.dataset_ids[dataset_idx]
index = random.randint(0, len(self.datasets[did]) - 1)
sample = self.datasets[did][index]
sample["dataset_id"] = did
return sample
def generate_offset(self):
for dataset in self.datasets.values():
dataset.generate_offset()
def set_prob(self, coeff=0.0):
prob = np.array([float(len(dataset)) for _, dataset in self.datasets.items()])
prob /= prob.sum()
prob = np.array([p**coeff for p in prob])
prob /= prob.sum()
self.prob = prob
class Dataset(torch.utils.data.Dataset):
"""Monolingual dataset based on a list of paths"""
def __init__(self, data, chunk_length, tokenizer, opt):
self.data = data
self.chunk_length = chunk_length
self.tokenizer = tokenizer
self.opt = opt
self.generate_offset()
def __len__(self):
return (self.data.size(0) - self.offset) // self.chunk_length
def __getitem__(self, index):
start_idx = self.offset + index * self.chunk_length
end_idx = start_idx + self.chunk_length
tokens = self.data[start_idx:end_idx]
q_tokens = randomcrop(tokens, self.opt.ratio_min, self.opt.ratio_max)
k_tokens = randomcrop(tokens, self.opt.ratio_min, self.opt.ratio_max)
q_tokens = apply_augmentation(q_tokens, self.opt)
q_tokens = add_bos_eos(q_tokens, self.tokenizer.bos_token_id, self.tokenizer.eos_token_id)
k_tokens = apply_augmentation(k_tokens, self.opt)
k_tokens = add_bos_eos(k_tokens, self.tokenizer.bos_token_id, self.tokenizer.eos_token_id)
return {"q_tokens": q_tokens, "k_tokens": k_tokens}
def generate_offset(self):
self.offset = random.randint(0, self.chunk_length - 1)
class Collator(object):
def __init__(self, opt):
self.opt = opt
def __call__(self, batch_examples):
batch = defaultdict(list)
for example in batch_examples:
for k, v in example.items():
batch[k].append(v)
q_tokens, q_mask = build_mask(batch["q_tokens"])
k_tokens, k_mask = build_mask(batch["k_tokens"])
batch["q_tokens"] = q_tokens
batch["q_mask"] = q_mask
batch["k_tokens"] = k_tokens
batch["k_mask"] = k_mask
return batch
def randomcrop(x, ratio_min, ratio_max):
ratio = random.uniform(ratio_min, ratio_max)
length = int(len(x) * ratio)
start = random.randint(0, len(x) - length)
end = start + length
crop = x[start:end].clone()
return crop
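# Rough illustration of randomcrop (assuming ratio_min=0.1 and ratio_max=0.5):
#   x = torch.arange(100)           # a 100-token chunk
#   crop = randomcrop(x, 0.1, 0.5)  # a contiguous, cloned span of 10-50 tokens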
def build_mask(tensors):
shapes = [x.shape for x in tensors]
maxlength = max([len(x) for x in tensors])
returnmasks = []
ids = []
for k, x in enumerate(tensors):
returnmasks.append(torch.tensor([1] * len(x) + [0] * (maxlength - len(x))))
ids.append(torch.cat((x, torch.tensor([0] * (maxlength - len(x))))))
ids = torch.stack(ids, dim=0).long()
returnmasks = torch.stack(returnmasks, dim=0).bool()
return ids, returnmasks
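# Illustrative example of build_mask on two sequences of lengths 3 and 5:
#   ids   -> [[t1, t2, t3, 0, 0], [u1, u2, u3, u4, u5]]  (padded with 0 to the max length)
#   masks -> [[1, 1, 1, 0, 0],    [1, 1, 1, 1, 1]]       (True where a real token is present)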
def add_token(x, token):
x = torch.cat((torch.tensor([token]), x))
return x
def deleteword(x, p=0.1):
mask = np.random.rand(len(x))
x = [e for e, m in zip(x, mask) if m > p]
return x
def replaceword(x, min_random, max_random, p=0.1):
mask = np.random.rand(len(x))
x = [e if m > p else random.randint(min_random, max_random) for e, m in zip(x, mask)]
return x
def maskword(x, mask_id, p=0.1):
mask = np.random.rand(len(x))
x = [e if m > p else mask_id for e, m in zip(x, mask)]
return x
def shuffleword(x, p=0.1):
    """Shuffle a random subset of values in the list; each position is selected with probability p."""
    count = (np.random.rand(len(x)) < p).sum()
    indices_to_shuffle = random.sample(range(len(x)), k=count)
to_shuffle = [x[i] for i in indices_to_shuffle]
random.shuffle(to_shuffle)
for index, value in enumerate(to_shuffle):
old_index = indices_to_shuffle[index]
x[old_index] = value
return x
def apply_augmentation(x, opt):
if opt.augmentation == "mask":
return torch.tensor(maskword(x, mask_id=opt.mask_id, p=opt.prob_augmentation))
elif opt.augmentation == "replace":
return torch.tensor(
replaceword(x, min_random=opt.start_id, max_random=opt.vocab_size - 1, p=opt.prob_augmentation)
)
elif opt.augmentation == "delete":
return torch.tensor(deleteword(x, p=opt.prob_augmentation))
elif opt.augmentation == "shuffle":
return torch.tensor(shuffleword(x, p=opt.prob_augmentation))
else:
if not isinstance(x, torch.Tensor):
x = torch.Tensor(x)
return x
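# How the augmentations above are selected (following the opt fields used here):
# opt.augmentation picks one of "mask" / "replace" / "delete" / "shuffle", and
# opt.prob_augmentation is the per-token probability p passed to the helpers above.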
def add_bos_eos(x, bos_token_id, eos_token_id):
if not isinstance(x, torch.Tensor):
x = torch.Tensor(x)
if bos_token_id is None and eos_token_id is not None:
x = torch.cat([x.clone().detach(), torch.tensor([eos_token_id])])
elif bos_token_id is not None and eos_token_id is None:
x = torch.cat([torch.tensor([bos_token_id]), x.clone().detach()])
elif bos_token_id is None and eos_token_id is None:
pass
else:
x = torch.cat([torch.tensor([bos_token_id]), x.clone().detach(), torch.tensor([eos_token_id])])
return x
# Used for passage retrieval
def load_passages(path):
if not os.path.exists(path):
logger.info(f"{path} does not exist")
return
logger.info(f"Loading passages from: {path}")
passages = []
with open(path) as fin:
if path.endswith(".jsonl"):
for k, line in enumerate(fin):
ex = json.loads(line)
passages.append(ex)
else:
reader = csv.reader(fin, delimiter="\t")
for k, row in enumerate(reader):
if not row[0] == "id":
ex = {"id": row[0], "title": row[2], "text": row[1]}
passages.append(ex)
return passages
|
contriever-main
|
src/data.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import torch
import torch.distributed as dist
class Gather(torch.autograd.Function):
@staticmethod
def forward(ctx, x: torch.tensor):
output = [torch.zeros_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(output, x)
return tuple(output)
@staticmethod
def backward(ctx, *grads):
all_gradients = torch.stack(grads)
dist.all_reduce(all_gradients)
return all_gradients[dist.get_rank()]
def gather(x: torch.tensor):
if not dist.is_initialized():
return x
x_gather = Gather.apply(x)
x_gather = torch.cat(x_gather, dim=0)
return x_gather
@torch.no_grad()
def gather_nograd(x: torch.tensor):
if not dist.is_initialized():
return x
x_gather = [torch.ones_like(x) for _ in range(dist.get_world_size())]
dist.all_gather(x_gather, x, async_op=False)
x_gather = torch.cat(x_gather, dim=0)
return x_gather
@torch.no_grad()
def varsize_gather_nograd(x: torch.Tensor):
"""gather tensors of different sizes along the first dimension"""
if not dist.is_initialized():
return x
# determine max size
size = torch.tensor([x.shape[0]], device=x.device, dtype=torch.int)
allsizes = [torch.zeros_like(size) for _ in range(dist.get_world_size())]
dist.all_gather(allsizes, size)
max_size = max([size.cpu().max() for size in allsizes])
padded = torch.empty(max_size, *x.shape[1:], dtype=x.dtype, device=x.device)
padded[: x.shape[0]] = x
output = [torch.zeros_like(padded) for _ in range(dist.get_world_size())]
dist.all_gather(output, padded)
output = [tensor[: allsizes[k]] for k, tensor in enumerate(output)]
output = torch.cat(output, dim=0)
return output
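# varsize_gather_nograd works around all_gather requiring equal-sized tensors: every rank
# pads its tensor to the largest first dimension, gathers the padded copies, then trims
# each gathered chunk back to its true size before concatenating.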
@torch.no_grad()
def get_varsize(x: torch.Tensor):
"""gather tensors of different sizes along the first dimension"""
if not dist.is_initialized():
return [x.shape[0]]
# determine max size
size = torch.tensor([x.shape[0]], device=x.device, dtype=torch.int)
allsizes = [torch.zeros_like(size) for _ in range(dist.get_world_size())]
dist.all_gather(allsizes, size)
allsizes = torch.cat(allsizes)
return allsizes
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main():
return get_rank() == 0
def get_world_size():
if not dist.is_initialized():
return 1
else:
return dist.get_world_size()
def barrier():
if dist.is_initialized():
dist.barrier()
def average_main(x):
if not dist.is_initialized():
return x
if dist.is_initialized() and dist.get_world_size() > 1:
dist.reduce(x, 0, op=dist.ReduceOp.SUM)
if is_main():
x = x / dist.get_world_size()
return x
def sum_main(x):
if not dist.is_initialized():
return x
if dist.is_initialized() and dist.get_world_size() > 1:
dist.reduce(x, 0, op=dist.ReduceOp.SUM)
return x
def weighted_average(x, count):
if not dist.is_initialized():
if isinstance(x, torch.Tensor):
x = x.item()
return x, count
t_loss = torch.tensor([x * count]).cuda()
t_total = torch.tensor([count]).cuda()
t_loss = sum_main(t_loss)
t_total = sum_main(t_total)
return (t_loss / t_total).item(), t_total.item()
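# weighted_average turns a per-rank scalar into a count-weighted global mean by summing
# value * count and count across ranks; since sum_main reduces onto rank 0, the returned
# value is only guaranteed to be the global average on the main process.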
|
contriever-main
|
src/dist_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
import os
import glob
from collections import defaultdict
from typing import List, Dict
import numpy as np
import torch
import torch.distributed as dist
import beir.util
from beir.datasets.data_loader import GenericDataLoader
from beir.retrieval.evaluation import EvaluateRetrieval
from beir.retrieval.search.dense import DenseRetrievalExactSearch
from beir.reranking.models import CrossEncoder
from beir.reranking import Rerank
import src.dist_utils as dist_utils
from src import normalize_text
class DenseEncoderModel:
def __init__(
self,
query_encoder,
doc_encoder=None,
tokenizer=None,
max_length=512,
add_special_tokens=True,
norm_query=False,
norm_doc=False,
lower_case=False,
normalize_text=False,
**kwargs,
):
self.query_encoder = query_encoder
self.doc_encoder = doc_encoder
self.tokenizer = tokenizer
self.max_length = max_length
self.add_special_tokens = add_special_tokens
self.norm_query = norm_query
self.norm_doc = norm_doc
self.lower_case = lower_case
self.normalize_text = normalize_text
def encode_queries(self, queries: List[str], batch_size: int, **kwargs) -> np.ndarray:
if dist.is_initialized():
idx = np.array_split(range(len(queries)), dist.get_world_size())[dist.get_rank()]
else:
idx = range(len(queries))
queries = [queries[i] for i in idx]
if self.normalize_text:
queries = [normalize_text.normalize(q) for q in queries]
if self.lower_case:
queries = [q.lower() for q in queries]
allemb = []
nbatch = (len(queries) - 1) // batch_size + 1
with torch.no_grad():
for k in range(nbatch):
start_idx = k * batch_size
end_idx = min((k + 1) * batch_size, len(queries))
qencode = self.tokenizer.batch_encode_plus(
queries[start_idx:end_idx],
max_length=self.max_length,
padding=True,
truncation=True,
add_special_tokens=self.add_special_tokens,
return_tensors="pt",
)
qencode = {key: value.cuda() for key, value in qencode.items()}
emb = self.query_encoder(**qencode, normalize=self.norm_query)
allemb.append(emb.cpu())
allemb = torch.cat(allemb, dim=0)
allemb = allemb.cuda()
if dist.is_initialized():
allemb = dist_utils.varsize_gather_nograd(allemb)
allemb = allemb.cpu().numpy()
return allemb
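    # Queries are sharded across ranks (np.array_split above), encoded locally in batches,
    # then reassembled with dist_utils.varsize_gather_nograd so every rank ends up with the
    # full embedding matrix; encode_corpus below follows the same pattern for documents.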
def encode_corpus(self, corpus: List[Dict[str, str]], batch_size: int, **kwargs):
if dist.is_initialized():
idx = np.array_split(range(len(corpus)), dist.get_world_size())[dist.get_rank()]
else:
idx = range(len(corpus))
corpus = [corpus[i] for i in idx]
corpus = [c["title"] + " " + c["text"] if len(c["title"]) > 0 else c["text"] for c in corpus]
if self.normalize_text:
corpus = [normalize_text.normalize(c) for c in corpus]
if self.lower_case:
corpus = [c.lower() for c in corpus]
allemb = []
nbatch = (len(corpus) - 1) // batch_size + 1
with torch.no_grad():
for k in range(nbatch):
start_idx = k * batch_size
end_idx = min((k + 1) * batch_size, len(corpus))
cencode = self.tokenizer.batch_encode_plus(
corpus[start_idx:end_idx],
max_length=self.max_length,
padding=True,
truncation=True,
add_special_tokens=self.add_special_tokens,
return_tensors="pt",
)
cencode = {key: value.cuda() for key, value in cencode.items()}
emb = self.doc_encoder(**cencode, normalize=self.norm_doc)
allemb.append(emb.cpu())
allemb = torch.cat(allemb, dim=0)
allemb = allemb.cuda()
if dist.is_initialized():
allemb = dist_utils.varsize_gather_nograd(allemb)
allemb = allemb.cpu().numpy()
return allemb
def evaluate_model(
query_encoder,
doc_encoder,
tokenizer,
dataset,
batch_size=128,
add_special_tokens=True,
norm_query=False,
norm_doc=False,
is_main=True,
split="test",
score_function="dot",
beir_dir="BEIR/datasets",
save_results_path=None,
lower_case=False,
normalize_text=False,
):
metrics = defaultdict(list) # store final results
if hasattr(query_encoder, "module"):
query_encoder = query_encoder.module
query_encoder.eval()
if doc_encoder is not None:
if hasattr(doc_encoder, "module"):
doc_encoder = doc_encoder.module
doc_encoder.eval()
else:
doc_encoder = query_encoder
dmodel = DenseRetrievalExactSearch(
DenseEncoderModel(
query_encoder=query_encoder,
doc_encoder=doc_encoder,
tokenizer=tokenizer,
add_special_tokens=add_special_tokens,
norm_query=norm_query,
norm_doc=norm_doc,
lower_case=lower_case,
normalize_text=normalize_text,
),
batch_size=batch_size,
)
retriever = EvaluateRetrieval(dmodel, score_function=score_function)
data_path = os.path.join(beir_dir, dataset)
if not os.path.isdir(data_path) and is_main:
url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
data_path = beir.util.download_and_unzip(url, beir_dir)
dist_utils.barrier()
if not dataset == "cqadupstack":
corpus, queries, qrels = GenericDataLoader(data_folder=data_path).load(split=split)
results = retriever.retrieve(corpus, queries)
if is_main:
ndcg, _map, recall, precision = retriever.evaluate(qrels, results, retriever.k_values)
for metric in (ndcg, _map, recall, precision, "mrr", "recall_cap", "hole"):
if isinstance(metric, str):
metric = retriever.evaluate_custom(qrels, results, retriever.k_values, metric=metric)
for key, value in metric.items():
metrics[key].append(value)
if save_results_path is not None:
torch.save(results, f"{save_results_path}")
elif dataset == "cqadupstack": # compute macroaverage over datasets
paths = glob.glob(data_path)
for path in paths:
corpus, queries, qrels = GenericDataLoader(data_folder=data_folder).load(split=split)
results = retriever.retrieve(corpus, queries)
if is_main:
ndcg, _map, recall, precision = retriever.evaluate(qrels, results, retriever.k_values)
for metric in (ndcg, _map, recall, precision, "mrr", "recall_cap", "hole"):
if isinstance(metric, str):
metric = retriever.evaluate_custom(qrels, results, retriever.k_values, metric=metric)
for key, value in metric.items():
metrics[key].append(value)
for key, value in metrics.items():
assert (
len(value) == 12
), f"cqadupstack includes 12 datasets, only {len(value)} values were compute for the {key} metric"
metrics = {key: 100 * np.mean(value) for key, value in metrics.items()}
return metrics
|
contriever-main
|
src/beir_utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import cv2
import numpy as np
from utils.meshutils import read_mesh, process_head_model
from utils.strandsutils import smooth_strands, downsample_strands, duplicate_strands, merge_strands
from datautils.datautils import load_bin_strands, save_bin_strands
from modules.neural_strands import NeuralStrands
def neural_interp(conf):
output_folder = os.path.join(conf['output']['dir'], conf['output']['name']) # for synthesized data
if not os.path.exists(output_folder):
os.makedirs(output_folder)
# prepressing
strands = load_bin_strands(conf['strands']['guide_strds'])
strands = smooth_strands(strands, lap_constraint=4.0, pos_constraint=2.0)
# strands = downsample_strands(strands)
# fit head model
head_mesh = read_mesh(conf['head']['head_path'])
head_texture = cv2.imread(conf['head']['head_scalp_tex'])
head_mesh, scalp_mesh, scalp_faces_idx = process_head_model(head_mesh, head_texture,
conf['head']['roots_path'],
np.array(conf['head']['target_face_base'], dtype=np.float32),
is_deformation=True)
head_write = head_mesh.export('%s/%sface_reg.ply'%(output_folder, conf['output']['name']))
neural_strands = NeuralStrands(is_resampled=False)
neural_strands.prep_strands_data(strands, head_mesh, scalp_mesh, scalp_faces_idx)
# interpolation
neural_strands.get_neural_representations(iter_opt=0)
# neural_strands.get_neural_representations(iter_opt=300, lr=1e-2) # new trained model fits very well, no need to fit again
denoised_strds_idxs = neural_strands.denoise_neural_texture(num_del_cls=0, do_denoise=False)
texel_roots_mask = cv2.imread(conf['head']['head_scalp_tex'], 2) / 255.
neural_strands.interpolation_knn(texel_roots_mask, interp_kernel_size=5, interp_neig_pts=3)
interp_strds = neural_strands.world_strands_from_texels(neural_strands.interp_neural_texture, neural_strands.interp_strds_idx_map)
# save results
save_bin_strands('%s/%s_interp.bin'%(output_folder, conf['output']['name']), interp_strds.detach().cpu().numpy().astype(np.float32))
merged_strands = merge_strands([neural_strands.original_strands, interp_strds.detach().cpu().numpy().astype(np.float32)])
merged_strands = downsample_strands(merged_strands) # TODO use neural spline with GPU to speed up.
merged_strands = duplicate_strands(merged_strands, ratio=4)
save_bin_strands('%s/%s_merged.bin'%(output_folder, conf['output']['name']), merged_strands)
print('Saving done!')
|
CT2Hair-main
|
CT2Hair/interp.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from utils.pcutils import load_pc
from utils.strandsutils import strandspc2strands, smooth_strands
from datautils.datautils import load_bin_strands, save_bin_strandspc, save_bin_strands
from modules.strands_opt import StrandsOptimizerNeuralCubic
def strands_opt(conf):
input_strands = load_bin_strands(conf['strands']['interp_strds'])
print("Load strands finished!")
# target_pc = load_pc(conf['pc']['pc_path'], load_color=False, load_normal=False)
target_pc, target_pc_colors = load_pc(conf['pc']['pc_path'], load_color=True, load_normal=False)
print("Load point cloud finished!")
strands_opt = StrandsOptimizerNeuralCubic(input_strands, target_pc, target_pc_colors[:, 0], num_strd_pts=64)
ori_splined_pts, opted_splined_pts, strands_seps, opted_strands_pc, input_num_strds_pts = strands_opt.optimization()
output_folder = os.path.join(conf['output']['dir'], conf['output']['name'])
if not os.path.exists(output_folder):
os.makedirs(output_folder)
opted_strands = smooth_strands(strandspc2strands(opted_splined_pts, sep=strands_seps))
save_bin_strands('%s/%s_opted.bin'%(output_folder, conf['output']['name']), opted_strands)
|
CT2Hair-main
|
CT2Hair/optim.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import numpy as np
import torch.utils.data as th_data
from utils.strandsutils import spline_strand, pad_strand
class TbnStrandsBinDataset(th_data.Dataset):
def __init__(self, tbn_strands, is_resampled=True, num_strds_points=100):
self.num_strands = len(tbn_strands)
self.tbn_strands = tbn_strands
self.batch_size = 300
self.num_workers = 12
self.num_strds_points = num_strds_points
self.is_resampled = is_resampled
def __len__(self):
return self.num_strands
def __getitem__(self, idx):
strand = self.tbn_strands[idx].astype(np.float32)
out_dict = {}
if not self.is_resampled:
if strand.shape[0] > self.num_strds_points:
strand = spline_strand(strand, num_strand_points=self.num_strds_points)
strand, time = pad_strand(strand, num_strand_points=self.num_strds_points)
out_dict['times'] = torch.tensor(time).float()
        assert strand.shape[0] == self.num_strds_points, 'Need to resample strands to a fixed number of points.'
# Scale unit from mm to m
strand = strand / 1000.
out_dict['points'] = torch.tensor(strand).float()
return out_dict
def get_dataloader(self):
return th_data.DataLoader(dataset=self, batch_size=self.batch_size, shuffle=False, num_workers=self.num_workers, drop_last=False)
|
CT2Hair-main
|
CT2Hair/datautils/dataloaders.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import time
import pathlib
import struct
import numpy as np
from utils.pcutils import pc_voxelization, save_pc
def load_raw(path, raw_shape=[2048, 2048, 2048], offset=0, drop_masks=[], crop=[[0, -1], [0, -1], [0, -1]], is_downsample=True, downsample_ratio=2):
start_time = time.time()
if pathlib.Path(path).suffix == '.npy':
raw_data = np.load(path)
else:
raw_data = np.fromfile(path, dtype=np.ushort)
raw_data = raw_data[offset:]
raw_data = raw_data.reshape(raw_shape)
for i_del in range(len(drop_masks)):
drop_mask = drop_masks[i_del]
raw_data[drop_mask[0][0]:drop_mask[0][1], drop_mask[1][0]:drop_mask[1][1], drop_mask[2][0]:drop_mask[2][1]] = 0
raw_data = raw_data[crop[0][0]:crop[0][1], crop[1][0]:crop[1][1], crop[2][0]:crop[2][1]]
current_shape = np.array(raw_data.shape)
if is_downsample:
downsample_shape = current_shape // downsample_ratio
x_even = (np.arange(downsample_shape[0]) * downsample_ratio).astype(np.int16)
y_even = (np.arange(downsample_shape[1]) * downsample_ratio).astype(np.int16)
z_even = (np.arange(downsample_shape[2]) * downsample_ratio).astype(np.int16)
raw_data = raw_data[x_even]
raw_data = raw_data[:, y_even]
raw_data = raw_data[:, :, z_even]
path_ds = path.replace(pathlib.Path(path).suffix, '_ds.npy')
np.save(path_ds, raw_data)
print('Finish load the volume, used %fs. Original and final shapes are: '%(time.time() - start_time), current_shape, np.array(raw_data.shape))
return raw_data
def crop_hair(raw_data, hair_density_range):
start_time = time.time()
hair_mask = (raw_data > hair_density_range[0]) & (raw_data < hair_density_range[1])
cropped_hair = raw_data * hair_mask
print('Finish crop hair, used %fs.'%(time.time() - start_time))
return cropped_hair, hair_mask
def crop_scalp(raw_data, scalp_range):
start_time = time.time()
scalp_mask = (raw_data > scalp_range[0]) & (raw_data < scalp_range[1])
cropped_hair = raw_data * scalp_mask
print('Finish crop scalp, used %fs.'%(time.time() - start_time))
return cropped_hair, scalp_mask
def get_hair_mask(raw_data, hair_density_range):
hair_mask = (raw_data > hair_density_range[0]) & (raw_data < hair_density_range[1])
return hair_mask
def expand_vidx(vidx_x, vidx_y, vidx_z, scale_rate=3):
size = int(2 ** scale_rate)
o_x, o_y, o_z = np.meshgrid(np.linspace(0, size - 1, size),
np.linspace(0, size - 1, size),
np.linspace(0, size - 1, size))
vidx_x = vidx_x[:, None].repeat(size ** 3, axis=-1).reshape(-1, size, size, size)
vidx_y = vidx_y[:, None].repeat(size ** 3, axis=-1).reshape(-1, size, size, size)
vidx_z = vidx_z[:, None].repeat(size ** 3, axis=-1).reshape(-1, size, size, size)
vidx_x = vidx_x * size + o_x[None, ...]
vidx_y = vidx_y * size + o_y[None, ...]
vidx_z = vidx_z * size + o_z[None, ...]
vidx_x = vidx_x.reshape(-1).astype(np.uint16)
vidx_y = vidx_y.reshape(-1).astype(np.uint16)
vidx_z = vidx_z.reshape(-1).astype(np.uint16)
return vidx_x, vidx_y, vidx_z
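# expand_vidx up-samples coarse voxel indices back to the full-resolution grid: each coarse
# index (x, y, z) is expanded into the (2**scale_rate)**3 block of fine voxels it covers,
# which del_wig_net below uses to zero out the wig-net region of the volume.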
def del_wig_net(hair_data, scalp_mesh, voxel_size, scale_rate=3):
start_time = time.time()
print('Start delete wig net...')
hair_voxel_shape = hair_data.shape
scalp_voxel_shape = np.array(hair_voxel_shape) / (2 ** scale_rate)
scalp_points = scalp_mesh.sample(100000) * (1 / voxel_size[None, :]) / (2 ** scale_rate)
vidx_x, vidx_y, vidx_z = pc_voxelization(scalp_points, scalp_voxel_shape.astype(np.uint16))
vidx_x, vidx_y, vidx_z = expand_vidx(vidx_x, vidx_y, vidx_z, scale_rate)
hair_data[vidx_x, vidx_y, vidx_z] = 0
print('Delete wig net finished, used %fs.'%(time.time() - start_time))
return hair_data
def save_raw(data, path):
data.astype('int16').tofile(path)
def get_slide(data, id=0, axis='x', range=1):
    # note: the 'x' and 'z' labels are swapped relative to the array axes (axis 0 is treated as z, axis 2 as x)
if axis == 'z':
slide = data[(id - range + 1):(id + range), :, :]
slide = np.sum(slide, axis=0, keepdims=False)
return slide
elif axis == 'y':
slide = data[:, (id - range + 1):(id + range), :]
slide = np.sum(slide, axis=1, keepdims=False)
return slide
elif axis == 'x':
slide = data[:, :, (id - range + 1):(id + range)]
slide = np.sum(slide, axis=2, keepdims=False)
return slide
def load_bin_strands(bin_path):
file = open(bin_path, 'rb')
num_strands = struct.unpack('i', file.read(4))[0]
strands = []
max_strds_pts = 0
for i in range(num_strands):
num_verts = struct.unpack('i', file.read(4))[0]
strand = np.zeros((num_verts, 6), dtype=np.float32)
for j in range(num_verts):
x = struct.unpack('f', file.read(4))[0]
y = struct.unpack('f', file.read(4))[0]
z = struct.unpack('f', file.read(4))[0]
nx = struct.unpack('f', file.read(4))[0]
ny = struct.unpack('f', file.read(4))[0]
nz = struct.unpack('f', file.read(4))[0]
label = struct.unpack('f', file.read(4))[0]
strand[j][0] = x
strand[j][1] = y
strand[j][2] = z
strand[j][3] = nx
strand[j][4] = ny
strand[j][5] = nz
if np.isnan(np.sum(strand)): # FIXME why did I save some nan data
continue
if num_verts < 5:
continue
if max_strds_pts < num_verts:
max_strds_pts = num_verts
strands.append(strand)
strands = np.array(strands, dtype=object)
return strands
def load_usc_data_strands(data_path):
file = open(data_path, 'rb')
num_strands = struct.unpack('i', file.read(4))[0]
strands = []
for i in range(num_strands):
num_verts = struct.unpack('i', file.read(4))[0]
strand = np.zeros((num_verts, 3), dtype=np.float32)
for j in range(num_verts):
x = struct.unpack('f', file.read(4))[0]
y = struct.unpack('f', file.read(4))[0]
z = struct.unpack('f', file.read(4))[0]
strand[j][0] = x
strand[j][1] = y
strand[j][2] = z
if num_verts <= 1:
continue
if np.isnan(np.sum(strand)):
continue
strands.append(strand)
strands = np.array(strands, dtype=object)
return strands
def save_bin_strands(filepath, strands, tangents=None):
num_strands = strands.shape[0]
file = open(filepath, 'wb')
file.write(struct.pack('i', num_strands))
for i_strand in range(num_strands):
num_points = int(strands[i_strand].shape[0])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', strands[i_strand][j_point, 0]))
file.write(struct.pack('f', strands[i_strand][j_point, 1]))
file.write(struct.pack('f', strands[i_strand][j_point, 2]))
if tangents is None:
file.write(struct.pack('f', 0.0))
file.write(struct.pack('f', 0.0))
file.write(struct.pack('f', 0.0))
else:
file.write(struct.pack('f', tangents[i_strand][j_point, 0]))
file.write(struct.pack('f', tangents[i_strand][j_point, 1]))
file.write(struct.pack('f', tangents[i_strand][j_point, 2]))
file.write(struct.pack('f', 0.0))
# save strands with colors (PCA features mapping)
def save_color_strands(filepath, strands, colors=None):
num_strands = strands.shape[0]
file = open(filepath, 'wb')
file.write(struct.pack('i', num_strands))
for i_strand in range(num_strands):
num_points = int(strands[i_strand].shape[0])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', strands[i_strand][j_point, 0]))
file.write(struct.pack('f', strands[i_strand][j_point, 1]))
file.write(struct.pack('f', strands[i_strand][j_point, 2]))
if colors is not None:
file.write(struct.pack('f', colors[i_strand, 0]))
file.write(struct.pack('f', colors[i_strand, 1]))
file.write(struct.pack('f', colors[i_strand, 2]))
else:
                assert strands[i_strand].shape[1] == 6, 'DataUtils::save_color_strands strands carry no color channels.'
file.write(struct.pack('f', strands[i_strand][j_point, 3]))
file.write(struct.pack('f', strands[i_strand][j_point, 4]))
file.write(struct.pack('f', strands[i_strand][j_point, 5]))
file.write(struct.pack('f', 0.0))
def save_bin_strandspc(filepath, pc, sep, tangents=None):
num_strands = len(sep)
file = open(filepath, 'wb')
file.write(struct.pack('i', num_strands))
point_count = 0
for i_strand in range(num_strands):
num_points = int(sep[i_strand])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', pc[point_count, 0]))
file.write(struct.pack('f', pc[point_count, 1]))
file.write(struct.pack('f', pc[point_count, 2]))
if tangents is None:
file.write(struct.pack('f', 0.0))
file.write(struct.pack('f', 0.0))
file.write(struct.pack('f', 0.0))
else:
file.write(struct.pack('f', tangents[point_count, 0]))
file.write(struct.pack('f', tangents[point_count, 1]))
file.write(struct.pack('f', tangents[point_count, 2]))
file.write(struct.pack('f', 0.0))
point_count += 1
def merge_save_bin_strands(filepath, strands_list, tangents_list=None):
num_all_strands = 0
num_groups = len(strands_list)
for i_group in range(num_groups):
num_all_strands += strands_list[i_group].shape[0]
file = open(filepath, 'wb')
file.write(struct.pack('i', num_all_strands))
for i_group in range(num_groups):
strands = strands_list[i_group]
num_strands = strands.shape[0]
if tangents_list is None:
tangents = strands
else:
tangents = tangents_list[i_group]
for i_strand in range(num_strands):
num_points = int(strands[i_strand].shape[0])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', strands[i_strand][j_point, 0]))
file.write(struct.pack('f', strands[i_strand][j_point, 1]))
file.write(struct.pack('f', strands[i_strand][j_point, 2]))
file.write(struct.pack('f', tangents[i_strand][j_point, 0]))
file.write(struct.pack('f', tangents[i_strand][j_point, 1]))
file.write(struct.pack('f', tangents[i_strand][j_point, 2]))
file.write(struct.pack('f', 0.0))
def save_usc_strands(filepath, strands):
num_strands = strands.shape[0]
file = open(filepath, 'wb')
file.write(struct.pack('i', num_strands))
for i_strand in range(num_strands):
num_points = int(strands[i_strand].shape[0])
file.write(struct.pack('i', num_points))
for j_point in range(num_points):
file.write(struct.pack('f', strands[i_strand][j_point, 0]))
file.write(struct.pack('f', strands[i_strand][j_point, 1]))
file.write(struct.pack('f', strands[i_strand][j_point, 2]))
|
CT2Hair-main
|
CT2Hair/datautils/datautils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import splines
import torch
import numpy as np
from tqdm import tqdm
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import spsolve
from utils.pcutils import get_bbox
def scale_roots_positions(roots_points, scale_ratio):
_, bbox_center = get_bbox(roots_points)
temp_points = (roots_points - bbox_center) * scale_ratio + bbox_center
roots_points = temp_points
return roots_points
def get_roots_normals(roots_points):
_, bbox_center = get_bbox(roots_points)
normals = roots_points - bbox_center
normals = normals / np.linalg.norm(normals, axis=1)[:, None]
return normals
def get_strand_length(strand):
delta = strand[:-1] - strand[1:]
delta_length = np.sqrt(np.sum(delta**2, axis=1, keepdims=False))
length = np.sum(delta_length, axis=0, keepdims=False)
return length, delta_length
def get_strands_length(strands):
deltas = strands[:, :-1] - strands[:, 1:]
if torch.torch.is_tensor(strands):
delta_lengths = torch.sqrt(torch.sum(deltas**2, dim=2, keepdim=False))
lengths = torch.sum(delta_lengths, dim=1, keepdim=False)
else:
delta_lengths = np.sqrt(np.sum(deltas**2, axis=2, keepdims=False))
lengths = np.sum(delta_lengths, axis=1, keepdims=False)
return lengths, delta_lengths
def get_strands_roots(strands, scale_ratio=1.0):
roots = []
num_strands = strands.shape[0]
for i_strand in range(num_strands):
roots.append(strands[i_strand][0][:3])
points = np.array(roots)
if not scale_ratio == 1.0:
points = scale_roots_positions(points, scale_ratio)
normals = get_roots_normals(points)
return points, normals
def line_interpolate(start_point, end_point, interp_count):
interped_points = []
if interp_count == 0:
return interped_points
delta = end_point - start_point
delta_length = math.sqrt(np.sum(delta**2, axis=0, keepdims=True))
step_dir = delta / delta_length
step_size = delta_length / (interp_count + 1)
for i in range(interp_count):
interped_points.append(start_point + step_dir * (i + 1) * step_size)
return interped_points
def resample_strand(strand, tangents=None, num_strand_points=200):
num_ori_points = strand.shape[0]
    assert num_ori_points < num_strand_points, "number of resampled points must be larger than the original one"
strand_length, delta_length = get_strand_length(strand)
step_length = strand_length / (num_strand_points - 1)
resampled_strand = []
if tangents is None:
interp_idxs = np.where(delta_length > step_length)[0]
interp_segs = delta_length[interp_idxs]
interp_segs_rank_idxs = np.argsort(-1 * interp_segs)
new_step_length = np.sum(interp_segs) / (num_strand_points - (num_ori_points - interp_idxs.shape[0]))
        interp_counts = np.clip((interp_segs / new_step_length).astype(np.int16) - 1, 0, num_strand_points - 1) # supposed to always be positive or zero
        interp_counts_sum = np.sum(interp_counts, axis=0, keepdims=False) # supposed to always be less than num_strand_points
assert interp_counts_sum + num_ori_points <= num_strand_points, "utils:strandsutils.py, FIXME, strand resample error, Interp counts: %d, Original Counts: %d"%(interp_counts_sum, num_ori_points)
num_ext_interp = num_strand_points - num_ori_points - interp_counts_sum
ext_interp_segs = interp_segs_rank_idxs[:num_ext_interp]
interp_counts[ext_interp_segs] += 1 # Interpolate one more point in this segs
interp_delta_count = 0
for i_delta in range(num_ori_points - 1):
resampled_strand.append(strand[i_delta])
if delta_length[i_delta] > step_length:
interped_points = line_interpolate(strand[i_delta], strand[i_delta + 1], interp_counts[interp_delta_count])
resampled_strand.extend(interped_points)
interp_delta_count += 1
resampled_strand.append(strand[num_ori_points - 1])
resampled_strand = np.array(resampled_strand)
assert resampled_strand.shape[0] == 200, "interpolation failed, number of resampled: %d."%(resampled_strand.shape[0])
return resampled_strand
def augment_strand(strand, aug_config):
if aug_config["rotation_z_max_angle"] > 0:
theta_z = aug_config["rotation_z_max_angle"]
rtheta = (np.random.rand() * 2. - 1.) * theta_z * np.pi / 180.
rot_mat = np.asarray([[np.cos(rtheta), -np.sin(rtheta), 0.],
[np.sin(rtheta), np.cos(rtheta), 0.],
[ 0., 0., 1.]], dtype=np.float32)
strand = (rot_mat[:, :] @ strand.T).T
if np.sum(aug_config["random_stretch_xyz_magnitude"]) > 0:
sc = np.random.rand(3) * 2 - 1
sc = 1 + np.asarray(aug_config["random_stretch_xyz_magnitude"]) * sc
strand = strand * sc
return strand
def spline_strand(strand, num_strand_points=100):
num_ori_points = strand.shape[0]
interp_spline = splines.CatmullRom(strand)
interp_idx = np.arange(num_strand_points) / (num_strand_points / (num_ori_points - 1))
interp_strand = interp_spline.evaluate(interp_idx)
assert interp_strand.shape[0] == num_strand_points, "Spline error."
return interp_strand
def pad_strand(strand, num_strand_points=100):
num_ori_points = strand.shape[0]
    if num_ori_points > num_strand_points:
        # truncate, and return a matching time array so callers can always unpack (strand, time)
        strand = strand[:num_strand_points]
        _, delta_len = get_strand_length(strand)
        return strand, np.concatenate(([0.], np.add.accumulate(delta_len / delta_len.sum())))
num_pad = num_strand_points - num_ori_points
last_delta = strand[-1] - strand[-2]
offsets = np.arange(num_pad) + 1
offsets = offsets[:, None]
last_delta = last_delta[None, :]
offsets = offsets * last_delta
# padded_strand = np.zeros_like(offsets) + strand[-1]
padded_strand = offsets + strand[-1]
padded_strand = np.concatenate((strand, padded_strand), axis=0)
ori_time = np.linspace(0, 1, num_ori_points)
strd_len, delta_len = get_strand_length(strand) # modify time by length
ori_time[1:] = delta_len / strd_len
ori_time = np.add.accumulate(ori_time)
padded_time = 1. + (np.arange(num_pad) + 1) * (1. / num_ori_points)
padded_time = np.concatenate((ori_time, padded_time), axis=0)
return padded_strand, padded_time
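# pad_strand extends a short strand to a fixed point count by extrapolating along its last
# segment, and returns per-point "times": cumulative arc-length fractions in [0, 1] for the
# original points, with padded points continuing past 1, presumably so the downstream spline
# fit can tell real geometry apart from padding.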
def tridiagonal_solve(b, A_upper, A_diagonal, A_lower):
A_upper, _ = torch.broadcast_tensors(A_upper[:, None, :], b[..., :-1])
A_lower, _ = torch.broadcast_tensors(A_lower[:, None, :], b[..., :-1])
A_diagonal, b = torch.broadcast_tensors(A_diagonal[:, None, :], b)
channels = b.size(-1)
new_b = np.empty(channels, dtype=object)
new_A_diagonal = np.empty(channels, dtype=object)
outs = np.empty(channels, dtype=object)
new_b[0] = b[..., 0]
new_A_diagonal[0] = A_diagonal[..., 0]
for i in range(1, channels):
w = A_lower[..., i - 1] / new_A_diagonal[i - 1]
new_A_diagonal[i] = A_diagonal[..., i] - w * A_upper[..., i - 1]
new_b[i] = b[..., i] - w * new_b[i - 1]
outs[channels - 1] = new_b[channels - 1] / new_A_diagonal[channels - 1]
for i in range(channels - 2, -1, -1):
outs[i] = (new_b[i] - A_upper[..., i] * outs[i + 1]) / new_A_diagonal[i]
return torch.stack(outs.tolist(), dim=-1)
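# tridiagonal_solve is the classic Thomas algorithm: a forward sweep eliminates the lower
# diagonal while updating the diagonal and right-hand side, then a backward substitution
# recovers the solution, giving linear-time cost in the number of knots per batch element.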
def cubic_spline_coeffs(t, x):
# x should be a tensor of shape (..., length)
# Will return the b, two_c, three_d coefficients of the derivative of the cubic spline interpolating the path.
length = x.size(-1)
if length < 2:
# In practice this should always already be caught in __init__.
raise ValueError("Must have a time dimension of size at least 2.")
elif length == 2:
a = x[..., :1]
b = (x[..., 1:] - x[..., :1]) / (t[..., 1:] - t[..., :1])
two_c = torch.zeros(*x.shape[:-1], 1, dtype=x.dtype, device=x.device)
three_d = torch.zeros(*x.shape[:-1], 1, dtype=x.dtype, device=x.device)
else:
# Set up some intermediate values
time_diffs = t[..., 1:] - t[..., :-1]
time_diffs_reciprocal = time_diffs.reciprocal()
time_diffs_reciprocal_squared = time_diffs_reciprocal ** 2
three_path_diffs = 3 * (x[..., 1:] - x[..., :-1])
six_path_diffs = 2 * three_path_diffs
path_diffs_scaled = three_path_diffs * time_diffs_reciprocal_squared[:, None, :]
# Solve a tridiagonal linear system to find the derivatives at the knots
system_diagonal = torch.empty((x.shape[0], length), dtype=x.dtype, device=x.device)
system_diagonal[..., :-1] = time_diffs_reciprocal
system_diagonal[..., -1] = 0
system_diagonal[..., 1:] += time_diffs_reciprocal
system_diagonal *= 2
system_rhs = torch.empty_like(x)
system_rhs[..., :-1] = path_diffs_scaled
system_rhs[..., -1] = 0
system_rhs[..., 1:] += path_diffs_scaled
knot_derivatives = tridiagonal_solve(system_rhs, time_diffs_reciprocal,
system_diagonal, time_diffs_reciprocal)
# Do some algebra to find the coefficients of the spline
time_diffs_reciprocal = time_diffs_reciprocal[:, None, :]
time_diffs_reciprocal_squared = time_diffs_reciprocal_squared[:, None, :]
a = x[..., :-1]
b = knot_derivatives[..., :-1]
two_c = (six_path_diffs * time_diffs_reciprocal
- 4 * knot_derivatives[..., :-1]
- 2 * knot_derivatives[..., 1:]) * time_diffs_reciprocal
three_d = (-six_path_diffs * time_diffs_reciprocal
+ 3 * (knot_derivatives[..., :-1]
+ knot_derivatives[..., 1:])) * time_diffs_reciprocal_squared
return a, b, two_c, three_d
def natural_cubic_spline_coeffs(t, x):
a, b, two_c, three_d = cubic_spline_coeffs(t, x.transpose(-1, -2))
# These all have shape (..., length - 1, channels)
a = a.transpose(-1, -2)
b = b.transpose(-1, -2)
c = two_c.transpose(-1, -2) / 2
d = three_d.transpose(-1, -2) / 3
return t, a, b, c, d
class NaturalCubicSpline:
def __init__(self, coeffs, **kwargs):
super(NaturalCubicSpline, self).__init__(**kwargs)
t, a, b, c, d = coeffs
self._t = t
self._a = a
self._b = b
self._c = c
self._d = d
def evaluate(self, t):
maxlen = self._b.size(-2) - 1
inners = torch.zeros((t.shape[0], t.shape[1], 3)).to(t.device)
for i_b in range(self._t.shape[0]):
index = torch.bucketize(t.detach()[i_b], self._t[i_b]) - 1
index = index.clamp(0, maxlen) # clamp because t may go outside of [t[0], t[-1]]; this is fine
# will never access the last element of self._t; this is correct behaviour
fractional_part = t[i_b] - self._t[i_b][index]
fractional_part = fractional_part.unsqueeze(-1)
inner = self._c[i_b, index, :] + self._d[i_b, index, :] * fractional_part
inner = self._b[i_b, index, :] + inner * fractional_part
inner = self._a[i_b, index, :] + inner * fractional_part
inners[i_b] = inner
return inners
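    # evaluate() applies the piecewise cubic in Horner form, a + s*(b + s*(c + s*d)) with
    # s = t - knot_time, after locating the containing segment with torch.bucketize.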
def derivative(self, t, order=1):
fractional_part, index = self._interpret_t(t)
fractional_part = fractional_part.unsqueeze(-1)
if order == 1:
inner = 2 * self._c[..., index, :] + 3 * self._d[..., index, :] * fractional_part
deriv = self._b[..., index, :] + inner * fractional_part
elif order == 2:
deriv = 2 * self._c[..., index, :] + 6 * self._d[..., index, :] * fractional_part
else:
raise ValueError('Derivative is not implemented for orders greater than 2.')
return deriv
# post-processing
def merge_strands(strands_list):
strands_all = []
for strands in strands_list:
for i_strand in range(strands.shape[0]):
strands_all.append(strands[i_strand])
strands_all = np.array(strands_all, dtype=object)
return strands_all
def strandspc2strands(strandspc, sep):
num_strands = len(sep)
strands = []
num_pts = 0
for i_strand in range(num_strands):
strands.append(strandspc[num_pts : num_pts + int(sep[i_strand])])
num_pts += sep[i_strand]
strands = np.array(strands, dtype=object)
return strands
def smooth_strand(strand, lap_constraint=2.0, pos_constraint=1.0, fix_tips=False):
num_pts = strand.shape[0]
num_value = num_pts * 3 - 2 + num_pts
smoothed_strand = np.copy(strand)
# construct laplacian sparse matrix
i, j, v = np.zeros(num_value, dtype=np.int16), np.zeros(num_value, dtype=np.int16), np.zeros(num_value)
i[0], i[1], i[2 + (num_pts - 2) * 3], i[2 + (num_pts - 2) * 3 + 1] = 0, 0, num_pts - 1, num_pts - 1
i[2 : num_pts * 3 - 4] = np.repeat(np.arange(1, num_pts - 1), 3)
i[num_pts * 3 - 2:] = np.arange(num_pts) + num_pts
j[0], j[1], j[2 + (num_pts - 2) * 3], j[2 + (num_pts - 2) * 3 + 1] = 0, 1, num_pts - 2, num_pts - 1
j[2 : num_pts * 3 - 4] = np.repeat(np.arange(1, num_pts - 1), 3) \
+ np.repeat(np.array([-1, 0, 1], dtype=np.int16), num_pts - 2).reshape(num_pts - 2, 3, order='F').ravel()
j[num_pts * 3 - 2:] = np.arange(num_pts)
v[0], v[1], v[2 + (num_pts - 2) * 3], v[2 + (num_pts - 2) * 3 + 1] = 1, -1, -1, 1
v[2 : num_pts * 3 - 4] = np.repeat(np.array([-1, 2, -1], dtype=np.int16), num_pts - 2).reshape(num_pts - 2, 3, order='F').ravel()
v = v * lap_constraint
v[num_pts * 3 - 2:] = pos_constraint
A = coo_matrix((v, (i, j)), shape=(num_pts * 2, num_pts))
At = A.transpose()
AtA = At.dot(A)
# solving
for j_axis in range(3):
b = np.zeros(num_pts * 2)
b[num_pts:] = smoothed_strand[:, j_axis] * pos_constraint
Atb = At.dot(b)
x = spsolve(AtA, Atb)
smoothed_strand[:, j_axis] = x[:num_pts]
if fix_tips:
strand[1:-1] = smoothed_strand[1:-1]
else:
strand = smoothed_strand
return strand
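# smooth_strand solves a small least-squares problem per axis: the upper block of A encodes
# discrete Laplacian (smoothness) constraints weighted by lap_constraint, the lower block
# encodes positional constraints weighted by pos_constraint, and the normal equations
# A^T A x = A^T b are solved with a sparse direct solver (spsolve).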
def smooth_strands(strands, lap_constraint=2.0, pos_constraint=1.0, fix_tips=False):
loop = tqdm(range(strands.shape[0]))
loop.set_description("Smoothing strands")
for i_strand in loop:
        strands[i_strand] = smooth_strand(strands[i_strand], lap_constraint, pos_constraint, fix_tips)
return strands
def downsample_strands(strands, min_num_pts=5, tgt_num_pts=64):
loop = tqdm(range(strands.shape[0]))
loop.set_description("Downsampling strands points")
for i_strand in loop:
num_pts = strands[i_strand].shape[0]
downsampled_strand = np.copy(strands[i_strand][:, 0:3])
if num_pts <= 2:
pass
elif num_pts > 2 and num_pts < min_num_pts:
interp_pts = (downsampled_strand[:-1] + downsampled_strand[1:]) / 2.
interp_strand = np.zeros((num_pts * 2 - 1, 3))
interp_strand[::2] = downsampled_strand
interp_strand[1::2] = interp_pts
downsampled_strand = interp_strand
elif num_pts > min_num_pts and num_pts < tgt_num_pts:
pass
else:
interp_spline = splines.CatmullRom(downsampled_strand)
interp_idx = np.arange(tgt_num_pts) / (tgt_num_pts / (num_pts - 1))
downsampled_strand = interp_spline.evaluate(interp_idx)
strands[i_strand] = downsampled_strand
return strands
def duplicate_strands(strands, ratio=5, perturation=1.0):
loop = tqdm(range(strands.shape[0]))
loop.set_description('Duplicating strands')
duplicated_strands_list = []
for i_strand in loop:
strand = strands[i_strand][:, 0:3]
num_pts = strand.shape[0]
duplicated_strands = np.repeat(strand.reshape(1, num_pts, 3), ratio, axis=0)
start_tangent = strand[1] - strand[0]
offsets = np.random.rand(ratio, 3)
offsets[:, 2] = -(offsets[:, 0] * start_tangent[0] + offsets[:, 1] * start_tangent[1]) / (start_tangent[2] + 1e-6)
offsets = offsets / np.linalg.norm(offsets, axis=1, keepdims=True)
offsets[0] *= 0
scale_ratio = np.random.rand(ratio, 1) * perturation + perturation
offsets = offsets * scale_ratio
offsets = np.repeat(offsets.reshape(ratio, 1, 3), num_pts, axis=1)
duplicated_strands = duplicated_strands + offsets
for j in range(ratio):
duplicated_strands_list.append(duplicated_strands[j])
strands = np.array(duplicated_strands_list, dtype=object)
return strands
|
CT2Hair-main
|
CT2Hair/utils/strandsutils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import copy
import igl
import trimesh
import numpy as np
from scipy.spatial.transform import Rotation as R
from datautils.datautils import save_bin_strands
from utils.pcutils import load_pc
from utils.utils import translate2mat, homo_rot_mat
def read_mesh(mesh_path):
mesh = trimesh.load(mesh_path, force='mesh', process=True)
return mesh
def write_mesh(mesh_path, mesh):
mesh.export(mesh_path)
def quad2trimesh(quad_faces, vertices):
assert quad_faces.shape[1] == 4, "Mesh is not a quad mesh."
num_quad_faces = quad_faces.shape[0]
num_tri_faces = num_quad_faces * 2
tri_faces = np.zeros((num_tri_faces, 3), dtype=np.uint32)
tri_faces[::2] = quad_faces[:, [0, 1, 2]]
tri_faces[1::2] = quad_faces[:, [0, 2, 3]]
return trimesh.Trimesh(vertices=vertices, faces=tri_faces)
def vertices_pairwise_dis(vertices):
inner_vertices = -2 * (vertices @ vertices.T)
vertices_2 = np.sum(vertices**2, axis=1, keepdims=True)
pairwise_dis = vertices_2 + inner_vertices + vertices_2.T
return pairwise_dis
def mesh_deformation(head_mesh, target_pc, scalp_faces_mask, smooth=True, smooth_iterations=10, thres_min_movement=10.0):
target_kdtree = target_pc.kdtree
v = head_mesh.vertices
f = head_mesh.faces
u = v.copy()
num_vertices = v.shape[0]
dis, idx = target_kdtree.query(v, 1)
s = np.zeros(num_vertices)
for i_face in range(head_mesh.faces.shape[0]):
if scalp_faces_mask[i_face]:
for i_v in range(3):
v_idx = f[i_face, i_v]
if dis[v_idx] <= thres_min_movement:
s[v_idx] = 1
b = np.array([[t[0] for t in [(i, s[i]) for i in range(0, v.shape[0])] if t[1] > 0]]).T
# Boundary conditions directly on deformed positions
u_bc = np.zeros((b.shape[0], v.shape[1]))
v_bc = np.zeros((b.shape[0], v.shape[1]))
for bi in range(b.shape[0]):
v_bc[bi] = v[b[bi]]
offset = target_pc.vertices[idx[b[bi]]] - v[b[bi]]
u_bc[bi] = v[b[bi]] + offset
u_bc_anim = v_bc + (u_bc - v_bc)
d_bc = u_bc_anim - v_bc
d = igl.harmonic_weights(v, f, b.astype(f.dtype), d_bc, 1)
u = v + d
head_mesh.vertices = u
if smooth:
smoothe_head_mesh = copy.deepcopy(head_mesh)
trimesh.smoothing.filter_mut_dif_laplacian(smoothe_head_mesh, iterations=smooth_iterations)
# trimesh.smoothing.filter_laplacian(head_mesh, iterations=smooth_iterations)
head_mesh.vertices[head_mesh.faces[scalp_faces_mask]] = smoothe_head_mesh.vertices[head_mesh.faces[scalp_faces_mask]]
return head_mesh
def get_alignment_matrix(head_mesh, head_texture, target_roots_pc_path, target_face_base):
uv_coords = head_mesh.visual.uv # num_vertices X 2
head_tex_width, head_tex_height, _ = head_texture.shape
head_mesh_pairwise_dis = vertices_pairwise_dis(head_mesh.vertices)
head_mesh_eye = np.eye(head_mesh_pairwise_dis.shape[0])
head_mesh_pairwise_dis = head_mesh_pairwise_dis + head_mesh_eye
UV_bound_vertices = np.where(head_mesh_pairwise_dis < 1e-4) # boundary vertices in UV
    # for each face determine whether it is scalp
num_faces = head_mesh.faces.shape[0]
face_uv_coords = uv_coords[head_mesh.faces] * [head_tex_height, head_tex_width]
face_uv_coords = np.around(face_uv_coords).astype(np.uint16)
face_uv_coords = np.clip(face_uv_coords, [0, 1], [head_tex_width - 1, head_tex_height])
face_uv_colors = head_texture[head_tex_height - face_uv_coords[:, :, 1], face_uv_coords[:, :, 0], :]
face_avg_colors = np.sum(face_uv_colors, axis=1, keepdims=False)
scalp_faces_mask = face_avg_colors[:, 0] > 255 * 0.3
scalp_faces_idx = np.where(face_avg_colors[:, 0] > 255 * 0.3)[0]
scalp_mesh = copy.deepcopy(head_mesh)
scalp_mesh.update_faces(scalp_faces_mask)
scalp_mesh.remove_unreferenced_vertices()
scalp_sampled_points = scalp_mesh.sample(50000)
target_points, target_normals = load_pc(target_roots_pc_path, load_color=False, load_normal=True)
source_pc = trimesh.points.PointCloud(scalp_sampled_points)
target_pc = trimesh.points.PointCloud(target_points)
trans_mat = np.eye(4)
# align bound sphere size
scale_ratio = math.pow(target_pc.bounding_sphere.volume / source_pc.bounding_sphere.volume, 1./3.)
scalp_sampled_points = scalp_sampled_points * scale_ratio
trans_offset = [0., 0., 0.] - (source_pc.centroid * scale_ratio)
scalp_sampled_points += trans_offset
trans_mat[0:3] = trans_mat[0:3] * scale_ratio
trans_mat = translate2mat(trans_offset) @ trans_mat
# base rotate to original coord
base_rot = R.from_euler('xyz', [[0., 0., 0.]]) # MannequinHeadB
base_rot_mat = base_rot.as_matrix()[0]
scalp_sampled_points = np.dot(base_rot_mat, scalp_sampled_points.T).T
trans_mat = homo_rot_mat(base_rot_mat) @ trans_mat
# change of basis
# target_face_base = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1]])
target_face_base_inv = target_face_base.T
scalp_sampled_points = np.dot(target_face_base_inv, scalp_sampled_points.T).T
trans_mat = homo_rot_mat(target_face_base_inv) @ trans_mat
# move to same center with target
scalp_sampled_points += target_pc.centroid
trans_mat = translate2mat(target_pc.centroid) @ trans_mat
# registration
reg_mat, reg_points, cost = trimesh.registration.icp(scalp_sampled_points, target_points)
trans_mat = reg_mat @ trans_mat
return trans_mat
def process_head_model(head_mesh, head_texture, target_roots_pc_path, target_face_base, is_deformation=True):
print('Utils::MeshUtils Start processing head model (registration & deformation)...')
uv_coords = head_mesh.visual.uv # num_vertices X 2
head_tex_width, head_tex_height, _ = head_texture.shape
head_mesh_pairwise_dis = vertices_pairwise_dis(head_mesh.vertices)
head_mesh_eye = np.eye(head_mesh_pairwise_dis.shape[0])
head_mesh_pairwise_dis = head_mesh_pairwise_dis + head_mesh_eye
UV_bound_vertices = np.where(head_mesh_pairwise_dis < 1e-4) # boundary vertices in UV
    # for each face determine whether it is scalp
num_faces = head_mesh.faces.shape[0]
face_uv_coords = uv_coords[head_mesh.faces] * [head_tex_height, head_tex_width]
face_uv_coords = np.around(face_uv_coords).astype(np.uint16)
face_uv_coords = np.clip(face_uv_coords, [0, 1], [head_tex_width - 1, head_tex_height])
face_uv_colors = head_texture[head_tex_height - face_uv_coords[:, :, 1], face_uv_coords[:, :, 0], :]
face_avg_colors = np.sum(face_uv_colors, axis=1, keepdims=False)
scalp_faces_mask = face_avg_colors[:, 0] > 255 * 0.3
scalp_faces_idx = np.where(face_avg_colors[:, 0] > 255 * 0.3)[0]
scalp_mesh = copy.deepcopy(head_mesh)
scalp_mesh.update_faces(scalp_faces_mask)
scalp_mesh.remove_unreferenced_vertices()
scalp_sampled_points = scalp_mesh.sample(50000)
target_points = load_pc(target_roots_pc_path, load_color=False, load_normal=False)
source_pc = trimesh.points.PointCloud(scalp_sampled_points)
target_pc = trimesh.points.PointCloud(target_points)
trans_mat = np.eye(4)
# align bound sphere size
scale_ratio = math.pow(target_pc.bounding_sphere.volume / source_pc.bounding_sphere.volume, 1./3.)
scalp_sampled_points = scalp_sampled_points * scale_ratio
trans_offset = [0., 0., 0.] - (source_pc.centroid * scale_ratio)
scalp_sampled_points += trans_offset
trans_mat = translate2mat(trans_offset) @ trans_mat
# base rotate to original coord
# base_rot = R.from_euler('yzx', [[211. / 180. * np.pi, -8. / 180. * np.pi, 0.]]) # Mugsy Head
# base_rot = R.from_euler('xzy', [[180. / 180. * np.pi, 2. / 180. * np.pi, 3. / 180. * np.pi]]) # old MannequinHeadA
base_rot = R.from_euler('xyz', [[0., 0., 0.]]) # MannequinHeadA and B
base_rot_mat = base_rot.as_matrix()[0]
scalp_sampled_points = np.dot(base_rot_mat, scalp_sampled_points.T).T
trans_mat = homo_rot_mat(base_rot_mat) @ trans_mat
# change of basis
# target_face_base = np.array([[0., 1., 0.], [1., 0., 0.], [0., 0., -1]])
target_face_base_inv = target_face_base.T
scalp_sampled_points = np.dot(target_face_base_inv, scalp_sampled_points.T).T
trans_mat = homo_rot_mat(target_face_base_inv) @ trans_mat
# move to same center with target
scalp_sampled_points += target_pc.centroid
trans_mat = translate2mat(target_pc.centroid) @ trans_mat
# registration
reg_mat, reg_points, cost = trimesh.registration.icp(scalp_sampled_points, target_points) # type: ignore (for avoid pyplace error report)
trans_mat = reg_mat @ trans_mat
    # apply the transformation to the head model
head_mesh.apply_scale(scale_ratio)
head_mesh.apply_transform(trans_mat)
# head_mesh.export('temp/reg_head.ply')
if is_deformation:
# head_mesh = mesh_deformation(head_mesh, target_pc, scalp_faces_mask, smooth=False)
# head_mesh = mesh_deformation(head_mesh, target_pc, scalp_faces_mask)
head_mesh = mesh_deformation(head_mesh, target_pc, scalp_faces_mask, smooth_iterations=12, thres_min_movement=24)
# head_mesh = mesh_deformation(head_mesh, target_pc, scalp_faces_mask, smooth_iterations=6, thres_min_movement=24)
# head_mesh.export('temp/smoothed_deform_reg_head.ply')
# sew vertices
sewed_v = head_mesh.vertices.copy()
for i_v in range(UV_bound_vertices[0].shape[0]):
sewed_v[UV_bound_vertices[0][i_v]] = (head_mesh.vertices[UV_bound_vertices[0][i_v]] + head_mesh.vertices[UV_bound_vertices[1][i_v]]) / 2.
sewed_v[UV_bound_vertices[1][i_v]] = (head_mesh.vertices[UV_bound_vertices[0][i_v]] + head_mesh.vertices[UV_bound_vertices[1][i_v]]) / 2.
head_mesh.vertices = sewed_v
# compute transed & registered & deformed scalp mesh again
scalp_mesh = copy.deepcopy(head_mesh)
scalp_mesh.update_faces(scalp_faces_mask)
scalp_mesh.remove_unreferenced_vertices()
print('Utils::MeshUtils End processing.')
return head_mesh, scalp_mesh, scalp_faces_idx
def seg_head_model(head_mesh, head_texture):
uv_coords = head_mesh.visual.uv # num_vertices X 2
head_tex_width, head_tex_height, _ = head_texture.shape
head_mesh_pairwise_dis = vertices_pairwise_dis(head_mesh.vertices)
head_mesh_eye = np.eye(head_mesh_pairwise_dis.shape[0])
head_mesh_pairwise_dis = head_mesh_pairwise_dis + head_mesh_eye
UV_bound_vertices = np.where(head_mesh_pairwise_dis < 1e-4) # boundary vertices in UV
    # for each face determine whether it is scalp
face_uv_coords = uv_coords[head_mesh.faces] * [head_tex_height, head_tex_width]
face_uv_coords = np.around(face_uv_coords).astype(np.uint16)
face_uv_coords = np.clip(face_uv_coords, [0, 1], [head_tex_width - 1, head_tex_height])
face_uv_colors = head_texture[head_tex_height - face_uv_coords[:, :, 1], face_uv_coords[:, :, 0], :]
face_avg_colors = np.sum(face_uv_colors, axis=1, keepdims=False)
scalp_faces_mask = face_avg_colors[:, 0] > 255 * 0.3
scalp_faces_idx = np.where(face_avg_colors[:, 0] > 255 * 0.3)[0]
scalp_mesh = copy.deepcopy(head_mesh)
scalp_mesh.update_faces(scalp_faces_mask)
scalp_mesh.remove_unreferenced_vertices()
return head_mesh, scalp_mesh, scalp_faces_idx
import torch
from typing import Union, Tuple
from trimesh import Trimesh
# from trimesh.proximity import closest_point # Too slow
from trimesh.triangles import points_to_barycentric
def closest_point_barycentrics(v, vi, points, filtering=False, filter_dis_thres=2.):
"""Given a 3D mesh and a set of query points, return closest point barycentrics
Args:
v: np.array (float)
[N, 3] mesh vertices
vi: np.array (int)
[N, 3] mesh triangle indices
points: np.array (float)
[M, 3] query points
    Returns:
        Tuple[approx, barys, interp_idxs, face_idxs, valid_q_idx]
        approx: [M, 3] approximated (closest) points on the mesh
        barys: [M, 3] barycentric weights that produce "approx"
        interp_idxs: [M, 3] vertex indices for barycentric interpolation
        face_idxs: [M] face indices for barycentric interpolation. interp_idxs = vi[face_idxs]
        valid_q_idx: [M] indices of the query points that were kept (all points unless filtering=True)
    """
mesh = Trimesh(vertices=v, faces=vi)
# p, distances, face_idxs = closest_point(mesh, points) # Slow, Change to IGL
sqr_distances, face_idxs, p = igl.point_mesh_squared_distance(points, mesh.vertices, mesh.faces) # type: ignore for avoiding pylance error
if filtering:
valid_q_idx = np.where(np.sqrt(sqr_distances) < filter_dis_thres)[0]
p = p[valid_q_idx]
face_idxs = face_idxs[valid_q_idx]
else:
valid_q_idx = np.arange(p.shape[0])
barys = points_to_barycentric(mesh.triangles[face_idxs], p)
b0, b1, b2 = np.split(barys, 3, axis=1)
interp_idxs = vi[face_idxs]
v0 = v[interp_idxs[:, 0]]
v1 = v[interp_idxs[:, 1]]
v2 = v[interp_idxs[:, 2]]
approx = b0 * v0 + b1 * v1 + b2 * v2
return approx, barys, interp_idxs, face_idxs, valid_q_idx
def make_closest_uv_barys(
vt: torch.Tensor,
vti: torch.Tensor,
uv_shape: Union[Tuple[int, int], int],
flip_uv: bool = True,
):
"""Compute a UV-space barycentric map where each texel contains barycentric
coordinates for the closest point on a UV triangle.
Args:
vt: torch.Tensor
Texture coordinates. Shape = [n_texcoords, 2]
vti: torch.Tensor
Face texture coordinate indices. Shape = [n_faces, 3]
uv_shape: Tuple[int, int] or int
Shape of the texture map. (HxW)
flip_uv: bool
Whether or not to flip UV coordinates along the V axis (OpenGL -> numpy/pytorch convention).
Returns:
torch.Tensor: index_img: Face index image, shape [uv_shape[0], uv_shape[1]]
        torch.Tensor: Barycentric coordinate map, shape [uv_shape[0], uv_shape[1], 3]
"""
if isinstance(uv_shape, int):
uv_shape = (uv_shape, uv_shape)
if flip_uv:
# Flip here because texture coordinates in some of our topo files are
# stored in OpenGL convention with Y=0 on the bottom of the texture
# unlike numpy/torch arrays/tensors.
vt = vt.clone()
vt[:, 1] = 1 - vt[:, 1]
# Texel to UV mapping (as per OpenGL linear filtering)
# https://www.khronos.org/registry/OpenGL/specs/gl/glspec46.core.pdf
# Sect. 8.14, page 261
# uv=(0.5,0.5)/w is at the center of texel [0,0]
# uv=(w-0.5, w-0.5)/w is the center of texel [w-1,w-1]
# texel = floor(u*w - 0.5)
# u = (texel+0.5)/w
uv_grid = torch.meshgrid(
torch.linspace(0.5, uv_shape[0] - 1 + 0.5, uv_shape[0]) / uv_shape[0],
torch.linspace(0.5, uv_shape[1] - 1 + 0.5, uv_shape[1]) / uv_shape[1], indexing='ij') # HxW, v,u
uv_grid = torch.stack(uv_grid[::-1], dim=2) # HxW, u, v
uv = uv_grid.reshape(-1, 2).data.to("cpu").numpy()
vth = np.hstack((vt, vt[:, 0:1] * 0 + 1))
uvh = np.hstack((uv, uv[:, 0:1] * 0 + 1))
approx, barys, interp_idxs, face_idxs, _ = closest_point_barycentrics(vth, vti, uvh)
index_img = torch.from_numpy(face_idxs.reshape(uv_shape[0], uv_shape[1])).long()
bary_img = torch.from_numpy(barys.reshape(uv_shape[0], uv_shape[1], 3)).float()
return index_img, bary_img
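# A small numeric check of the texel<->UV convention documented above (a hypothetical
# helper, not part of the reconstruction pipeline): with a texture of width w, texel i is
# centred at u = (i + 0.5) / w and texel = floor(u * w - 0.5).
def _demo_texel_uv_convention(w: int = 4):
    import numpy as np
    texels = np.arange(w)
    u = (texels + 0.5) / w                      # e.g. [0.125, 0.375, 0.625, 0.875] for w = 4
    recovered = np.floor(u * w - 0.5 + 1e-9)    # small epsilon guards against float round-off
    assert np.array_equal(recovered.astype(int), texels)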
def compute_tbn_uv(tri_xyz, tri_uv, eps=1e-5):
"""Compute tangents, bitangents, normals.
Args:
tri_xyz: [B,N,3,3] vertex coordinates
tri_uv: [B,N,3,2] texture coordinates
Returns:
tangents, bitangents, normals
"""
v01 = tri_xyz[:, :, 1] - tri_xyz[:, :, 0]
v02 = tri_xyz[:, :, 2] - tri_xyz[:, :, 0]
normals = torch.cross(v01, v02, dim=-1)
normals = normals / torch.norm(normals, dim=-1, keepdim=True).clamp(min=eps)
vt01 = tri_uv[:, :, 1] - tri_uv[:, :, 0]
vt02 = tri_uv[:, :, 2] - tri_uv[:, :, 0]
f = 1.0 / (vt01[..., 0] * vt02[..., 1] - vt01[..., 1] * vt02[..., 0])
tangents = f[..., np.newaxis] * (
v01 * vt02[..., 1][..., np.newaxis] - v02 * vt01[..., 1][..., np.newaxis])
tangents = tangents / torch.norm(tangents, dim=-1, keepdim=True).clamp(min=eps)
bitangents = torch.cross(normals, tangents, dim=-1)
bitangents = bitangents / torch.norm(bitangents, dim=-1, keepdim=True).clamp(min=eps)
return tangents, bitangents, normals
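# A sanity-check sketch for compute_tbn_uv (a hypothetical helper, not called anywhere in
# the pipeline): for a single triangle lying in the xy-plane with axis-aligned UVs, the
# returned tangent frame should be close to the canonical x/y/z axes.
def _demo_compute_tbn_uv():
    import torch
    tri_xyz = torch.tensor([[[[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]]]])  # [1, 1, 3, 3]
    tri_uv = torch.tensor([[[[0., 0.], [1., 0.], [0., 1.]]]])               # [1, 1, 3, 2]
    t, b, n = compute_tbn_uv(tri_xyz, tri_uv)
    assert torch.allclose(t[0, 0], torch.tensor([1., 0., 0.]), atol=1e-6)
    assert torch.allclose(b[0, 0], torch.tensor([0., 1., 0.]), atol=1e-6)
    assert torch.allclose(n[0, 0], torch.tensor([0., 0., 1.]), atol=1e-6)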
def strands_world2tbn(strands, head_mesh, scalp_mesh, scalp_faces_idx):
# print('Utils::MeshUtils Convert strands to TBN space...')
num_strands = strands.shape[0]
# get all roots points
roots_pc = []
for i_strand in range(num_strands):
roots_pc.append(strands[i_strand][0, 0:3])
roots_pc = np.array(roots_pc)
approx, barys, interp_idxs, faces_idxs, valid_q_idxs = closest_point_barycentrics(scalp_mesh.vertices, scalp_mesh.faces, roots_pc, filtering=True, filter_dis_thres=6.4) # 3.6 -> 6.4, 7.2
valid_strands = strands[valid_q_idxs]
# invalid_q_idxs = list(set(np.arange(roots_pc.shape[0])) - set(valid_q_idxs))
# invalid_strands = strands[invalid_q_idxs]
# save_bin_strands('temp/valid_strands.bin', valid_strands)
# save_bin_strands('temp/invalid_strands.bin', invalid_strands)
num_valid_strands = valid_strands.shape[0]
triangled_vertices = torch.tensor(head_mesh.vertices[head_mesh.faces, :])[None, :]
triangled_vertices_uv = torch.tensor(head_mesh.visual.uv[head_mesh.faces, :])[None, :]
tangents, bitangents, normals = compute_tbn_uv(triangled_vertices, triangled_vertices_uv) # get tbn for each face
scalp_tangents = tangents[0][scalp_faces_idx].detach().cpu().numpy()
scalp_bitangents = bitangents[0][scalp_faces_idx].detach().cpu().numpy()
scalp_normals = normals[0][scalp_faces_idx].detach().cpu().numpy()
tbn_strands = []
for i_strand in range(num_valid_strands):
tangent = scalp_tangents[faces_idxs[i_strand]]
bitangent = scalp_bitangents[faces_idxs[i_strand]]
normal = scalp_normals[faces_idxs[i_strand]]
tbn_basis_T = np.array([tangent, bitangent, normal])
tbn_strand = (tbn_basis_T @ valid_strands[i_strand][:, 0:3].T).T
tbn_strand = tbn_strand - tbn_strand[0]
tbn_strands.append(tbn_strand)
# print('Utils::MeshUtils End converting, number of original strands: %d, number of valid strands: %d'%(num_strands, num_valid_strands))
return tbn_strands, barys, interp_idxs, faces_idxs, valid_q_idxs, tangents, bitangents, normals
def strands_align_normal(strands, head_mesh):
num_strands = len(strands)
# get all roots points
roots_pc = []
for i_strand in range(num_strands):
roots_pc.append(strands[i_strand][0])
roots_pc = np.array(roots_pc)[:, 0:3]
sqr_distances, face_idxs, p = igl.point_mesh_squared_distance(roots_pc, head_mesh.vertices, head_mesh.faces)
closest_faces = head_mesh.faces[face_idxs]
closest_triangles = torch.tensor(head_mesh.vertices[closest_faces, :])[None, :]
v01 = closest_triangles[:, :, 1] - closest_triangles[:, :, 0]
v02 = closest_triangles[:, :, 2] - closest_triangles[:, :, 0]
normals = torch.cross(v01, v02, dim=-1)
normals = normals / torch.norm(normals, dim=-1, keepdim=True).clamp(min=1e-5)
z_aixs = torch.zeros_like(normals)
z_aixs[:, :, 2] = 1
t_axises = torch.cross(normals, z_aixs)
t_axises = t_axises / torch.norm(t_axises, dim=-1, keepdim=True).clamp(min=1e-5)
b_axises = torch.cross(normals, t_axises)
b_axises = b_axises / torch.norm(b_axises, dim=-1, keepdim=True).clamp(min=1e-5)
tangents = t_axises[0].detach().cpu().numpy()
bitangents = b_axises[0].detach().cpu().numpy()
normals = normals[0].detach().cpu().numpy()
aligned_strands = []
valid_rot_mats = []
valid_roots_pts = []
for i_strand in range(num_strands):
tangent = tangents[i_strand]
bitangent = bitangents[i_strand]
normal = normals[i_strand]
strand = np.array(strands[i_strand])
root_pts = strand[0]
strand = strand - root_pts
tbn_basis_T = np.array([tangent, bitangent, normal])
aligned_strand = (tbn_basis_T @ strand.T).T
if np.sum(aligned_strand ** 2) < 1e-7 or np.isnan(np.sum(aligned_strand)): # delete some noise data for avoiding nan
continue
aligned_strands.append(aligned_strand)
valid_rot_mats.append(tbn_basis_T)
valid_roots_pts.append(root_pts)
return aligned_strands, valid_rot_mats, valid_roots_pts
|
CT2Hair-main
|
CT2Hair/utils/meshutils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import time
import numpy as np
import open3d as o3d
from copy import deepcopy
from matplotlib import cm
def volume2pc(voxels, threshold=1e-1, scale_ratio=np.array([0.125, 0.125, 0.125]), get_colors=True):
start_time = time.time()
x, y, z = np.where(voxels > threshold)
points = np.concatenate((x[:, None], y[:, None], z[:, None]), axis=1).astype(np.float32)
points = points * scale_ratio
values = voxels[x, y, z]
if get_colors:
BuGn_color_map = cm.get_cmap('BuGn', 256)
colors = np.array(BuGn_color_map(values))[:, 0:3]
        print('Finished volume-to-pc stage in %fs'%(time.time() - start_time))
return points, colors
else:
        print('Finished volume-to-pc stage in %fs'%(time.time() - start_time))
return points, values
def pc2volume(points, colors=None, normals=None, num_angles=12):
min_bound = np.min(points, axis=0).astype(np.int16)
max_bound = np.max(points, axis=0).astype(np.int16)
voxel_size = max_bound - min_bound + 1
voxel_size = np.append(voxel_size, [4])
voxels = np.zeros(voxel_size)
points = points.astype(np.int16)
points = points - min_bound
if colors is not None:
voxels[points[:, 0], points[:, 1], points[:, 2], 0] = colors[:, 0] # confidence
        voxels[points[:, 0], points[:, 1], points[:, 2], 1] = colors[:, 1] * num_angles # theta
voxels[points[:, 0], points[:, 1], points[:, 2], 2] = colors[:, 2] * num_angles # phi
voxels[points[:, 0], points[:, 1], points[:, 2], 3] = np.arange(points.shape[0]) # point_index
elif normals is not None:
        voxels[points[:, 0], points[:, 1], points[:, 2], 0:3] = normals # normal vector
voxels[points[:, 0], points[:, 1], points[:, 2], 3] = np.arange(points.shape[0]) # point_index
return voxels, min_bound
def strands2pc(strands, step_size=None, rand_color=True):
num_strands = strands.shape[0]
    if step_size is None:
strands_points = []
strands_normals = []
strands_colors = []
strands_tangents = []
strands_sep = [] # number of points for each strand
for i_strand in range(num_strands):
num_points = strands[i_strand].shape[0]
points = strands[i_strand][:, :3]
normals = strands[i_strand][:, 3:]
tangents = points[1:] - points[:-1]
tangents = tangents / np.linalg.norm(tangents, axis=-1, keepdims=True)
tangents = np.concatenate((tangents, tangents[-1:]), axis=0)
points = points.tolist()
normals = normals.tolist()
tangents = tangents.tolist()
strands_points.extend(points)
strands_normals.extend(normals)
strands_tangents.extend(tangents)
if rand_color:
strand_color = np.random.rand(1, 3)
strand_colors = np.repeat(strand_color, num_points, axis=0)
strand_colors = strand_colors.tolist()
strands_colors.extend(strand_colors)
strands_sep.append(num_points)
strands_points = np.array(strands_points)
strands_tangents = np.array(strands_tangents)
if rand_color:
strands_colors = np.array(strands_colors)
return strands_points, strands_colors, strands_sep
else:
return strands_points, strands_tangents, strands_sep
else:
max_step_lenght = 0
strands_steps_pos_norm = []
strands_steps_colors = []
for i_strand in range(num_strands):
num_steps = strands[i_strand].shape[0] // step_size
num_points = num_steps * step_size
strand = np.reshape(strands[i_strand][:num_points], (num_steps, step_size, strands[i_strand].shape[-1]))
strands_steps_pos_norm.append(strand)
if rand_color:
strand_color = np.random.rand(1, 3)
strand_colors = np.repeat(strand_color, num_points, axis=0)
strand_colors = np.reshape(strand_colors, (num_steps, step_size, 3))
strands_steps_colors.append(strand_colors)
if num_steps > max_step_lenght:
max_step_lenght = num_steps
steps_points = []
steps_normals = []
steps_colors = []
for i_step in range(max_step_lenght):
step_points = []
step_normals = []
step_colors = []
for j_strand in range(num_strands):
step_lenght = strands_steps_pos_norm[j_strand].shape[0]
if (step_lenght <= i_step):
continue
step_points.append(strands_steps_pos_norm[j_strand][i_step, :, :3])
step_normals.append(strands_steps_pos_norm[j_strand][i_step, :, 3:])
if rand_color:
step_colors.append(strands_steps_colors[j_strand][i_step])
steps_points.append(np.array(step_points).reshape(-1, 3))
steps_normals.append(np.array(step_normals).reshape(-1, 3))
if rand_color:
steps_colors.append(np.array(step_colors).reshape(-1, 3))
if rand_color:
return max_step_lenght, steps_points, steps_colors
else:
return max_step_lenght, steps_points, None
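# A usage sketch for strands2pc (a hypothetical helper, not part of the pipeline): the
# function expects an object array of strands, each shaped [num_points, 6] with xyz
# positions followed by per-point normals. The toy strand data below is made up.
def _demo_strands2pc():
    import numpy as np
    strands = np.empty(2, dtype=object)
    strands[0] = np.random.rand(10, 6)
    strands[1] = np.random.rand(15, 6)
    points, colors, sep = strands2pc(strands, step_size=None, rand_color=True)
    assert points.shape == (25, 3) and len(colors) == 25 and sep == [10, 15]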
def read_pc(pc_path):
point_cloud = o3d.io.read_point_cloud(pc_path)
return point_cloud
def load_pc(pc_path, load_color=True, load_normal=False):
point_cloud = o3d.io.read_point_cloud(pc_path)
points = np.asarray(point_cloud.points)
if load_color:
assert point_cloud.has_colors(), "Loaded point cloud has no colors"
colors = np.asarray(point_cloud.colors)
return points, colors
elif load_normal:
assert point_cloud.has_normals(), "Loaded point cloud has no normals"
normals = np.asarray(point_cloud.normals)
return points, normals
else:
return points
def save_pc_float64(pc_path, points, colors=None, normals=None):
point_cloud = o3d.geometry.PointCloud()
point_cloud.points = o3d.utility.Vector3dVector(points)
if colors is not None:
assert points.shape[0] == colors.shape[0], "points and colors should have same numbers"
point_cloud.colors = o3d.utility.Vector3dVector(colors)
if normals is not None:
point_cloud.normals = o3d.utility.Vector3dVector(normals)
return o3d.io.write_point_cloud(pc_path, point_cloud)
def save_pc(pc_path, points, colors=None, normals=None):
pc_device = o3d.core.Device("CPU:0")
pc_type = o3d.core.float32
point_cloud = o3d.t.geometry.PointCloud(pc_device)
point_cloud.point["positions"] = o3d.core.Tensor(points.astype(np.float32), pc_type, pc_device)
if normals is not None:
point_cloud.point["normals"] = o3d.core.Tensor(normals.astype(np.float32), pc_type, pc_device)
if colors is not None:
assert points.shape[0] == colors.shape[0], "points and colors should have same numbers"
colors = (colors * 255).astype(np.int8) # need to do this for open3d version 0.15.1, after this I can vis it via meshlab
point_cloud.point["colors"] = o3d.core.Tensor(colors, o3d.core.uint8, pc_device)
return o3d.t.io.write_point_cloud(pc_path, point_cloud, compressed=True, print_progress=True)
def get_bbox(points):
x_min = np.min(points[:, 0])
x_max = np.max(points[:, 0])
y_min = np.min(points[:, 1])
y_max = np.max(points[:, 1])
z_min = np.min(points[:, 2])
z_max = np.max(points[:, 2])
bbox = np.array([[x_min, x_max],
[y_min, y_max],
[z_min, z_max]])
center = ((bbox[:, 1] + bbox[:, 0]) / 2.).T
return bbox, center
def pc_voxelization(points, shape):
segments = []
steps = []
for i in range(3):
s, step = np.linspace(0, shape[i] - 1, num=shape[i], retstep=True)
segments.append(s)
steps.append(step)
vidx_x = np.clip(np.searchsorted(segments[0], points[:, 0]), 0, shape[0] - 1)
vidx_y = np.clip(np.searchsorted(segments[1], points[:, 1]), 0, shape[1] - 1)
vidx_z = np.clip(np.searchsorted(segments[2], points[:, 2]), 0, shape[2] - 1)
vidx = np.concatenate((vidx_x[:, None], vidx_y[:, None], vidx_z[:, None]), axis=-1)
vidx = np.unique(vidx, axis=0)
return vidx[:, 0], vidx[:, 1], vidx[:, 2]
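# A small sketch of pc_voxelization's behaviour (hypothetical helper, not part of the
# pipeline): coordinates are snapped to integer voxel indices via searchsorted against
# [0, 1, ..., shape[i]-1], clipped to the grid, and duplicate voxels are dropped.
def _demo_pc_voxelization():
    import numpy as np
    points = np.array([[0.2, 3.0, 7.9],
                       [0.2, 3.0, 7.9]])      # duplicate on purpose
    x, y, z = pc_voxelization(points, shape=(8, 8, 8))
    # 0.2 -> 1 (insertion index), 3.0 -> 3 (exact match), 7.9 -> 8 clipped to 7;
    # the duplicate point collapses into a single voxel.
    assert (x.tolist(), y.tolist(), z.tolist()) == ([1], [3], [7])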
def patch_filter_major(points, voxels, weights, kernel_size=5):
assert voxels.ndim == 3, "Only works for 1-dim voxel"
assert voxels.dtype == np.int16, "Only works for int voxel"
num_points = points.shape[0]
offset = kernel_size // 2
padded_voxels = np.pad(voxels, ((offset, offset), (offset, offset), (offset, offset)), mode='reflect')
padded_weights = np.pad(weights, ((offset, offset), (offset, offset), (offset, offset)), mode='reflect')
filtered_voxels = deepcopy(voxels)
for i_point in range(num_points):
grid_idx = points[i_point]
# selected_region_start_pos = grid_idx - offset
selected_region = padded_voxels[grid_idx[0] : grid_idx[0] + kernel_size,
grid_idx[1] : grid_idx[1] + kernel_size,
grid_idx[2] : grid_idx[2] + kernel_size,]
selected_weights = padded_weights[grid_idx[0] : grid_idx[0] + kernel_size,
grid_idx[1] : grid_idx[1] + kernel_size,
grid_idx[2] : grid_idx[2] + kernel_size,]
major_value = np.bincount(selected_region.reshape(-1), selected_weights.reshape(-1)).argmax()
filtered_voxels[grid_idx[0], grid_idx[1], grid_idx[2]] = major_value
return filtered_voxels
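# A toy example for patch_filter_major (hypothetical helper, not part of the pipeline):
# each queried voxel is replaced by the weighted majority label in its kernel_size^3
# neighbourhood, so an isolated label surrounded by zeros is voted away.
def _demo_patch_filter_major():
    import numpy as np
    voxels = np.zeros((5, 5, 5), dtype=np.int16)
    voxels[2, 2, 2] = 3                      # a lone "noisy" label
    weights = np.ones_like(voxels, dtype=np.float64)
    points = np.array([[2, 2, 2]])
    filtered = patch_filter_major(points, voxels, weights, kernel_size=3)
    assert filtered[2, 2, 2] == 0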
|
CT2Hair-main
|
CT2Hair/utils/pcutils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import cv2
import math
import torch
import torch.nn as nn
import numpy as np
from matplotlib import cm
def polar2vector(theta, phi, step_length=1, start_vector=np.array([1, 0, 0])):
sin_a, cos_a = math.sin(0), math.cos(0)
sin_b, cos_b = math.sin(phi), math.cos(phi)
sin_g, cos_g = math.sin(theta), math.cos(theta)
R_x = np.array([[1, 0, 0],
[0, cos_a, -sin_a],
[0, sin_a, cos_a]])
R_y = np.array([[ cos_b, 0, sin_b],
[ 0, 1, 0],
[-sin_b, 0, cos_b]])
R_z = np.array([[cos_g, -sin_g, 0],
[sin_g, cos_g, 0],
[ 0, 0, 1]],)
R = R_z @ R_y @ R_x
vector = start_vector * step_length
vector = vector.T
vector = R @ vector
return vector
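# A sanity-check sketch for polar2vector (hypothetical helper, not part of the pipeline):
# theta = phi = 0 leaves the default start vector [1, 0, 0] unchanged, and theta = pi/2
# rotates it onto the y axis.
def _demo_polar2vector():
    import numpy as np
    assert np.allclose(polar2vector(0.0, 0.0), [1., 0., 0.])
    assert np.allclose(polar2vector(np.pi / 2, 0.0), [0., 1., 0.])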
def polar2vector_torch(theta, phi, step_length=1, start_vector=torch.tensor([1, 0, 0]), device='cuda'):
if not torch.is_tensor(theta):
theta = torch.tensor(theta, device=device)
if not torch.is_tensor(phi):
phi = torch.tensor(phi, device=device)
start_vector = start_vector.float().to(device)
num = theta.shape[0]
sin_a, cos_a = torch.sin(torch.zeros(num, device=device)), torch.cos(torch.zeros(num, device=device))
sin_b, cos_b = torch.sin(phi), torch.cos(phi)
sin_g, cos_g = torch.sin(theta), torch.cos(theta)
R_x = torch.zeros(size=(num, 3, 3)).to(device)
R_x[:, 1, 1] = cos_a
R_x[:, 1, 2] = -sin_a
R_x[:, 2, 1] = sin_a
R_x[:, 2, 2] = cos_a
R_x[:, 0, 0] = 1
R_y = torch.zeros(size=(num, 3, 3)).to(device)
R_y[:, 0, 0] = cos_b
R_y[:, 0, 2] = sin_b
R_y[:, 2, 0] = -sin_b
R_y[:, 2, 2] = cos_b
R_y[:, 1, 1] = 1
R_z = torch.zeros(size=(num, 3, 3)).to(device)
R_z[:, 0, 0] = cos_g
R_z[:, 0, 1] = -sin_g
R_z[:, 1, 0] = sin_g
R_z[:, 1, 1] = cos_g
R_z[:, 2, 2] = 1
with torch.no_grad():
R = R_z @ R_y @ R_x
vector = start_vector * step_length
vector = R @ vector
return vector.detach().cpu().numpy()
def downsample3dpool(data, ratio=2, mode='avg', dtype=torch.float32):
data_shape = data.shape
if not torch.is_tensor(data):
data = torch.tensor(data, dtype=dtype, device='cuda')
data = data.view((1, 1, data_shape[0], data_shape[1], data_shape[2])).contiguous()
if mode == 'max':
pool = nn.MaxPool3d(kernel_size=ratio)
elif mode == 'avg':
pool = nn.AvgPool3d(kernel_size=ratio)
    data = pool(data) # type: ignore (to avoid pylance error report)
return data[0, 0].detach().cpu().numpy()
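# A usage sketch for downsample3dpool (hypothetical helper, not part of the pipeline).
# The function hard-codes device='cuda' for non-tensor inputs, so this assumes a CUDA
# device; average-pooling a constant volume preserves its value and halves each dimension.
def _demo_downsample3dpool():
    import numpy as np
    vol = np.ones((4, 4, 4), dtype=np.float32)
    down = downsample3dpool(vol, ratio=2, mode='avg')
    assert down.shape == (2, 2, 2) and np.allclose(down, 1.0)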
def get_color_mapping(samples=1024):
# hsv_color_map = cm.get_cmap('hsv', 256)
twi_color_map = cm.get_cmap('twilight', 256)
twi_shift_color_map = cm.get_cmap('twilight_shifted', 256)
x, y = np.meshgrid(np.linspace(0, 1, samples), np.linspace(0, 1, samples))
# hsv_rgb = np.float32(hsv_color_map(x))
# hsv_bgr = cv2.cvtColor(hsv_rgb, cv2.COLOR_RGBA2BGRA)
# cv2.imwrite('temp/mapping.png', hsv_bgr * 255)
    twi_rgb = np.float32(twi_color_map(x)) # type: ignore (to avoid pylance error report)
    twi_bgr = cv2.cvtColor(twi_rgb, cv2.COLOR_RGBA2BGRA)
    twi_sh_rgb = np.float32(twi_shift_color_map(y)) # type: ignore (to avoid pylance error report)
twi_sh_bgr = cv2.cvtColor(twi_sh_rgb, cv2.COLOR_RGBA2BGRA)
cv2.imwrite('temp/mapping_theta.png', twi_bgr * 255)
cv2.imwrite('temp/mapping_phi.png', twi_sh_bgr * 255)
def batched_index_select(t, dim, inds):
dummy = inds.unsqueeze(2).expand(inds.size(0), inds.size(1), t.size(2))
out = t.gather(dim, dummy) # b x e x f
return out
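# A usage sketch for batched_index_select (hypothetical helper, not part of the pipeline):
# it gathers rows along `dim` with a different index set per batch element, so for
# t of shape [b, e, f] and inds of shape [b, k] the result has shape [b, k, f].
def _demo_batched_index_select():
    import torch
    t = torch.arange(12).reshape(2, 3, 2)          # two batches of three 2-d rows
    inds = torch.tensor([[2, 0], [1, 1]])
    out = batched_index_select(t, 1, inds)
    assert out.shape == (2, 2, 2)
    assert torch.equal(out[0, 0], t[0, 2]) and torch.equal(out[1, 1], t[1, 1])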
def scale2mat(scale_ratio):
mat44 = np.eye(4)
for i in range(3):
mat44[i, i] = scale_ratio
return mat44
def translate2mat(offset):
mat44 = np.eye(4)
mat44[0:3, 3] = offset.T
return mat44
def homo_rot_mat(mat33):
mat44 = np.eye(4)
mat44[0:3, 0:3] = mat33
return mat44
def idx_map_2_rgb(idx_map):
[map_height, map_width] = idx_map.shape[:2]
idx_map_rgb = np.zeros((map_height, map_width, 3))
# R G B for cv2.imwrite
# TODO convert to binary operator later
idx_map_rgb[:, :, 2] = idx_map // (256 * 256)
idx_map_rgb[:, :, 1] = (idx_map - (idx_map_rgb[:, :, 2] * 256 * 256)) // 256
idx_map_rgb[:, :, 0] = (idx_map - (idx_map_rgb[:, :, 2] * 256 * 256 +
idx_map_rgb[:, :, 1] * 256))
return idx_map_rgb
def idx_rgb_recover(idx_bgr):
[map_height, map_width] = idx_bgr.shape[:2]
idx_map = np.zeros((map_height, map_width))
idx_rgb = cv2.cvtColor(idx_bgr, cv2.COLOR_BGR2RGB).astype(np.int64)
idx_map = idx_rgb[:, :, 0] * 256 * 256 + idx_rgb[:, :, 1] * 256 + idx_rgb[:, :, 2] - 1
return idx_map
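# A round-trip sketch for idx_map_2_rgb / idx_rgb_recover (hypothetical helper, not part of
# the pipeline): the index is packed into three 8-bit channels (BGR channel order for
# cv2.imwrite) and, since the recover function assumes indices were stored with a +1 offset
# so that 0 can mean "empty", the round trip returns idx - 1.
def _demo_idx_map_round_trip():
    import numpy as np
    idx_map = np.array([[5, 70000]], dtype=np.int64)
    packed = idx_map_2_rgb(idx_map).astype(np.uint8)
    recovered = idx_rgb_recover(packed)
    assert np.array_equal(recovered, idx_map - 1)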
def cheap_stack(tensors, dim):
if len(tensors) == 1:
return tensors[0].unsqueeze(dim)
else:
return torch.stack(tensors, dim=dim)
|
CT2Hair-main
|
CT2Hair/utils/utils.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
class Conv3dGaussian(nn.Module):
'''
WARNING: the size of the kernel must be an odd number otherwise it'll be shifted with respect to the origin
'''
def __init__(self,
size: int,
sigma=3,
gamma_y=1.0,
gamma_z=1.0,
padding=None,
device='cuda'):
super().__init__()
self.size = size
self.device = device
if padding:
self.padding = padding
else:
self.padding = 0
self.sigma = sigma
self.gamma_y = gamma_y
self.gamma_z = gamma_z
self.kernels = self.init_kernel()
def init_kernel(self):
sigma_x = self.sigma
sigma_y = self.sigma * self.gamma_y
sigma_z = self.sigma * self.gamma_z
c_max, c_min = int(self.size / 2), -int(self.size / 2)
(x, y, z) = torch.meshgrid(torch.arange(c_min, c_max + 1),
torch.arange(c_min, c_max + 1),
torch.arange(c_min, c_max + 1), indexing='ij') # for future warning
x = x.to(self.device)
y = y.to(self.device)
z = z.to(self.device)
kernel = torch.exp(-.5 * (x ** 2 / sigma_x ** 2 + y ** 2 / sigma_y ** 2 + z ** 2 / sigma_z ** 2))
# normalize
kernel = F.normalize(kernel)
return kernel.reshape(1, 1, self.size, self.size, self.size).contiguous()
def forward(self, x):
with torch.no_grad():
x = F.conv3d(x, weight=self.kernels, padding=self.padding)
return x
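# A usage sketch for Conv3dGaussian (hypothetical helper, not part of the pipeline). The
# kernel lives on 'cuda' by default, so this assumes a CUDA device; with padding = size // 2
# the smoothed volume keeps its spatial shape.
def _demo_conv3d_gaussian():
    import torch
    smoother = Conv3dGaussian(size=5, sigma=1.5, padding=2, device='cuda')
    vol = torch.rand(1, 1, 32, 32, 32, device='cuda')   # [batch, channel, D, H, W]
    out = smoother(vol)
    assert out.shape == (1, 1, 32, 32, 32)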
class Conv3dLaplacian():
'''
WARNING: the size of the kernel must be an odd number otherwise it'll be shifted with respect to the origin
'''
def __init__(self,
padding=None,
device='cuda'):
super().__init__()
self.device = device
if padding:
self.padding = padding
else:
self.padding = 0
self.kernels = self.init_kernel()
def init_kernel(self):
kernel = torch.ones((3, 3, 3), device=self.device) * -1
kernel[1, 1, 1] = 26
return kernel.reshape(1, 1, 3, 3, 3)
def forward(self, x):
with torch.no_grad():
x = F.conv3d(x, weight=self.kernels, padding=self.padding)
mask = x[0, 0] > 0
return mask.float()
class Conv3dErosion(nn.Module):
'''
WARNING: the size of the kernel must be an odd number otherwise it'll be shifted with respect to the origin
'''
def __init__(self,
size=3,
padding=None,
device='cuda'):
super().__init__()
self.size = size
self.device = device
if padding:
self.padding = padding
else:
self.padding = 0
self.kernels = self.init_kernel()
def init_kernel(self):
kernel = torch.ones((self.size, self.size, self.size), device=self.device)
return kernel.reshape(1, 1, self.size, self.size, self.size)
def forward(self, x, ration=1):
with torch.no_grad():
x = F.conv3d(x, weight=self.kernels, padding=self.padding)
mask = x[0, 0] >= self.size ** 3 * ration
return mask.float()
class Conv3dGabor():
'''
Applies a 3d convolution over an input signal using Gabor filter banks.
WARNING: the size of the kernel must be an odd number otherwise it'll be shifted with respect to the origin
Refer to https://github.com/m-evdokimov/pytorch-gabor3d
'''
def __init__(self,
in_channels: int,
out_channels: int,
size: int,
sigma=3,
gamma_y=0.5,
gamma_z=0.5,
lambd=6,
psi=0.,
padding=None,
device='cuda'):
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
self.num_filters = in_channels * out_channels
self.size = size
self.device = device
if padding:
self.padding = padding
else:
self.padding = 0
# all additional axes are made for correct broadcast
        # the bounds of the uniform distribution are adjusted manually for every size (currently tuned for 5x5x5 filters)
# for better understanding: https://medium.com/@anuj_shah/through-the-eyes-of-gabor-filter-17d1fdb3ac97
self.sigma = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * sigma
self.gamma_y = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * gamma_y
self.gamma_z = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * gamma_z
self.lambd = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * lambd
self.psi = torch.ones(size=(self.num_filters, 1, 1, 1)).to(self.device) * psi
self.angles = torch.zeros(size=(self.num_filters, 3)).to(self.device)
num_angles_per_axis = round(math.sqrt(self.num_filters))
angle_step = math.pi / num_angles_per_axis
# use polar coordinate, theta round with x, phi round with y
for i_theta in range(num_angles_per_axis):
for j_phi in range(num_angles_per_axis):
rot_angle = torch.tensor([0, j_phi * angle_step, i_theta * angle_step]).to(self.device)
self.angles[i_theta * num_angles_per_axis + j_phi] = rot_angle
self.kernels = self.init_kernel()
def init_kernel(self):
'''
Initialize a gabor kernel with given parameters
Returns torch.Tensor with size (out_channels, in_channels, size, size, size)
'''
lambd = self.lambd
psi = self.psi
sigma_x = self.sigma
sigma_y = self.sigma * self.gamma_y
sigma_z = self.sigma * self.gamma_z
R = self.get_rotation_matrix().reshape(self.num_filters, 3, 3, 1, 1, 1)
c_max, c_min = int(self.size / 2), -int(self.size / 2)
(x, y, z) = torch.meshgrid(torch.arange(c_min, c_max + 1),
torch.arange(c_min, c_max + 1),
torch.arange(c_min, c_max + 1), indexing='ij') # for future warning
x = x.to(self.device)
y = y.to(self.device)
z = z.to(self.device)
# meshgrid for every filter
x = x.unsqueeze(0).repeat(self.num_filters, 1, 1, 1)
y = y.unsqueeze(0).repeat(self.num_filters, 1, 1, 1)
z = z.unsqueeze(0).repeat(self.num_filters, 1, 1, 1)
x_prime = z * R[:, 2, 0] + y * R[:, 2, 1] + x * R[:, 2, 2]
y_prime = z * R[:, 1, 0] + y * R[:, 1, 1] + x * R[:, 1, 2]
z_prime = z * R[:, 0, 0] + y * R[:, 0, 1] + x * R[:, 0, 2]
yz_prime = torch.sqrt(y_prime ** 2 + z_prime ** 2)
# gabor formula
kernel = torch.exp(-.5 * (x_prime ** 2 / sigma_x ** 2 + y_prime ** 2 / sigma_y ** 2 + z_prime ** 2 / sigma_z ** 2)) \
* torch.cos(2 * math.pi * yz_prime / (lambd + 1e-6) + psi)
return kernel.reshape(self.out_channels, self.in_channels, self.size, self.size, self.size).contiguous()
def get_rotation_matrix(self):
'''
Makes 3d rotation matrix.
R_x = torch.Tensor([[cos_a, -sin_a, 0],
[sin_a, cos_a, 0],
[0, 0, 1]],)
R_y = torch.Tensor([[cos_b, 0, sin_b],
[0 , 1, 0],
[-sin_b, 0, cos_b]])
R_z = torch.Tensor([[1, 0, 0],
[0, cos_g, -sin_g],
[0, sin_g, cos_g]])
'''
sin_a, cos_a = torch.sin(self.angles[:, 0]), torch.cos(self.angles[:, 0])
sin_b, cos_b = torch.sin(self.angles[:, 1]), torch.cos(self.angles[:, 1])
sin_g, cos_g = torch.sin(self.angles[:, 2]), torch.cos(self.angles[:, 2])
R_x = torch.zeros(size=(self.num_filters, 3, 3)).to(self.device)
R_x[:, 0, 0] = cos_a
R_x[:, 0, 1] = -sin_a
R_x[:, 1, 0] = sin_a
R_x[:, 1, 1] = cos_a
R_x[:, 2, 2] = 1
R_y = torch.zeros(size=(self.num_filters, 3, 3)).to(self.device)
R_y[:, 0, 0] = cos_b
R_y[:, 0, 2] = sin_b
R_y[:, 2, 0] = -sin_b
R_y[:, 2, 2] = cos_b
R_y[:, 1, 1] = 1
R_z = torch.zeros(size=(self.num_filters, 3, 3)).to(self.device)
R_z[:, 1, 1] = cos_g
R_z[:, 1, 2] = -sin_g
R_z[:, 2, 1] = sin_g
R_z[:, 2, 2] = cos_g
R_z[:, 0, 0] = 1
return R_x @ R_y @ R_z
def forward(self, x):
with torch.no_grad():
x = F.conv3d(x, weight=self.kernels, padding=self.padding)
return x
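# A usage sketch for Conv3dGabor (hypothetical helper, not part of the pipeline). The class
# builds in_channels * out_channels oriented Gabor kernels; note that it is a plain class
# rather than an nn.Module, so forward() is called explicitly. Assumes a CUDA device.
def _demo_conv3d_gabor():
    import torch
    gabor = Conv3dGabor(in_channels=1, out_channels=16, size=5, padding=2, device='cuda')
    vol = torch.rand(1, 1, 32, 32, 32, device='cuda')
    responses = gabor.forward(vol)              # one response volume per orientation
    assert responses.shape == (1, 16, 32, 32, 32)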
|
CT2Hair-main
|
CT2Hair/utils/kernels.py
|
from .chamfer_distance import ChamferDistance
|
CT2Hair-main
|
CT2Hair/libs/chamfer_distance/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
# https://github.com/chrdiller/pyTorchChamferDistance/tree/master
import torch
from torch.utils.cpp_extension import load
cd = load(name="cd",
sources=["CT2Hair/libs/chamfer_distance/chamfer_distance.cpp",
"CT2Hair/libs/chamfer_distance/chamfer_distance.cu"])
class ChamferDistanceFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, xyz1, xyz2):
batchsize, n, _ = xyz1.size()
_, m, _ = xyz2.size()
xyz1 = xyz1.contiguous()
xyz2 = xyz2.contiguous()
dist1 = torch.zeros(batchsize, n)
dist2 = torch.zeros(batchsize, m)
idx1 = torch.zeros(batchsize, n, dtype=torch.int)
idx2 = torch.zeros(batchsize, m, dtype=torch.int)
if not xyz1.is_cuda:
cd.forward(xyz1, xyz2, dist1, dist2, idx1, idx2)
else:
dist1 = dist1.cuda()
dist2 = dist2.cuda()
idx1 = idx1.cuda()
idx2 = idx2.cuda()
cd.forward_cuda(xyz1, xyz2, dist1, dist2, idx1, idx2)
ctx.save_for_backward(xyz1, xyz2, idx1, idx2)
return dist1, dist2
@staticmethod
def backward(ctx, graddist1, graddist2):
xyz1, xyz2, idx1, idx2 = ctx.saved_tensors
graddist1 = graddist1.contiguous()
graddist2 = graddist2.contiguous()
gradxyz1 = torch.zeros(xyz1.size())
gradxyz2 = torch.zeros(xyz2.size())
if not graddist1.is_cuda:
cd.backward(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
else:
gradxyz1 = gradxyz1.cuda()
gradxyz2 = gradxyz2.cuda()
cd.backward_cuda(xyz1, xyz2, gradxyz1, gradxyz2, graddist1, graddist2, idx1, idx2)
return gradxyz1, gradxyz2
class ChamferDistance(torch.nn.Module):
def forward(self, xyz1, xyz2):
return ChamferDistanceFunction.apply(xyz1, xyz2)
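# A usage sketch for ChamferDistance (hypothetical helper, not part of the pipeline): it
# returns per-point distances in both directions, and relies on the JIT-compiled extension
# loaded above, so the .cpp/.cu sources must build on this machine. Assumes a CUDA device;
# combining the two means mirrors how StrandsOptimizerNeuralCubic builds its chamfer loss.
def _demo_chamfer_distance():
    import torch
    cd = ChamferDistance()
    xyz1 = torch.rand(1, 128, 3).cuda()
    xyz2 = torch.rand(1, 96, 3).cuda()
    dist1, dist2 = cd(xyz1, xyz2)               # [1, 128] and [1, 96]
    loss = dist1.mean() + dist2.mean()          # symmetric chamfer loss
    assert dist1.shape == (1, 128) and dist2.shape == (1, 96)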
|
CT2Hair-main
|
CT2Hair/libs/chamfer_distance/chamfer_distance.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import math
import torch
import numpy as np
from tqdm import tqdm
from sklearn.cluster import MeanShift
from scipy.spatial import KDTree
from utils.meshutils import strands_world2tbn, make_closest_uv_barys
from datautils.dataloaders import TbnStrandsBinDataset
from modules.strands_codec import StrandCodec
class NeuralStrands():
def __init__(self, is_resampled=True):
self.is_resampled = is_resampled
self.texture_height = 1024
self.texture_width = 1024
self.feature_channels = 128 # 64 for old, 128 for new
self.num_strds_points = 256 # 100 for old, 256 for new
self.neural_texture = np.zeros((self.texture_width, self.texture_height, self.feature_channels))
self.neural_texture_pca_rgb = np.zeros((self.texture_width, self.texture_height, 3))
self.strds_idx_map = np.zeros((self.texture_width, self.texture_height, 1), dtype=np.uint32)
train_param = {"num_pts": self.num_strds_points, "code_channels": self.feature_channels}
self.ckpt_path = 'CT2Hair/ckpt/neuralstrands_model.pt'
self.model = StrandCodec(do_vae=True, decode_direct_xyz=False, decode_random_verts=False, train_params=train_param, is_train=False).to("cuda")
checkpoint = torch.load(self.ckpt_path)
self.model.load_state_dict(checkpoint['model_state_dict'], strict=False)
self.model.eval()
def prep_strands_data(self, strands, head_mesh, scalp_mesh, scalp_faces_idx):
self.original_strands = strands
self.head_mesh = head_mesh
self.tbn_strands, barys, interp_idxs, face_idxs, self.valid_strds_idxs, \
self.tangents, self.bitangents, self.normals = strands_world2tbn(strands, head_mesh, scalp_mesh, scalp_faces_idx)
self.head_index_map, self.head_bary_map = make_closest_uv_barys(torch.tensor(head_mesh.visual.uv), torch.tensor(head_mesh.faces),
[self.texture_height, self.texture_width]) # type: ignore for avoiding pylance error
# get uv coords for hair strands roots
head_interp_idxs = head_mesh.faces[scalp_faces_idx][face_idxs]
head_uv_coords = head_mesh.visual.uv # num_vertices x 2
v0 = head_uv_coords[head_interp_idxs[:, 0]]
v1 = head_uv_coords[head_interp_idxs[:, 1]]
v2 = head_uv_coords[head_interp_idxs[:, 2]]
b0, b1, b2 = np.split(barys, 3, axis=1)
self.strds_uv_coords = b0 * v0 + b1 * v1 + b2 * v2
# try to save a texture map for demonstration
self.strds_texel_coords = self.strds_uv_coords * [self.texture_height, self.texture_width]
self.strds_texel_coords = np.around(self.strds_texel_coords).astype(np.int32)
tbn_strds_dataset = TbnStrandsBinDataset(self.tbn_strands, is_resampled=self.is_resampled, num_strds_points=self.num_strds_points)
self.tbn_strds_dataloader = tbn_strds_dataset.get_dataloader()
def decode(self, strds_code):
strds_code_dict = {}
strds_code_dict['s_shape'] = strds_code
pred_dict = self.model.decode(strds_code_dict)
pred_points = pred_dict["pred_points"]
return pred_points
def get_neural_representations(self, iter_opt=0, lr=1e-4):
# loss_writer = SummaryWriter('log/neural_rep/')
self.regular_strands = torch.zeros((0, self.num_strds_points, 3)).cuda() # valid strands in TBN space with the unified number of points
self.strds_features = torch.zeros((0, self.feature_channels)).cuda()
hair_loss_l2 = []
hair_loss_dir = []
loop = tqdm(enumerate(self.tbn_strds_dataloader, 0))
for i_data, input_data in loop:
self.model.diff_spline(input_data)
encoded_dict = self.model.encode()
strds_code = encoded_dict['s_shape'].clone().detach()
# setup optimization
strds_code = strds_code.requires_grad_(True)
strds_code_dict = {}
strds_code_dict['s_shape'] = strds_code
code_optimizer = torch.optim.Adam([strds_code_dict['s_shape']], lr=lr)
if iter_opt == 0:
prediction_dict = self.model.decode(strds_code_dict)
loss_l2 = self.model.compute_loss_l2(prediction_dict)
loss_dir = self.model.compute_loss_dir(prediction_dict)
loss = loss_l2 + loss_dir * 1e-4
hair_loss_l2.append(loss_l2.item())
hair_loss_dir.append(loss_dir.item())
else:
for i_iter in range(iter_opt):
self.model.train()
prediction_dict = self.model.decode(strds_code_dict)
loss_l2 = self.model.compute_loss_l2(prediction_dict)
loss_dir = self.model.compute_loss_dir(prediction_dict)
loss = loss_l2 + loss_dir * 0.001
code_optimizer.zero_grad()
loss.backward()
code_optimizer.step()
hair_loss_l2.append(loss_l2.item())
hair_loss_dir.append(loss_dir.item())
loop.set_description("Getting neural representations, batch loss: l2: %f, dir: %f"%(loss_l2.item(), loss_dir.item()))
self.regular_strands = torch.concat((self.regular_strands, self.model.splined_points), dim=0)
self.strds_features = torch.concat((self.strds_features, strds_code_dict['s_shape']), dim=0)
hair_loss_l2 = np.array(hair_loss_l2)
hair_loss_dir = np.array(hair_loss_dir)
print('Average reconstruction errors: l2: %f, dir: %f'%(np.mean(hair_loss_l2), np.mean(hair_loss_dir)))
self.regular_strands = self.regular_strands.reshape(-1, self.num_strds_points, 3).detach().cpu().numpy()
self.strds_features = self.strds_features.reshape(-1, self.feature_channels).detach().cpu().numpy()
self.neural_texture[np.clip(self.texture_height - self.strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.strds_texel_coords[:, 0], 0, self.texture_width - 1), :] = self.strds_features[:, :]
strds_idxs = np.arange(self.strds_features.shape[0]) + 1
self.strds_idx_map[np.clip(self.texture_height - self.strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.strds_texel_coords[:, 0], 0, self.texture_width - 1), 0] = strds_idxs
valid_texel_in_map = np.where(self.strds_idx_map > 0)
self.texel_strds_idxs = self.strds_idx_map[valid_texel_in_map[0], valid_texel_in_map[1], 0] - 1
self.used_strands = self.original_strands[self.valid_strds_idxs][self.texel_strds_idxs]
def denoise_on_strds(self, num_init_clusters=16, num_iters=64, max_cls_thres=2., num_strds_thres=64):
'''
Denoising on regular TBN strands.
        Returns indices of the denoised strands within the valid (TBN-space) strands.
'''
# Try classic K-means on points
valid_texel_in_map = np.where(self.strds_idx_map > 0)
texel_strds_idxs = self.strds_idx_map[valid_texel_in_map[0], valid_texel_in_map[1], 0] - 1
num_texel_strds = texel_strds_idxs.shape[0]
init_centers_idxs = np.arange(num_init_clusters) * (num_texel_strds // num_init_clusters)
init_strds_centriods = self.regular_strands[init_centers_idxs]
num_clusters = num_init_clusters
strds_centriods = init_strds_centriods
adaptive_iter = 0
while(True):
repeated_strds = self.regular_strands[:, None, :, :].repeat(num_clusters, axis=1) # type: ignore for avoiding pylance error
for i_iter in range(num_iters):
pts_dis_centriods = np.sqrt(np.sum((repeated_strds - strds_centriods) ** 2, axis=-1, keepdims=False))
strd_dis_centriods = np.sum(pts_dis_centriods, axis=-1, keepdims=False) # naive sum without weights
strd_clusters = np.argmin(strd_dis_centriods, axis=-1)
# update means
pre_strds_centroids = copy.deepcopy(strds_centriods)
for j_cls in range(num_clusters):
cluster_strds = self.regular_strands[np.where(strd_clusters == j_cls)[0]]
strds_centriods[j_cls] = np.sum(cluster_strds, axis=0, keepdims=False) / cluster_strds.shape[0]
# centroid_dis = np.sum(np.sqrt(np.sum((strds_centriods - pre_strds_centroids) ** 2, axis=-1, keepdims=False)))
# print(centroid_dis)
# recalculate strands cluster use the final center
pts_dis_centriods = np.sqrt(np.sum((repeated_strds - strds_centriods) ** 2, axis=-1, keepdims=False))
strd_dis_centriods = np.sum(pts_dis_centriods, axis=-1, keepdims=False) # naive sum without weights
strd_clusters = np.argmin(strd_dis_centriods, axis=-1)
# calculate the max distances in clusters
strd_clusters_dis = np.min(strd_dis_centriods, axis=-1)
num_currt_clusters = num_clusters
for i_cls in range(num_currt_clusters):
strd_cluster_idx = np.where(strd_clusters == i_cls)[0]
cluster_dis = strd_clusters_dis[strd_cluster_idx]
max_cls_dis = np.max(cluster_dis)
max_strd_idx = np.argmax(cluster_dis)
if max_cls_dis > max_cls_thres:
num_clusters += 1
strds_centriods = np.concatenate((strds_centriods, self.regular_strands[strd_cluster_idx][max_strd_idx:max_strd_idx+1]), axis=0)
if num_clusters == num_currt_clusters:
break
num_iters = num_iters // 2
if num_iters < 1:
break
adaptive_iter += 1
print('Adaptive K-means iter %d...'%(adaptive_iter))
denoised_strds_idxs = [] # for valid tbn_strands
for i_cls in range(num_clusters):
cluster_idxs = np.where(strd_clusters == i_cls)[0].tolist()
if len(cluster_idxs) >= num_strds_thres: # type: ignore for avoiding pylance error
denoised_strds_idxs.extend(cluster_idxs)
# # temp visualization
# cluster_strds = world_strands[cluster_idxs]
# cluster_rgb = strd_clusters_rgb[cluster_idxs]
# save_color_strands('../temp/KMeans/kmeans_strands_cls_%d.cin'%(i_cls), cluster_strds, cluster_rgb)
        print('Final number of clusters: %d, removed noisy strands: %d.'%(num_clusters, self.regular_strands.shape[0] - len(denoised_strds_idxs)))
self.denoised_regular_strds = self.regular_strands[denoised_strds_idxs]
self.denoised_strds_features = self.strds_features[denoised_strds_idxs]
self.denoised_strds_texel_coords = self.strds_texel_coords[denoised_strds_idxs]
self.denoised_neural_texture = np.zeros((self.texture_height, self.texture_width, self.feature_channels))
self.denoised_strds_idx_map = np.zeros((self.texture_height, self.texture_width, 1), dtype=np.uint32)
self.denoised_neural_texture[np.clip(self.texture_height - self.denoised_strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.denoised_strds_texel_coords[:, 0], 0, self.texture_width - 1), :] = self.denoised_strds_features[:, :]
strds_idxs = np.arange(self.denoised_strds_features.shape[0]) + 1
self.denoised_strds_idx_map[np.clip(self.texture_height - self.denoised_strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.denoised_strds_texel_coords[:, 0], 0, self.texture_width - 1), 0] = strds_idxs
return denoised_strds_idxs, strd_clusters
def interpolation_on_strds(self, texel_roots_map, interp_kernel_size=5, interp_neig_pts=3, max_dis_thres=16):
steps_height = self.texture_height // interp_kernel_size
steps_width = self.texture_width // interp_kernel_size
# build kd-tree for points
texel_strds_pts = np.where(self.denoised_strds_idx_map > 0)
texel_strds_pts = np.concatenate((texel_strds_pts[0][:, None], texel_strds_pts[1][:, None]), axis=1)
texel_pts_kdtree = KDTree(texel_strds_pts)
texel_strds_mask = self.denoised_strds_idx_map > 0
texel_roots_mask = texel_roots_map > 0.5
interped_strands = []
interped_strds_face_idxs = []
interped_strds_face_barys = []
self.interp_count = 0
for i_h in range(steps_height):
for i_w in range(steps_width):
cen_h = i_h * interp_kernel_size + (interp_kernel_size // 2)
cen_w = i_w * interp_kernel_size + (interp_kernel_size // 2)
if texel_roots_mask[cen_h, cen_w] == False or texel_strds_mask[cen_h, cen_w] == True:
continue
num_existing_features = np.sum(texel_strds_mask[cen_h - (interp_kernel_size // 2) : cen_h + (interp_kernel_size // 2) + 1,
cen_w - (interp_kernel_size // 2) : cen_w + (interp_kernel_size // 2) + 1].astype(np.int16))
if num_existing_features > 0:
continue
dis, idx = texel_pts_kdtree.query(np.array([cen_h, cen_w]), interp_neig_pts)
dis = np.array(dis)
if np.sum(dis) > max_dis_thres * 3:
continue
dis = 1. / dis
                normalized_dis = dis / np.linalg.norm(dis) # normalize inverse distances to use as interpolation weights
knn_strds_idxs = self.denoised_strds_idx_map[texel_strds_pts[idx, 0], texel_strds_pts[idx, 1], 0] # type: ignore for avoiding pylance error # for valid strands in TBN space
knn_strands = self.regular_strands[knn_strds_idxs]
if interp_neig_pts == 1:
interped_strand = knn_strands
else:
interped_strand = np.average(knn_strands, axis=0, weights=normalized_dis)
interped_strands.append(interped_strand)
interped_strds_face_idxs.append(self.head_index_map[cen_h, cen_w].detach().numpy())
interped_strds_face_barys.append(self.head_bary_map[cen_h, cen_w].detach().numpy())
self.interp_count += 1
interped_strands = np.array(interped_strands)
interped_strds_face_idxs = np.array(interped_strds_face_idxs)
interped_strds_face_barys = np.array(interped_strds_face_barys)
return interped_strands, interped_strds_face_idxs, interped_strds_face_barys
def denoise_neural_texture(self, num_del_cls=4, do_denoise=True):
if do_denoise:
clustering = MeanShift().fit(self.strds_features)
num_cls = np.max(clustering.labels_) + 1
strds_cls = clustering.labels_
cls_amount = np.zeros(num_cls)
for i_cls in range(num_cls):
cls_idx = np.where(strds_cls == i_cls)[0]
cls_amount[i_cls] = cls_idx.shape[0]
argsort_cls_idx = np.argsort(cls_amount)
if num_del_cls == 0:
num_del_cls = num_cls - 1
denoised_cls_idx = argsort_cls_idx[num_del_cls:]
num_denoised_cls = denoised_cls_idx.shape[0]
denoised_strds_idxs = []
for i_cls in range(num_denoised_cls):
strds_idx = np.where(strds_cls == denoised_cls_idx[i_cls])[0].tolist()
denoised_strds_idxs.extend(strds_idx)
else:
denoised_strds_idxs = np.arange(self.strds_features.shape[0]).tolist()
self.denoised_strds_features = self.strds_features[denoised_strds_idxs]
self.denoised_strds_texel_coords = self.strds_texel_coords[denoised_strds_idxs]
self.denoised_neural_texture = np.zeros((self.texture_height, self.texture_width, self.feature_channels))
self.denoised_strds_idx_map = np.zeros((self.texture_height, self.texture_width, 1), dtype=np.uint32)
self.denoised_neural_texture[np.clip(self.texture_height - self.denoised_strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.denoised_strds_texel_coords[:, 0], 0, self.texture_width - 1), :] = self.denoised_strds_features[:, :]
strds_idxs = np.arange(self.denoised_strds_features.shape[0]) + 1
self.denoised_strds_idx_map[np.clip(self.texture_height - self.denoised_strds_texel_coords[:, 1], 0, self.texture_height - 1),
np.clip(self.denoised_strds_texel_coords[:, 0], 0, self.texture_width - 1), 0] = strds_idxs
return denoised_strds_idxs
def interpolation_local_average(self, texel_roots_map, interp_kernel_size=5, interp_neig_radius=16):
steps_height = self.texture_height // interp_kernel_size
steps_width = self.texture_width // interp_kernel_size
texel_strds_mask = self.denoised_strds_idx_map > 0
texel_roots_mask = texel_roots_map > 0.5
self.interp_neural_texture = np.zeros_like(self.denoised_neural_texture)
self.interp_strds_idx_map = np.zeros_like(self.denoised_strds_idx_map, dtype=np.uint32)
self.interp_count = 0
for i_h in range(steps_height):
for i_w in range(steps_width):
cen_h = i_h * interp_kernel_size + (interp_kernel_size // 2)
cen_w = i_w * interp_kernel_size + (interp_kernel_size // 2)
if texel_roots_mask[cen_h, cen_w] == False or texel_strds_mask[cen_h, cen_w] == True:
continue
num_existing_features = np.sum(texel_strds_mask[cen_h - (interp_kernel_size // 2) : cen_h + (interp_kernel_size // 2) + 1,
cen_w - (interp_kernel_size // 2) : cen_w + (interp_kernel_size // 2) + 1].astype(np.int16))
if num_existing_features > 0:
continue
# get the neighbor for centroid using neig_radius
neig_ul = np.clip(np.array([cen_h, cen_w]) - interp_neig_radius, 0, [self.texture_height, self.texture_width])
neig_br = np.clip(np.array([cen_h, cen_w]) + interp_neig_radius, 0, [self.texture_height, self.texture_width])
neig = self.neural_texture[neig_ul[0]:neig_br[0], neig_ul[1]:neig_br[1]]
num_features = np.sum(texel_strds_mask[neig_ul[0]:neig_br[0], neig_ul[1]:neig_br[1]].astype(np.int16))
if num_features == 0:
continue
self.interp_neural_texture[cen_h, cen_w] = np.sum(np.sum(neig, axis=1, keepdims=False), axis=0, keepdims=False) / num_features
self.interp_strds_idx_map[cen_h, cen_w] = self.interp_count + 1
self.interp_count += 1
def interpolation_knn(self, texel_roots_map, interp_kernel_size=5, interp_neig_pts=3, is_bilateral=True, max_dis_thres=16):
steps_height = self.texture_height // interp_kernel_size
steps_width = self.texture_width // interp_kernel_size
# build kd-tree for points
texel_strds_pts = np.where(self.denoised_strds_idx_map > 0)
texel_strds_pts = np.concatenate((texel_strds_pts[0][:, None], texel_strds_pts[1][:, None]), axis=1)
texel_pts_kdtree = KDTree(texel_strds_pts)
texel_strds_mask = self.denoised_strds_idx_map > 0
texel_roots_mask = texel_roots_map > 0.5
self.interp_neural_texture = np.zeros_like(self.denoised_neural_texture)
self.interp_strds_idx_map = np.zeros_like(self.denoised_strds_idx_map, dtype=np.uint32)
self.interp_count = 0
for i_h in range(steps_height):
for i_w in range(steps_width):
cen_h = i_h * interp_kernel_size + (interp_kernel_size // 2)
cen_w = i_w * interp_kernel_size + (interp_kernel_size // 2)
if texel_roots_mask[cen_h, cen_w] == False or texel_strds_mask[cen_h, cen_w] == True:
continue
num_existing_features = np.sum(texel_strds_mask[cen_h - (interp_kernel_size // 2) : cen_h + (interp_kernel_size // 2) + 1,
cen_w - (interp_kernel_size // 2) : cen_w + (interp_kernel_size // 2) + 1].astype(np.int16))
if num_existing_features > 0:
continue
dis, idx = texel_pts_kdtree.query(np.array([cen_h, cen_w]), interp_neig_pts)
dis = np.array(dis)
if np.sum(dis) > max_dis_thres * 3:
continue
dis = 1. / dis
normalized_dis = dis / np.linalg.norm(dis)
knn_strds_codes = self.denoised_neural_texture[texel_strds_pts[idx, 0], texel_strds_pts[idx, 1]] # for valid strands in TBN space
nn_strds_code = knn_strds_codes[0]
similarities = np.abs(np.dot(knn_strds_codes, nn_strds_code.T)
/ (np.linalg.norm(knn_strds_codes, axis=-1) * np.linalg.norm(nn_strds_code, axis=-1)))
if is_bilateral:
interp_weigths = similarities * normalized_dis
interp_weigths = interp_weigths / np.linalg.norm(interp_weigths)
else:
interp_weigths = normalized_dis
if interp_neig_pts == 1:
self.interp_neural_texture[cen_h, cen_w] = knn_strds_codes
else:
self.interp_neural_texture[cen_h, cen_w] = np.average(knn_strds_codes, axis=0, weights=interp_weigths)
self.interp_strds_idx_map[cen_h, cen_w] = self.interp_count + 1
self.interp_count += 1
print('Interpolation done!')
def world_strands_from_tbn(self, strands, face_idxs, face_barys):
if not torch.is_tensor(strands):
strands = torch.tensor(strands, dtype=torch.float32).cuda()
if not torch.is_tensor(face_barys):
face_barys = torch.tensor(face_barys, dtype=torch.float32)
tbn_basis = torch.stack((self.tangents[0], self.bitangents[0], self.normals[0]), dim=2)[face_idxs]
# basis change
orig_points = torch.matmul(tbn_basis.float().cuda(), strands.permute(0, 2, 1)).permute(0, 2, 1)
# scale
orig_points = orig_points * 1000. # m -> mm
        # translate to world space with the barycentric coordinates and triangle vertices
triangled_vertices = torch.tensor(self.head_mesh.vertices[self.head_mesh.faces, :])
roots_triangles = triangled_vertices[face_idxs]
roots_positions = roots_triangles[:, 0] * face_barys[:, 0:1] + \
roots_triangles[:, 1] * face_barys[:, 1:2] + \
roots_triangles[:, 2] * face_barys[:, 2:3]
strds_points = orig_points + roots_positions[:, None, :].cuda()
return strds_points
def world_strands_from_texels(self, neural_texture, strds_idx_map, batch_size=300):
texel_idx = np.where(strds_idx_map > 0)
strds_codes = neural_texture[texel_idx[0], texel_idx[1], :]
num_interped = strds_codes.shape[0]
if not torch.is_tensor(strds_codes):
strds_codes = torch.tensor(strds_codes, dtype=torch.float32).cuda()
pred_points = torch.zeros((num_interped, self.num_strds_points, 3)).cuda()
num_batches = math.ceil(num_interped / batch_size)
loop = tqdm(range(num_batches))
loop.set_description('Decoding strands')
for i_b in loop:
i_start = i_b * batch_size
i_end = min((i_b + 1) * batch_size, num_interped)
pred_points[i_start:i_end] = self.decode(strds_codes[i_start:i_end])
face_idxs = self.head_index_map[texel_idx[0], texel_idx[1]]
tbn_basis = torch.stack((self.tangents[0], self.bitangents[0], self.normals[0]), dim=2)[face_idxs]
# basis change
orig_points = torch.matmul(tbn_basis.float().cuda(), pred_points.permute(0, 2, 1)).permute(0, 2, 1)
# scale
orig_points = orig_points * 1000. # m -> mm
        # translate to world space with the barycentric coordinates and triangle vertices
triangled_vertices = torch.tensor(self.head_mesh.vertices[self.head_mesh.faces, :])
roots_triangles = triangled_vertices[face_idxs]
face_barys = self.head_bary_map[texel_idx[0], texel_idx[1]]
roots_positions = roots_triangles[:, 0] * face_barys[:, 0:1] + \
roots_triangles[:, 1] * face_barys[:, 1:2] + \
roots_triangles[:, 2] * face_barys[:, 2:3]
strds_points = orig_points + roots_positions[:, None, :].cuda()
return strds_points
|
CT2Hair-main
|
CT2Hair/modules/neural_strands.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from scipy.spatial import KDTree
from libs.chamfer_distance import ChamferDistance
from utils.strandsutils import spline_strand, pad_strand, natural_cubic_spline_coeffs, NaturalCubicSpline
def uncompress_strand(strands_pc, strands_sep):
sidx = 0
strands = []
for v in strands_sep:
strands.append(strands_pc[sidx:sidx+v])
sidx += v
return strands
def strands_kdtree_query(input_pc, target_kdtree, target_pc, k=10, radius=None):
if radius:
idx = target_kdtree.query_ball_point(input_pc, radius)
else:
k = np.arange(k) + 1
dis, idx = target_kdtree.query(input_pc, k)
idx = idx.reshape(-1)
idx = np.unique(idx)
knn_target_pc = target_pc[0, idx, :]
knn_target_pc = knn_target_pc[None, :]
return knn_target_pc, dis, idx
def densify_pc(input_pc, density, dup=4):
dup_sep = 256 // dup
dense_pc = []
if torch.is_tensor(input_pc):
for i in range(len(input_pc)):
dense_pc.append(input_pc[i].detach().cpu().numpy().tolist())
num_dup = density[i] // dup_sep
for j in range(int(num_dup)):
dense_pc.append(input_pc[i].detach().cpu().numpy().tolist())
dense_pc = torch.tensor(dense_pc)[None, :].cuda()
else:
print("Densifying point cloud...")
for i in tqdm(range(len(input_pc))):
dense_pc.append(input_pc[i])
num_dup = density[i] // dup_sep
for j in range(int(num_dup)):
dense_pc.append(input_pc[i])
dense_pc = np.array(dense_pc)
print("Number of origina points: %d, number of densified points: %d"%(input_pc.shape[0], dense_pc.shape[0]))
return dense_pc
def compute_len_loss(strands_pc, strands_pc_next, strands_sep, losstype="l2", **kwargs):
strands = []
loss = 0
strands = uncompress_strand(strands_pc, strands_sep)
strands_next = uncompress_strand(strands_pc_next, strands_sep)
for s, s_next in zip(strands, strands_next):
delta1 = s[:-1] - s[1:]
delta2 = s_next[:-1] - s_next[1:]
delta1 = torch.sqrt(torch.sum(delta1**2, dim=-1))
delta2 = torch.sqrt(torch.sum(delta2**2, dim=-1))
delta = delta1 - delta2
if losstype == "l2":
loss += torch.mean(delta**2)
elif losstype == "l1":
loss += torch.mean(torch.abs(delta))
else:
raise NotImplementedError(f"losstype {losstype} is not implemented for compute_len_loss")
loss = loss / len(strands)
return loss
def compute_len2_loss(strands_pc, strands_pc_next, strands_sep, losstype="max", max_ratio=0.1, **kwargs):
strands = uncompress_strand(strands_pc, strands_sep)
strands_next = uncompress_strand(strands_pc_next, strands_sep)
loss = 0
for s_ori, s_next in zip(strands, strands_next):
delta_ori = s_ori[:-2] - s_ori[2:]
delta_next = s_next[:-2] - s_next[2:]
delta_ori = torch.sqrt(torch.sum(delta_ori**2, dim=-1))
delta_next = torch.sqrt(torch.sum(delta_next**2, dim=-1))
if losstype == "l1":
loss += torch.mean(torch.abs(delta_next - delta_ori))
elif losstype == "l2":
loss += torch.mean((delta_next - delta_ori)**2)
elif losstype == "max":
dismat = torch.abs(delta_next - delta_ori)
thres = max_ratio * delta_ori
dismat = F.relu(dismat - thres)
loss += torch.mean(dismat)
else:
raise NotImplementedError(f"losstype {losstype} is not defined for compute_len2_loss")
loss = loss / len(strands)
return loss
def compute_tangential_loss(strands_pc, strands_pc_next, strands_sep, losstype="l2", cycle=False, **kwargs):
loss = 0
strands = uncompress_strand(strands_pc, strands_sep)
strands_next = uncompress_strand(strands_pc_next, strands_sep)
for s, s_next in zip(strands, strands_next):
delta = s_next - s
hair_dirs = s[1:] - s[:-1]
hair_dirs_normalized = F.normalize(hair_dirs, p=2, dim=-1)
dot_root = torch.sum(delta[:-1] * hair_dirs_normalized, dim=-1)
dot_child = torch.sum(delta[1:] * hair_dirs_normalized, dim=-1)
if cycle:
hair_dirs_next = s_next[1:] - s_next[:-1]
hair_dirs_next_normalized = F.normalize(hair_dirs_next, p=2, dim=-1)
dot_root_next = torch.sum(delta[:-1] * hair_dirs_next_normalized, dim=-1)
dot_child_next = torch.sum(delta[1:] * hair_dirs_next_normalized, dim=-1)
if losstype == "l2":
loss += torch.mean((dot_root - dot_child)**2)
if cycle:
loss += torch.mean((dot_root_next - dot_child_next)**2)
elif losstype == "l1":
loss += torch.mean(torch.abs(dot_root - dot_child))
if cycle:
loss += torch.mean(torch.abs(dot_root_next - dot_child_next))
else:
raise NotImplementedError(f"losstype {losstype} is not implemented for compute_tangential_loss")
loss = loss / len(strands)
return loss
class StrandsOptimizerNeuralCubic():
def __init__(self, input_strands, target_pc, target_density, num_strd_pts=128, num_strands_per_opt=1600):
self.target_pc = target_pc
self.target_density = target_density * 255
self.target_pc = densify_pc(self.target_pc, self.target_density)
print('Building KDTree for target point cloud...')
self.target_kdtree = KDTree(self.target_pc)
self.num_strands_per_opt = num_strands_per_opt
num_origi_strands = input_strands.shape[0]
filtered_strands = self.filtering_strands(input_strands)
self.num_strands = len(filtered_strands)
        print('Number of original strands: %d, filtered strands: %d'%(num_origi_strands, self.num_strands))
print('Pre-padding strands for neural cubic interpolation...')
self.num_strd_pts = num_strd_pts
self.input_strands = []
self.times = []
self.input_num_strds_pts = []
for i_strd in tqdm(range(self.num_strands)):
strand = filtered_strands[i_strd][:, :3].astype(np.float32)
if strand.shape[0] > self.num_strd_pts:
strand = spline_strand(strand, num_strand_points=self.num_strd_pts)
self.input_num_strds_pts.append(strand.shape[0])
strand, time = pad_strand(strand, num_strand_points=self.num_strd_pts)
self.input_strands.append(strand)
self.times.append(time)
self.input_strands = np.array(self.input_strands)
self.times = np.array(self.times)
if not torch.is_tensor(self.target_pc):
self.target_pc = torch.tensor(self.target_pc).float().cuda()[None, :]
self.epoch = 80
self.eps = 1e-1
self.chamfer_dis = ChamferDistance().cuda()
self.learning_rate = 1e-1
self.forward_weight = 1.0
self.backward_weight = 1.0
self.length_weight = 100.0
self.tangent_weight = 100.0
def filtering_strands(self, input_strands, eps=3.0):
print("Filtering strands outliers...")
num_strands = input_strands.shape[0]
filtered_strands = []
for i_strd in tqdm(range(num_strands)):
strand = np.array(input_strands[i_strd]).astype(np.float32)[:, :3]
_, dis, _ = strands_kdtree_query(strand, self.target_kdtree, self.target_pc[None, :])
if (np.mean(dis) < eps):
filtered_strands.append(strand)
return filtered_strands
def diff_spline(self, strands, times):
coeffs = natural_cubic_spline_coeffs(times, strands)
spline = NaturalCubicSpline(coeffs)
time_pts = torch.arange(self.num_strd_pts).to(strands.device) / (self.num_strd_pts - 1)
time_pts = time_pts.repeat(strands.shape[0], 1)
splined_points = spline.evaluate(time_pts)
return splined_points
def optimization(self, regularization=True):
num_opts = self.num_strands // self.num_strands_per_opt + 1
ori_splined_points = []
opted_splined_points = []
opted_strands_pc = []
strands_seps = np.ones(self.num_strands).astype(np.int16) * self.num_strd_pts
print('Start optimization...')
for i_opt in tqdm(range(num_opts)):
i_start = i_opt * self.num_strands_per_opt
i_end = min((i_opt + 1) * self.num_strands_per_opt, self.num_strands)
num_strds_this_opt = i_end - i_start
strands = torch.tensor(self.input_strands[i_start:i_end]).cuda()
times = torch.tensor(self.times[i_start:i_end]).cuda()
strands_noroots = strands[:, 1:, :].clone().detach()
strands_roots = strands[:, 0:1, :].clone().detach()
strands_noroots = strands_noroots.requires_grad_(True)
strands_roots = strands_roots.requires_grad_(True)
self.optimizer = torch.optim.Adam([strands_noroots], lr=self.learning_rate)
# before optimization
strands = torch.concat((strands_roots, strands_noroots), dim=1)
splined_points = self.diff_spline(strands, times)
ori_splined_points.extend(splined_points.view(-1, 3).detach().cpu().numpy().tolist())
constraint_pc = splined_points.view(-1, 3).clone().detach()
strands_sep = np.ones(num_strds_this_opt).astype(np.int16) * self.num_strd_pts
for i_epoch in range(self.epoch):
strands = torch.concat((strands_roots, strands_noroots), dim=1)
splined_points = self.diff_spline(strands, times)
input_pc = splined_points.view(1, -1, 3)
input_pc_numpy = input_pc.clone().detach().cpu().numpy()[0]
knn_target_pc, _, knn_idx = strands_kdtree_query(input_pc_numpy, self.target_kdtree, self.target_pc)
dist1, dist2 = self.chamfer_dis(input_pc, knn_target_pc)
chamfer_loss = self.forward_weight * torch.mean(dist1) + self.backward_weight * torch.mean(dist2)
if regularization:
len_loss = compute_len_loss(constraint_pc, input_pc[0], strands_sep)
len2_loss = compute_len2_loss(constraint_pc, input_pc[0], strands_sep)
tangent_loss = compute_tangential_loss(constraint_pc, input_pc[0], strands_sep)
loss = chamfer_loss + \
self.length_weight * len_loss + self.length_weight * len2_loss + \
self.tangent_weight * tangent_loss
else:
loss = chamfer_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
print('\topts: %d/%d, epochs: %d/%d, number of points: %d, current loss: %f'%(i_opt, num_opts, i_epoch, self.epoch, input_pc.shape[1], loss.data), end='\r')
# after optimization
strands = torch.concat((strands_roots, strands_noroots), dim=1)
splined_points = self.diff_spline(strands, times)
opted_splined_points.extend(splined_points.view(-1, 3).detach().cpu().numpy().tolist())
# original control points
num_strds_pts = self.input_num_strds_pts[i_start:i_end]
strands_pc = np.zeros((np.sum(num_strds_pts, keepdims=False), 3))
sidx = 0
for i_strd in range(num_strds_this_opt):
strands_pc[sidx:sidx + num_strds_pts[i_strd]] = strands.detach().cpu().numpy()[i_strd, :num_strds_pts[i_strd]]
sidx += num_strds_pts[i_strd]
opted_strands_pc.extend(strands_pc.tolist())
return np.array(ori_splined_points), np.array(opted_splined_points), strands_seps, np.array(opted_strands_pc), self.input_num_strds_pts
|
CT2Hair-main
|
CT2Hair/modules/strands_opt.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import csv
import torch
import torch.nn as nn
from modules.networks import *
from utils.utils import batched_index_select
from utils.strandsutils import natural_cubic_spline_coeffs, NaturalCubicSpline
class StrandEncoder1dCNN(nn.Module):
def __init__(self, do_vae, num_pts=100, out_channels=64):
super(StrandEncoder1dCNN, self).__init__()
self.do_vae = do_vae
self.num_pts = num_pts
self.training = False
        out_channels *= 2 # note that since we do vae the features are doubled so that we can get mean and variance
in_channels = 0
in_channels += 3 # 3 for the xyz
in_channels += 3 # 3 for the direction
if num_pts == 100:
self.cnn_encoder = torch.nn.Sequential(
Conv1dWN(in_channels, 32, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU(),
Conv1dWN(32, 32, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU(),
Conv1dWN(32, 64, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU(),
Conv1dWN(64, 128, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU(),
Conv1dWN(128, 128, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU())
            # after running the cnn we still end up with some elements per strand, and we want to pool over them with something better than an avg pool
self.final_cnn_aggregator=torch.nn.Sequential(
LinearWN(128*3, 128), torch.nn.SiLU(),
LinearWN(128, out_channels))
elif num_pts == 256:
self.cnn_encoder = torch.nn.Sequential(
Conv1dWN(in_channels, 32, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(32, 32, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(32, 64, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(64, 128, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(128, 256, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(256, 256, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU())
self.final_cnn_aggregator = torch.nn.Sequential(
LinearWN(256 * int(self.num_pts / 64), 256), torch.nn.SiLU(),
LinearWN(256, out_channels))
else:
print("Number of points %d is not supported."%(num_pts))
exit(0)
self.pred_mean = torch.nn.Sequential(
torch.nn.SiLU(),
LinearWN(out_channels, out_channels), torch.nn.SiLU(),
LinearWN(out_channels, int(out_channels / 2))
)
self.pred_logstd = torch.nn.Sequential(
torch.nn.SiLU(),
LinearWN(out_channels, out_channels), torch.nn.SiLU(),
LinearWN(out_channels, int(out_channels / 2))
)
self.apply(lambda x: swish_init(x, False))
swish_init(self.pred_mean, True)
swish_init(self.pred_logstd, True)
self.pe = LearnedPE(in_channels=1, num_encoding_functions=5, logsampling=True)
def forward(self, points):
points = points.view(-1, self.num_pts, 3) # nr_strands, points_per_strand, xyz
original_points = points
points = points.permute(0, 2, 1) ## nr_strands, xyz, 100
nr_strands = points.shape[0]
        ### also get the direction from each point to the next
cur_points = original_points[:, 0:self.num_pts - 1, : ]
next_points = original_points[:, 1:self.num_pts, :]
direction = next_points - cur_points
# pad_zero=torch.zeros(nr_strands,1,3).cuda()
# direction = torch.cat([direction,pad_zero],1) # make the direction nr_strands, 100, 3
last_dir = direction[:, self.num_pts - 2:self.num_pts - 1, :]
direction = torch.cat([direction, last_dir],1) # make the direction nr_strands, 100, 3
direction = direction.permute(0, 2, 1) # nr_strands, xyz, 100
# direction=direction * 100 # (we multiply by the nr of segments so that the value is not so small and is closer to our desired range)
per_point_features = torch.cat([points, direction] ,1)
strand_features = self.cnn_encoder(per_point_features) # nr_strands, 128(nr_features), 3(elements per string)
strand_features = strand_features.view(nr_strands, -1).contiguous()
strand_features = self.final_cnn_aggregator(strand_features) # outputs nr_strands x 128
s = self.pred_mean(strand_features)
s_mean_and_logstd_dict = {}
if self.do_vae:
s_mean = s
# print("s_mean has mean std ", s_mean.mean(), s_mean.std())
s_logstd = 0.1 * self.pred_logstd(strand_features)
s_mean_and_logstd_dict["mean"] = s_mean
s_mean_and_logstd_dict["logstd"] = s_logstd
# print("s_logstd has mean std ", s_logstd.mean(), s_logstd.std())
if self.training:
std = torch.exp(s_logstd)
eps = torch.empty_like(std).normal_()
s = s + std * eps
# print("strand std min max", std.min(), " ", std.max())
return s, s_mean_and_logstd_dict
class StrandGeneratorSiren(nn.Module):
    # a siren network which predicts various direction vectors along the strand, similar to FakeODE.
    # the idea is that siren works well when a periodic signal needs to be predicted, and the strand can be seen as periodic direction vectors being repeated at certain points along the strand
# the idea is similar to modulation siren https://arxiv.org/pdf/2104.03960.pdf
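    # concretely: each layer maps the (progressively re-concatenated) strand code z
    # to a modulation vector, and the SIREN branch, driven by the normalized position
    # t along the strand, is gated by (1 - modulation) before feeding the next layer;
    # the decoded per-vertex directions are then cumulatively summed (or xyz is
    # decoded directly) to form the strand.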
def __init__(self, in_channels, modulation_hidden_dim, siren_hidden_dim, scale_init, decode_direct_xyz, decode_random_verts, num_pts=100):
super(StrandGeneratorSiren, self).__init__()
self.num_pts = num_pts
self.decode_direct_xyz = decode_direct_xyz
self.decode_random_verts = decode_random_verts
self.swish = torch.nn.SiLU()
self.tanh = torch.nn.Tanh()
self.nr_layers = 3
cur_nr_channels = in_channels
# cur_nr_channels+=1 #+1 for the time t
self.modulation_layers = torch.nn.ModuleList([])
for i in range(self.nr_layers):
self.modulation_layers.append(LinearWN(cur_nr_channels, modulation_hidden_dim))
cur_nr_channels = modulation_hidden_dim+in_channels # at the end we concatenate the input z
self.decode_dir = LinearWN(siren_hidden_dim, 3)
self.apply(lambda x: swish_init(x, False))
swish_init(self.decode_dir, True)
self.siren_layers = torch.nn.ModuleList([])
self.siren_layers.append(BlockSiren(in_channels=1, out_channels=siren_hidden_dim, is_first_layer=True, scale_init=scale_init))
for i in range(self.nr_layers-1):
self.siren_layers.append(BlockSiren(in_channels=siren_hidden_dim, out_channels=siren_hidden_dim))
def forward(self, strand_features):
        nr_verts_to_create = self.num_pts - 1 # we create only num_pts - 1 because the first one is just the origin
if self.decode_random_verts:
nr_verts_to_create = 1
nr_strands = strand_features.shape[0]
strand_features = strand_features.view(nr_strands, 1, -1).repeat(1, nr_verts_to_create, 1) # nr_strands x 100 x nr_channels
# sampling t
t = torch.linspace(0, 1, self.num_pts).cuda()
t = t.view(self.num_pts, 1)
if self.decode_direct_xyz:
t = t[1:self.num_pts, :] # we don't create the root because it's already given
        else: # we are decoding directions, therefore the first direction should be computed but the last direction should be ignored because the tip doesn't need a direction
t = t[0:self.num_pts - 1, :]
        # repeat strand features to be nr_strands x nr_vert x nr_channels
# concat for each vertex the positional encoding
t = t.view(1, self.num_pts - 1, -1).repeat(nr_strands, 1, 1) #nrstrands, nr_verts, nr_channels
# strand_features_with_time=torch.cat([strand_features,t],2)
point_indices = None
if self.decode_random_verts:
# choose a random t for each strand
            # we can create only up to the very last vertex except the tip; we need to be able to sample the next vertex so as to get a direction vector
probability = torch.ones([nr_strands, self.num_pts - 2], dtype=torch.float32, device=torch.device("cuda"))
point_indices = torch.multinomial(probability, nr_verts_to_create, replacement=False) # size of the chunk size we selected
# add also the next vertex on the strand so that we can compute directions
point_indices = torch.cat([point_indices, point_indices + 1], 1)
t = batched_index_select(t, 1, point_indices)
# decode xyz
h_siren = t
        # z_scaling=0.001  # this has to be initialized so that the h_modulation is something like 0.2. If it's lower,
        # then no gradient will flow into Z and the network will not be optimized. You might need to do one run and check the gradients of the network with model.summary to see that the gradients don't vanish
z_scaling = 1.0
z = strand_features
z_initial = z * z_scaling
z = z * z_scaling
with_checkpointing = True
for i in range(self.nr_layers):
h_modulation = self.swish( self.modulation_layers[i](z))
s = self.siren_layers[i](h_siren)
h_siren = (1 - h_modulation) * s
# for next iter
z = torch.cat([z_initial, h_modulation], 2)
if self.decode_direct_xyz:
points_dir = self.decode_dir(h_siren) * 0.1
if self.decode_random_verts:
pred_strands = points_dir
else:
start_positions = torch.zeros(nr_strands, 1, 3).cuda()
pred_strands = torch.cat([start_positions, points_dir], 1)
else:
# divide by the nr of points on the strand otherwise the direction will have norm=1 and then when integrated you end up with a gigantic strand that has 100 units
hair_dir = self.decode_dir(h_siren) * 0.01
pred_strands = torch.cumsum(hair_dir, dim=1) # nr_strands, nr_verts-1, 3
# we know that the first vertex is 0,0,0 so we just concatenate that one
start_positions = torch.zeros(nr_strands, 1, 3).cuda()
pred_strands = torch.cat([start_positions, pred_strands], 1)
return pred_strands, point_indices
'''
Uses only one Z tensor and predicts the strands using SIREN. There is no normalization apart from moving the strands to the origin.
It is used to predict and regress only strand data, with no scalp.
'''
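# Rough data flow (see forward below): strands are splined to a fixed number of
# vertices, encoded by StrandEncoder1dCNN into a latent code (optionally a VAE
# sample), and decoded back to points by StrandGeneratorSiren; training combines
# an L2 point loss, a cosine direction loss, and a free-bits KL term.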
class StrandCodec(nn.Module):
def __init__(self, do_vae, decode_direct_xyz, decode_random_verts, train_params, is_train=True):
super(StrandCodec, self).__init__()
self.do_vae = do_vae
self.decode_direct_xyz = decode_direct_xyz
self.decode_random_verts = decode_random_verts
self.nr_verts_per_strand = train_params['num_pts']
if self.decode_random_verts:
self.nr_verts_per_strand = 2
self.cosine_embed_loss = nn.CosineEmbeddingLoss()
if is_train:
self.weight_pts = train_params['weight_pts']
self.weight_dir = train_params['weight_dir'] # 0.001
self.weight_kl= train_params['weight_kl'] # 0.0001
# encode
self.strand_encoder_for_shape = StrandEncoder1dCNN(self.do_vae, self.nr_verts_per_strand, train_params['code_channels']) # predicts 64 vector of shape, gets the inputs after they were normalized
# decoder
self.strand_generator = StrandGeneratorSiren(in_channels=train_params['code_channels'], modulation_hidden_dim=32, siren_hidden_dim=32,
scale_init=5, decode_direct_xyz=decode_direct_xyz, decode_random_verts=decode_random_verts,
                                                     num_pts=self.nr_verts_per_strand) # generate a whole strand from a 64-dimensional shape vector
def save(self, root_folder, experiment_name, iter_nr):
models_path = os.path.join(root_folder, experiment_name, str(iter_nr), "models")
if not os.path.exists(models_path):
os.makedirs(models_path, exist_ok=True)
torch.save(self.state_dict(), os.path.join(models_path, "strand_codec.pt"))
# write csv with some info
out_info_path = os.path.join(models_path, "strand_codec_info.csv")
        with open(out_info_path, "w") as f: # we put the writer in a block so that it closes the file automatically afterwards
w = csv.writer(f)
w.writerow(["do_vae", self.do_vae])
w.writerow(["decode_direct_xyz", self.decode_direct_xyz])
w.writerow(["decode_random_verts", self.decode_random_verts])
def diff_spline(self, hair_data_dict):
points = hair_data_dict["points"].cuda()
times = hair_data_dict["times"].cuda()
coeffs = natural_cubic_spline_coeffs(times, points)
spline = NaturalCubicSpline(coeffs)
time_pts = torch.arange(self.nr_verts_per_strand).cuda() / (self.nr_verts_per_strand - 1)
time_pts = time_pts.repeat(points.shape[0], 1)
self.splined_points = spline.evaluate(time_pts)
self.splined_points = self.splined_points.detach()
def encode(self):
s_shape, s_shape_mean_and_logstd_dict = self.strand_encoder_for_shape(self.splined_points)
encoded_dict = {}
encoded_dict["s_shape"] = s_shape
encoded_dict["s_shape_mean_and_logstd_dict"] = s_shape_mean_and_logstd_dict
return encoded_dict
def decode(self, encoded_dict):
s_shape = encoded_dict["s_shape"]
# generate the strand points
pred_points, point_indices = self.strand_generator(s_shape)
prediction_dict = {}
prediction_dict["pred_points"] = pred_points
prediction_dict["point_indices"] = point_indices
return prediction_dict
def compute_loss(self, prediction_dict, encoded_dict):
loss_l2 = self.compute_loss_l2(prediction_dict)
loss_dir = self.compute_loss_dir(prediction_dict)
loss_kl = self.compute_loss_kl(encoded_dict)
loss = self.weight_pts * loss_l2 + self.weight_dir * loss_dir + self.weight_kl * loss_kl
# loss = loss_l2 + loss_dir * 0.01 + loss_kl * 0.001
# loss = loss_l2 + loss_dir * 0.1 + loss_kl * 0.001 # this gives the lowest kl and the autodecoding looks nice
loss_dict = {}
loss_dict['loss'] = loss
loss_dict['loss_l2'] = loss_l2
loss_dict['loss_dir'] = loss_dir
loss_dict['loss_kl'] = loss_kl
return loss_dict
def compute_loss_l2(self, prediction_dict):
pred_points = prediction_dict["pred_points"].view(-1, self.nr_verts_per_strand, 3)
loss_l2 = ((pred_points - self.splined_points) ** 2).mean()
return loss_l2
def compute_loss_dir(self, prediction_dict):
pred_points = prediction_dict["pred_points"].view(-1, self.nr_verts_per_strand, 3)
        # also get a loss for the direction, so we need to compute the direction vectors
cur_points = pred_points[:, 0:self.nr_verts_per_strand - 1, : ]
next_points = pred_points[:, 1:self.nr_verts_per_strand, :]
pred_deltas = next_points - cur_points
pred_deltas = pred_deltas.view(-1, 3)
gt_cur_points = self.splined_points[:, 0:self.nr_verts_per_strand - 1, : ]
gt_next_points = self.splined_points[:, 1:self.nr_verts_per_strand, :]
gt_dir = gt_next_points - gt_cur_points
gt_dir = gt_dir.view(-1, 3)
loss_dir = self.cosine_embed_loss(pred_deltas, gt_dir, torch.ones(gt_dir.shape[0]).cuda())
return loss_dir
def compute_loss_kl(self, encoded_dict):
#get input data
kl_loss = 0
if self.do_vae:
#kl loss
s_shape_mean_and_logstd_dict = encoded_dict["s_shape_mean_and_logstd_dict"]
kl_shape = self.kl( s_shape_mean_and_logstd_dict["mean"], s_shape_mean_and_logstd_dict["logstd"])
            # free bits from IAF-VAE, so that once the KL drops below a certain value we stop reducing it
kl_shape = torch.clamp(kl_shape, min=0.25)
kl_loss = kl_shape.mean()
return kl_loss
def kl(self, mean, logstd):
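        # Closed-form KL divergence between N(mean, exp(logstd)^2) and the
        # standard normal N(0, 1), evaluated elementwise:
        #   KL = 0.5 * (mean^2 + sigma^2 - 1) - log(sigma)
        #      = -0.5 - logstd + 0.5 * mean^2 + 0.5 * exp(2 * logstd)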
kl = (-0.5 - logstd + 0.5 * mean ** 2 + 0.5 * torch.exp(2 * logstd))
return kl
def forward(self, hair_data_dict):
self.diff_spline(hair_data_dict)
encoded_dict=self.encode()
prediction_dict=self.decode(encoded_dict)
return prediction_dict, encoded_dict
|
CT2Hair-main
|
CT2Hair/modules/strands_codec.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
import inspect
import numpy as np
from typing import Dict, List, Optional, Tuple
from torch.nn.utils.weight_norm import WeightNorm, remove_weight_norm
class LearnedPE(torch.nn.Module):
def __init__(self, in_channels, num_encoding_functions, logsampling):
super(LearnedPE, self).__init__()
self.in_channels = in_channels
self.num_encoding_functions = num_encoding_functions
self.logsampling = logsampling
out_channels = in_channels * self.num_encoding_functions * 2
self.conv = torch.nn.Linear(in_channels, int(out_channels / 2), bias=True).cuda() #in the case we set the weight ourselves
self.init_weights()
def init_weights(self):
with torch.no_grad():
num_input = self.in_channels
self.conv.weight.uniform_(-np.sqrt(6 / num_input) , np.sqrt(6 / num_input))
# print("weight is ", self.conv.weight.shape) #60x3
            # we make it the same as the positional encoding, which multiplies each coordinate by these linspaced frequencies
lin = 2.0 ** torch.linspace(0.0,
self.num_encoding_functions - 1,
self.num_encoding_functions,
dtype=torch.float32,
device=torch.device("cuda"))
lin_size = lin.shape[0]
weight = torch.zeros([self.in_channels, self.num_encoding_functions * self.in_channels], dtype=torch.float32, device=torch.device("cuda"))
for i in range(self.in_channels):
weight[i : i + 1, i * lin_size : i * lin_size + lin_size ] = lin
weight = weight.t().contiguous()
self.conv.weight = torch.nn.Parameter(weight)
def forward(self, x):
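        # Project x with the (learned, PE-initialized) linear layer and return
        # [sin(Wx), cos(Wx), x], i.e. a learnable positional encoding that also
        # passes the raw input through.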
x_proj = self.conv(x)
return torch.cat([torch.sin(x_proj), torch.cos(x_proj), x], -1).contiguous()
class BlockSiren(torch.nn.Module):
def __init__(self, in_channels, out_channels, bias=True, activ=torch.sin, is_first_layer=False, scale_init=90):
super(BlockSiren, self).__init__()
self.bias = bias
self.activ = activ
self.is_first_layer = is_first_layer
self.scale_init = scale_init
self.conv = torch.nn.Linear(in_channels, out_channels, bias=self.bias).cuda()
# if self.activ==torch.sin or self.activ==None:
with torch.no_grad():
if self.activ == torch.sin:
num_input = in_channels
# See supplement Sec. 1.5 for discussion of factor 30
if self.is_first_layer:
self.conv.weight.uniform_(-np.sqrt(6 / num_input) , np.sqrt(6 / num_input))
else:
self.conv.weight.uniform_(-np.sqrt(6 / num_input) , np.sqrt(6 / num_input))
elif self.activ == None:
# self.conv.weight.normal_(0, 0.1)
swish_init(self.conv, True)
def forward(self, x):
x = self.conv(x)
if self.activ == torch.sin:
if self.is_first_layer:
x = self.scale_init * x
else:
x = x * 1
x = self.activ(x)
elif self.activ is not None:
x = self.activ(x)
return x
def check_args_shadowing(name, method, arg_names):
spec = inspect.getfullargspec(method)
init_args = {*spec.args, *spec.kwonlyargs}
for arg_name in arg_names:
if arg_name in init_args:
raise TypeError(f"{name} attempted to shadow a wrapped argument: {arg_name}")
# For backward compatibility.
class TensorMappingHook(object):
def __init__(
self,
name_mapping: List[Tuple[str, str]],
expected_shape: Optional[Dict[str, List[int]]] = None,
):
"""This hook is expected to be used with "_register_load_state_dict_pre_hook" to
modify names and tensor shapes in the loaded state dictionary.
Args:
name_mapping: list of string tuples
A list of tuples containing expected names from the state dict and names expected
by the module.
expected_shape: dict
A mapping from parameter names to expected tensor shapes.
"""
self.name_mapping = name_mapping
self.expected_shape = expected_shape if expected_shape is not None else {}
def __call__(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
for old_name, new_name in self.name_mapping:
if prefix + old_name in state_dict:
tensor = state_dict.pop(prefix + old_name)
if new_name in self.expected_shape:
tensor = tensor.view(*self.expected_shape[new_name])
state_dict[prefix + new_name] = tensor
def weight_norm_wrapper(cls, name="weight", g_dim=0, v_dim=0):
"""Wraps a torch.nn.Module class to support weight normalization. The wrapped class
is compatible with the fuse/unfuse syntax and is able to load state dict from previous
implementations.
Args:
name: str
Name of the parameter to apply weight normalization.
g_dim: int
Learnable dimension of the magnitude tensor. Set to None or -1 for single scalar magnitude.
Default values for Linear and Conv2d layers are 0s and for ConvTranspose2d layers are 1s.
v_dim: int
            Dimension of the direction tensor over which the norm is calculated independently. Set to
None or -1 for calculating norm over the entire direction tensor (weight tensor). Default
values for most of the WN layers are None to preserve the existing behavior.
"""
class Wrap(cls):
def __init__(self, *args, name=name, g_dim=g_dim, v_dim=v_dim, **kwargs):
# Check if the extra arguments are overwriting arguments for the wrapped class
check_args_shadowing(
"weight_norm_wrapper", super().__init__, ["name", "g_dim", "v_dim"]
)
super().__init__(*args, **kwargs)
# Sanitize v_dim since we are hacking the built-in utility to support
# a non-standard WeightNorm implementation.
if v_dim is None:
v_dim = -1
self.weight_norm_args = {"name": name, "g_dim": g_dim, "v_dim": v_dim}
self.is_fused = True
self.unfuse()
# For backward compatibility.
self._register_load_state_dict_pre_hook(
TensorMappingHook(
[(name, name + "_v"), ("g", name + "_g")],
{name + "_g": getattr(self, name + "_g").shape},
)
)
def fuse(self):
if self.is_fused:
return
# Check if the module is frozen.
param_name = self.weight_norm_args["name"] + "_g"
if hasattr(self, param_name) and param_name not in self._parameters:
raise ValueError("Trying to fuse frozen module.")
remove_weight_norm(self, self.weight_norm_args["name"])
self.is_fused = True
def unfuse(self):
if not self.is_fused:
return
# Check if the module is frozen.
param_name = self.weight_norm_args["name"]
if hasattr(self, param_name) and param_name not in self._parameters:
raise ValueError("Trying to unfuse frozen module.")
wn = WeightNorm.apply(
self, self.weight_norm_args["name"], self.weight_norm_args["g_dim"]
)
            # Overwrite the dim property to support mismatched norm calculation for the v and g tensors.
if wn.dim != self.weight_norm_args["v_dim"]:
wn.dim = self.weight_norm_args["v_dim"]
# Adjust the norm values.
weight = getattr(self, self.weight_norm_args["name"] + "_v")
norm = getattr(self, self.weight_norm_args["name"] + "_g")
norm.data[:] = torch.norm_except_dim(weight, 2, wn.dim)
self.is_fused = False
def __deepcopy__(self, memo):
# Delete derived tensor to avoid deepcopy error.
if not self.is_fused:
delattr(self, self.weight_norm_args["name"])
# Deepcopy.
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
if not self.is_fused:
setattr(result, self.weight_norm_args["name"], None)
setattr(self, self.weight_norm_args["name"], None)
return result
return Wrap
def is_weight_norm_wrapped(module):
for hook in module._forward_pre_hooks.values():
if isinstance(hook, WeightNorm):
return True
return False
LinearWN = weight_norm_wrapper(torch.nn.Linear, g_dim=0, v_dim=None)
Conv1dWN = weight_norm_wrapper(torch.nn.Conv1d, g_dim=0, v_dim=None)
def swish_init(m, is_linear, scale=1):
# normally relu has a gain of sqrt(2)
# however swish has a gain of sqrt(2.952) as per the paper https://arxiv.org/pdf/1805.08266.pdf
gain=np.sqrt(2.952)
# gain=np.sqrt(2)
if is_linear:
gain = 1
# gain = np.sqrt(2.0 / (1.0 + 1 ** 2))
if isinstance(m, torch.nn.Conv1d):
ksize = m.kernel_size[0]
n1 = m.in_channels
n2 = m.out_channels
# std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
std = gain / np.sqrt(n1 * ksize)
elif isinstance(m, torch.nn.Conv2d):
ksize = m.kernel_size[0] * m.kernel_size[1]
n1 = m.in_channels
n2 = m.out_channels
# std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
std = gain / np.sqrt(n1 * ksize)
elif isinstance(m, torch.nn.Linear):
n1 = m.in_features
n2 = m.out_features
std = gain / np.sqrt((n1))
else:
return
is_wnw = is_weight_norm_wrapped(m)
if is_wnw:
m.fuse()
m.weight.data.normal_(0, std*scale)
if m.bias is not None:
m.bias.data.zero_()
if is_wnw:
m.unfuse()
|
CT2Hair-main
|
CT2Hair/modules/networks.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import argparse
from pyhocon import ConfigFactory
from termcolor import colored
sys.path.append('CT2Hair/')
from interp import neural_interp
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='conf/data/Curly.conf')
parser.add_argument('--datapath', type=str, default='data')
parser.add_argument('--gpu', type=str, default='0')
k_args = parser.parse_args()
if __name__ == '__main__':
conf_text = open(k_args.conf).read()
conf_text = conf_text.replace('DATAPATH', k_args.datapath)
conf = ConfigFactory.parse_string(conf_text)
conf_text = conf_text.replace('CASENAME', conf['output']['name'])
conf = ConfigFactory.parse_string(conf_text)
strands_out_dir = os.path.join(conf['output']['dir'], conf['output']['name'])
strands_name = conf['output']['name'] \
+ '_guide.bin'
conf['strands']['guide_strds'] = os.path.join(strands_out_dir, strands_name)
if not os.path.exists(os.path.join(strands_out_dir, strands_name)):
print(colored("Guide hair strands not found, please run scripts/gen_guide_strands.py first.", "red"))
exit(1)
print(colored("Running interpolation:", "yellow"))
neural_interp(conf)
|
CT2Hair-main
|
scripts/interpolation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import argparse
from pyhocon import ConfigFactory
from termcolor import colored
sys.path.append('CT2Hair/')
from optim import strands_opt
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='conf/data/Curly.conf')
parser.add_argument('--datapath', type=str, default='data')
parser.add_argument('--gpu', type=str, default='0')
k_args = parser.parse_args()
if __name__ == '__main__':
conf_text = open(k_args.conf).read()
conf = ConfigFactory.parse_string(conf_text)
conf_text = conf_text.replace('DATAPATH', k_args.datapath)
conf_text = conf_text.replace('CASENAME', conf['output']['name'])
conf = ConfigFactory.parse_string(conf_text)
strands_out_dir = os.path.join(conf['output']['dir'], conf['output']['name'])
pc_name = conf['output']['name'] \
+ '_oriens' \
+ '_wd_' + str(conf['guide']['wignet_dis']) \
+ '.ply'
strands_name = conf['output']['name'] \
+ '_merged.bin'
conf['pc']['pc_path'] = os.path.join(strands_out_dir, pc_name)
conf['strands']['interp_strds'] = os.path.join(strands_out_dir, strands_name)
if not os.path.exists(os.path.join(strands_out_dir, strands_name)):
print(colored("Interpolated hair strands not found, please run scripts/interpolation.py first.", "red"))
exit(1)
print(colored("Running optimization:", "yellow"))
strands_opt(conf)
|
CT2Hair-main
|
scripts/optimization.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import platform
import argparse
from pyhocon import ConfigFactory
from termcolor import colored
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='conf/data/Curly.conf')
parser.add_argument('--datapath', type=str, default='data')
parser.add_argument('--gpu', type=str, default='0')
k_args = parser.parse_args()
if __name__ == '__main__':
conf_text = open(k_args.conf).read()
conf = ConfigFactory.parse_string(conf_text)
conf_text = conf_text.replace('DATAPATH', k_args.datapath)
conf_text = conf_text.replace('CASENAME', conf['output']['name'])
conf = ConfigFactory.parse_string(conf_text)
if not os.path.exists(str(conf['vdb']['path'])):
print("Input VDB file does not exists.")
exit(1)
oriens_out_dir = os.path.join(conf['output']['dir'], conf['output']['name'])
os.makedirs(oriens_out_dir, exist_ok=True)
oriens_out_name = conf['output']['name'] \
+ '_oriens' \
+ '_wd_' + str(conf['guide']['wignet_dis']) \
+ '.ply'
if platform.system() == 'Linux':
exe_path = 'CT2Hair/GuideHairStrands/GuideHairStrands'
elif platform.system() == 'Windows':
exe_path = 'CT2Hair\\GuideHairStrands\\Release\\GuideHairStrands.exe'
cmd = '{} 0 '.format(exe_path) \
+ str(conf['vdb']['path']) + ' ' \
+ str(conf['vdb']['voxel_size']) + ' ' \
+ conf['head']['roots_path'] + ' ' \
+ str(conf['guide']['wignet_dis']) + ' ' \
+ os.path.join(oriens_out_dir, oriens_out_name)
print(colored("Running command:", "yellow"), colored(cmd, "green"))
os.system(cmd)
|
CT2Hair-main
|
scripts/est_orientations.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import platform
import argparse
from shutil import copyfile
from pyhocon import ConfigFactory
from termcolor import colored
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='conf/data/Curly.conf')
parser.add_argument('--datapath', type=str, default='data')
parser.add_argument('--gpu', type=str, default='0')
k_args = parser.parse_args()
if __name__ == '__main__':
conf_text = open(k_args.conf).read()
conf = ConfigFactory.parse_string(conf_text)
conf_text = conf_text.replace('DATAPATH', k_args.datapath)
conf_text = conf_text.replace('CASENAME', conf['output']['name'])
conf = ConfigFactory.parse_string(conf_text)
strands_out_dir = os.path.join(conf['output']['dir'], conf['output']['name'])
oriens_name = conf['output']['name'] \
+ '_oriens' \
+ '_wd_' + str(conf['guide']['wignet_dis']) \
+ '.ply'
if not os.path.exists(os.path.join(strands_out_dir, oriens_name)):
print(colored("Orientations not found, please run scripts/est_orientations.py first.", "red"))
exit(1)
strands_out_name = conf['output']['name'] \
+ '_oriens' \
+ '_wd_' + str(conf['guide']['wignet_dis']) \
+ '_nr_' + str(conf['guide']['nei_radius']) \
+ '_se_' + str(conf['guide']['sigma_e']) \
+ '_so_' + str(conf['guide']['sigma_o']) \
+ '_ts_' + str(conf['guide']['thres_shift']) \
+ '_nrs_' + str(conf['guide']['nei_radius_seg']) \
+ '_to_' + str(conf['guide']['thres_orient']) \
+ '_tl_' + str(conf['guide']['thres_length']) \
+ '_tnrd_' + str(conf['guide']['thres_nn_roots_dis']) \
+ '_tlg_' + str(conf['guide']['thres_length_grow']) \
+ '.ply'
strands_out_name_simp = conf['output']['name'] + '_guide.bin'
if platform.system() == 'Linux':
exe_path = 'CT2Hair/GuideHairStrands/GuideHairStrands'
elif platform.system() == 'Windows':
exe_path = 'CT2Hair\\GuideHairStrands\\Release\\GuideHairStrands.exe'
cmd = '{} 1 '.format(exe_path) \
+ os.path.join(strands_out_dir, oriens_name) + ' ' \
+ os.path.join(strands_out_dir, strands_out_name) + ' ' \
+ str(conf['guide']['nei_radius']) + ' ' \
+ str(conf['guide']['sigma_e']) + ' ' \
+ str(conf['guide']['sigma_o']) + ' ' \
+ str(conf['guide']['thres_shift']) + ' ' \
+ str(conf['guide']['use_cuda']) + ' ' \
+ k_args.gpu + ' ' \
+ str(conf['guide']['nei_radius_seg']) + ' ' \
+ str(conf['guide']['thres_orient']) + ' ' \
+ str(conf['guide']['thres_length']) + ' ' \
+ conf['head']['roots_path'] + ' ' \
+ str(conf['guide']['thres_nn_roots_dis']) + ' ' \
+ str(conf['guide']['thres_length_grow'])
print(colored("Running command:", "yellow"), colored(cmd, "green"))
os.system(cmd)
copyfile(os.path.join(strands_out_dir, strands_out_name).replace('ply', 'bin'),
os.path.join(strands_out_dir, strands_out_name_simp))
|
CT2Hair-main
|
scripts/gen_guide_strands.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to decode and produce an answer.
Answer decoder for explicit visual coreference resolution model in visual
dialog using neural module networks, called CorefNMN.
Support two kinds of decoders:
(a) Generative: A recurrent neural network based language model that can
generate novel answers. At test time, all candidate answers are scored
based on loglikelihood of the language model.
(b) Discriminative: A discriminative classifier to identify the correct
answer from a pool of candidate options at train time.
At test time, options are ranked based on class probabilities.
Author: Satwik Kottur
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.nn import dropout
from tensorflow.contrib.layers import fully_connected as FC
from util import support
class AnswerDecoder:
def __init__(self, inputs, output_pool, params):
"""Initialize answer decoder.
Args:
      inputs: Dict of input placeholders (answers, options, encoder states).
      output_pool: Outputs from previous subcomponents of the model.
      params: Dict of model configuration parameters.
"""
self.params = params
# keep track of inputs and outputs
used_inputs = []
outputs = {}
# alias for criterion
criterion = tf.nn.sparse_softmax_cross_entropy_with_logits
# begin decoding
with tf.variable_scope(self.params['embed_scope'], reuse=True):
size = [params['text_vocab_size'], params['text_embed_size']]
embed_mat = tf.get_variable('embed_mat')
output = tf.nn.embedding_lookup(embed_mat, inputs['ans_in'])
used_inputs.extend(['ans_in', 'ans_out', 'ans_len'])
# recurrent neural network cell
cell = tf.contrib.rnn.BasicLSTMCell(params['lstm_size'])
# decide the source based on train / evaluation
source = output_pool if params['train_mode'] else inputs
# concatenate question to both
concat_list = []
# add program context vector
concat_list.append(source['context'])
# adding last hidden size
concat_list.append(source['enc_dec_h'][-1])
used_inputs.extend(['enc_dec_h', 'enc_dec_c'])
if not params['train_mode']:
used_inputs.append('context')
#--------------------------------------------------------------------------
# stack all the vectors
stack_vec = tf.concat(concat_list, axis=1)
stack_vec = FC(stack_vec, params['lstm_size'])
# construct encoder decoder H
enc_dec_h = [source['enc_dec_h'][ii]
for ii in range(params['num_layers'] - 1)]
enc_dec_h.append(stack_vec)
# construct encoder decoder C
enc_dec_c = [source['enc_dec_c'][ii] for ii in range(params['num_layers'])]
init_state = [tf.contrib.rnn.LSTMStateTuple(cc, hh)
for cc, hh in zip(enc_dec_c, enc_dec_h)]
if params['decoder'] == 'gen':
for ii in range(params['num_layers']):
# dynamic rnn
output, _ = tf.nn.dynamic_rnn(cell, output,
sequence_length=inputs['ans_len'],
initial_state=init_state[ii],
dtype=tf.float32, scope='layer_%d' % ii)
# predict the output words
output = FC(output, params['text_vocab_size'], activation_fn=None)
# create a mask
mask = tf.not_equal(inputs['ans_out'], params['pad_id'])
mask = tf.cast(mask, tf.float32)
# multiply by mask for variable length sequences
answer_loss = criterion(logits=output, labels=inputs['ans_out'])
masked_answer_loss = tf.multiply(answer_loss, mask)
token_likelihood = tf.reduce_sum(masked_answer_loss)
num_tokens = tf.maximum(tf.reduce_sum(mask), 1)
outputs['ans_token_loss'] = token_likelihood/num_tokens
outputs['per_sample_loss'] = tf.reduce_sum(masked_answer_loss, 1)
# extract the probabilities
out_softmax = tf.nn.log_softmax(output)
out_softmax_flat = tf.reshape(out_softmax, [-1, params['text_vocab_size']])
orig_shape = tf.shape(inputs['ans_out'])
ans_out_flat = tf.reshape(inputs['ans_out'], [-1])
inds = [tf.range(0, tf.shape(ans_out_flat)[0]), ans_out_flat]
inds = tf.stack(inds, axis=1)
prob_tokens = tf.gather_nd(out_softmax_flat, inds)
prob_tokens = tf.reshape(prob_tokens, orig_shape)
prob_tokens = tf.multiply(prob_tokens, mask)
# compute the loglikelihood
outputs['llh'] = tf.reduce_sum(prob_tokens, 1)
# compute mean instead of sum
num_tokens = tf.maximum(tf.reduce_sum(mask, 1), 1)
outputs['llh_mean'] = outputs['llh'] / num_tokens
elif params['decoder'] == 'disc':
# embed options and encode via lstm
with tf.variable_scope(self.params['embed_scope'], reuse=True):
size = [params['text_vocab_size'], params['text_embed_size']]
embed_mat = tf.get_variable('embed_mat')
opt_embed = tf.nn.embedding_lookup(embed_mat, inputs['opt'])
# transpose and merging batch and option dimension
opt_embed = tf.transpose(opt_embed, [0, 2, 1, 3])
shape = opt_embed.shape.as_list()
opt_embed = tf.reshape(opt_embed, [-1, shape[2], shape[3]])
opt_len = tf.reshape(inputs['opt_len'], [-1])
output, _ = tf.nn.dynamic_rnn(cell, opt_embed,
sequence_length=opt_len,
dtype=tf.float32, scope='opt_layer_0')
for ii in range(1, params['num_layers']):
# dynamic rnn
output, _ = tf.nn.dynamic_rnn(cell, output, \
sequence_length=opt_len,
dtype=tf.float32,
scope='opt_layer_%d' % ii)
opt_encode = support.last_relevant(output, opt_len)
# reshape back
opt_encode = tf.reshape(opt_encode, [-1, shape[1], params['lstm_size']])
# score the options with context vector
score_vec = tf.matmul(opt_encode, tf.expand_dims(stack_vec, -1))
score_vec = tf.squeeze(score_vec, -1)
scores = criterion(logits=score_vec, labels=inputs['gt_ind'])
outputs['ans_token_loss'] = tf.reduce_mean(scores)
outputs['scores'] = score_vec
used_inputs.extend(['opt', 'opt_len', 'gt_ind'])
# setup the inputs and outputs
self.outputs = outputs
self.inputs = {ii: inputs[ii] for ii in used_inputs}
#----------------------------------------------------------------------------
# setters and getters
def get_outputs(self):
return self.outputs
def get_inputs(self):
return self.inputs
#----------------------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, output_pool=None):
"""Produces the feed dict for this subcomponent.
Args:
batch: Batch returned from dataloader
output_pool: Outputs from previous subcomponents, mostly when evaluating
Returns:
feed_dict: Returns the feed dictionary
"""
feed_dict = {}
for key in ['ans_in', 'ans_out', 'ans_len']:
feed_dict[self.inputs[key]] = batch[key]
# if not in train mode, use output_pool
if not self.params['train_mode']:
for key in ['context', 'enc_dec_h', 'enc_dec_c']:
feed_dict[self.inputs[key]] = output_pool[key]
# additional feeds for discriminative decoder
if self.params['decoder'] == 'disc':
feed_dict[self.inputs['opt']] = np.stack(batch['opt_out'], -1)
feed_dict[self.inputs['opt_len']] = np.stack(batch['opt_len'], -1)
feed_dict[self.inputs['gt_ind']] = batch['gt_ind']
return feed_dict
#----------------------------------------------------------------------------
|
corefnmn-main
|
models_vd/decoder.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
TODO(satwik): Add a reasonable description to the file.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import fc_layer as fc, conv_relu_layer as conv_relu
from tensorflow.contrib.layers import fully_connected as FC
from tensorflow.contrib.rnn import LSTMStateTuple
from util import support
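# Helpers for constrained program decoding: a token s is valid in state X iff
# all linear constraints X @ W[:, s, :] - b[s] >= 0 hold, and emitting token s
# updates the state as X <- X + P[s] (selected via embedding_lookup on the
# one-hot token). Gradients are stopped since these are hard constraints.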
def _get_valid_tokens(X, W, b):
constraints_validity = tf.greater_equal(tf.tensordot(X, W, axes=1) - b, 0)
token_validity = tf.reduce_all(constraints_validity, axis=2)
return tf.stop_gradient(token_validity)
#------------------------------------------------------------------------------
def _update_decoding_state(X, s, P):
X = X + tf.nn.embedding_lookup(P, s) # X = X + S P
return tf.stop_gradient(X)
#------------------------------------------------------------------------------
def _get_lstm_cell(num_layers, lstm_dim, apply_dropout):
if isinstance(lstm_dim, list): # Different layers have different dimensions
if not len(lstm_dim) == num_layers:
raise ValueError('the length of lstm_dim must be equal to num_layers')
cell_list = []
for l in range(num_layers):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim[l], state_is_tuple=True)
# Dropout is only applied on output of the 1st to second-last layer.
# The output of the last layer has no dropout
if apply_dropout and l < num_layers-1:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list.append(dropout_cell)
else: # All layers has the same dimension.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim, state_is_tuple=True)
# Dropout is only applied on output of the 1st to second-last layer.
# The output of the last layer has no dropout
if apply_dropout:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list = [dropout_cell] * (num_layers-1) + [lstm_cell]
cell = tf.contrib.rnn.MultiRNNCell(cell_list, state_is_tuple=True)
return cell
#------------------------------------------------------------------------------
# Sequence to Sequence with attention
class AttSeq2Seq:
def __init__(self, holders, use_gt_prog, assembler, params, reuse=None):
self.T_decoder = params['max_dec_len']
self.encoder_num_vocab = params['text_vocab_size']
self.encoder_embed_dim = params['text_embed_size']
self.decoder_num_vocab = params['prog_vocab_size']
self.decoder_embed_dim = params['prog_embed_size']
self.lstm_dim = params['lstm_size']
self.num_layers = params['num_layers']
self.EOS_token = assembler.EOS_idx
self.embed_scope = params['embed_scope']
self.temperature = params.get('temperature', 1)
# if word vectors need to be used or lstm outputs for attention
params['use_word_vectors'] = 'wv-att' in params['model']
params['generator'] = params.get('generator', 'ques')
self.params = params
# decoding transition variables
self.P = to_T(assembler.P, dtype=tf.int32)
self.W = to_T(assembler.W, dtype=tf.int32)
self.b = to_T(assembler.b, dtype=tf.int32)
self.encoder_dropout = params['enc_dropout']
self.decoder_dropout = params['dec_dropout']
self.decoder_sampling = params['dec_sampling']
# detect fake inputs
if 'fake' in holders: scope = 'enc_dec_cap'
else: scope = 'enc_dec'
with tf.variable_scope(scope, reuse=reuse):
# build a special encoder, if needed
if 'fake' not in holders and params['generator'] == 'mem':
self._build_memory_encoder(holders)
else:
# build a normal encoder
self._build_encoder(holders['ques'], holders['ques_len'])
self._build_decoder(use_gt_prog, holders['prog_gt'])
# build a usual encoder, ques based
def _build_encoder(self, input_seq_batch, seq_len_batch, scope='encoder',
reuse=None):
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.encoder_dropout
with tf.variable_scope(scope, reuse=reuse):
#T = tf.shape(input_seq_batch)[0]
T = input_seq_batch.shape.as_list()[0]
N = tf.shape(input_seq_batch)[1]
self.T_encoder = T
self.N = N
with tf.variable_scope(self.embed_scope, reuse=True):
embedding_mat = tf.get_variable('embed_mat', [self.encoder_num_vocab,
self.encoder_embed_dim])
# text_seq has shape [T, N] and embedded_seq has shape [T, N, D].
embedded_seq = tf.nn.embedding_lookup(embedding_mat, input_seq_batch)
self.embedded_input_seq = embedded_seq
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
# encoder_outputs has shape [T, N, lstm_dim]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell, embedded_seq,
seq_len_batch,
dtype=tf.float32,
time_major=True,
scope='lstm')
self.encoder_outputs = encoder_outputs
self.encoder_states = encoder_states
# check if wv flag is set
if self.params['use_word_vectors']:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(embedded_seq, [-1, self.encoder_embed_dim]),
output_dim=lstm_dim)
else:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(encoder_outputs, [-1, lstm_dim]), output_dim=lstm_dim)
encoder_h_transformed = tf.reshape(encoder_h_transformed,
to_T([T, N, lstm_dim]))
self.encoder_h_transformed = encoder_h_transformed
# seq_not_finished has shape [T, N, 1], where seq_not_finished[t, n]
# is 1 iff sequence n is not finished at time t, and 0 otherwise
seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
seq_len_batch[:, tf.newaxis])
seq_not_finished = tf.cast(seq_not_finished, tf.float32)
self.seq_not_finished = seq_not_finished
# build a special encoder
def _build_memory_encoder(self, holders, scope='encoder', reuse=None):
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.encoder_dropout
input_seq = holders['ques']
input_seq_len = holders['ques_len']
# facts/memories
hist_size = holders['hist'].shape.as_list()
hist_flat = tf.reshape(holders['hist'], [-1, hist_size[2]])
hist_len_flat = tf.reshape(holders['hist_len'], [-1])
with tf.variable_scope(scope, reuse=reuse):
T = input_seq.shape.as_list()[0]
N = tf.shape(input_seq)[1]
self.T_encoder = T
self.N = N
with tf.variable_scope(self.embed_scope, reuse=True):
embed_mat = tf.get_variable('embed_mat', [self.encoder_num_vocab,
self.encoder_embed_dim])
# text_seq has shape [T, N] and embedded_seq has shape [T, N, D].
embed_seq = tf.nn.embedding_lookup(embed_mat, input_seq)
self.embedded_input_seq = embed_seq
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
# encoder_outputs has shape [T, N, lstm_dim]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell,
embed_seq, input_seq_len, dtype=tf.float32,
time_major=True, scope='lstm')
self.encoder_outputs = encoder_outputs
# batch first encoder outputs
batch_encoder_outputs = tf.transpose(encoder_outputs, [1, 0, 2])
ques_enc = support.last_relevant(batch_encoder_outputs, input_seq_len)
size = [-1, self.params['num_rounds'], self.params['lstm_size']]
ques_enc = tf.reshape(ques_enc, size)
self.encoder_states = encoder_states
# similarly encode history
hist_out = tf.nn.embedding_lookup(embed_mat, hist_flat)
# rnns to encode history
cell = tf.contrib.rnn.BasicLSTMCell(self.params['lstm_size'])
for ii in range(0, self.params['num_layers']):
# dynamic rnn
hist_out, states = tf.nn.dynamic_rnn(cell, hist_out, \
sequence_length=hist_len_flat, \
dtype=tf.float32, scope='hist_layer_%d' % ii)
# get output from last timestep
hist_enc = support.last_relevant(hist_out, hist_len_flat)
# reshape back
size = [-1, hist_size[1], self.params['lstm_size']]
hist_enc = tf.reshape(hist_enc, size)
# concatenate, mlp and tanh
num_r = self.params['num_rounds']
# dot product
attention = tf.matmul(ques_enc, hist_enc, transpose_b=True)
      # a very large negative number (used to suppress attention to future rounds)
u_mat = np.full((num_r, num_r), -1e10)
suppress_mat = tf.constant(np.triu(u_mat, 1), dtype=tf.float32)
l_mat = np.full((num_r, num_r), 1)
mask_mat = tf.constant(np.tril(l_mat), dtype=tf.float32)
attention = tf.nn.softmax(tf.multiply(attention, mask_mat)
+ suppress_mat)
self.att_history = attention
att_hist_enc = tf.matmul(attention, hist_enc)
# flatten out
size = [-1, self.params['lstm_size']]
att_hist_flat = tf.reshape(att_hist_enc, size)
# concatenate attended history and encoder state for the last layer
concat = tf.concat([encoder_states[-1].h, att_hist_flat], -1)
new_state = LSTMStateTuple(encoder_states[-1].c,
FC(concat, self.params['lstm_size']))
# make it mutable
encoder_states = list(encoder_states)
encoder_states[-1] = new_state
self.encoder_states = tuple(encoder_states)
# check if wv flag is set
if self.params['use_word_vectors']:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
          tf.reshape(embed_seq, [-1, self.encoder_embed_dim]),
output_dim=lstm_dim)
else:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(encoder_outputs, [-1, lstm_dim]), output_dim=lstm_dim)
encoder_h_transformed = tf.reshape(encoder_h_transformed,
to_T([T, N, lstm_dim]))
self.encoder_h_transformed = encoder_h_transformed
# seq_not_finished is a shape [T, N, 1] tensor, where seq_not_finished[t, n]
# is 1 iff sequence n is not finished at time t, and 0 otherwise
seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
input_seq_len[:, tf.newaxis])
seq_not_finished = tf.cast(seq_not_finished, tf.float32)
self.seq_not_finished = seq_not_finished
def _build_decoder(self, use_gt_layout, gt_layout_batch, scope='decoder',
reuse=None):
# The main difference from before is that the decoders now takes another
# input (the attention) when computing the next step
# T_max is the maximum length of decoded sequence (including <eos>)
#
# This function is for decoding only. It performs greedy search or sampling.
# the first input is <go> (its embedding vector) and the subsequent inputs
# are the outputs from previous time step
# num_vocab does not include <go>
#
    # use_gt_layout is None or a bool tensor, and gt_layout_batch is a tensor
# with shape [T_max, N].
# If use_gt_layout is not None, then when use_gt_layout is true, predict
# exactly the tokens in gt_layout_batch, regardless of actual probability.
# Otherwise, if sampling is True, sample from the token probability
# If sampling is False, do greedy decoding (beam size 1)
N = self.N
encoder_states = self.encoder_states
T_max = self.T_decoder
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.decoder_dropout
EOS_token = self.EOS_token
sampling = self.decoder_sampling
with tf.variable_scope(scope, reuse=reuse):
embedding_mat = tf.get_variable('embedding_mat',
[self.decoder_num_vocab, self.decoder_embed_dim])
# we use a separate embedding for <go>, as it is only used in the
# beginning of the sequence
go_embedding = tf.get_variable('go_embedding', [1, self.decoder_embed_dim])
with tf.variable_scope('att_prediction'):
v = tf.get_variable('v', [lstm_dim])
W_a = tf.get_variable('weights', [lstm_dim, lstm_dim],
initializer=tf.contrib.layers.xavier_initializer())
b_a = tf.get_variable('biases', lstm_dim,
initializer=tf.constant_initializer(0.))
# The parameters to predict the next token
with tf.variable_scope('token_prediction'):
W_y = tf.get_variable('weights', [lstm_dim*2, self.decoder_num_vocab],
initializer=tf.contrib.layers.xavier_initializer())
b_y = tf.get_variable('biases', self.decoder_num_vocab,
initializer=tf.constant_initializer(0.))
# Attentional decoding
# Loop function is called at time t BEFORE the cell execution at time t,
# and its next_input is used as the input at time t (not t+1)
# c.f. https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn
mask_range = tf.reshape(tf.range(self.decoder_num_vocab, dtype=tf.int32),
[1, -1])
if use_gt_layout is not None:
gt_layout_mult = tf.cast(use_gt_layout, tf.int32)
pred_layout_mult = 1 - gt_layout_mult
def loop_fn(time, cell_output, cell_state, loop_state):
if cell_output is None: # time == 0
next_cell_state = encoder_states
next_input = tf.tile(go_embedding, to_T([N, 1]))
else: # time > 0
next_cell_state = cell_state
# compute the attention map over the input sequence
          # att_raw has shape [T, N, 1]
att_raw = tf.reduce_sum(
tf.tanh(tf.nn.xw_plus_b(cell_output, W_a, b_a) +
self.encoder_h_transformed) * v,
axis=2, keep_dims=True)
# softmax along the first dimension (T) over not finished examples
# att has shape [T, N, 1]
att = tf.nn.softmax(att_raw, dim=0)*self.seq_not_finished
att = att / tf.reduce_sum(att + 1e-10, axis=0, keep_dims=True)
          # d2 has shape [N, lstm_dim]
d2 = tf.reduce_sum(att*self.encoder_outputs, axis=0)
# token_scores has shape [N, num_vocab]
token_scores = tf.nn.xw_plus_b(
tf.concat([cell_output, d2], axis=1),
W_y, b_y)
decoding_state = loop_state[2]
# token_validity has shape [N, num_vocab]
token_validity = _get_valid_tokens(decoding_state, self.W, self.b)
token_validity.set_shape([None, self.decoder_num_vocab])
if use_gt_layout is not None:
# when there's ground-truth layout, do not re-normalize prob
# and treat all tokens as valid
token_validity = tf.logical_or(token_validity, use_gt_layout)
validity_mult = tf.cast(token_validity, tf.float32)
# predict the next token (behavior depending on parameters)
if sampling:
token_scores_valid = token_scores - (1-validity_mult) * 50
# TODO:debug
sampled_token = tf.cast(tf.reshape(
tf.multinomial(token_scores_valid/self.temperature, 1), [-1]), tf.int32)
# make sure that the predictions are ALWAYS valid
# (it can be invalid with very small prob)
# If not, just fall back to min cases
# pred_mask has shape [N, num_vocab]
sampled_mask = tf.equal(mask_range, tf.reshape(sampled_token, [-1, 1]))
is_sampled_valid = tf.reduce_any(
tf.logical_and(sampled_mask, token_validity),
axis=1)
# Fall back to max score (no sampling)
min_score = tf.reduce_min(token_scores)
token_scores_valid = tf.where(token_validity, token_scores,
tf.ones_like(token_scores)*(min_score-1))
max_score_token = tf.cast(tf.argmax(token_scores_valid, 1), tf.int32)
predicted_token = tf.where(is_sampled_valid, sampled_token, max_score_token)
else:
min_score = tf.reduce_min(token_scores)
token_scores_valid = tf.where(token_validity, token_scores,
tf.ones_like(token_scores)*(min_score-1))
# predicted_token has shape [N]
predicted_token = tf.cast(tf.argmax(token_scores_valid, 1), tf.int32)
if use_gt_layout is not None:
predicted_token = (gt_layout_batch[time-1] * gt_layout_mult
+ predicted_token * pred_layout_mult)
# a robust version of softmax
# all_token_probs has shape [N, num_vocab]
all_token_probs = tf.nn.softmax(token_scores) * validity_mult
# tf.check_numerics(all_token_probs, 'NaN/Inf before div')
all_token_probs = all_token_probs / tf.reduce_sum(all_token_probs + 1e-10, axis=1, keep_dims=True)
# tf.check_numerics(all_token_probs, 'NaN/Inf after div')
# mask has shape [N, num_vocab]
mask = tf.equal(mask_range, tf.reshape(predicted_token, [-1, 1]))
# token_prob has shape [N], the probability of the predicted token
# although token_prob is not needed for predicting the next token
# it is needed in output (for policy gradient training)
# [N, num_vocab]
token_prob = tf.reduce_sum(all_token_probs * tf.cast(mask, tf.float32), axis=1)
# tf.assert_positive(token_prob)
neg_entropy = tf.reduce_sum(
all_token_probs * tf.log(all_token_probs + (1-validity_mult) + 1e-10),
axis=1)
# update states
updated_decoding_state = _update_decoding_state(
decoding_state, predicted_token, self.P)
# the prediction is from the cell output of the last step
# timestep (t-1), feed it as input into timestep t
next_input = tf.nn.embedding_lookup(embedding_mat, predicted_token)
elements_finished = tf.greater_equal(time, T_max)
# loop_state is a 5-tuple, representing
# 1) the predicted_tokens
# 2) the prob of predicted_tokens
# 3) the decoding state (used for validity)
# 4) the negative entropy of policy (accumulated across timesteps)
# 5) the attention
if loop_state is None: # time == 0
# Write the predicted token into the output
predicted_token_array = tf.TensorArray(dtype=tf.int32, size=T_max,
infer_shape=False)
token_prob_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
init_decoding_state = tf.tile(to_T([[0, 0, T_max]], dtype=tf.int32), to_T([N, 1]))
att_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
next_loop_state = (predicted_token_array,
token_prob_array,
init_decoding_state,
tf.zeros(to_T([N]), dtype=tf.float32),
att_array)
else: # time > 0
t_write = time-1
next_loop_state = (loop_state[0].write(t_write, predicted_token),
loop_state[1].write(t_write, token_prob),
updated_decoding_state,
loop_state[3] + neg_entropy,
loop_state[4].write(t_write, att))
return (elements_finished, next_input, next_cell_state, cell_output,
next_loop_state)
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
_, _, decodes_ta = tf.nn.raw_rnn(cell, loop_fn, scope='lstm')
predicted_tokens = decodes_ta[0].stack()
token_probs = decodes_ta[1].stack()
neg_entropy = decodes_ta[3]
# atts has shape [T_decoder, T_encoder, N, 1]
atts = decodes_ta[4].stack()
# static dimension recast
atts = tf.reshape(atts, [self.T_decoder, self.T_encoder, -1, 1])
self.atts = atts
      # word_vecs has shape [T_decoder, N, encoder_embed_dim]
word_vecs = tf.reduce_sum(atts*self.embedded_input_seq, axis=1)
predicted_tokens.set_shape([None, None])
token_probs.set_shape([None, None])
neg_entropy.set_shape([None])
#word_vecs.set_shape([None, None, self.encoder_embed_dim])
# static shapes
word_vecs.set_shape([self.T_decoder, None, self.encoder_embed_dim])
self.predicted_tokens = predicted_tokens
self.token_probs = token_probs
self.neg_entropy = neg_entropy
self.word_vecs = word_vecs
#------------------------------------------------------------------------------
|
corefnmn-main
|
models_vd/generator_attnet.py
|
corefnmn-main
|
models_vd/__init__.py
|
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Assembler for neural module network programs in the CorefNMN visual dialog
model. Converts predicted module layouts (in reverse polish notation) into
executable graphs via the loom weaver, and checks layout validity.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow import convert_to_tensor as to_T
from models_vd import modules as lm
# the number of attention input to each module
_module_input_num = {
'_Find': 0,
'_Refer': 0,
'_Exclude': 0,
'_Transform': 1,
'_And': 2,
'_Describe': 1
}
# output type of each module
_module_output_type = {
'_Find': 'att',
'_Refer': 'att',
'_Exclude': 'att',
'_Transform': 'att',
'_And': 'att',
'_Describe': 'ans'
}
INVALID_EXPR = 'INVALID_EXPR'
# decoding validity: maintaining a state x of [#att, #ans, T_remain]
# when T_remain is T_decoder when decoding the first module token
# a token s can be predicted iff all(<x, w_s> - b_s >= 0)
# the validity token list is
# XW - b >= 0
# the state transition matrix is P, so the state update is X += S P,
# where S is the predicted tokens (one-hot vectors)
def _build_validity_mats(module_names):
state_size = 3
num_vocab_nmn = len(module_names)
num_constraints = 4
P = np.zeros((num_vocab_nmn, state_size), np.int32)
W = np.zeros((state_size, num_vocab_nmn, num_constraints), np.int32)
b = np.zeros((num_vocab_nmn, num_constraints), np.int32)
# collect the input and output numbers of each module
att_in_nums = np.zeros(num_vocab_nmn)
att_out_nums = np.zeros(num_vocab_nmn)
ans_out_nums = np.zeros(num_vocab_nmn)
for n_s, s in enumerate(module_names):
if s != '<eos>':
att_in_nums[n_s] = _module_input_num[s]
att_out_nums[n_s] = _module_output_type[s] == 'att'
ans_out_nums[n_s] = _module_output_type[s] == 'ans'
# construct the transition matrix P
for n_s, s in enumerate(module_names):
P[n_s, 0] = att_out_nums[n_s] - att_in_nums[n_s]
P[n_s, 1] = ans_out_nums[n_s]
P[n_s, 2] = -1
# construct the validity W and b
att_absorb_nums = (att_in_nums - att_out_nums)
max_att_absorb_nonans = np.max(att_absorb_nums * (ans_out_nums == 0))
max_att_absorb_ans = np.max(att_absorb_nums * (ans_out_nums != 0))
for n_s, s in enumerate(module_names):
if s != '<eos>':
# constraint: a non-<eos> module can be outputted iff all the following
# hold:
# * 0) there's enough att in the stack
# #att >= att_in_nums[n_s]
W[0, n_s, 0] = 1
b[n_s, 0] = att_in_nums[n_s]
# * 1) for answer modules, there's no extra att in the stack
# #att <= att_in_nums[n_s]
# -#att >= -att_in_nums[n_s]
# for non-answer modules, T_remain >= 3
# (the last two have to be AnswerType and <eos>)
if ans_out_nums[n_s] != 0:
W[0, n_s, 1] = -1
b[n_s, 1] = -att_in_nums[n_s]
else:
W[2, n_s, 1] = 1
b[n_s, 1] = 3
# * 2) there's no answer in the stack (otherwise <eos> only)
# #ans <= 0
# -#ans >= 0
W[1, n_s, 2] = -1
# * 3) there's enough time to consume the all attentions, output answer
# plus <eos>
# 3.1) for non-answer modules, we already have T_remain>= 3 from
# constraint 2
# In maximum (T_remain-3) further steps
# (plus 3 steps for this, ans, <eos>) to consume atts
# (T_remain-3) * max_att_absorb_nonans + max_att_absorb_ans +
# att_absorb_nums[n_s] >= #att
# T_remain*MANA - #att >= 3*MANA - MAA - A[s]
# - #att + MANA * T_remain >= 3*MANA - MAA - A[s]
# 3.2) for answer modules, if it can be decoded then constraint 0&1
# ensures that there'll be no att left in stack after decoding
# this answer, hence no further constraints here
if ans_out_nums[n_s] == 0:
W[0, n_s, 3] = -1
W[2, n_s, 3] = max_att_absorb_nonans
b[n_s, 3] = (3 * max_att_absorb_nonans - max_att_absorb_ans -
att_absorb_nums[n_s])
else: # <eos>-case
# constraint: an <eos> token can be output iff all the following hold
# * 0) there's ans in the stack
# #ans >= 1
W[1, n_s, 0] = 1
b[n_s, 0] = 1
return P, W, b
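# Illustrative sketch (not part of the original source): how P, W, b could be
# used to test which tokens are valid for a decoding state
# x = [#att, #ans, T_remain]. Assumes a hypothetical 3-token vocabulary.
#
#   P, W, b = _build_validity_mats(['_Find', '_Describe', '<eos>'])
#   x = np.array([0, 0, 3])                 # empty stack, 3 steps remaining
#   valid = np.all(np.tensordot(x, W, axes=1) - b >= 0, axis=1)
#   # valid == [True, False, False]: only _Find can start a program
#   x = x + P[0]                            # predict _Find     -> x = [1, 0, 2]
#   # now only _Describe is valid (it consumes the attention, outputs an ans)
#   x = x + P[1]                            # predict _Describe -> x = [0, 1, 1]
#   # now only <eos> is valid (an answer is on the stack, one step remains)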
#------------------------------------------------------------------------------
class Assembler:
def __init__(self, module_vocab_file):
# read the module list, and record the index of each module and <eos>
with open(module_vocab_file) as f:
self.module_names = [s.strip() for s in f.readlines()]
# find the index of <eos>
for n_s in range(len(self.module_names)):
if self.module_names[n_s] == '<eos>':
self.EOS_idx = n_s
break
# build a dictionary from module name to token index
self.name2idx_dict = {name: n_s for n_s, name in enumerate(self.module_names)}
self.num_vocab_nmn = len(self.module_names)
self.P, self.W, self.b = _build_validity_mats(self.module_names)
def module_list2tokens(self, module_list, T=None):
layout_tokens = [self.name2idx_dict[name] for name in module_list]
if T is not None:
if len(module_list) >= T:
raise ValueError('Not enough time steps to add <eos>')
layout_tokens += [self.EOS_idx]*(T-len(module_list))
return layout_tokens
def _layout_tokens2str(self, layout_tokens):
return ' '.join([self.module_names[idx] for idx in layout_tokens])
def assemble_refer(self, text_att, round_id, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# compute the scores
logits = []
for find_arg in reuse_stack:
# compute the weights for each of the attention map
inputs = (text_att, find_arg[1], round_id, find_arg[2])
logits.append(weaver.align_text(*inputs))
# exponential each logit
weights = []
for ii in logits: weights.append(weaver.exp(ii))
# normalize the weights
if len(weights) < 2:
norm = weights[0]
else:
norm = weaver.add(weights[0], weights[1])
for ii in weights[2:]: norm = weaver.add(norm, ii)
for index, ii in enumerate(weights):
weights[index] = weaver.divide(ii, norm)
# multiply the attention with softmax weight
prev_att = []
for (att, _, _, _, _), weight in zip(reuse_stack, weights):
prev_att.append(weaver.weight_attention(att, weight))
# add all attentions to get the result
if len(prev_att) < 2: out = prev_att[0]
else:
out = weaver.add_attention(prev_att[0], prev_att[1])
for ii in prev_att[2:]:
out = weaver.add_attention(out, ii)
return out, weights, logits
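# In effect the loom ops above compute a softmax-weighted sum over the
# attention maps cached in reuse_stack (shown here only as a sketch):
#   w_i = exp(l_i) / sum_j exp(l_j)   with l_i the align_text logit between
#                                     the current text feature and cached find i
#   out = sum_i w_i * att_i
# e.g. for two cached maps with logits [0.0, log(3)] the weights are
# [0.25, 0.75], so the second map dominates the referred attention.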
def assemble_exclude(self, text_att, round_id, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# compute the scores
weights = []
exclude_att = reuse_stack[0][0]
if len(reuse_stack) > 1:
for find_arg in reuse_stack:
exclude_att = weaver.max_attention(exclude_att, find_arg[0])
return weaver.normalize_exclude(exclude_att)
# code to check if the program makes sense
# typically contains all the checks from the _assemble_program method
def sanity_check_program(self, layout):
decode_stack = []
for t_id, cur_op_id in enumerate(layout):
cur_op_name = self.module_names[cur_op_id]
# <eos> would mean stop
if cur_op_id == self.EOS_idx: break
# insufficient number of inputs
num_inputs = _module_input_num[cur_op_name]
if len(decode_stack) < num_inputs:
return False, 'Insufficient inputs'
# read the inputs
inputs = []
for ii in range(num_inputs):
arg_type = decode_stack.pop()
# cannot consume anything but attention
if arg_type != 'att':
return False, 'Intermediate not attention'
decode_stack.append(_module_output_type[cur_op_name])
# Check if only one element is left
if len(decode_stack) != 1:
return False, 'Left with more than one output'
# final output is not answer type
elif decode_stack[0] != 'ans':
return False, 'Final output not an answer'
return True, 'Valid program'
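# Illustrative examples (not from the original source), assuming the layout
# ids are obtained through self.name2idx_dict:
#   ['_Find', '_Describe', '<eos>']          -> (True,  'Valid program')
#   ['_Describe', '<eos>']                   -> (False, 'Insufficient inputs')
#   ['_Find', '_Find', '_Describe', '<eos>'] -> (False, 'Left with more than one output')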
def assemble(self, layout_tokens, executor, visualize=False):
# layout_tokens_batch is a numpy array with shape [T, N],
# containing module tokens and <eos>, in Reverse Polish Notation.
# internalize executor and weaver
self.executor = executor
# build a weaver
if hasattr(self, 'weaver'): del self.weaver
weaver = executor.create_weaver()
self.weaver = weaver
# visualize flag
self.visualize = visualize
# get extent of layout tokens
max_time, batch_size = layout_tokens['ques'].shape
num_rounds = executor.params['num_rounds']
batch_size = batch_size // num_rounds
outputs = []
reuse = [[] for _ in range(batch_size)]  # independent list per example
cap_invalid_prog = []
ques_invalid_prog = []
# program on questions and captions, if needed
cap_tokens = layout_tokens.get('caption', None)
ques_tokens = layout_tokens['ques']
for b_id in range(batch_size):
image = weaver.batch_input(executor._loom_types['image'], b_id)
if executor.params['use_fact']:
fact = weaver.batch_input(executor._loom_types['fact'], b_id)
else: fact = None
# run module networks on captions only if needed
if 'nmn-cap' in executor.params['model']:
# convert caption to text type
cap = weaver.batch_input(executor._loom_types['caption'], b_id)
cap_text = weaver.convert_cap_in(cap)
# convert cap feature to text feature for alignment
cap_feat = weaver.batch_input(executor._loom_types['cap_feat'], b_id)
cap_feat = weaver.convert_cap_feat(cap_feat)
# collect root node outputs for down the rounds
tokens = cap_tokens[:, num_rounds * b_id : num_rounds * (b_id + 1)]
inputs = (image, cap_text, None, cap_feat, tokens, [])
out, reuse[b_id], invalid_prog = self._assemble_program(*inputs)
cap_invalid_prog.extend(invalid_prog)
# convert context to align type
cap_out = [weaver.convert_cap_out(ii) for ii in out['comp']]
outputs.extend(cap_out)
# add the visualization outputs, if needed
if visualize:
outputs.extend([ii[0] for ii in out['vis']['att'] if ii[1]==0])
# Now run program on questions
text = weaver.batch_input(executor._loom_types['text'], b_id)
text_feat = weaver.batch_input(executor._loom_types['text_feat'], b_id)
# collect root node outputs for down the rounds
# tuples are immutable, recreate to ensure caption is round 0
round_zero = weaver.batch_input(executor._loom_types['round'], 0)
cur_reuse = [(ii[0], ii[1], round_zero, ii[3], ii[4])
for ii in reuse[b_id] if ii[3] == 0]
tokens = ques_tokens[:, num_rounds*b_id : num_rounds*(b_id+1)]
inputs = (image, text, fact, text_feat, tokens, cur_reuse)
out, _, invalid_prog = self._assemble_program(*inputs)
ques_invalid_prog.extend(invalid_prog)
outputs.extend(out['comp'])
if visualize:
outputs.extend([ii for ii, _ in out['vis']['att']])
outputs.extend(out['vis']['weights'])
invalid_prog = {'ques': ques_invalid_prog, 'cap': cap_invalid_prog}
return weaver, outputs, invalid_prog
def _assemble_program(self, image, text, fact, text_feat, tokens, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# get extent of layout tokens
max_time, batch_size = tokens.shape
num_rounds = executor.params['num_rounds']
outputs = []
validity = []
# for visualizing internal nodes
vis_outputs = {'att': [], 'weights': [], 'logits': []}
for r_id in range(num_rounds):
layout = tokens[:, r_id]
invalid_prog = False
round_id = weaver.batch_input(executor._loom_types['round'], r_id)
if fact is not None: fact_slice = weaver.slice_fact(fact, round_id)
# valid layout must contain <eos>. Assembly fails if it doesn't.
if not np.any(layout == self.EOS_idx): invalid_prog = True
decode_stack = []
penult_out = None # penultimate output
for t_id in range(len(layout)):
weights = None
time = weaver.batch_input(executor._loom_types['time'], t_id)
text_att = weaver.slice_text(text, round_id, time)
# slice the text feature
text_feat_slice = weaver.slice_text_feat(text_feat, round_id, time)
cur_op_id = layout[t_id]
cur_op_name = self.module_names[cur_op_id]
# <eos> would mean stop
if cur_op_id == self.EOS_idx: break
# insufficient number of inputs
num_inputs = _module_input_num[cur_op_name]
if len(decode_stack) < num_inputs:
invalid_prog = True
break
# read the inputs
inputs = []
for ii in range(num_inputs):
arg, arg_type = decode_stack.pop()
# cannot consume anything but attention
if arg_type != 'att':
invalid_prog = True
break
inputs.append(arg)
# switch cases
if cur_op_name == '_Find':
out = weaver.find(image, text_att)
# collect in reuse stack (always)
#if fact is None:
reuse_stack.append((out, text_feat_slice, round_id, r_id, t_id))
#reuse_stack.append((out, text_att, round_id, r_id, t_id))
if cur_op_name == '_Refer':
if len(reuse_stack) == 0:
print('Something wrong with Refer')
continue
# if baseline is in the model, take the last output
if 'baseline' in self.executor.params['model']:
out = reuse_stack[-1][0]
else:
inputs = (text_feat_slice, round_id, reuse_stack)
out, weights, logits = self.assemble_refer(*inputs)
if cur_op_name == '_Exclude':
# clean up reuse stack to avoid current finds
neat_stack = reuse_stack.copy()
for prev_time in range(t_id - 1, 0, -1):
if neat_stack[-1][-2] == prev_time: neat_stack.pop(-1)
inputs = (text_att, round_id, neat_stack)
out = self.assemble_exclude(*inputs)
# collect in reuse stack
#reuse_stack.append((out, text_att, round_id, r_id, t_id))
elif cur_op_name == '_Transform':
out = weaver.transform(inputs[0], image, text_att)
elif cur_op_name == '_Describe':
out = weaver.describe(inputs[0], image, text_att)
# TODO: Do this more carefully!
penult_out = arg
elif cur_op_name == '_And':
out = weaver.and_op(inputs[0], inputs[1])
# collect outputs from all modules (visualize)
if self.visualize:
if _module_output_type[cur_op_name] == 'att':
vis_outputs['att'].append((out, r_id))
if weights is not None:
vis_outputs['weights'].extend(weights)
#vis_outputs['logits'].extend(logits)
# also add weights to usual outputs
#if weights is not None: print(r_id, len(weights))
if weights is not None:
if executor.params['train_mode']: outputs.extend(logits)
decode_stack.append((out, _module_output_type[cur_op_name]))
# Check if only one element is left
if len(decode_stack) != 1: invalid_prog = True
# final output is not answer type
elif decode_stack[0][1] != 'ans': invalid_prog = True
# record program validity
validity.append(invalid_prog)
# if program is invalid, return zeros
if invalid_prog: outputs.append(weaver.invalid(image))
else:
outputs.append(decode_stack[-1][0])
if fact is not None:
# record fact embedding against penultimate output
reuse_stack.append((penult_out, fact_slice, round_id, r_id, -1))
return {'comp': outputs, 'vis': vis_outputs}, reuse_stack, validity
#------------------------------------------------------------------------------
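# Minimal usage sketch (hypothetical file path and variable names, not part of
# the original source):
#   assembler = Assembler('data/module_vocab.txt')
#   layout = assembler.module_list2tokens(['_Find', '_Describe'], T=4)
#   # layout == [find_idx, describe_idx, eos_idx, eos_idx]
#   weaver, outputs, invalid = assembler.assemble(layout_tokens, executor)
# where layout_tokens is the dict of [T_decoder, N] token arrays ('ques' and
# optionally 'caption') produced by the program generator, and executor is a
# ProgramExecutor instance.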
|
corefnmn-main
|
models_vd/assembler.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main CorefNMN model class.
Explicit visual coreference resolution in visual dialog using neural module
networks. Takes parameters and assemblers as input.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from models_vd.generator import ProgramGenerator
from models_vd.executor import ProgramExecutor
from models_vd.decoder import AnswerDecoder
from util import support
class CorefNMN:
def __init__(self, params, assemblers, reuse=None):
# train mode
params['train_mode'] = 'test_split' not in params
print('Building model with train_mode as: ' + str(params['train_mode']))
self.params = params
self.assemblers = assemblers
# module phases
self.phases = ['generate_program', 'execute_program', 'generate_answer']
# initializing input and output placeholders
self.inputs = {ii: {} for ii in self.phases}
self.outputs = self.inputs.copy()
# build place holders for inputs and outputs in the tensorflow graph
holders = self._build_placeholders(params)
self.holders = holders
with tf.variable_scope(params['model'], reuse=reuse):
# keep track of all outputs
output_pool = {}
# Part 1: Seq2seq RNN to generate module layout tokens
with tf.variable_scope('generate_program'):
self.generator = ProgramGenerator(holders, assemblers['ques'], params)
self.inputs['generate_program'] = self.generator.get_inputs()
self.outputs['generate_program'] = self.generator.get_outputs()
# add outputs to pool
output_pool.update(self.outputs['generate_program'])
# Part 2: Neural Module Network
with tf.variable_scope('execute_program'):
self.executor = ProgramExecutor(holders, output_pool,
assemblers['cap'], params)
self.inputs['execute_program'] = self.executor.get_inputs()
self.outputs['execute_program'] = self.executor.get_outputs()
# add outputs to pool
output_pool.update(self.outputs['execute_program'])
# Part 3: Seq2Seq decoding of the answer
with tf.variable_scope('generate_answer'):
self.decoder = AnswerDecoder(holders, output_pool, params)
self.inputs['generate_answer'] = self.decoder.get_inputs()
self.outputs['generate_answer'] = self.decoder.get_outputs()
# pool up all the outputs
pooled_dict = []
outputs = self.outputs.copy()
for ii in outputs:
pooled_dict += outputs[ii].items()
self.pooled_outputs = dict(pooled_dict)
#---------------------------------------------------------------------------
def _build_placeholders(self, params):
inputs = {}
# Phase 1 - program generation
size = [params['max_enc_len'], None]
inputs['ques'] = tf.placeholder(tf.int32, size, 'ques')
inputs['ques_len'] = tf.placeholder(tf.int32, [None], 'ques_len')
inputs['prog_gt'] = tf.placeholder(tf.int32, [None, None], 'prog')
size = [None, params['max_enc_len']]
inputs['cap'] = tf.placeholder(tf.int32, size, 'caption')
inputs['cap_len'] = tf.placeholder(tf.int32, [None], 'cap_len')
inputs['cap_prog_gt'] = tf.placeholder(tf.int32, [None, None],
'cap_prog_gt')
# mask for pairwise program token loss
inputs['prog_att_mask'] = tf.placeholder(tf.float32, [None, None, None],
'mask')
# for supervising placeholders
if params['supervise_attention']:
size = [params['max_dec_len'], params['max_enc_len'], None, 1]
inputs['prog_att_gt'] = tf.placeholder(tf.float32, size, 'gt_att')
inputs['cap_att_gt'] = tf.placeholder(tf.float32, size, 'cap_att')
# masking out relevant parts for complete supervision
inputs['ques_super_mask'] = tf.placeholder(tf.float32, size, 'q_mask')
inputs['cap_super_mask'] = tf.placeholder(tf.float32, size, 'c_mask')
inputs['supervise_switch'] = tf.placeholder(tf.bool, [],
'supervise_switch')
# tie encoder and decoder
size = [params['num_layers'], None, params['lstm_size']]
inputs['enc_dec_h'] = tf.placeholder(tf.float32, size, 'enc_dec_h')
inputs['enc_dec_c'] = tf.placeholder(tf.float32, size, 'enc_dec_c')
# Phase 2 - program execution
size = [None, params['h_feat'], params['w_feat'], params['d_feat']]
inputs['img_feat'] = tf.placeholder(tf.float32, size, 'img_feat')
inputs['prog_validity'] = tf.placeholder(tf.bool, [None])
# Phase 2.5 - caption execution
inputs['align_gt'] = tf.placeholder(tf.int32, [None], 'align_cap')
inputs['prog_validity_cap'] = tf.placeholder(tf.bool, [None])
# Phase 3 - answer generation
inputs['ans_in'] = tf.placeholder(tf.int32, [None, None], 'ans_in')
inputs['ans_out'] = tf.placeholder(tf.int32, [None, None], 'ans_out')
inputs['ans'] = tf.placeholder(tf.int32, [None, None], 'ans')
inputs['ans_len'] = tf.placeholder(tf.int32, [None], 'ans_len')
# if discriminative, encode options
# NOTE: num_options hard coded to 100
num_options = 100
size = [None, params['max_enc_len'], num_options]
inputs['opt'] = tf.placeholder(tf.int32, size, 'opt_out')
inputs['opt_len'] = tf.placeholder(tf.int32, [None, num_options], 'opt_len')
inputs['gt_ind'] = tf.placeholder(tf.int32, [None], 'gt_ind')
# history
size = [None, params['num_rounds'], 2 * params['max_enc_len']]
inputs['hist'] = tf.placeholder(tf.int32, size, 'history')
size = [None, params['num_rounds']]
inputs['hist_len'] = tf.placeholder(tf.int32, size, 'hist_len')
# place holders for fact
size = [None, params['max_enc_len']]
inputs['fact'] = tf.placeholder(tf.int32, size, 'fact')
inputs['fact_len'] = tf.placeholder(tf.int32, [None], 'fact_len')
if not self.params['train_mode']:
# additional placeholders during evaluation
size = [None, params['lstm_size']]
inputs['context'] = tf.placeholder(tf.float32, size, 'context')
size = [1, 1, None, params['lstm_size']]
inputs['cap_enc'] = tf.placeholder(tf.float32, size, 'cap_enc')
size = [None, None, None, params['lstm_size']]
inputs['ques_enc'] = tf.placeholder(tf.float32, size, 'ques_enc')
size = [None, params['lstm_size']]
inputs['hist_enc'] = tf.placeholder(tf.float32, size, 'hist_enc')
size = [params['max_dec_len'], None, params['text_embed_size']]
inputs['ques_attended'] = tf.placeholder(tf.float32, size, 'ques_att')
inputs['cap_attended'] = tf.placeholder(tf.float32, size, 'cap_att')
return inputs
#---------------------------------------------------------------------------
# method to initialize training related attributes
def setup_training(self):
# answer prediction loss
total_loss = self.outputs['generate_answer']['ans_token_loss']
# supervised sequence prediction loss
total_loss += self.outputs['generate_program']['prog_pred_loss']
if 'nmn-cap' in self.params['model'] and self.params['cap_alignment']:
total_loss += self.outputs['execute_program']['cap_align_loss']
# add the total loss to the list of outputs
self.pooled_outputs['total_loss'] = total_loss
# setters and getters
def get_total_loss(self):
return self.pooled_outputs['total_loss']
def add_solver_op(self, op):
self.pooled_outputs['solver'] = op
#---------------------------------------------------------------------------
def run_train_iteration(self, batch, sess):
iter_loss = {}
# collect feeds from all subcomponents
feeder = self.generator.produce_feed_dict(batch)
feeder.update(self.executor.produce_feed_dict(batch))
feeder.update(self.decoder.produce_feed_dict(batch))
# run all subcomponents together
output = sess.run(self.pooled_outputs, feed_dict=feeder)
# record all the loss values
iter_loss['prog'] = output['prog_pred_loss']
if 'nmn-cap' in self.params['model']:
iter_loss['align'] = output['cap_align_loss']
else:
iter_loss['align'] = 0.
iter_loss['ans'] = output['ans_token_loss']
iter_loss['total'] = output['total_loss']
return iter_loss, None
#---------------------------------------------------------------------------
def run_evaluate_iteration(self, batch, sess, eval_options=True):
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output = sess.run(self.outputs['generate_program'], feed_dict=feeder)
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['execute_program'], feed_dict=feeder))
if 'pred_tokens' in output:
output['matches'] = [batch['gt_layout'] == output['pred_tokens']]
# if options are not to be scored
if not eval_options: return None, output
# Part 3: Run the answer generation language model (disc | gen)
if self.params['decoder'] == 'gen':
option_batch = output.copy()
option_batch.update(batch)
phase_output = self.outputs['generate_answer']['llh']
num_options = len(batch['opt_len'])
batch_size = batch['opt_len'][0].shape[0]
option_scores = np.zeros((batch_size, num_options))
option_probs = np.zeros((batch_size, num_options))
for opt_id in range(num_options):
option_batch['ans_in'] = batch['opt_in'][opt_id]
option_batch['ans_out'] = batch['opt_out'][opt_id]
option_batch['ans_len'] = batch['opt_len'][opt_id]
feeder = self.decoder.produce_feed_dict(option_batch, output)
scores = sess.run(phase_output, feed_dict=feeder)
option_scores[:, opt_id] = scores
# Part 3: Run the decoder model
elif self.params['decoder'] == 'disc':
batch_size = batch['opt_len'][0].shape[0]
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['generate_answer'], feeder))
option_scores = output['scores']
# extract ground truth score, and get ranks
gt_scores = option_scores[(range(batch_size), batch['gt_ind'])]
ranks = np.sum(option_scores > gt_scores.reshape(-1, 1), axis=1) + 1
output['scores'] = option_scores
return ranks, output
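# Worked example of the rank computation above (illustrative only): if the
# option scores of one item are [0.1, 0.7, 0.4] and gt_ind is 2 (gt score
# 0.4), exactly one option scores higher, so the reported rank is 1 + 1 = 2.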
#---------------------------------------------------------------------------
def run_visualize_iteration(self, batch, sess, eval_options=True):
output = batch.copy()
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output.update(sess.run(self.outputs['generate_program'], feeder))
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output, True)
output.update(sess.run(self.outputs['execute_program'], feeder))
# segregate weights and attention maps
output['intermediates'] = self.executor.segregrate_outputs(output)
if not eval_options: return None, output
# Part 3: Run the answer generation language model
if self.params['decoder'] == 'gen':
option_batch = output.copy()
option_batch.update(batch)
phase_output = self.outputs['generate_answer']['llh']
# Part 3: Run the answer generation language model for each option
num_options = len(batch['opt_len'])
batch_size = batch['opt_len'][0].shape[0]
option_scores = np.zeros((batch_size, num_options))
for opt_id in range(num_options):
option_batch['ans_in'] = batch['opt_in'][opt_id]
option_batch['ans_out'] = batch['opt_out'][opt_id]
option_batch['ans_len'] = batch['opt_len'][opt_id]
feeder = self.decoder.produce_feed_dict(option_batch, output)
scores = sess.run(phase_output, feed_dict=feeder)
option_scores[:, opt_id] = scores
# Part 3: Run the decoder model
elif self.params['decoder'] == 'disc':
batch_size = batch['opt_len'][0].shape[0]
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['generate_answer'], feeder))
option_scores = output['scores']
# extract ground truth score, and get ranks
gt_scores = option_scores[(range(batch_size), batch['gt_ind'])]
ranks = np.sum(option_scores > gt_scores.reshape(-1, 1), axis=1) + 1
output['scores'] = option_scores
return ranks, output
#-------------------------------------------------------------------------
|
corefnmn-main
|
models_vd/model.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to generate programs for questions and captions.
Program generator for explicit visual coreference resolution model in visual
dialog using neural module networks, called CorefNMN.
This subcomponent uses memory network augmentation to figure out if an entity
has been seen before and/or if it needs resolution using history.
Author: Satwik Kottur
"""
import numpy as np
import tensorflow as tf
from models_vd.generator_attnet import AttSeq2Seq
from util import support
# alias
linear = tf.contrib.layers.fully_connected
# behavior based on type of model
class ProgramGenerator:
def __init__(self, inputs, assembler, params):
"""Initialize program generator.
Args:
inputs: Dictionary of input placeholders built by the main model
assembler: Assembler object for question programs
params: Dictionary of model options and hyperparameters
"""
self.params = params
outputs = {}
used_inputs = []
# create embedding matrix
with tf.variable_scope('embed', reuse=None) as embed_scope:
size = [params['text_vocab_size'], params['text_embed_size']]
embed_mat = tf.get_variable('embed_mat', size)
# remember the scope for further use
params['embed_scope'] = embed_scope
cell = tf.contrib.rnn.BasicLSTMCell(params['lstm_size'])
#--------------------------------------------------------
# if program is to be predicted
if 'prog' in params['model']:
# define a constant for internal use
use_gt_prog = tf.constant(params['use_gt_prog'], dtype=tf.bool)
# use a low level model and construct internals
self.rnn = AttSeq2Seq(inputs, use_gt_prog, assembler, params)
# if memory based generator is used
if params['generator'] == 'mem':
used_inputs.extend(['hist', 'hist_len'])
outputs['encoder_output'] = self.rnn.encoder_outputs
outputs['pred_tokens'] = self.rnn.predicted_tokens
outputs['neg_entropy'] = tf.reduce_mean(self.rnn.neg_entropy)
# check if attHistory exists
if hasattr(self.rnn, 'att_history'):
outputs['att_history'] = self.rnn.att_history
# also add the encoder states (based on the flag)
concat_list = [ii.h for ii in self.rnn.encoder_states]
outputs['enc_dec_h'] = tf.stack(concat_list)
concat_list = [ii.c for ii in self.rnn.encoder_states]
outputs['enc_dec_c'] = tf.stack(concat_list)
# alias
attention = self.rnn.atts
# if attention is to be supervised
if params['supervise_attention']:
# get mask out of the program supervision
mask = tf.cast(inputs['prog_att_gt'] > 0, tf.float32)
used_inputs.append('prog_att_gt')
# binary supervision loss
sum_mask = tf.reduce_sum(mask, 1)
sum_mask = tf.expand_dims(sum_mask, 1)
sum_mask = tf.cast(sum_mask > 0, tf.float32)
tile_size = (1, self.params['max_enc_len'], 1, 1)
tile_mask = tf.tile(sum_mask, tile_size)
num_tokens = tf.maximum(tf.reduce_sum(tile_mask), 1)
# stop gradients
num_tokens = tf.stop_gradient(num_tokens)
tile_mask = tf.stop_gradient(tile_mask)
criterion = tf.nn.sigmoid_cross_entropy_with_logits
att_loss = criterion(labels=mask, logits=attention)
att_loss = tf.reduce_sum(tf.multiply(att_loss, tile_mask))
att_loss = att_loss / num_tokens
outputs['att_loss'] = att_loss
# compute attended questions here
# word_vecs has shape [max_dec_len, N, text_embed_size]
word_vecs = tf.reduce_sum(attention * self.rnn.embedded_input_seq, axis=1)
size = [params['max_dec_len'], None, params['text_embed_size']]
word_vecs.set_shape(size)
outputs['attention'] = attention
outputs['ques_attended'] = word_vecs
#outputs['ques_attended'] = self.rnn.word_vecs
# log probability of each generated sequence
outputs['log_seq_prob'] = tf.reduce_sum(
tf.log(self.rnn.token_probs + 1e-10), axis=0)
outputs['ques_prog_loss'] = tf.reduce_mean(-outputs['log_seq_prob'])
q_output = tf.transpose(self.rnn.encoder_outputs, perm=[1, 0, 2])
q_output = support.last_relevant(q_output, inputs['ques_len'])
# bloat the first two dimensions
q_output = tf.expand_dims(q_output, axis=0)
outputs['ques_enc'] = tf.expand_dims(q_output, axis=0)
# keep track of inputs actually used
used_inputs.extend(['ques', 'ques_len', 'prog_gt'])
#------------------------------------------------------------------
# programs for captions
if 'nmn-cap' in params['model']:
# define a constant for internal use
use_gt_prog = tf.constant(params['use_gt_prog'], dtype=tf.bool)
# use a low level model and construct internals
# pretend captions to be questions for code reusability
fake_ins = {'ques': tf.transpose(inputs['cap'], perm=[1, 0]),
'ques_len': inputs['cap_len'],
'prog_gt': inputs['cap_prog_gt']}
function_ins = [fake_ins, use_gt_prog, assembler, params]
# if captions and questions share encoder
# default value for sharing encoding
self.params['share_encoder'] = self.params.get('share_encoder', False)
if not self.params['share_encoder']:
function_ins[0]['fake'] = True
else:
function_ins += [True]
self.rnn_cap = AttSeq2Seq(*function_ins)
used_inputs.extend(['cap', 'cap_len', 'cap_prog_gt'])
outputs['pred_tokens_cap'] = self.rnn_cap.predicted_tokens
outputs['neg_entropy_cap'] = tf.reduce_mean(self.rnn_cap.neg_entropy)
#------------------------------------------------------------------
# alias
attention = self.rnn_cap.atts
# if attention is to be supervised
if params['supervise_attention']:
# get mask out of the program supervision
mask = tf.cast(inputs['cap_att_gt'] > 0, tf.float32)
# binary supervision loss
sum_mask = tf.reduce_sum(mask, 1)
sum_mask = tf.expand_dims(sum_mask, 1)
sum_mask = tf.cast(sum_mask > 0, tf.float32)
tile_size = (1, self.params['max_enc_len'], 1, 1)
tile_mask = tf.tile(sum_mask, tile_size)
num_tokens = tf.maximum(tf.reduce_sum(tile_mask), 1)
# stop gradients
num_tokens = tf.stop_gradient(num_tokens)
tile_mask = tf.stop_gradient(tile_mask)
criterion = tf.nn.sigmoid_cross_entropy_with_logits
att_loss = criterion(labels=mask, logits=attention)
att_loss = tf.reduce_sum(tf.multiply(att_loss, tile_mask))
att_loss_cap = att_loss / num_tokens
# additionally add the multiplier
outputs['att_loss_cap'] = att_loss_cap
used_inputs.append('cap_att_gt')
# compute attended questions here
# word_vecs has shape [max_dec_len, N, text_embed_size]
word_vecs = tf.reduce_sum(attention * self.rnn_cap.embedded_input_seq,
axis=1)
size = [params['max_dec_len'], None, params['text_embed_size']]
word_vecs.set_shape(size)
outputs['attention_cap'] = attention
outputs['cap_attended'] = word_vecs
#outputs['cap_attended'] = self.rnn_cap.word_vecs
#------------------------------------------------------------------
# log probability of each generated sequence
log_prob_cap_token = tf.log(self.rnn_cap.token_probs + 1e-10)
outputs['log_seq_prob_cap'] = tf.reduce_sum(log_prob_cap_token, axis=0)
outputs['cap_prog_loss'] = tf.reduce_mean(-outputs['log_seq_prob_cap'])
c_output = tf.transpose(self.rnn_cap.encoder_outputs, perm=[1, 0, 2])
c_output = support.last_relevant(c_output, inputs['cap_len'])
# bloat the first two dimensions
c_output = tf.expand_dims(c_output, axis=0)
outputs['cap_enc'] = tf.expand_dims(c_output, axis=0)
used_inputs.extend(['cap', 'cap_len'])
#------------------------------------------------------------------
# setup the inputs and outputs
# should have at least one loss
total_loss = (outputs.get('ques_prog_loss', tf.constant(0.0)) +
outputs.get('cap_prog_loss', tf.constant(0.0)) +
outputs.get('att_loss', tf.constant(0.0)) +
outputs.get('att_loss_cap', tf.constant(0.0)))
outputs['prog_pred_loss'] = total_loss
self.outputs = outputs
self.inputs = {ii: inputs[ii] for ii in used_inputs}
#------------------------------------------------------------
# setters and getters
def get_outputs(self):
return self.outputs
def get_inputs(self):
return self.inputs
#------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, prev_output=None):
feed_dict = {}
feed_dict[self.inputs['ques']] = batch['ques']
feed_dict[self.inputs['ques_len']] = batch['ques_len']
# add program
if 'prog' in self.params['model']:
feed_dict[self.inputs['prog_gt']] = batch['gt_layout']
# attention for program
if self.params['supervise_attention']:
feed_dict[self.inputs['prog_att_gt']] = batch['gt_att']
# add captions
if 'cap' in self.params['model']:
feed_dict[self.inputs['cap']] = batch['cap']
feed_dict[self.inputs['cap_len']] = batch['cap_len']
# add history
if self.params['generator'] == 'mem':
feed_dict[self.inputs['hist']] = batch['hist']
feed_dict[self.inputs['hist_len']] = batch['hist_len']
# nmn on captions
if 'nmn-cap' in self.params['model']:
feed_dict[self.inputs['cap']] = batch['sh_cap']
feed_dict[self.inputs['cap_len']] = batch['sh_cap_len']
feed_dict[self.inputs['cap_prog_gt']] = batch['sh_cap_prog']
if self.params['supervise_attention']:
feed_dict[self.inputs['cap_att_gt']] = batch['sh_cap_att']
return feed_dict
#------------------------------------------------------------
|
corefnmn-main
|
models_vd/generator.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Module definitions for Loom API.
Explicit visual coreference resolution in visual dialog using neural module
networks. Neural module definitions.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from tensorflow_fold.public import loom
from util.cnn import fc_layer as fc, conv_layer as conv
from util.empty_safe_conv import empty_safe_1x1_conv as _1x1_conv
from util.empty_safe_conv import empty_safe_conv as _conv
def add_spatial_coord_map(image_feat_grid):
image_feat_shape = tf.shape(image_feat_grid)
N = image_feat_shape[0]
# static dimensions
#H = image_feat_shape[1]
#W = image_feat_shape[2]
H, W = image_feat_grid.shape.as_list()[1:3]
x_map = tf.tile(
tf.reshape(tf.linspace(-1., 1., W), [1, 1, -1, 1]),
to_T([N, H, 1, 1]))
y_map = tf.tile(
tf.reshape(tf.linspace(-1., 1., H), [1, -1, 1, 1]),
to_T([N, 1, W, 1]))
# stop gradient on coords_map (needed to fix the tile grad error on TF 1.0.0)
coords_map = tf.stop_gradient(tf.concat([x_map, y_map], axis=3))
image_feat_with_coords = tf.concat([image_feat_grid, coords_map], axis=3)
# set shapes of the new feature maps
image_feat_static_shape = image_feat_grid.get_shape().as_list()
image_feat_static_shape[3] += 2
image_feat_with_coords.set_shape(image_feat_static_shape)
image_feat_static_shape[3] = 2
coords_map.set_shape(image_feat_static_shape)
return image_feat_with_coords, coords_map
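# Illustrative example (not from the original source): for a 1 x 3 x 3 x D
# feature map, linspace(-1, 1, 3) = [-1, 0, 1], so the appended channels are
#   x_map[0, :, :, 0] = [[-1, 0, 1],        y_map[0, :, :, 0] = [[-1, -1, -1],
#                        [-1, 0, 1],                             [ 0,  0,  0],
#                        [-1, 0, 1]]                             [ 1,  1,  1]]
# i.e. every spatial location carries its normalized (x, y) position.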
#------------------------------------------------------------------------------
# Simple tensorflow ops as loom ops
class BinaryLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types, op):
self._op = op
super(BinaryLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
arg1, arg2 = inputs
return [self._op(arg1, arg2)]
#------------------------------------------------------------------------------
class UnaryLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types, op):
self._op = op
super(UnaryLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, arg):
return [self._op(arg[0])]
#------------------------------------------------------------------------------
# slice text attention
class SliceTextLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(SliceTextLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
text, round_id, time = inputs
round_squeeze = tf.squeeze(round_id, -1)
time_squeeze = tf.squeeze(time, -1)
# select the right round
shape = text.shape.as_list()
B = tf.shape(text)[0]
num_rounds, T, text_dim = shape[1], shape[2], shape[3]
indices = round_squeeze + num_rounds * tf.range(B)
# flatten
result = tf.gather(tf.reshape(text, [-1, T, text_dim]), indices)
# select the right time
indices = time_squeeze + T * tf.range(B)
# flatten
result = tf.gather(tf.reshape(result, [-1, text_dim]), indices)
return [result]
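# Worked example of the gather indexing above (illustrative only): with
# B = 2 examples, num_rounds = 10 and round ids [3, 7], the indices are
# [3 + 10*0, 7 + 10*1] = [3, 17], which pick round 3 of example 0 and
# round 7 of example 1 from the [B*num_rounds, T, text_dim] reshaped tensor.
# The same trick then selects the requested timestep within each round.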
#------------------------------------------------------------------------------
# slice answer embedding
class SliceAnswerLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(SliceAnswerLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
answer, round_id = inputs
round_squeeze = tf.squeeze(round_id, -1)
# select the right round
shape = answer.shape.as_list()
B = tf.shape(answer)[0]
num_rounds, text_dim = shape[1], shape[2]
indices = round_squeeze + num_rounds * tf.range(B)
result = tf.gather(tf.reshape(answer, [-1, text_dim]), indices)
return [result]
#--------------------------------------------------------------------
# attention weighting
class AttentionWeightLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(AttentionWeightLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
vis_att, scalar = inputs
# simple weighting
scalar = tf.expand_dims(tf.expand_dims(scalar, -1), -1)
att_grid = tf.multiply(vis_att, scalar)
return [att_grid]
#--------------------------------------------------------------------
# identity op to convert types
class IdentityLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(IdentityLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
return inputs
#--------------------------------------------------------------------
# normalize and complementary attention
class NormalizeExcludeLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(NormalizeExcludeLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
att_grid = inputs[0]
# complement the attention
max_entry = tf.reduce_max(tf.reduce_max(att_grid, 1), 1)
max_entry = tf.expand_dims(tf.expand_dims(max_entry, 1), 1)
att_grid = att_grid / max_entry
att_grid = 1 - att_grid
# normalize
norms = tf.reduce_sum(tf.reduce_sum(att_grid, 1), 1)
norms = tf.expand_dims(tf.expand_dims(norms, 1), 1)
# cutoff
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
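# In effect the op above computes, per example (sketch only):
#   a'  = 1 - a / max(a)                      complement of the attention
#   out = a' / clip(sum(a'), 1e-6, 1e6)       renormalized to sum to one
# so regions that were strongly attended before receive near-zero weight.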
#-------------------------------------------------------------------
class AlignTextLoomOp(loom.LoomOp):
"""
Takes in two text attention and computes the alignment between them
Mapping: text_param x text_param -> scalar
Input:
text_param: [N, D_txt]
text_param: [N, D_txt]
Output:
scalar: [N, 1]
Implementation:
Parameters typically contain:
map_dim = 1024
module_scope = alignTextOp
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'alignTextOp')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(AlignTextLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
"""
text_att1, text_att2, round_id1, round_id2 = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att1.shape.as_list()[-1]
map_dim = self._params['map_dim']
embed_dim = self._params['text_embed_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# concat both text attentions, along with round diff (if need be)
concat_list = [text_att1, text_att2]
# additional weight for the distance to the past
if self._params['amalgam_text_feats']:
round_diff = tf.cast(round_id1 - round_id2, tf.float32)
concat_list.append(round_diff)
concat = tf.concat(concat_list, axis=-1)
# deeper 2 layer align network
weights = tf.contrib.layers.fully_connected(concat, embed_dim)
weights = tf.contrib.layers.fully_connected(weights, 1,
activation_fn=None)
return [weights]
#--------------------------------------------------------------------
# Modules as Loom Ops
class FindLoomOp(loom.LoomOp):
"""
Mapping: image_feat_grid x text_param -> att_grid
Input:
image_feat_grid: [N, H, W, D_im]
text_param: [N, D_txt]
Output:
att_grid: [N, H, W, 1]
Implementation:
1. Elementwise multiplication between image_feat_grid and text_param
2. L2-normalization
3. Linear classification
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'find_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(FindLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
"""
img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# image_feat_mapped has shape [N, H, W, map_dim]
img_map = _1x1_conv('conv_image', img_feat, output_dim=map_dim)
# nonlinearity
img_map = tf.nn.relu(img_map)
text_map = fc('fc_text', text_att, output_dim=map_dim)
# nonlinearity
text_map = tf.nn.relu(text_map)
text_map = tf.reshape(text_map, [-1, 1, 1, map_dim])
# interact via element wise map
eltwise_mult = tf.nn.l2_normalize(img_map * text_map, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
# softmax
att_grid_soft = tf.nn.softmax(tf.reshape(att_grid, [-1, H*W]))
att_grid = tf.reshape(att_grid_soft, [-1, H, W, 1])
return [att_grid]
#------------------------------------------------------------------------------
class AndLoomOp(loom.LoomOp):
"""
Mapping: att_grid x att_grid -> att_grid
Input:
input_0: [N, H, W, 1]
input_1: [N, H, W, 1]
Output:
att_grid: [N, H, W, 1]
Implementation:
Take the elementwise-min
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'and_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(AndLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
the two visual attention maps to combine
"""
input1, input2 = inputs
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
att_grid = tf.minimum(input1, input2)
# now L1 normalize
norms = tf.einsum('ijkl->i', att_grid)
norms = tf.reshape(norms, [-1, 1, 1, 1])
#norms = tf.tile(tf.reshape(norms, [-1, 1, 1, 1]), [1, H, W, 1])
# NOTE: if norm is too low, then clip it
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
#------------------------------------------------------------------------------
class InvalidLoomOp(loom.LoomOp):
"""
Mapping: returns a context of zeros
Output:
context: [N, encodeSize] of zeros
Implementation:
Returns an all-zero context vector (used when a program is invalid)
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'invalid_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(InvalidLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example (only used to infer the batch size)
"""
img_feat = inputs
encode_size = self._params['encode_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
N = tf.shape(img_feat)[0]
context = tf.zeros([N, encode_size], tf.float32)
return [context]
#------------------------------------------------------------------------------
class DescribeLoomOp(loom.LoomOp):
"""
Mapping: att_grid -> context vector
Input:
input_0: [N, H, W, 1]
Output:
answer_scores: [N, outputSize]
Implementation:
1. Extract visual features using the input attention map, and
linear transform to map_dim
2. linear transform language features to map_dim
3. Element-wise multiplication of the two, l2_normalize, linear transform.
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'describe_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(DescribeLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
output from the previous modules
image feature for the example
text attention for all modules for the example
time id for current module
"""
vis_att, img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
encode_size = self._params['encode_size']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
text_map = fc('fc_text', text_att, output_dim=map_dim)
# nonlinearity
text_map = tf.nn.relu(text_map)
# att_feat, att_feat_1 has shape [N, D_vis]
att_feats = tf.reduce_sum(img_feat * vis_att, axis=[1, 2])
img_map = tf.reshape(fc('fc_att', att_feats, output_dim=map_dim),
[N, map_dim])
# nonlinearity
img_map = tf.nn.relu(img_map)
eltwise_mult = tf.nn.l2_normalize(img_map * text_map, 1)
context = fc('fc_eltwise', eltwise_mult, output_dim=encode_size)
return [context]
#------------------------------------------------------------------------------
class TransformLoomOp(loom.LoomOp):
"""
Mapping: att_grid x text_param -> att_grid
Input:
input_0: [N, H, W, 1]
text_param: [N, D_txt]
Output:
att_grid: [N, H, W, 1]
Implementation:
1. Extract visual features using the input attention map, and
linear transform to map_dim
2. linear transform language features to map_dim
3. Convolve image features to map_dim
4. Element-wise multiplication of the three, l2_normalize, linear transform.
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'transform_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(TransformLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
output from the previous modules
image feature for the example
text attention for all modules for the example
time id for current module
"""
vis_att, img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
encode_size = self._params['encode_size']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# image_feat_mapped has shape [N, H, W, map_dim]
img_map = _1x1_conv('conv_image', img_feat, output_dim=map_dim)
# nonlinearity
img_map = tf.nn.relu(img_map)
text_map = fc('fc_text', text_att, output_dim=map_dim)
text_map = tf.reshape(text_map, [-1, 1, 1, map_dim])
# nonlinearity
text_map = tf.nn.relu(text_map)
att_feats = tf.reduce_sum(img_feat * vis_att, axis=[1, 2])
att_map = tf.reshape(fc('fc_att', att_feats, output_dim=map_dim),
[N, 1, 1, map_dim])
# interact via element wise map
eltwise_mult = tf.nn.l2_normalize(img_map * text_map * att_map, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
# softmax
att_grid_soft = tf.nn.softmax(tf.reshape(att_grid, [-1, H*W]))
att_grid = tf.reshape(att_grid_soft, [-1, H, W, 1])
return [att_grid]
#------------------------------------------------------------------------------
|
corefnmn-main
|
models_vd/modules.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to execute programs using tensorflow fold loom API.
Program execution for explicit visual coreference resolution model in visual
dialog using neural module networks. Uses low-level loom API in tensorflow
fold:
https://github.com/tensorflow/fold/blob/master/tensorflow_fold/g3doc/py/loom.md
for dynamic creation and execution of computation graphs.
Author: Satwik Kottur
"""
import math
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow_fold.public import loom
import models_vd.modules as lm
from models_vd.assembler import INVALID_EXPR, _module_output_type
class ProgramExecutor:
def __init__(self, inputs, output_pool, assembler, params):
"""Initialize program execution subcomponent.
Args:
inputs: Dictionary of input placeholders built by the main model
output_pool: Outputs collected from the program generation phase
assembler: Assembler object for caption programs
params: Dictionary of model options and hyperparameters
"""
self.params = params
# assembler dynamically assembles the graph at run time
self._assembler = assembler
#--------------------------------------------------------------------------
# A. Create loom data inputs
loom_inputs, used_inputs = self._build_loom_inputs(inputs, output_pool)
# B. Create loom data types
types = self._build_loom_types()
self._loom_types = types
# C. Create loom operations
loom_ops_dict = self._build_loom_ops()
self._loom_ops = loom_ops_dict
# create a loom object
keys = ['text', 'image', 'answer', 'caption', 'time',
'fact', 'round', 'text_feat', 'cap_feat']
batch_ins = {types[k]: loom_inputs[k] for k in keys if k in loom_inputs}
self._loom = loom.Loom(batch_inputs=batch_ins, named_ops=loom_ops_dict)
# setup the inputs and outputs
self.outputs = {'context': self.get_loom_output(),
'att': self.get_loom_output(types['attention']),
'logits': self.get_loom_output(types['float'])}
# build alignment networks
if 'nmn-cap' in params['model']:
# binary classification over the alignment
align_context = self.get_loom_output(types['align'])
align_loss = self._build_align_network(align_context, inputs['align_gt'])
self.outputs['cap_align_loss'] = align_loss
used_inputs.append('align_gt')
# add invalid prog to used inputs
used_inputs.extend(['prog_validity', 'prog_validity_cap'])
self.inputs = {ii: inputs[ii] for ii in used_inputs}
# time/round place holder
self.inputs['time'] = loom_inputs['time']
self.inputs['round'] = loom_inputs['round']
def create_weaver(self):
"""Creates a weaver object within the current loom object.
"""
return self._loom.make_weaver()
def get_loom_output(self, type_shape=None):
"""Return the loom output given the type and shape.
"""
# default output is the context vector
if type_shape is None:
type_shape = self._loom_types['context']
return self._loom.output_tensor(type_shape)
#---------------------------------------------------------
def _adjust_text(self, text):
"""
Takes the text attention output from the generator and reshapes it to
[batch, num_rounds, max_dec_len, embed_size].
"""
params = self.params
# transpose text to have batch first dimension
text_mod = tf.transpose(text, [1, 0, 2])
# split across rounds
shape = text_mod.shape.as_list()
new_size = [-1, params['num_rounds'], shape[1], shape[2]]
return tf.reshape(text_mod, new_size)
def _build_fact_encoder(self, inputs):
"""
"""
# local alias
params = self.params
with tf.variable_scope(self.params['embed_scope'], reuse=True):
embed_mat = tf.get_variable('embed_mat')
# flatten
# embed the words
output = tf.nn.embedding_lookup(embed_mat, inputs['fact'])
# pass through encoder
cell = tf.contrib.rnn.BasicLSTMCell(params['text_embed_size'])
# begin decoding
for ii in range(0, params['num_layers']):
# dynamic rnn
output, states = tf.nn.dynamic_rnn(cell, output,
sequence_length=inputs['fact_len'],
dtype=tf.float32,
scope='fact_layer_%d' % ii)
# split roundwise
fact_embed = states[1]
text_dim = fact_embed.shape.as_list()[-1]
fact_embed = tf.reshape(fact_embed, [-1, params['num_rounds'], text_dim])
return fact_embed
def _build_align_network(self, align_vec, align_gt):
"""
Takes the caption alignment vector, builds a binary classifier on top,
and returns the resulting alignment loss.
"""
params = self.params
with tf.variable_scope('cap_align'):
# construct an mlp on top to a binary classification
align_vec = tf.contrib.layers.fully_connected(align_vec,
params['lstm_size']//2)
align_vec = tf.contrib.layers.fully_connected(align_vec, 2,
activation_fn=None)
# alias for criterion
criterion = tf.nn.sparse_softmax_cross_entropy_with_logits
align_loss = criterion(logits=align_vec, labels=align_gt)
align_loss = tf.reduce_mean(align_loss)
return align_loss
def _build_loom_inputs(self, inputs, output_pool):
'''
Sub routine to build the inputs to loom
'''
# --------- grab required inputs -------------
loom_inputs = {}
params = self.params
# A. image
loom_inputs['image'], _ = lm.add_spatial_coord_map(inputs['img_feat'])
#loom_inputs['image'] = inputs['img_feat']
used_inputs = ['img_feat']
# B. text -- both question and caption
key = 'ques_attended'
if params['train_mode']: text = output_pool[key]
else:
text = inputs[key]
used_inputs.append(key)
adjusted_text = self._adjust_text(text)
loom_inputs['text'] = adjusted_text
batch_size = tf.shape(adjusted_text)[0]
# C. Facts
if params['use_fact']:
loom_inputs['fact'] = self._build_fact_encoder(inputs)
used_inputs.extend(['fact', 'fact_len'])
concat_list = [adjusted_text]
loom_inputs['text_feat'] = tf.concat(concat_list, -1)
# D. Get captions, if needed
if 'nmn-cap' in params['model']:
key = 'cap_attended'
if params['train_mode']: text = output_pool[key]
else:
text = inputs[key]
used_inputs.append(key)
loom_inputs['caption'] = self._adjust_text(text)
loom_inputs['cap_feat'] = loom_inputs['caption']
# E. time steps (internal placeholder)
loom_inputs['time'] = tf.placeholder(tf.int32, (None, 1), 'time')
loom_inputs['round'] = tf.placeholder(tf.int32, (None, 1), 'round')
return loom_inputs, used_inputs
def _build_loom_types(self):
"""Method to build loom types for given setting.
"""
params = self.params
encode_size = params['lstm_size']
# create and save loom types
types = {}
types['time'] = loom.TypeShape('int32', (1,), 'time')
types['round'] = loom.TypeShape('int32', (1,), 'round')
types['float'] = loom.TypeShape('float32', (1,))
types['context'] = loom.TypeShape('float32', (encode_size,), 'context')
types['align'] = loom.TypeShape('float32', (encode_size,), 'align')
size = (params['num_rounds'], params['text_embed_size'])
types['fact'] = loom.TypeShape('float32', size, 'fact')
size = (params['num_rounds'], params['max_dec_len'],
params['text_embed_size'])
types['text'] = loom.TypeShape('float32', size, 'text')
types['caption'] = loom.TypeShape('float32', size, 'caption')
size = (params['text_embed_size'],)
types['text_slice'] = loom.TypeShape('float32', size, 'text_slice')
# this depends on whether we want all features
concat_dim = params['text_embed_size']
size = (params['num_rounds'], params['max_dec_len'], concat_dim)
types['text_feat'] = loom.TypeShape('float32', size, 'text_feat')
types['cap_feat'] = loom.TypeShape('float32', size, 'cap_feat')
size = (concat_dim,)
types['text_feat_slice'] = loom.TypeShape('float32', size, 'text_feat_slice')
# include spatial dimensions (x, y), add 2
size = (params['h_feat'], params['w_feat'], params['d_feat'] + 2)
types['image'] = loom.TypeShape('float32', size, 'image')
size = (params['h_feat'], params['w_feat'], 1)
types['attention'] = loom.TypeShape('float32', size, 'att')
return types
def _build_loom_ops(self):
"""TODO(satwik): Some helper text here
"""
params = self.params
types = self._loom_types
# create all modules under the same scope
wt = params.get('priority_weight', 1.0)
op_params = {'map_dim': 1024, 'priority_weight': wt}
with tf.variable_scope('loom_modules') as module_scope:
op_params['module_scope'] = module_scope
# creating ops
loom_ops_dict = {}
in_types = [types['float'], types['float']]
out_types = [types['float']]
loom_ops_dict['add'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
loom_ops_dict['divide'] = lm.BinaryLoomOp(in_types, out_types, tf.divide)
in_types = [types['float']]
loom_ops_dict['exp'] = lm.UnaryLoomOp(in_types, out_types, tf.exp)
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['add_attention'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['max_attention'] = lm.BinaryLoomOp(in_types, out_types,
tf.maximum)
# basic attention manipulation ops
in_types = [types['attention'], types['float']]
out_types = [types['attention']]
loom_ops_dict['weight_attention'] = lm.AttentionWeightLoomOp(in_types,
out_types)
in_types = [types['text_feat_slice'], types['text_feat_slice'],
types['round'], types['round']]
out_types = [types['float']]
op_params['amalgam_text_feats'] = params['amalgam_text_feats']
op_params['text_embed_size'] = params['text_embed_size']
loom_ops_dict['align_text'] = lm.AlignTextLoomOp(in_types, out_types, op_params)
# slicing ops
in_types = [types['text'], types['round'], types['time']]
out_types = [types['text_slice']]
loom_ops_dict['slice_text'] = lm.SliceTextLoomOp(in_types, out_types)
in_types = [types['text_feat'], types['round'], types['time']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_text_feat'] = lm.SliceTextLoomOp(in_types, out_types)
# slice_answer_embedding
in_types = [types['fact'], types['round']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_fact'] = lm.SliceAnswerLoomOp(in_types, out_types)
# normalize and complement
in_types = [types['attention']]
out_types = [types['attention']]
loom_ops_dict['normalize_exclude']= lm.NormalizeExcludeLoomOp(in_types,
out_types)
#------------------------------------------------------------------
# find module
in_types = [types['image'], types['text_slice']]
out_types = [types['attention']]
loom_ops_dict['find'] = lm.FindLoomOp(in_types, out_types, op_params)
# and module
in_types = [types['attention'], types['attention']]
loom_ops_dict['and_op'] = lm.AndLoomOp(in_types, out_types, op_params)
# transform module
in_types = [types['attention'], types['image'], types['text_slice']]
loom_ops_dict['transform'] = lm.TransformLoomOp(in_types, out_types, op_params)
# describe module
out_types = [types['context']]
op_params['encode_size'] = params['lstm_size']
loom_ops_dict['describe'] = lm.DescribeLoomOp(in_types, out_types, op_params)
# invalid Module
in_types = [types['image']]
loom_ops_dict['invalid'] = lm.InvalidLoomOp(in_types, out_types, op_params)
#------------------------------------------------------------------
# type converter ops
in_types, out_types = [types['caption']], [types['text']]
loom_ops_dict['convert_cap_in'] = lm.IdentityLoomOp(in_types, out_types)
in_types, out_types = [types['context']], [types['align']]
loom_ops_dict['convert_cap_out'] = lm.IdentityLoomOp(in_types, out_types)
in_types, out_types = [types['cap_feat']], [types['text_feat']]
loom_ops_dict['convert_cap_feat'] = lm.IdentityLoomOp(in_types, out_types)
return loom_ops_dict
#---------------------------------------------------------
# setters and getters
def get_outputs(self): return self.outputs
def get_inputs(self): return self.inputs
#------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, output_pool=None, visualize=False):
if 'prog' not in self.params['model']: return
# dynamically assemble the graph, based on predicted tokens
if self.params['train_mode']:
ques_programs = batch['gt_layout']
if 'nmn-cap' in self.params['model']:
cap_programs = batch['sh_cap_prog']
else:
ques_programs = output_pool['pred_tokens']
if 'nmn-cap' in self.params['model']:
cap_programs = output_pool['pred_tokens_cap']
tokens = {'ques': ques_programs}
if 'nmn-cap' in self.params['model']: tokens['caption'] = cap_programs
weaver, loom_outputs, invalid_prog \
= self._assembler.assemble(tokens, self, visualize)
# build feed dict from loom
feed_dict = weaver.build_feed_dict(loom_outputs)
# additional feeds
feed_dict.update(self._produce_add_feeds(batch, output_pool, invalid_prog))
return feed_dict
#------------------------------------------------------------
def _produce_add_feeds(self, batch, output_pool, invalid_prog):
feed_dict = {}
# feed invalid Prog
feed_dict[self.inputs['prog_validity']] = np.array(invalid_prog['ques'])
if 'nmn-cap' in self.params['model']:
feed_dict[self.inputs['prog_validity_cap']] = np.array(invalid_prog['cap'])
# additional feeds
feed_dict[self.inputs['img_feat']] = batch['img_feat']
if self.params['use_fact']:
feed_dict[self.inputs['fact']] = batch['fact']
feed_dict[self.inputs['fact_len']] = batch['fact_len']
if 'nmn-cap' in self.params['model']:
feed_dict[self.inputs['align_gt']] = batch['align_gt']
max_time = self.params['max_dec_len']
feed_dict[self.inputs['time']] = np.arange(max_time).reshape([-1, 1])
round_ranges = np.arange(self.params['num_rounds']).reshape([-1, 1])
feed_dict[self.inputs['round']] = round_ranges
if not self.params['train_mode']:
# list of labels to read from output pool conditionally
labels = ['ques_attended', 'cap_attended', 'ques_enc', 'cap_enc']
for label in labels:
if label in self.inputs:
feed_dict[self.inputs[label]] = output_pool[label]
feed_dict[self.inputs['img_feat']] = batch['img_feat']
return feed_dict
#------------------------------------------------------------
# segregating the outputs
def segregrate_outputs(self, output):
    '''
    Goes over the outputs and segregates attention maps and module weights
    for caption and question tokens.
    '''
if 'nmn-cap' in self.params['model']:
cap_tokens = output['pred_tokens_cap'][:, 0]
ques_tokens = output['pred_tokens']
mod_out_type = _module_output_type
mod_dict = self._assembler.module_names
att = output['att']
# logits -> weights when visualizing
weights = output['logits']
    # segregated outputs
sep_att = []
sep_wts = []
wt_labels = []
num_reuse = 0
att_ind = 0
weight_ind = 0
# go over caption
if 'nmn-cap' in self.params['model']:
for t_id in range(self.params['max_dec_len']):
cur_module = mod_dict[cap_tokens[t_id]]
if cur_module == '<eos>': break
if mod_out_type[cur_module] == 'att':
sep_att.append(('cap', t_id, 0, att[att_ind]))
att_ind += 1
if cur_module == '_Find':
wt_labels.append('C_%d' % t_id)
num_reuse += 1
# assume a batch size of 1
for r_id in range(self.params['num_rounds']):
for t_id in range(self.params['max_dec_len']):
cur_module = mod_dict[ques_tokens[t_id, r_id]]
if cur_module == '<eos>':
# even answer has a weight now
if self.params['use_fact']:
wt_labels.append('A%d' % r_id)
num_reuse += 1
break
if mod_out_type[cur_module] == 'att':
sep_att.append(('ques', t_id, r_id, att[att_ind]))
att_ind += 1
if cur_module == '_Refer':
st = weight_ind
end = weight_ind + num_reuse
sep_wts.append((r_id, weights[st:end], wt_labels))
weight_ind += num_reuse
if cur_module == '_Find':
wt_labels.append('Q%d_%d' % (r_id, t_id))
num_reuse += 1
# do not assert if baseline
if 'baseline' in self.params['model']:
return sep_att, sep_wts
for arg in sep_wts:
assert(abs(np.sum(arg[1]) - 1.0) < 1e-5)
# Sanity checks to ensure Refer is not doing anything weird.
assert(weight_ind == weights.shape[0])
assert(att_ind == att.shape[0])
return sep_att, sep_wts
|
corefnmn-main
|
models_vd/executor.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Methods to compute metrics given the list of ranks.
Author: Satwik Kottur
"""
import numpy as np
# static list of metrics
metric_list = ['r1', 'r5', 'r10', 'mean', 'mrr']
# +1 - greater the better
# -1 - lower the better
trends = [1, 1, 1, -1, 1]
def evaluate_metric(ranks, metric):
"""
Args:
ranks: List of ranks
metric: Name of the metric to be computed
Returns:
Appropriate evaluation of the metric
"""
if metric == 'r1':
ranks = ranks.reshape(-1)
return 100 * np.sum(ranks <= 1)/float(ranks.shape[0])
if metric == 'r5':
ranks = ranks.reshape(-1)
return 100 * np.sum(ranks <= 5)/float(ranks.shape[0])
if metric == 'r10':
ranks = ranks.reshape(-1)
return 100 * np.sum(ranks <= 10)/float(ranks.shape[0])
if metric == 'mean':
ranks = ranks.reshape(-1)
return np.mean(ranks)
if metric == 'mrr':
ranks = ranks.reshape(-1)
return np.mean(1/ranks)
def compute_metrics(ranks, silent=False):
"""Compute standard metrics, given the ranks.
Args:
ranks: List of ranks
silent: To decide the verbosity
Returns:
results: Dictionary of metrics
"""
results = {metric: evaluate_metric(ranks, metric) for metric in metric_list}
# pretty print metrics
if not silent:
pretty_print_metrics(results)
return results
def pretty_print_metrics(results):
"""Pretty print the metrics given as a dictionary.
"""
# pretty print metrics
print('\n')
for metric in metric_list: print('\t%s : %.3f' % (metric, results[metric]))
class ExponentialSmoothing:
"""Class responsible to exponentially smooth and track losses.
"""
def __init__(self):
self.value = None
self.blur = 0.95
self.op = lambda x, y: self.blur * x + (1 - self.blur) * y
# add a new value
def report(self, new_val):
    if self.value is None:
self.value = new_val
else:
self.value = {key: self.op(value, new_val[key])
for key, value in self.value.items()}
return self.value
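# --------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original file). The rank
# values below are made up purely to show the expected inputs and outputs.
if __name__ == '__main__':
  dummy_ranks = np.array([1, 3, 7, 20, 2, 50, 4, 9, 1, 11])
  compute_metrics(dummy_ranks)
  smoother = ExponentialSmoothing()
  print(smoother.report({'loss': 1.0}))
  print(smoother.report({'loss': 0.5}))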
|
corefnmn-main
|
util/metrics.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script with supporting functions for the main train program.
"""
import os
import sys
import subprocess
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage import transform, filters
from PIL import Image
def last_relevant(output, length):
batch_size = tf.shape(output)[0]
max_length = tf.shape(output)[1]
out_size = int(output.shape[2])
index = tf.range(0, batch_size) * max_length + (length - 1)
flat = tf.reshape(output, [-1, out_size])
relevant = tf.gather(flat, index)
return relevant
# blending attention map with an image
# source:
# github.com/abhshkdz/neural-vqa-attention/blob/master/attention_visualization.ipynb
def get_blend_map(img, att_map, blur=True, overlap=True):
  # normalize the attention map to [0, 1]
att_map -= att_map.min()
if att_map.max() != 0: att_map /= att_map.max()
image_size = img.shape[:2]
att_map = transform.resize(att_map, image_size, order = 3)
if blur:
att_map = filters.gaussian(att_map, 0.05 * max(img.shape))
#att_map -= att_map.min()
att_map /= att_map.max()
cmap = plt.get_cmap('jet')
att_map_v = cmap(att_map)
att_map_v = np.delete(att_map_v, 3, 2)
att_map_v *= 255
if overlap:
#vis_im = att_map_v * att_map + (1-att_reshaped)*all_white
#vis_im = att_map_v*im + (1-att_reshaped)*all_white
att_map = 1 * (1 - att_map**0.7).reshape(att_map.shape + (1,)) * img \
+ (att_map**0.7).reshape(image_size + (1,)) * att_map_v
return att_map
# pretty prints dictionary
def pretty_print_dict(parsed):
max_len = max([len(ii) for ii in parsed.keys()])
fmt_string = '\t%' + str(max_len) + 's : %s'
print('Arguments:')
#for key_pair in parsed.items(): print(fmt_string % key_pair)
# sort in alphabetical order
keys = [ii for ii in parsed.keys()]
keys.sort()
for key in keys: print(fmt_string % (key, parsed[key]))
# softmax
# correct solution:
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum() # only difference
# interpolate attention
def interpolate_attention(im, att):
# steps:
# 1. reshape the attention to image size (with cubic)
#soft_att = softmax(att)
soft_att = att
att_reshaped = transform.resize(soft_att, im.shape[:2], order=3)
att_reshaped /= np.max(att_reshaped)
att_reshaped = att_reshaped[..., np.newaxis]
# heat map
#cmap = plt.get_cmap('jet')
#vis_im = cmap(att_reshaped)
#vis_im *= (255 if im.dtype == np.uint8 else 1)
# white + image
all_white = np.ones_like(im) * (255 if im.dtype == np.uint8 else 1)
vis_im = att_reshaped * im + (1 - att_reshaped) * all_white
vis_im = vis_im.astype(im.dtype)
return vis_im
# shuffling data for image - caption to train alignment
def shuffle(arg_list, batch_size):
# get the batch size
#batch_size = arg_list[0].shape[0] // 10
# first five remain the same
indices = np.random.randint(0, batch_size-1, 10*batch_size)
for ii in range(batch_size):
indices[10*ii:10*ii+5] = ii
diag = indices[10*ii+5:10*ii+10]
diag[diag >= ii] += 1
indices[10*ii+5:10*ii+10] = diag
shuffled = [None for args in arg_list]
for ii, args in enumerate(arg_list):
assert batch_size == args.shape[0]
shuffled[ii] = args[indices]
return shuffled
# loading an image and converting to numpy
def load_image(file_name):
img = Image.open(file_name)
img.load()
data = np.asarray(img, dtype="int32")
return data
# temporary launching of evaluation job (slurm)
def launch_evaluation_job(output_path, checkpoint):
script_path = 'run_slurm_eval_mnist.sh'
# read and edit accordingly
  with open(script_path, 'r') as file_id:
    template = file_id.read()
  # write a temporary script, run and remove
  temp_path = script_path.replace('.sh', '_temp.sh')
  with open(temp_path, 'w') as file_id:
    file_id.write(template % (output_path, checkpoint))
  subprocess.call('sbatch %s' % temp_path, shell=True)
def save_batch(batch, save_path, terminate=False):
"""Saves a batch to visualize or debug.
Args:
batch: List of intermediate outputs (see visualize_sl.py for example)
save_path: Path to save the batch
terminate: In debug mode, terminate the program
"""
print('Saved batch: {0}'.format(save_path))
  np.save(save_path, batch)
assert not terminate, 'Program terminated!'
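# --------------------------------------------------------------------------
# Illustrative sketch (added; not part of the original file) of the shuffle
# layout: for every image i, positions 10*i .. 10*i+4 keep the aligned index
# i and positions 10*i+5 .. 10*i+9 point to other (misaligned) images.
if __name__ == '__main__':
  demo_ids = np.arange(4).reshape(4, 1)
  print(shuffle([demo_ids], 4)[0].reshape(4, 10))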
|
corefnmn-main
|
util/support.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
# Script that contains methods for processing answers and questions
# coding: utf-8
import re, pdb
from unidecode import unidecode
# Method used to clean up and convert non ascii to unicode
def clean_non_ascii(text):
try:
text = text.decode('ascii')
except:
# Contains non-ascii symbols
# Check if it needs to be converted to unicode
try: text = unicode(text, encoding = 'utf-8')
except: pass
text = unidecode(text)
return text
|
corefnmn-main
|
util/clean.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script that converts Stanford parser outputs to neural module
network layout outputs.
"""
import argparse
import copy
import json
import os
import pdb
import re
import sys
import sexpdata
import numpy as np
from models_vd.assembler import Assembler
from tqdm import tqdm as progressbar
def extract_parse(p):
"""Given string, extracts a parse.
"""
if isinstance(p, sexpdata.Symbol):
return p.value()
elif isinstance(p, int):
return str(p)
elif isinstance(p, bool):
return str(p).lower()
elif isinstance(p, float):
return str(p).lower()
return tuple(extract_parse(q) for q in p)
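# Added illustration (not in the original file): for the s-expression
# '(describe (find))', sexpdata.loads followed by extract_parse yields the
# nested tuple ('describe', ('find',)).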
def parse_tree(p):
if "'" in p:
p = "none"
parsed = sexpdata.loads(p)
extracted = extract_parse(parsed)
return extracted
parse2module_dict = {'find': '_Find',
'relate': '_Transform',
'and': '_And',
'is': '_Describe', # All the top modules go to '_Describe'
'describe': '_Describe'
}
def flatten_layout(parse):
# Postorder traversal to generate Reverse Polish Notation (RPN)
if isinstance(parse, str):
return [parse2module_dict[parse]]
RPN = []
head = parse[0]
body = parse[1:]
module = parse2module_dict[head]
for m in body:
RPN += flatten_layout(m)
RPN += [module]
return RPN
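# Added illustration (not in the original file): the parse
# ('describe', ('relate', 'find')) flattens in postorder to
# ['_Find', '_Transform', '_Describe'].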
def extract_set(params):
# assembler to look for incorrect programs
assembler = Assembler(params.prog_vocab_file)
# manual correction to layouts
layout_correct = {('_Find', '_Transform', '_And', '_Describe')
:['_Find', '_Transform', '_Describe'],
('_Transform', '_Describe')
:['_Find', '_Transform', '_Describe'],
('_Transform', '_Transform', '_And', '_Describe')
:['_Find', '_Transform', '_Transform', '_Describe'],
('_Describe',)
:['_Find', '_Describe'],
('_Transform', '_Find', '_And', '_Describe')
:['_Find', '_Transform', '_Describe']}
with open(params.nmn_file) as f:
# drop the spans
read_layouts = [re.sub(r'\[\d*,\d*\]', '', ll) for ll in f.readlines()]
layouts = [flatten_layout(parse_tree(ll)) for ll in read_layouts]
layouts = [layout_correct.get(tuple(ii), tuple(ii)) for ii in layouts]
with open(params.nmn_file) as f:
# extracting spans as well
lines = [ii for ii in f.readlines()]
attentions = []
for index, ii in enumerate(lines):
layout = layouts[index]
# extract the spans
    matches = re.findall(r'(\w\w)\[(\d*),(\d*)\]', ii)
# match module with attention, if present
att = []
for token in layout:
candidates = []
if token == '_Find':
candidates = [jj for jj in matches if jj[0] == 'nd']
if token == '_Transform':
candidates = [jj for jj in matches if jj[0] == 'te']
if token == '_Describe':
        candidates = [jj for jj in matches
                      if jj[0] != 'te' and jj[0] != 'nd']
if len(candidates) >= 1:
att.append((int(candidates[0][1]), int(candidates[0][2])))
matches.remove(candidates[0])
else:
att.append((0, 0))
# record attentions and layouts
attentions.append(att)
# correct the layouts according to the above dictionary
layouts = [layout_correct.get(tuple(ii), ii) for ii in layouts]
layout_set = {tuple(l) for l in layouts}
print('Found %d unique layouts' % len(layout_set))
for l in layout_set:
print(' ', ' '.join(list(l)))
# check whether the layout is valid
for l in layout_set:
batch = assembler.module_list2tokens(l, T=20)
validity, error = assembler.sanity_check_program(batch)
if not validity:
raise Exception('invalid expr:' + str(l) + ' ' + error)
# read the original data path
with open(params.visdial_file, 'r') as file_id:
vd_data = json.load(file_id)
# question id to layout dictionary
if params.question:
qid2layout_dict = {}
for datum in progressbar(vd_data['data']['dialogs']):
img_id = datum['image_id']
for r_id, round_datum in enumerate(datum['dialog']):
q_id = img_id * 10 + r_id
q_layout = layouts[round_datum['question']]
# record
qid2layout_dict[q_id] = q_layout
np.save(params.save_path, np.array(qid2layout_dict))
else:
np.save(params.save_path, np.array(layouts))
print('Saving to: ' + params.save_path)
save_file_att = params.save_path.replace('.layout', '.attention')
print('Saving (att) to: ' + save_file_att)
np.save(save_file_att, np.array(attentions))
set_layout_length = [len(l) for l in layouts]
return set_layout_length
def main(FLAGS):
# check if it is question or caption
FLAGS.question = 'ques' in FLAGS.nmn_file
FLAGS.save_path = FLAGS.nmn_file.replace('pgm', 'layout')
print('Saving at: %s' % FLAGS.save_path)
layout_length = extract_set(FLAGS)
print('Program length distribution:')
print(np.unique(layout_length, return_counts=True))
if __name__ == '__main__':
title = 'Converting parser outputs to neural module network programs'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--nmn_file', required=True,
help='Neural Module file path')
parser.add_argument('--visdial_file', required=True,
help='Path to the original visdial file')
parser.add_argument('--prog_vocab_file', required=True,
help='Path to program vocabulary file for the assembler')
FLAGS = parser.parse_args()
main(FLAGS)
|
corefnmn-main
|
util/convert_nmn_layouts.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Extracts coreference supervision for visdial dataset using off-the-shelf system.
"""
import argparse
import json
import sys
import neuralcoref
import spacy
from tqdm import tqdm as progressbar
def get_question_answer(data, i_dialog, i_question):
"""Extracts question + answer for a dialog turn.
Args:
data: Visdial data
i_dialog: Index for the dialog
i_question: Index for the turn
"""
dialog = data["data"]["dialogs"][i_dialog]["dialog"][i_question]
return (
data["data"]["questions"][dialog["question"]],
data["data"]["answers"][dialog["answer"]],
)
def get_coref_cluster_sentence(utterance_cluster):
"""Visualize the co-reference clusters as string using, e.g [1 ].
Args:
utterance_cluster: Cluster corresponding to the utterance
"""
sentence = ""
for utterance in utterance_cluster:
if not sentence:
# print(utterance["sentence"])
sentence = list(" " * (len(utterance["sentence"]) + 2))
s = utterance["start_char"]
sentence[s] = "["
sentence[utterance["end_char"]] = "]"
s += 1
if not sentence[s] == " ":
s += 2
id_str = str(utterance["cluster_id"])
sentence[s : (s + len(id_str))] = id_str
# print("".join(sentence))
return "".join(sentence)
def get_coref_cluster_list(utterance_cluster_map, ui):
if ui in utterance_cluster_map:
return utterance_cluster_map[ui]
else:
return []
def extract_corefs(data_file_name, out_file_name):
print("Reading: {}".format(data_file_name))
with open(data_file_name) as data_file:
data = json.load(data_file)
n_dialogs = len(data["data"]["dialogs"])
coref = neuralcoref.Coref()
# NOTE: neuralcoref gets stuck if there are numbers with an apostrophe.
# Replacing them with equally long strings as a temporary fix.
def remove_numbered_age(string):
REPLACE_STRINGS = {
"10's": "10ss",
"20's": "20ss",
"30's": "30ss",
"40's": "40ss",
"50's": "50ss",
"60's": "60ss",
"70's": "70ss",
"80's": "80ss",
"90's": "90ss",
"100's": "100ss",
}
final_string = string
for key, replacement in REPLACE_STRINGS.items():
final_string = final_string.replace(key, replacement)
return final_string
for i_dialog in progressbar(range(n_dialogs)):
dialog = data["data"]["dialogs"][i_dialog]
str_dialog = dialog["caption"] + ". "
list_dialog = [dialog["caption"] + "."]
for i_question in range(len(dialog["dialog"])):
q, a = get_question_answer(data, i_dialog, i_question)
str_dialog += q + "? " + a + ". "
list_dialog.append(q + "?")
list_dialog.append(a + ".")
list_dialog = [remove_numbered_age(ii) for ii in list_dialog]
clusters = coref.one_shot_coref(utterances=list_dialog)
mentions = coref.get_mentions()
cluster_keys = list(clusters.keys())
# match from utterance to cluster
utterance_cluster_map = {}
utterance_referrer_map = {}
utterance_reference_map = {}
for i_key in range(len(cluster_keys)):
            # assume reference is the first occurrence
reference = min(clusters[cluster_keys[i_key]])
cluster_dict_ref = {}
cluster_dict_ref["reference_sentence_id"] = mentions[
reference
].utterance_index
cluster_dict_ref["reference_start_word"] = mentions[reference].start
cluster_dict_ref["reference_end_word"] = mentions[reference].end
cluster_dict_ref["reference_start_char"] = mentions[reference].start_char
cluster_dict_ref["reference_end_char"] = mentions[reference].end_char
for i_mention in clusters[cluster_keys[i_key]]:
cluster_dict = {}
ui = mentions[i_mention].utterance_index
cluster_dict["cluster_id"] = i_key
cluster_dict["start_word"] = mentions[i_mention].start
cluster_dict["end_word"] = mentions[i_mention].end
cluster_dict["start_char"] = mentions[i_mention].start_char
cluster_dict["end_char"] = mentions[i_mention].end_char
cluster_dict["sentence"] = list_dialog[ui]
if ui not in utterance_cluster_map:
utterance_cluster_map[ui] = []
utterance_referrer_map[ui] = []
utterance_reference_map[ui] = []
utterance_cluster_map[ui].append(cluster_dict)
if i_mention == reference:
utterance_reference_map[ui].append(cluster_dict)
else:
cluster_dict.update(cluster_dict_ref)
utterance_referrer_map[ui].append(cluster_dict)
cluster_list = get_coref_cluster_list(utterance_cluster_map, 0)
data["data"]["dialogs"][i_dialog]["caption_coref_clusters"] = cluster_list
data["data"]["dialogs"][i_dialog][
"caption_coref_visualized"
] = get_coref_cluster_sentence(cluster_list)
data["data"]["dialogs"][i_dialog][
"caption_reference_clusters"
] = get_coref_cluster_list(utterance_reference_map, 0)
for i_question in range(len(dialog["dialog"])):
# set which utterance it came from
cluster_list = get_coref_cluster_list(
utterance_cluster_map, (i_question + 1) * 2
)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"answer_coref_clusters"
] = cluster_list
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"answer_coref_visualized"
] = get_coref_cluster_sentence(cluster_list)
cluster_list = get_coref_cluster_list(
utterance_cluster_map, (i_question) * 2 + 1
)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"question_coref_clusters"
] = cluster_list
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"question_coref_visualized"
] = get_coref_cluster_sentence(cluster_list)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"answer_referrer_clusters"
] = get_coref_cluster_list(utterance_referrer_map, (i_question + 1) * 2)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"question_referrer_clusters"
] = get_coref_cluster_list(utterance_referrer_map, (i_question) * 2 + 1)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"answer_reference_clusters"
] = get_coref_cluster_list(utterance_reference_map, (i_question + 1) * 2)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"question_reference_clusters"
] = get_coref_cluster_list(utterance_reference_map, (i_question) * 2 + 1)
print("Saving: {}".format(out_file_name))
with open(out_file_name, "w") as outfile:
json.dump(data, outfile)
return clusters, coref, data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--input_data_path", required=True, help="Path to VisDial JSON files"
)
parser.add_argument(
"--output_save_path", default="-", help="Path to save the coreferences"
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
extract_corefs(parsed_args["input_data_path"], parsed_args["output_save_path"])
|
corefnmn-main
|
util/extract_coreference_supervision.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
import re
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)')
def tokenize(sentence):
tokens = SENTENCE_SPLIT_REGEX.split(sentence.lower())
tokens = [t.strip() for t in tokens if len(t.strip()) > 0]
return tokens
def load_str_list(fname):
with open(fname) as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
return lines
class VocabDict:
def __init__(self, vocab_file):
self.word_list = load_str_list(vocab_file)
self.word2idx_dict = {w:n_w for n_w, w in enumerate(self.word_list)}
self.num_vocab = len(self.word_list)
self.UNK_idx = self.word2idx_dict['<unk>'] \
if '<unk>' in self.word2idx_dict else None
def idx2word(self, n_w):
return self.word_list[n_w]
def word2idx(self, w):
if w in self.word2idx_dict:
return self.word2idx_dict[w]
elif self.UNK_idx is not None:
return self.UNK_idx
else:
      raise ValueError(('word %s not in dictionary '
                        '(while dictionary does not contain <unk>)') % w)
def tokenize_and_index(self, sentence):
inds = [self.word2idx(w) for w in tokenize(sentence)]
return inds
# add new tokens for decoding
def add_new_tokens(self, new_token_list):
for new_token in new_token_list:
if new_token in self.word_list:
        print('%s already exists in vocabulary!' % new_token)
        continue
      print('Adding %s to vocabulary' % new_token)
      self.word_list.append(new_token)
      self.word2idx_dict[new_token] = self.num_vocab
      self.num_vocab += 1
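# --------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original file). The
# vocabulary words and temporary file below are assumptions made purely for
# demonstration.
if __name__ == '__main__':
  import tempfile
  with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fid:
    fid.write('\n'.join(['<unk>', 'a', 'cat', 'is', 'there']))
    demo_vocab_path = fid.name
  demo_vocab = VocabDict(demo_vocab_path)
  # unknown words ('dog', '?') map to the <unk> index
  print(demo_vocab.tokenize_and_index('Is there a dog?'))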
|
corefnmn-main
|
util/text_processing.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Given the data file, create a vocabulary file and extract the glove features
for embedding initializations.
"""
import argparse
from collections import defaultdict
import json
import re
import sys
from unidecode import unidecode
import matplotlib.pyplot as plt
from nltk.tokenize import word_tokenize
import numpy as np
import spacy
def main(args):
# initialize vocab from file
print('Reading vocabulary from: %s' % args.vocab_file)
with open(args.vocab_file, 'r') as fileId:
vocab_dict = json.load(fileId)
vocab_set = set(vocab_dict['word2ind'].keys())
# Though we have collected all the words from source vocabulary, add <UNK>
# and add other tokens for answer decoding
# <start> <end> <pad>
vocab_set.add('<unk>')
vocab_set.add('<start>')
vocab_set.add('<end>')
vocab_set.add('<pad>')
print('Vocabulary size: %d, keeping all of them ..' % len(vocab_set))
vocab_list = list(vocab_set)
vocab_list.sort()
print('Saving vocabulary: ' + args.save_path)
with open(args.save_path, 'w') as file_id:
file_id.writelines([w.replace('\u2019', '') + '\n' for w in vocab_list])
# Collect glove vectors for the words, and save.
glove_dim = 300
glove_mat = np.zeros((len(vocab_list), glove_dim), np.float32)
nlp = spacy.load('en_vectors_web_lg')
for index, word in enumerate(vocab_list):
glove_mat[index] = nlp(word).vector
glove_mat_file = args.save_path.replace('.txt', '_glove.npy')
print('Saving glove vectors: ' + glove_mat_file)
np.save(glove_mat_file, glove_mat)
if __name__ == '__main__':
  title = 'Create vocabulary file and extract GloVe embeddings'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--vocab_file', required=True,
help='Vocabulary file from original visdial code')
parser.add_argument('--save_path', required=True,
help=('Path to save the vocabulary text file and '
'glove embeddings for corefnmn code'))
args = parser.parse_args()
main(args)
|
corefnmn-main
|
util/collect_glove_features.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Final preprocessing script to create the image dialog database that
can be used to serve batches by the batch loader while training and evaluation
for MNIST experiments.
"""
import argparse
from collections import defaultdict
import copy
import json
import os
import pdb
import sys
import numpy as np
from nltk.tokenize import word_tokenize
from tqdm import tqdm as progressbar
from util import text_processing, clean, support
# program supervision
# question types vs layouts (manually done)
prog_ques_type = {
'Qa': '_Find _Exist',
'Qb': '_Find _Count',
'Qc': '_Find _Describe',
'Qd': '_Refer _Transform _Describe',
'Qe': '_Refer _Not _Find _And _Exist'
}
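# Added note (not in the original file): for rounds after the first, build_imdb
# below expands '_Find' in Qa/Qb layouts to '_Refer _Find _And' (e.g. Qa becomes
# '_Refer _Find _And _Exist') and replaces '_Find' with '_Refer' for Qc.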
def build_imdb(data, split, vocab, ans_list, FLAGS):
"""Function to build the image dialog dataset, given the data split.
Args:
data: MNIST Dialog dataset json
split: Data split -- train | valid | test
vocab: Vocabulary object created from question vocabulary (train only)
ans_list: List of answers, created from train set
FLAGS: Command line arguments
Returns:
imdb: Image dialog database to train corefnmn
"""
print('Building imdb for %s' % split)
source = data['%sExamples' % split]
ans_dict = {word: ii for ii, word in enumerate(ans_list)}
# process and tokenize all questions and answers
tokenizer = lambda x: [vocab.word2idx(ii) for ii in
word_tokenize(clean.clean_non_ascii(x))]
print('Collecting and tokenizing questions')
ques_dict = {}
ques_list = []
for datum in progressbar(source):
for round_datum in datum['qa']:
ques = round_datum['question']
if ques in ques_dict: continue
else:
ques_list.append(ques)
ques_dict[ques] = len(ques_dict)
clean_ques = [tokenizer(ques.lower()) for ques in progressbar(ques_list)]
max_ques_len = max([len(ii) for ii in clean_ques])
ques_tokens = np.zeros((len(clean_ques), max_ques_len)).astype('int32')
ques_tokens.fill(vocab.word2idx('<pad>'))
ques_lens = np.zeros(len(clean_ques)).astype('int32')
for q_id, tokens in progressbar(enumerate(clean_ques)):
ques_lens[q_id] = len(tokens)
ques_tokens[q_id, :ques_lens[q_id]] = np.array(tokens)
#--------------------------------------------------------------------------
imdb = {}
# number of entries in the database
num_dialogs = len(source)
imdb['data'] = [None] * num_dialogs
imdb['ans_inds'] = ans_list
imdb['ques'], imdb['ques_len'] = ques_tokens, ques_lens
#--------------------------------------------------------------------------
for dialog_id, datum in progressbar(enumerate(source)):
img_id = datum['img']
img_path = os.path.join(FLAGS.image_root, split, '%05d.jpg' % img_id)
# compact bundle with all the information
bundle = {'image_name': img_id, 'image_path': img_path,
'question_id': [], 'question_ind': [], 'answer_ind': [],
'gt_layout_tokens': []}
# bundle as questions in a conversation together
for r_id, round_data in enumerate(datum['qa']):
q_id = img_id * 10 + r_id
bundle['question_id'].append(q_id)
ques_ind = ques_dict[round_data['question']]
bundle['question_ind'].append(ques_ind)
      answer = ans_dict.get(round_data['answer'], ans_dict['<unk>'])
      bundle['answer_ind'].append(answer)
      # sanity check: flag answers missing from the train answer list
      if answer == ans_dict['<unk>']:
        print('Unknown answer: %s' % round_data['answer'])
# layout
layout = prog_ques_type[round_data['metaInfo'][0]]
# replace find with refer
if r_id > 0 and round_data['metaInfo'][0] in ['Qa', 'Qb']:
        layout = layout.replace('_Find', '_Refer _Find _And')
if r_id > 0 and round_data['metaInfo'][0] == 'Qc':
        layout = layout.replace('_Find', '_Refer')
"""Layout modifications for NMN version (baseline)
if round_data['metaInfo'][0] == 'Qd':
layout = layout.replace('Refer', 'Find')
if round_data['metaInfo'][0] == 'Qe':
layout = '_Find _Exist'
"""
# layout for independent questions
bundle['gt_layout_tokens'].append(layout)
# record
imdb['data'][dialog_id] = bundle
return imdb
def save_vocabularies(train_examples, FLAGS):
"""Extract and save vocabularies for questions and answers.
Args:
train_examples: Training examples
Returns:
words: Vocabulary (dictionary) extracted from the questions
ans_list: List of possible answers, extracted from train set
"""
words = {}
ans_list = {}
for datum in progressbar(train_examples):
for ques_datum in datum['qa']:
token = ques_datum['answer'].lower()
words[token] = words.get(token, 0) + 1
ans_list[token] = 1
for token in word_tokenize(ques_datum['question']):
token = token.lower()
words[token] = words.get(token, 0) + 1
# additional tokens
words['<pad>'] = 1
words['<start>'] = 1
words['<end>'] = 1
words['<unk>'] = 1
print('Saving to: ' + FLAGS.vocab_save_path)
with open(FLAGS.vocab_save_path, 'w') as file_id:
file_id.write('\n'.join(sorted(words.keys())))
# answer lists
ans_list = list(ans_list.keys())
ans_list.append('<unk>')
print('Saving to: ' + FLAGS.answers_save_path)
with open(FLAGS.answers_save_path, 'w') as file_id:
file_id.write('\n'.join(ans_list))
def save_mean_std_image(FLAGS):
"""Compute and save mean and std image from train images.
Args:
FLAGS: Commandline arguments
"""
import pdb
image_list = os.listdir(os.path.join(FLAGS.image_root, 'train'))
# compute the mean of the train images and save
mean_img = None
std_img = None
for image_name in progressbar(image_list):
image_path = os.path.join(FLAGS.image_root, 'train', image_name)
image = support.load_image(image_path)
if mean_img is None:
mean_img = image
std_img = image ** 2
else:
mean_img += image
std_img += image ** 2
mean_img = mean_img / len(image_list)
std_img = std_img / len(image_list)
mean_img = np.mean(np.mean(mean_img, 0), 0)
std_img = np.mean(np.mean(std_img, 0), 0)
std_img = np.sqrt(std_img - mean_img ** 2)
print('Saving mean and std at: %s' % FLAGS.mean_save_path)
np.save(FLAGS.mean_save_path, {'mean_img': mean_img, 'std_img': std_img})
def main(FLAGS):
"""Main function.
1. Extracts vocabularies from questions and answers.
2. Creates and saves image dialog databases for train | valid | test splits.
Args:
FLAGS: Command-line options.
"""
# Read the dataset.
with open(FLAGS.json_path) as file_id:
data = json.load(file_id)
# Extract vocabulary and answer list.
save_vocabularies(data['trainExamples'], FLAGS)
# Extract mean and std of train images.
save_mean_std_image(FLAGS)
# Read the vocabulary files (questions | answers) and create objects
vocab = text_processing.VocabDict(FLAGS.vocab_save_path)
with open(FLAGS.answers_save_path, 'r') as file_id:
ans_list = [ii.strip('\n') for ii in file_id.readlines()]
# data splits
for split in ['train', 'valid', 'test']:
imdb_split = build_imdb(data, split, vocab, ans_list, FLAGS)
save_path = os.path.join(FLAGS.imdb_save_path, 'imdb_%s.npy' % split)
print('Saving imdb build: %s' % save_path)
np.save(save_path, np.array(imdb_split))
if __name__ == '__main__':
title = 'Process all the information into a database for easier access'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--json_path', required=True,
help='Path to MNIST Dialog dataset json file')
parser.add_argument('--image_root', required=True,
help='Path to root folder of all the images')
parser.add_argument('--vocab_save_path', required=True,
help='Path to save the vocabulary from training set')
parser.add_argument('--answers_save_path', required=True,
help='Path to save the answers file from training set')
parser.add_argument('--imdb_save_path', required=True,
help='Path to save the image dialog dataset')
parser.add_argument('--mean_save_path', required=True,
help='Path to save the mean and std of train images')
FLAGS = parser.parse_args()
main(FLAGS)
|
corefnmn-main
|
util/build_imdb_mnist.py
|
corefnmn-main
|
util/__init__.py
|
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
def conv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
# input has shape [batch, in_height, in_width, in_channels]
input_dim = bottom.get_shape().as_list()[-1]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# filter has shape [filter_height, filter_width, in_channels, out_channels]
weights = tf.get_variable("weights",
[kernel_size, kernel_size, input_dim, output_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
conv = tf.nn.conv2d(bottom, filter=weights,
strides=[1, stride, stride, 1], padding=padding)
if bias_term:
conv = tf.nn.bias_add(conv, biases)
return conv
def conv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
conv = conv_layer(name, bottom, kernel_size, stride, output_dim, padding,
bias_term, weights_initializer, biases_initializer, reuse=reuse)
relu = tf.nn.relu(conv)
return relu
def deconv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
                 bias_term=True, weights_initializer=None,
                 biases_initializer=None, reuse=None):
# input_shape is [batch, in_height, in_width, in_channels]
input_shape = bottom.get_shape().as_list()
batch_size, input_height, input_width, input_dim = input_shape
output_shape = [batch_size, input_height*stride, input_width*stride, output_dim]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# filter has shape [filter_height, filter_width, out_channels, in_channels]
weights = tf.get_variable("weights",
[kernel_size, kernel_size, output_dim, input_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
deconv = tf.nn.conv2d_transpose(bottom, filter=weights,
output_shape=output_shape, strides=[1, stride, stride, 1],
padding=padding)
if bias_term:
deconv = tf.nn.bias_add(deconv, biases)
return deconv
def deconv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
deconv = deconv_layer(name, bottom, kernel_size, stride, output_dim, padding,
bias_term, weights_initializer, biases_initializer, reuse=reuse)
relu = tf.nn.relu(deconv)
return relu
def pooling_layer(name, bottom, kernel_size, stride):
pool = tf.nn.max_pool(bottom, ksize=[1, kernel_size, kernel_size, 1],
strides=[1, stride, stride, 1], padding='SAME', name=name)
return pool
def fc_layer(name, bottom, output_dim, bias_term=True, weights_initializer=None,
biases_initializer=None, reuse=None):
# flatten bottom input
# input has shape [batch, in_height, in_width, in_channels]
shape = bottom.get_shape().as_list()
input_dim = 1
for d in shape[1:]:
input_dim *= d
flat_bottom = tf.reshape(bottom, [-1, input_dim])
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# weights has shape [input_dim, output_dim]
weights = tf.get_variable("weights", [input_dim, output_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
if bias_term:
fc = tf.nn.xw_plus_b(flat_bottom, weights, biases)
else:
fc = tf.matmul(flat_bottom, weights)
return fc
def fc_relu_layer(name, bottom, output_dim, bias_term=True,
weights_initializer=None, biases_initializer=None, reuse=None):
fc = fc_layer(name, bottom, output_dim, bias_term, weights_initializer,
biases_initializer, reuse=reuse)
relu = tf.nn.relu(fc)
return relu
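# --------------------------------------------------------------------------
# Illustrative usage sketch (added; not part of the original file); assumes
# TensorFlow 1.x with tf.contrib, as the layers above do. Shapes are arbitrary.
if __name__ == '__main__':
  images = tf.placeholder(tf.float32, [None, 28, 28, 3])
  conv1 = conv_relu_layer('conv1', images, kernel_size=3, stride=1, output_dim=16)
  pool1 = pooling_layer('pool1', conv1, kernel_size=2, stride=2)
  logits = fc_layer('fc1', pool1, output_dim=10)
  print(conv1.shape, pool1.shape, logits.shape)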
|
corefnmn-main
|
util/cnn.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to flatten the dataset for Stanford parser.
"""
import argparse
import json
import sys
from unidecode import unidecode
from tqdm import tqdm as progressbar
def clean_non_ascii(text):
"""Method to clean up and convert non-ascii to unicode.
"""
try:
text = text.decode('ascii')
except:
# Contains non-ascii symbols
# Check if it needs to be converted to unicode
try:
text = unicode(text, encoding = 'utf-8')
except:
pass
text = unidecode(text)
return text
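# Added illustration (not in the original file): clean_non_ascii('café')
# returns 'cafe' via unidecode transliteration.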
def main(args):
# reading data
print('Reading from: ' + args.data_file)
with open(args.data_file, 'r') as file_id:
data = json.load(file_id)
# open a text file to write the questions
save_path = args.data_file.replace('.json', '_ques_flat.txt')
print('Saving to: ' + save_path)
with open(save_path, 'w') as file_id:
for ques in progressbar(data['data']['questions']):
file_id.write(clean_non_ascii(ques) + ' ?\n')
# open a text file to write the captions
save_path = args.data_file.replace('.json', '_cap_flat.txt')
print('Saving to: ' + save_path)
with open(save_path, 'w') as file_id:
captions = [ii['caption'] for ii in data['data']['dialogs']]
for cap in captions:
file_id.write(clean_non_ascii(cap) + ' .\n')
if __name__ == '__main__':
title = 'Flattening the dataset to a text file'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--data_file', required=True,
help='Data file path')
args = parser.parse_args()
main(args)
|
corefnmn-main
|
util/dataset_to_text.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to read the data files and emit sentences as a file.
"""
import argparse
import sys
def main(args):
print('Reading : ' + args.parser_file)
with open(args.parser_file, 'r') as file_id:
lines = [ii.strip('\n') for ii in file_id.readlines()]
# compress trees from multiple lines -> single line
trees = []
cur_tree = ''
for line in lines:
if line == '':
trees.append(cur_tree)
cur_tree = ''
else:
cur_tree += line
# write back to another file
save_path = args.parser_file.replace('.sps', '_compress.sps')
print('Saving to: ' + save_path)
with open(save_path, 'w') as file_id:
file_id.write('\n'.join(trees))
if __name__ == '__main__':
title = 'Restructure Stanford Parser to a single line'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--parser_file', required=True,
help='Stanford parser output file')
args = parser.parse_args()
main(args)
|
corefnmn-main
|
util/compress_parser_trees.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Final preprocessing script to create the image dialog database that
can be used to serve batches by the batch loader while training and evaluation.
"""
import argparse
from collections import defaultdict
import copy
import json
import os
import pdb
import sys
import numpy as np
from nltk.tokenize import word_tokenize
from tqdm import tqdm as progressbar
from util import text_processing, clean
stop_words = ['the', 'a', 'an', 'you', 'was', 'and', 'are']
def build_imdb(FLAGS):
"""Method to construct and save the image-database for the dataset
"""
print('Building imdb for visdial split: %s' % FLAGS.visdial_file)
qid2layout_dict = np.load(FLAGS.ques_prog_file)[()]
ques_att_file = FLAGS.ques_prog_file.replace('.layout', '.attention')
ques_prog_att = np.load(ques_att_file)[()]
cap_progs = np.load(FLAGS.cap_prog_file)[()]
cap_att_file = FLAGS.cap_prog_file.replace('.layout', '.attention')
cap_prog_att = np.load(cap_att_file)[()]
vocab = text_processing.VocabDict(FLAGS.vocab_file)
# load the data
with open(FLAGS.visdial_file, 'r') as file_id:
vd_data = json.load(file_id)
# load the reference data
with open(FLAGS.coreference_file, 'r') as file_id:
references = json.load(file_id)
references = references['data']['dialogs']
# coco_name = img_split + '2014'
# img_root = os.path.abspath(image_dir % coco_name)
# feat_root = os.path.abspath(feature_dir % coco_name)
# img_name_format = 'COCO_' + coco_name + '_%012d'
# process and tokenize all questions and answers
tokenizer = lambda x, suff: [vocab.word2idx(ii) for ii in
word_tokenize(clean.clean_non_ascii(x + suff))]
print('Tokenizing captions')
caption_list = [ii['caption'] for ii in vd_data['data']['dialogs']]
clean_cap = [tokenizer(cap, '') for cap in progressbar(caption_list)]
max_cap_len = max([len(ii) for ii in clean_cap])
cap_tokens = np.zeros((len(clean_cap), max_cap_len)).astype('int32')
cap_tokens.fill(vocab.word2idx('<pad>'))
cap_lens = np.zeros(len(clean_cap)).astype('int32')
for q_id, tokens in progressbar(enumerate(clean_cap)):
cap_lens[q_id] = len(tokens)
cap_tokens[q_id, :cap_lens[q_id]] = np.array(tokens)
print('Tokenizing questions')
question_list = vd_data['data']['questions']
clean_ques = [tokenizer(ques, '?') for ques in progressbar(question_list)]
max_ques_len = max([len(ii) for ii in clean_ques])
ques_tokens = np.zeros((len(clean_ques), max_ques_len)).astype('int32')
ques_tokens.fill(vocab.word2idx('<pad>'))
ques_lens = np.zeros(len(clean_ques)).astype('int32')
for q_id, tokens in progressbar(enumerate(clean_ques)):
ques_lens[q_id] = len(tokens)
ques_tokens[q_id, :ques_lens[q_id]] = np.array(tokens)
print('Tokenizing answers')
answer_list = vd_data['data']['answers']
clean_ans = [tokenizer(ans, '') for ans in progressbar(answer_list)]
max_ans_len = max([len(ii) for ii in clean_ans])
ans_tokens = np.zeros((len(clean_ans), max_ans_len)).astype('int32')
ans_tokens.fill(vocab.word2idx('<pad>'))
ans_lens = np.zeros(len(clean_ans)).astype('int32')
ans_in = np.zeros((len(clean_ans), max_ans_len + 1)).astype('int32')
ans_out = np.zeros((len(clean_ans), max_ans_len + 1)).astype('int32')
ans_in.fill(vocab.word2idx('<pad>'))
ans_out.fill(vocab.word2idx('<pad>'))
start_token_id = vocab.word2idx('<start>')
end_token_id = vocab.word2idx('<end>')
ans_in[:, 0] = start_token_id
for a_id, tokens in progressbar(enumerate(clean_ans)):
ans_lens[a_id] = len(tokens)
answer = np.array(tokens)
ans_tokens[a_id, :ans_lens[a_id]] = answer
ans_in[a_id, 1:ans_lens[a_id]+1] = answer
ans_out[a_id, :ans_lens[a_id]] = answer
ans_out[a_id, ans_lens[a_id]] = end_token_id
ans_lens += 1
imdb = {}
# number of entries in the database
num_dialogs = len(vd_data['data']['dialogs'])
imdb['data'] = [None] * num_dialogs
imdb['ans'], imdb['ans_len'] = ans_tokens, ans_lens
imdb['ans_in'], imdb['ans_out'] = ans_in, ans_out
imdb['ques'], imdb['ques_len'] = ques_tokens, ques_lens
imdb['cap'], imdb['cap_len'] = cap_tokens, cap_lens
imdb['cap_prog'], imdb['cap_prog_att'] = cap_progs, np.array(cap_prog_att)
for dialog_id, datum in progressbar(enumerate(vd_data['data']['dialogs'])):
img_id = datum['image_id']
img_path = FLAGS.image_path_format % img_id
feat_path = FLAGS.feature_path % img_id
# compact bundle with all the information
bundle = {'image_name': img_id, 'image_path': img_path,
'feature_path': feat_path, 'caption_ind': dialog_id,
'question_id': [], 'question_ind': [], 'answer_ind': [],
'option_ind': [], 'gt_ind' : [], 'gt_layout_tokens': [],
'gt_layout_att': []}
# reference datum
refer_datum = references[dialog_id]
assert(refer_datum['image_id'] == img_id)
# for each cluster, get the first mention
clusters = {}
caption_clusters = (refer_datum['caption_reference_clusters'] +
refer_datum['caption_coref_clusters'])
for ii in caption_clusters:
c_id = ii['cluster_id']
clusters[c_id] = clusters.get(c_id, 'c')
# each round
for r_id in range(10): # assuming 10 rounds for now
referrer = refer_datum['dialog'][r_id]
for ii in referrer['question_reference_clusters']:
c_id = ii['cluster_id']
clusters[c_id] = clusters.get(c_id, 'q%d' % r_id)
for ii in referrer['answer_reference_clusters']:
c_id = ii['cluster_id']
# to distinguish answer
clusters[c_id] = clusters.get(c_id, 'a%d' % r_id)
# bundle as questions in a conversation together
num_refers = 0
for r_id, round_data in enumerate(datum['dialog']):
q_id = img_id * 10 + r_id
bundle['question_id'].append(q_id)
bundle['question_ind'].append(round_data['question'])
bundle['answer_ind'].append(round_data['answer'])
bundle['option_ind'].append(round_data['answer_options'])
bundle['gt_ind'].append(round_data['gt_index'])
# gt attention for parsed layout
attention = np.array(ques_prog_att[round_data['question']])
# check if references is non-empty and replace with _Refer
layout = copy.deepcopy(list(qid2layout_dict[q_id]))
referrer = refer_datum['dialog'][r_id]['question_referrer_clusters']
if len(referrer) > 0:
refer = referrer[0]
# pick _Find module with max attention overlap
max_overlap = (0, 0)
for pos, token in enumerate(layout):
if token == '_Find':
start = max(attention[pos][0], refer['start_word'])
end = min(attention[pos][1], refer['end_word'])
            overlap = max(0, end - start)
if max_overlap[1] < overlap: max_overlap = (pos, overlap)
# reset it to _Refer
pos, _ = max_overlap
layout[pos] = '_Refer'
attention[pos] = [refer['start_word'], refer['end_word']]
# get that cluster id, and corresponding history attention
num_refers += 1
bundle['gt_layout_tokens'].append(layout)
# check for the words attending to
ques_tokens = imdb['ques'][round_data['question']]
ques_words = [vocab.idx2word(ii) for ii in ques_tokens]
for index, pos in enumerate(attention):
# if single word, 'the', 'a', 'of', 'you'
try:
if (pos[1] - pos[0]) == 1 and ques_words[pos[0]] in stop_words:
attention[index] = [0, 0]
except: pdb.set_trace()
bundle['gt_layout_att'].append(attention)
# record
imdb['data'][dialog_id] = bundle
return imdb
if __name__ == '__main__':
title = 'Process all the information into a database for easier access'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--ques_prog_file', required=True,
help='Path to question ground truth programs')
parser.add_argument('--cap_prog_file', required=True,
help='Path to caption ground truth programs')
parser.add_argument('--image_path_format', required=True,
help='Path to find the image given the COCO id')
parser.add_argument('--feature_path', required=True,
help='Path to find the features given the COCO id')
parser.add_argument('--coreference_file', required=True,
help='Visdial file infused with coreference supervision')
parser.add_argument('--visdial_file', required=True,
help='Original visdial file')
parser.add_argument('--vocab_file', required=True,
help='Visual Dialog vocabulary file')
parser.add_argument('--save_path', required=True,
help='Path to save the image dialog dataset')
FLAGS = parser.parse_args()
imdb_data = build_imdb(FLAGS)
print('Saving imdb build: %s' % FLAGS.save_path)
np.save(FLAGS.save_path, np.array(imdb_data))
|
corefnmn-main
|
util/build_imdb.py
|
#!/usr/bin/env python2
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Parse the stanford output into NMN programs.
Adapted from: https://github.com/ronghanghu/n2nmn
"""
from nltk.tree import Tree, ParentedTree
import sys
import re, pdb
from tqdm import tqdm as progressbar
KEEP = [
("WHNP", "WH"),
("WHADVP", "WH"),
(r"NP", "NP"),
("VP", "VP"),
("PP", "PP"),
("ADVP", "AP"),
("ADJP", "AP"),
("this", "null"),
("these", "null"),
("it", "null"),
("EX", "null"),
("PRP$", "null"),
]
KEEP = [(re.compile(k), v) for k, v in KEEP]
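# Note (added for clarity): each (pattern, tag) pair keeps a constituent label
# from the Stanford parse and maps it to a coarse tag (WH / NP / VP / PP / AP /
# null); labels that match no pattern are flattened into their parent in
# strip() below.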
def flatten(tree):
if not isinstance(tree, list):
return [tree]
return sum([flatten(s) for s in tree], [])
def collect_span(term):
parts = flatten(term)
lo = 1000
hi = -1000
for part in parts:
assert isinstance(part, tuple) and len(part) == 2
lo = min(lo, part[1][0])
hi = max(hi, part[1][1])
assert lo < 1000
assert hi > -1000
return (lo, hi)
def finalize(col, top=True):
dcol = despan(col)
is_wh = isinstance(dcol, list) and len(dcol) > 1 and flatten(dcol[0])[0] == "WH"
out = []
if not top:
rest = col
elif is_wh:
whspan = flatten(col[0])[0][1]
#out.append("describe")
out.append("describe[%s,%s]" % (whspan))
rest = col[1:]
else:
out.append("is")
rest = col
if len(rest) == 0:
return out
elif len(rest) == 1:
body = out
else:
body = ["and"]
out.append(body)
for term in rest:
if term[0][0] == "PP":
span_below = collect_span(term[1:])
span_full = term[0][1]
span_here = (span_full[0], span_below[0])
#body.append(["relate"])
body.append(["relate[%s,%s]" % span_here, finalize(term[1:], top=False)])
elif isinstance(term, tuple) and isinstance(term[0], str):
#body.append("find")
body.append("find[%s,%s]" % term[1])
else:
# TODO more structure here
#body.append("find")
body.append("find[%s,%s]" % collect_span(term))
if len(body) > 3:
del body[3:]
if isinstance(out, list) and len(out) == 1:
out = out[0]
return out
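# Note (added for clarity): finalize() emits a nested program in which every
# module carries the word span it should attend to, e.g. describe[lo,hi],
# find[lo,hi], relate[lo,hi]; 'is' is used when the question has no WH phrase
# and 'and' joins multiple arguments (truncated to at most two by the
# `del body[3:]` above).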
def strip(tree):
if not isinstance(tree, Tree):
label = tree
flat_children = []
span = ()
else:
label = tree.label()
# children = [strip(child) for child in tree.subtrees().next()]
children = [strip(child) for child in next(tree.subtrees())]
flat_children = sum(children, [])
leaves = tree.leaves()
span = (int(leaves[0]), int(leaves[-1]) + 1)
proj_label = [v for m, v in KEEP if m.match(label)]
if len(proj_label) == 0:
return flat_children
else:
return [[(proj_label[0], span)] + flat_children]
def despan(rr):
out = []
for r in rr:
if isinstance(r, tuple) and len(r) == 2 and isinstance(r[1], tuple):
out.append(r[0])
elif isinstance(r, list):
out.append(despan(r))
else:
out.append(r)
return out
def collapse(tree):
if not isinstance(tree, list):
return tree
rr = [collapse(st) for st in tree]
rr = [r for r in rr if r != []]
drr = despan(rr)
if drr == ["NP", ["null"]]:
return []
if drr == ["null"]:
return []
if drr == ["PP"]:
return []
members = set(flatten(rr))
if len(members) == 1:
return list(members)
if len(drr) == 2 and drr[0] == "VP" and isinstance(drr[1], list):
if len(drr[1]) == 0:
return []
elif drr[1][0] == "VP" and len(drr[1]) == 2:
return [rr[1][0], rr[1][1]]
return rr
def pp(lol):
if isinstance(lol, str):
return lol
return "(%s)" % " ".join([pp(l) for l in lol])
with open(sys.argv[1]) as ptb_f:
for line in progressbar(ptb_f):
tree = ParentedTree.fromstring(line)
# record the list of substitutions
lookup = {};
index = 0
for st in tree.subtrees():
if len(list(st.subtrees())) == 1:
lookup[index] = st[0];
st[0] = str(index)
index += 1
colparse = collapse(strip(tree))
final = finalize(colparse)
print(pp(final))
#print(lookup)
#print('')
#pdb.set_trace();
#print pp(final)
#print " ".join(tree.leaves())
#print colparse
#print finalize(colparse)
#print
|
corefnmn-main
|
util/parse.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import conv_layer as conv
def empty_safe_1x1_conv(name, bottom, output_dim, reuse=None):
# TensorFlow Fold can generate zero-size batch for conv layer
# which will crash cuDNN on backward pass. So use this
# for 1x1 convolution in modules to avoid the crash.
bottom_shape = tf.shape(bottom)
N = bottom_shape[0]
# NOTE: these are now static shapes
H, W = bottom.shape.as_list()[1:3]
#H = bottom_shape[1]
#W = bottom_shape[2]
input_dim = bottom.get_shape().as_list()[-1]
bottom_flat = tf.reshape(bottom, [-1, input_dim])
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
weights_initializer = tf.contrib.layers.xavier_initializer()
biases_initializer = tf.constant_initializer(0.)
weights = tf.get_variable('weights', [input_dim, output_dim],
initializer=weights_initializer)
biases = tf.get_variable('biases', output_dim,
initializer=biases_initializer)
conv_flat = tf.nn.xw_plus_b(bottom_flat, weights, biases)
conv = tf.reshape(conv_flat, to_T([N, H, W, output_dim]))
return conv
# TensorFlow Fold can generate zero-size batch for conv layer
# which will crash cuDNN on backward pass. So use this
# for arbitrary convolution in modules to avoid the crash.
def empty_safe_conv(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None,
biases_initializer=None, reuse=None):
g = tf.get_default_graph()
with g.gradient_override_map({'Conv2D': 'Conv2D_handle_empty_batch'}):
return conv(name, bottom, kernel_size, stride, output_dim,
padding, bias_term, weights_initializer,
biases_initializer, reuse=reuse)
@tf.RegisterGradient('Conv2D_handle_empty_batch')
def _Conv2DGrad(op, grad):
with tf.device('/cpu:0'):
return [tf.nn.conv2d_backprop_input(
tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),
op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'),
op.get_attr('data_format')),
tf.nn.conv2d_backprop_filter(op.inputs[0],
tf.shape(op.inputs[1]), grad,
op.get_attr('strides'),
op.get_attr('padding'),
op.get_attr('use_cudnn_on_gpu'),
op.get_attr('data_format'))]
# @tf.RegisterGradient('Conv2D_handle_empty_batch')
# def _Conv2DGrad(op, grad):
# def _input_nonempty():
# return tf.nn.conv2d_backprop_input(
# tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),
# op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'),
# op.get_attr('data_format'))
# def _filter_nonempty():
# return tf.nn.conv2d_backprop_filter(op.inputs[0],
# tf.shape(op.inputs[1]), grad,
# op.get_attr('strides'),
# op.get_attr('padding'),
# op.get_attr('use_cudnn_on_gpu'),
# op.get_attr('data_format'))
# def _input_empty():
# return tf.zeros_like(op.inputs[0])
# def _filter_empty():
# return tf.zeros_like(op.inputs[1])
# is_nonempty = tf.greater(tf.size(op.inputs[0]), 0)
# return [tf.cond(is_nonempty, _input_nonempty, _input_empty),
# tf.cond(is_nonempty, _filter_nonempty, _filter_empty)]
|
corefnmn-main
|
util/empty_safe_conv.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to read command line flags using the argparse library.
Author: Satwik Kottur
"""
import argparse
import os
import pdb
from util import support
# read command line arguments
def read_command_line():
title = 'Train explicit coreference resolution visual dialog model'
parser = argparse.ArgumentParser(description=title)
#-------------------------------------------------------------------------
# data input settings
parser.add_argument('--dataset', default='mnist', help='Visdial dataset type')
parser.add_argument('--input_img', default='data/resnet_res5c/',\
help='Path with image features')
parser.add_argument('--data_root', default='data/',\
help='Root directory with preprocessed data files')
parser.add_argument('--text_vocab_path', default='',
help='Path to the vocabulary for text')
parser.add_argument('--prog_vocab_path', default='',
help='Path to the vocabulary for programs')
parser.add_argument('--snapshot_path', default='checkpoints/',
help='Path to save checkpoints')
#--------------------------------------------------------------------------
# specify encoder/decoder
parser.add_argument('--model', default='nmn', help='Name of the model')
parser.add_argument('--generator', default='ques',
help='Name of the generator to use (ques | memory)')
parser.add_argument('--img_norm', default=1, type=int,
help='Normalize the image feature. 1=yes, 0=no')
#-------------------------------------------------------------------------
# model hyperparameters
parser.add_argument('--h_feat', default=7, type=int,
help='Height of visual conv feature')
parser.add_argument('--w_feat', default=7, type=int,
help='Width of visual conv feature')
parser.add_argument('--d_feat', default=64, type=int,
help='Size of visual conv feature')
parser.add_argument('--text_embed_size', default=32, type=int,
help='Size of embedding for text')
parser.add_argument('--map_size', default=128, type=int,
help='Size of the final mapping')
parser.add_argument('--prog_embed_size', default=32, type=int,
help='Size of embedding for program tokens')
parser.add_argument('--lstm_size', default=64, type=int,
help='Size of hidden state in LSTM')
parser.add_argument('--enc_dropout', default=True, type=bool,
help='Dropout in encoder')
parser.add_argument('--dec_dropout', default=True, type=bool,
help='Dropout in decoder')
parser.add_argument('--num_layers', default=1, type=int,
help='Number of layers in LSTM')
parser.add_argument('--max_enc_len', default=14, type=int,
help='Maximum encoding length for sentences (ques|cap)')
parser.add_argument('--max_dec_len', default=8, type=int,
help='Maximum decoding length for programs (ques|cap)')
parser.add_argument('--dec_sampling', default=False, type=bool,
help='Sample while decoding program')
parser.add_argument('--use_refer', dest='use_refer',
action='store_true', help='Flag for Refer Module')
parser.set_defaults(use_refer=False)
parser.add_argument('--remove_aux_find', dest='remove_aux_find',
action='store_true',
help='Flag to remove auxiliary find modules')
parser.set_defaults(remove_aux_find=False)
parser.add_argument('--use_fact', dest='use_fact',
action='store_true', help='Flag to use Q+A as fact')
parser.set_defaults(use_fact=False)
parser.add_argument('--amalgam_text_feats', dest='amalgam_text_feats',
action='store_true',
help='Flag to amalgamate text features')
parser.set_defaults(amalgam_text_feats=False)
#-------------------------------------------------------------------------
# optimization params
parser.add_argument('--batch_size', default=30, type=int,
help='Training batch size (adjust based on GPU memory)')
parser.add_argument('--learning_rate', default=1e-3, type=float,
help='Learning rate for training')
parser.add_argument('--dropout', default=0.5, type=float, help='Dropout')
parser.add_argument('--num_epochs', default=200, type=int,
help='Maximum number of epochs to run training')
parser.add_argument('--gpu_id', type=int, default=0,
help='GPU id to use for training, -1 for CPU')
#-------------------------------------------------------------------------
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# set the cuda environment variable for the gpu to use
gpu_id = '' if parsed_args['gpu_id'] < 0 else str(parsed_args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# pretty print arguments and return
support.pretty_print_dict(parsed_args)
return parsed_args
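# Minimal usage sketch (added): running this module directly parses the flags
# with their defaults and pretty-prints them, the same way the training and
# evaluation scripts call `options.read_command_line()`.
if __name__ == '__main__':
    read_command_line()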
|
corefnmn-main
|
exp_mnist/options.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
# script to visualize intermediate outputs from a trained checkpoint
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import pdb, sys, argparse, os, json
from time import gmtime, strftime
from tqdm import tqdm as progressbar
from exp_mnist import options
from util import support
# read command line options
parser = argparse.ArgumentParser();
parser.add_argument('-checkpoint', required=True, \
help='Checkpoint to load the models');
parser.add_argument('-batchSize', type=int, default=10, \
help='Batch size for evaluation / visualization');
parser.add_argument('-testSplit', default='valid', \
help='Which split to run evaluation on');
parser.add_argument('-gpuID', type=int, default=0)
try: args = vars(parser.parse_args());
except (IOError) as msg: parser.error(str(msg));
# set the cuda environment variable for the gpu to use
if args['gpuID'] >= 0:
os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpuID']);
print(os.environ['CUDA_VISIBLE_DEVICES'])
else: os.environ['CUDA_VISIBLE_DEVICES'] = '';
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False, log_device_placement=False))
from models_mnist.assembler import Assembler
from models_mnist.model import NMN3Model
from util.mnist_train.data_reader import DataReader
from util.metrics import computeMetrics, ExpSmoothing
# setting random seeds
np.random.seed(1234);
tf.set_random_seed(1234);
# read the train args from checkpoint
paramPath = args['checkpoint'].replace('.tmodel', '_params.json');
with open(paramPath, 'r') as fileId: savedArgs = json.load(fileId);
savedArgs.update(args);
args = savedArgs;
args['preloadFeats'] = False;
args['superviseAttention'] = False;
args['useFact'] = args.get('useFact', False);
print('Current model: ' + args['model'])
# Data files
imdbPathVal = os.path.join(args['dataRoot'],'imdb/imdb_%s.npy'%args['testSplit']);
imdbPathVal = imdbPathVal.replace('.npy', '_%s.npy' % args['dataLabel']);
# assembler
assembler = Assembler(args['progVocabPath']);
# dataloader for val
inputDict = {'path':imdbPathVal, 'shuffle':False, 'onePass':True, 'args':args,\
'assembler': assembler, 'useCount': False, 'fetchOptions': True};
valLoader = DataReader(inputDict);
# The model for training
evalParams = args.copy();
evalParams['useGTProg'] = False; # for training
evalParams['encDropout'] = False;
evalParams['decDropout'] = False;
evalParams['decSampling'] = False; # do not sample, take argmax
# for models trained later
if 'numRounds' not in evalParams:
evalParams['numRounds'] = valLoader.batchLoader.numRounds;
# model for evaluation
# create another assembler of caption
assemblers = {'ques': assembler, 'cap': Assembler(args['progVocabPath'])};
model = NMN3Model(evalParams, assemblers);
# Load snapshot
print('Loading checkpoint from: %s' % args['checkpoint'])
snapshot_saver = tf.train.Saver(max_to_keep=None); # keep all snapshots
snapshot_saver.restore(sess, args['checkpoint']);
print('Evaluating on %s' % args['testSplit'])
ansMatches = []; progMatches = [];
totalIter = int(valLoader.batchLoader.numInst / args['batchSize']);
maxIters = 100; curIter = 0;
toSave = {'output': [], 'batch': []};
for batch in progressbar(valLoader.batches(), total=totalIter):
_, outputs = model.runVisualizeIteration(batch, sess);
toSave['output'].append(outputs);
toSave['batch'].append(batch);
# debug -- also compute the ranks during visualization
#ranks.append(batchRanks);
curIter += 1;
if curIter >= maxIters: break;
# save the output + batch
batchPath = args['checkpoint'] + '.100_batches.npy';
print('Printing the batches: ' + batchPath)
support.saveBatch(toSave, batchPath);
# debug evaluate
#metrics = computeMetrics(np.hstack(ranks));
|
corefnmn-main
|
exp_mnist/visualize_sl.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to evaluate the MNIST Dialog model trained using supervised learning.
Evaluates a visual dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
Usage:
python -u exp_mnist/eval_sl.py --gpu_id=0 --test_split='valid' \
--checkpoint='checkpoints/model_epoch_005.tmodel'
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_mnist import options
# read command line options
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', required=True)
parser.add_argument('--test_split', default='valid', \
help='Which split to run evaluation on')
parser.add_argument('--gpu_id', type=int, default=0)
try:
args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# set the cuda environment variable for the gpu to use
gpu_id = '' if args['gpu_id'] < 0 else str(args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_mnist.assembler import Assembler
from models_mnist.model import CorefNMN
from loader_mnist.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# read the train args from checkpoint
param_path = args['checkpoint'].replace('.tmodel', '_params.json')
with open(param_path, 'r') as file_id:
saved_args = json.load(file_id)
saved_args.update(args)
args = saved_args
support.pretty_print_dict(args)
# Data files
root = args['data_root']
imdb_path_val = os.path.join(root, 'imdb_%s.npy' % args['test_split'])
# assembler
question_assembler = Assembler(args['prog_vocab_path'])
copy_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'copy': copy_assembler}
# dataloader for val
input_dict = {'path': imdb_path_val, 'shuffle': False, 'one_pass': True,
'args': args, 'assembler': question_assembler}
val_loader = DataReader(input_dict)
# model for training
eval_params = args.copy()
eval_params['use_gt_prog'] = False # for training
eval_params['enc_dropout'] = False
eval_params['dec_dropout'] = False
eval_params['dec_sampling'] = False # do not sample, take argmax
# model for evaluation
model = CorefNMN(eval_params, assemblers)
# Load snapshot
print('Loading checkpoint from: %s' % args['checkpoint'])
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
snapshot_saver.restore(sess, args['checkpoint'])
print('Evaluating on %s' % args['test_split'])
ans_matches = []
prog_matches = []
total_iter = int(val_loader.batch_loader.num_inst / args['batch_size'])
num_iters = 0
for batch in progressbar(val_loader.batches(), total=total_iter):
batch_matches, outputs = model.run_evaluate_iteration(batch, sess)
# batch['ans_ind'] = np.argmax(outputs['ans_logits'], 1)
# np.save('batch_model.npy', batch)
# sys.exit(1)
ans_matches.append(batch_matches)
if 'matches' in outputs:
prog_matches.append(outputs['matches'])
if len(prog_matches) > 0:
prog_matches = np.concatenate(prog_matches)
percent = 100 * np.sum(prog_matches) / prog_matches.size
print('Program accuracy: %f percent\n' % percent)
ans_matches = np.concatenate(ans_matches)
percent = 100 * np.sum(ans_matches) / ans_matches.size
print('Answer accuracy: %f percent\n' % percent)
|
corefnmn-main
|
exp_mnist/eval_sl.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to train MNIST Dialog model using supervised learning.
Trains mnist dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_mnist import options
# read command line options
args = options.read_command_line()
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_mnist.assembler import Assembler
from models_mnist.model import CorefNMN
from loader_mnist.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# Data files
args['data_root'] = os.path.join(args['data_root'], args['dataset'])
args['text_vocab_path'] = os.path.join(args['data_root'], 'vocabulary_mnist.txt')
root = args['data_root']
args['prog_vocab_path'] = os.path.join(root, 'vocabulary_layout_mnist.txt')
args['answer_list_path'] = os.path.join(root, 'answers_mnist.txt')
imdb_path_train = os.path.join(root, 'imdb_train.npy')
# assemblers for question and caption programs
question_assembler = Assembler(args['prog_vocab_path'])
copy_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'copy': copy_assembler}
# Dataloader for train
input_dict = {'path': imdb_path_train, 'shuffle': True, 'one_pass': False,
'assembler': question_assembler, 'use_count': False,
'args': args}
train_loader = DataReader(input_dict)
# model params for training
train_params = args.copy()
# use the ground truth program for training
train_params['use_gt_prog'] = True
train_params['text_vocab_size'] = train_loader.batch_loader.vocab_dict.num_vocab
train_params['prog_vocab_size'] = len(question_assembler.module_names)
train_params['pad_id'] = train_loader.batch_loader.vocab_dict.word2idx('<pad>')
train_params['num_rounds'] = train_loader.batch_loader.num_rounds
train_params['num_choices'] = train_loader.num_choices
print('Using a vocab size: %d' % train_params['text_vocab_size'])
# model for training
model = CorefNMN(train_params, assemblers)
model.setup_training()
# train with Adam, optimization ops
solver = tf.train.AdamOptimizer(learning_rate=train_params['learning_rate'])
gradients = solver.compute_gradients(model.get_total_loss())
# clip gradients based on value
gradients = [(tf.clip_by_value(g, -2.0, 2.0), v) if g is not None else (g, v)
for g, v in gradients]
solver_op = solver.apply_gradients(gradients)
# Training operation
# Partial-run can't fetch training operations
# some workaround to make partial-run work
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS);
with tf.control_dependencies([solver_op]):
model.set_train_step(tf.constant(0));
with tf.control_dependencies(update_ops):
model.set_train_step(tf.constant(0));
# add it to the output
# model.add_solver_op(solver_op)
# adjust snapshot to have a time stamp folder
cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())
args['snapshot_path'] = os.path.join(args['snapshot_path'], cur_time)
os.makedirs(args['snapshot_path'], exist_ok=True)
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
print('Saving checkpoints at: %s' % args['snapshot_path'])
# initialize all variables
sess.run(tf.global_variables_initializer())
# forget about embed and module scopes
del train_params['embed_scope']
if 'module_scope' in train_params:
del train_params['module_scope']
#-------------------------------------------------------------------------
print('Running training iteration..')
num_iter_per_epoch = int(train_loader.batch_loader.num_inst/args['batch_size'])
print('Number of iterations per epoch: %d' % num_iter_per_epoch)
# exponential smoothing for loss
smoother = metrics.ExponentialSmoothing()
for n_iter, batch in enumerate(train_loader.batches()):
# add epoch and iteration
epoch = float(n_iter) / num_iter_per_epoch
batch['epoch'] = epoch
batch['n_iter'] = n_iter
if n_iter >= args['num_epochs'] * num_iter_per_epoch:
break
# perform training iteration
losses, _ = model.run_train_iteration(batch, sess)
losses = smoother.report(losses)
# printing log
if n_iter % 10 == 0:
cur_time = time.strftime('%a %d%b%y %X', time.gmtime())
print_format = ('[%s][It: %d][Ep: %.2f][Loss: %.3f Prog: %.3f Ans: %.3f]')
print_info = (cur_time, n_iter, epoch, losses['total'], losses['prog'],
losses['ans'])
print(print_format % print_info)
# save snapshot after every epoch
if n_iter % num_iter_per_epoch == 0:
epoch = float(n_iter) / num_iter_per_epoch
# Save snapshot at every epoch
file_name = 'model_epoch_%03d.tmodel' % epoch
snapshot_path = os.path.join(args['snapshot_path'], file_name)
snapshot_saver.save(sess, snapshot_path, write_meta_graph=False)
# also save the arguments
params_path = snapshot_path.replace('.tmodel', '_params.json')
with open(params_path, 'w') as file_id:
json.dump(train_params, file_id)
print('Snapshot saved to: ' + snapshot_path)
print('Launching evaluation job')
log_path = snapshot_path.replace('.tmodel', '_eval.log')
support.launch_evaluation_job(log_path, snapshot_path)
|
corefnmn-main
|
exp_mnist/train_sl.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Dataloader file for MNIST Dialog experiments.
Explicit visual coreference resolution in visual dialog using neural module
networks.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import h5py
import json
import os
import threading
import queue
import numpy as np
from tqdm import tqdm as progressbar
from util import text_processing, support
class BatchLoaderMNIST:
"""Subclass to DataReader that serves batches during training.
"""
def __init__(self, imdb, params):
"""Initialize by reading the data and pre-processing it.
"""
self.imdb = imdb
self.params = params
self.num_inst = len(self.imdb['data'])
self.num_rounds = len(self.imdb['data'][0]['question_ind'])
# load vocabulary
vocab_path = params['text_vocab_path']
self.vocab_dict = text_processing.VocabDict(vocab_path)
self.T_encoder = params['max_enc_len']
# record special token ids
self.start_token_id = self.vocab_dict.word2idx('<start>')
self.end_token_id = self.vocab_dict.word2idx('<end>')
self.pad_token_id = self.vocab_dict.word2idx('<pad>')
# Load answers
with open(params['args']['answer_list_path'], 'r') as file_id:
choices = [ii.strip('\n') for ii in file_id.readlines()]
self.num_choices = len(choices)
self.choices2ind = {ii: index for index, ii in enumerate(choices)}
self.ind2choices = {index: ii for index, ii in enumerate(choices)}
# peek one example to see whether answer and gt_layout are in the data
test_data = self.imdb['data'][0]
self.load_gt_layout = test_data.get('gt_layout_tokens', False)
if 'load_gt_layout' in params:
self.load_gt_layout = params['load_gt_layout']
if self.load_gt_layout:
self.T_decoder = params['max_dec_len']
self.assembler = params['assembler']
# load the mean of the images
load_path = params['path'].split('/')[:-1] + ['train_image_mean.npy']
load_path = '/'.join(load_path)
print('Loading training image stats from: ' + load_path)
img_stats = np.load(load_path)[()]
mean_img = img_stats['mean_img'].reshape([1, 1, -1])
std_img = img_stats['std_img'].reshape([1, 1, -1])
# read all the images
images = {}
print('Reading images..')
for datum in progressbar(self.imdb['data']):
img_path = datum['image_path']
if img_path not in images:
cur_img = support.load_image(img_path)
cur_img = (cur_img - mean_img) / std_img
images[img_path] = cur_img
self.images = images
# get the shape from random image
for _, sample in self.images.items():
self.img_size = sample.shape
break
# convert to tokens
self.digitizer = lambda x: [self.vocab_dict.word2idx(w) for w in x]
# use history if needed by the program generator
self.use_history = self.params['generator'] == 'mem'
if self.use_history:
self._construct_history()
# if fact is to be used
if self.params['use_fact']:
self._construct_fact()
#--------------------------------------------------------------------------
def _construct_fact(self):
"""Method to construct facts.
Facts are previous question and answers strings concatenated as one. These
serve as memory units that the model can refer back to.
For example, 'Q: What is the man wearing? A: Sweater.' will have a fact
'What is the man wearing? Sweater.' so that the model can address follow-up
questions like 'What color is it?' by referring to this fact.
"""
print('Constructing facts..')
num_diags = len(self.imdb['data'])
max_len = self.T_encoder + 1 # question + answer appended
num_rounds = len(self.imdb['data'][0]['question_ind'])
fact = np.zeros((num_diags, num_rounds, max_len))
fact_len = np.zeros((num_diags, num_rounds))
fact.fill(self.pad_token_id)
for diag_id, datum in enumerate(self.imdb['data']):
for r_id in range(num_rounds - 1):
q_id = datum['question_ind'][r_id]
a_id = datum['answer_ind'][r_id]
ques, q_len = self.imdb['ques'][q_id], self.imdb['ques_len'][q_id]
ans = self.vocab_dict.word2idx(self.ind2choices[a_id])
# handle overflow
bound = min(q_len, max_len)
fact[diag_id, r_id, :bound] = ques[:bound]
if bound < max_len:
fact[diag_id, r_id, bound] = ans
fact_len[diag_id, r_id] = bound + 1
# flatten
self.imdb['fact'] = fact
self.imdb['fact_len'] = fact_len
#--------------------------------------------------------------------------
def _construct_history(self):
"""Method to construct history, which concatenates entire dialogs so far.
"""
print('Constructing history..')
num_diags = len(self.imdb['data'])
max_len = self.T_encoder + 1 # question + answer appended
num_rounds = len(self.imdb['data'][0]['question_ind'])
history = np.zeros((num_diags, num_rounds, max_len))
hist_len = np.zeros((num_diags, num_rounds))
history.fill(self.pad_token_id)
for diag_id, datum in enumerate(self.imdb['data']):
for r_id in range(num_rounds - 1):
q_id = datum['question_ind'][r_id]
a_id = datum['answer_ind'][r_id]
ques, q_len = self.imdb['ques'][q_id], self.imdb['ques_len'][q_id]
ans = self.vocab_dict.word2idx(self.ind2choices[a_id])
# handle overflow
bound = min(q_len, max_len)
history[diag_id, r_id, :bound] = ques[:bound]
if bound < max_len:
history[diag_id, r_id, bound] = ans
hist_len[diag_id, r_id] = bound + 1
self.imdb['hist'] = history
self.imdb['hist_len'] = hist_len
#--------------------------------------------------------------------------
def load_one_batch(self, sample_ids):
"""Load data given the sample ids.
"""
actual_batch_size = len(sample_ids)
batch = {}
eos_token = self.assembler.name2idx_dict['<eos>']
num_rounds = self.num_rounds
# questions
ques_inds = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['question_ind']]
ques_batch = self.imdb['ques'][ques_inds][:, :self.T_encoder].transpose()
ques_len = self.imdb['ques_len'][ques_inds]
ques_ids = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['question_id']]
# answers
ans_inds_batch = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['answer_ind']]
image_path = [None] * actual_batch_size
# load fact
if self.params['use_fact']:
fact = self.imdb['fact'][sample_ids]
fact_len = self.imdb['fact_len'][sample_ids]
# flatten
fact = np.reshape(fact, [-1, fact.shape[-1]])
fact_len = np.reshape(fact_len, [-1])
else:
fact, fact_len = None, None
# programs
if self.load_gt_layout:
gt_layout_batch = np.zeros((self.T_decoder,
num_rounds * actual_batch_size), np.int32)
gt_layout_batch.fill(eos_token)
# if features are needed, load images
if 'prog' in self.params['model']:
image_feats = np.zeros((actual_batch_size,) + self.img_size, np.float32)
for n in range(len(sample_ids)):
iminfo = self.imdb['data'][sample_ids[n]]
image_path[n] = iminfo['image_path']
image_feats[n] = self.images[iminfo['image_path']]
# programs
if self.load_gt_layout:
# go over all the questions
for r_id, layout in enumerate(iminfo['gt_layout_tokens']):
split_layout = layout.split(' ')
gt_layout_batch[:, num_rounds * n + r_id] = \
self.assembler.module_list2tokens(split_layout,
self.T_decoder)
# if history is needed
if self.use_history:
history = self.imdb['hist'][sample_ids]
hist_len = self.imdb['hist_len'][sample_ids]
else:
history, hist_len = None, None
batch = {'ques': ques_batch, 'ques_len': ques_len,
'fact': fact, 'fact_len': fact_len,
'hist': history, 'hist_len': hist_len,
'ans_ind': ans_inds_batch,
'img_path': image_path, 'imgs': image_feats,
'ques_id': ques_ids, 'gt_layout': gt_layout_batch}
return batch
class DataReader:
"""Main dataloader class for experiments on Visual Dialog.
"""
def __init__(self, params):
imdb_path = params['path']
print('Loading imdb from: %s' % params['path'])
if imdb_path.endswith('.npy'): imdb = np.load(imdb_path)
else: raise TypeError('unknown imdb format.')
self.imdb = imdb[()]
self.shuffle = params.get('shuffle', True)
self.one_pass = params.get('one_pass', False)
self.prefetch_num = params.get('num_prefetch', 8)
self.params = params
copy_args = {'max_enc_len', 'max_dec_len', 'text_vocab_path', 'model',
'batch_size', 'use_fact', 'answer_list_path', 'generator'}
self.params.update({ii: params['args'][ii] for ii in copy_args
if ii in params['args'] and
params['args'][ii] is not None})
# MNIST data loader
self.batch_loader = BatchLoaderMNIST(self.imdb, self.params)
self.num_choices = self.batch_loader.num_choices
# Start prefetching thread
self.prefetch_queue = queue.Queue(maxsize=self.prefetch_num)
self.prefetch_thread = threading.Thread(target=_run_prefetch,
args=(self.prefetch_queue, self.batch_loader, self.imdb,
self.shuffle, self.one_pass, self.params))
self.prefetch_thread.daemon = True
self.prefetch_thread.start()
def batches(self):
while True:
# Get a batch from the prefetching queue
if self.prefetch_queue.empty(): pass
#print('data reader: waiting for data loading (IO is slow)...')
batch = self.prefetch_queue.get(block=True)
if batch is None:
assert(self.one_pass)
print('data reader: one pass finished')
return  # end of one pass (PEP 479: raising StopIteration inside a generator is an error)
yield batch
def _run_prefetch(prefetch_queue, batch_loader, imdb, shuffle,
one_pass, params):
num_samples = len(imdb['data'])
batch_size = params['batch_size']
n_sample = 0
fetch_order = np.arange(num_samples)
while True:
# Shuffle the sample order for every epoch
if n_sample == 0 and shuffle:
fetch_order = np.random.permutation(num_samples)
# Load batch from file
# note that len(sample_ids) <= batch_size, not necessarily equal
sample_ids = fetch_order[n_sample:n_sample+batch_size]
batch = batch_loader.load_one_batch(sample_ids)
prefetch_queue.put(batch, block=True)
n_sample += len(sample_ids)
if n_sample >= num_samples:
# Put in a None batch to indicate a whole pass is over
if one_pass:
prefetch_queue.put(None, block=True)
n_sample = 0
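# Typical usage (added; mirrors exp_mnist/train_sl.py, paths are illustrative):
#   assembler = Assembler('data/mnist/vocabulary_layout_mnist.txt')
#   loader = DataReader({'path': 'data/mnist/imdb_train.npy', 'shuffle': True,
#                        'one_pass': False, 'assembler': assembler,
#                        'use_count': False, 'args': args})
#   for batch in loader.batches():
#       ...  # each batch is the dict built in BatchLoaderMNIST.load_one_batch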
|
corefnmn-main
|
loader_mnist/data_reader.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
import pdb
import sys
import numpy as np
class HTML():
def __init__(self, cols, header_file='vis/jquery_header.html'):
self.template = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN" '
'"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">'
'<html xmlns="http://www.w3.org/1999/xhtml"><head>')
self.template += '<style>'+\
'table#t01{width:100%; background-color:#fff}'\
+'table#t01 tr:nth-child(odd){background-color:#ddd;}'\
+'table#t01 tr:nth-child(even){background-color:#fff;}'\
+'table#t01 tr td tr:nth-child(odd){background-color:#ddd;}'\
+'table#t01 tr td tr:nth-child(even){background-color:#fff;}'\
+'table#t01 th{background-color:black;color:white}'+\
'</style>'
self.colors = ['maroon', 'red', 'purple', 'fuchsia',
'green', 'lime', 'olive', 'yellow',
'navy', 'blue', 'teal', 'aqua', 'orange']
with open(header_file, 'r') as file_id: self.template += file_id.read()
self.template += '</head><body><table id ="t01">'
self.end = '</table></body></html>'
self.content = ''
self.row_content = '<tr>'+'<td valign="top">%s</td>'*cols+'</tr>'
self.span_first_content = '<tr>'+'<td valign="top" rowspan="%s">%s</td>'+\
'<td valign="top">%s</td>' * (cols-1)+'</tr>'
self.span_other_content = '<tr>'+ '<td valign="top">%s</td>'*(cols-1)\
+'</tr>'
self.att_template = '<mark style="background-color:rgba(255,0,0,%f)"> %s </mark>|'
self.img_template = '<img src="%s" height="%d" width="%d"></img>'
# creating table
self.num_rows = None
self.num_cols = cols
# Add a new row
def add_spanning_row(self, mega_row, *entries):
# if first element is list, take it
if type(entries[0]) == list: entries = entries[0]
for index, ii in enumerate(entries):
if len(ii) != self.num_cols - 1:
print('Warning: Incompatible entries.\nTaking what is needed!')
if len(ii) < self.num_cols - 1: # pad with 'NULL'
for jj in range(self.num_cols - 1 - len(ii)):
entries[index].append('NULL')
num_rows = len(entries)
content = (num_rows, mega_row)+tuple(entries[0])
new_row = self.span_first_content % content
for ii in range(1, num_rows):
new_row += self.span_other_content % tuple(entries[ii])
# Add new_row to content
self.content += new_row
# Add a new row
def add_row(self, *entries):
# if first element is list, take it; else make the argument tuple mutable
if type(entries[0]) == list: entries = entries[0]
else: entries = list(entries)
if len(entries) != self.num_cols:
print('Warning: Incompatible number of entries.\nTaking what is needed!')
if len(entries) < self.num_cols: # pad with 'NULL'
for ii in range(self.num_cols - len(entries)):
entries.append('NULL')
new_row = self.row_content % tuple(entries)
# Add new_row to content
self.content += new_row
# setting the title
def set_title(self, titles):
new_titles = []
for ii in titles: new_titles.append('<strong>%s</strong>' % ii)
self.add_row(new_titles)
# coloring text
def get_colored_text(self, text, group_id=None):
''' If group id is None, pick a random color '''
if group_id is None: color = self.colors[1]
else: color = self.colors[group_id % len(self.colors)]
return '<b><font color="%s">%s</font></b>' % (color, text)
# render and save page
def save_page(self, file_path):
# allow new page and tab space
self.content = self.content.replace('\n', '</br>')
self.content = self.content.replace('\t', ' '*10)
page_content = self.template + self.content + self.end
with open(file_path, 'w') as file_id: file_id.write(page_content)
print('Written page to: %s' % file_path)
# Return the string for an image
def link_image(self, img_path, caption=None, height=100):
# No caption provided
if caption == None: return self.img_template % (img_path, height, height)
string = 'Caption: %s</br>' % caption
return string + (self.img_template % (img_path, height, height))
# add table with question encoding
def add_question_attention(self, question, program, att):
table = '<table class="heat-map" id="heat-map-3"><thead><tr><th></th>'
row = ''.join(['<th>%s</th>' % ii for ii in program])
row += '</tr></thead><tbody>'
table += row
for ii in range(len(question)):
table += '<tr class="stats-row"><td class="stats-title">%s</td>'\
% question[ii]
table += ''.join(['<td>%2d</td>' % att[ii, jj] \
for jj in range(len(program))])
table += '</tr>'
table += '</tbody></table>'
return table
# add history attention
def add_history_attention(self, att_wt, att_labels = None):
num_ques = att_wt.size
if att_labels is None:
titles = ['Cap']
titles.extend(['%02d' % ii for ii in range(1, num_ques)])
else: titles = att_labels
max_att = np.max(att_wt)
string = ''
for ii in range(0, num_ques):
if ii % 6 == 0: string += '\n'
string += self.att_template % (att_wt[ii]/max_att, titles[ii])
return string
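# Minimal usage sketch (added): assumes vis/jquery_header.html exists and the
# target directory is writable; builds a two-column page with a title row and
# one content row.
if __name__ == '__main__':
    page = HTML(2)
    page.set_title(['Image', 'Dialog'])
    page.add_row([page.link_image('coco_images/example.jpg', 'a sample caption'),
                  page.get_colored_text('Q: what color is the cat?', group_id=3)])
    page.save_page('vis/sample_page.html')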
|
corefnmn-main
|
vis/html.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Visualizing the dialog output from the model.
Explicit visual coreference resolution in visual dialog using neural module
networks.
Author: Satwik Kottur
"""
import argparse
import numpy as np
import sys
import h5py
import json
import os
from vis import html
from util import support
# PIL
from PIL import Image
import requests
from io import BytesIO
from util import support
from tqdm import tqdm as progressbar
from skimage import io, transform
def main(args):
titles = ['Image', 'Answers', 'Predictions', 'Modules', 'Attention']
# load the batch
data = np.load(args.batch_path)[()]
batch, outputs = data['batch'], data['output']
# load dictionary
with open(args.text_vocab_path, 'r') as file_id:
word2ind = {word.strip('\n'): ind
for ind, word in enumerate(file_id.readlines())}
ind2word = {ind: word for word, ind in word2ind.items()}
# get the program dictionary
with open(args.prog_vocab_path, 'r') as file_id:
word2ind_prog = {word.strip('\n'): ind
for ind, word in enumerate(file_id.readlines())}
ind2word_prog = {ind: word for word, ind in word2ind_prog.items()}
stringify = lambda vector: ' '.join([ind2word[w] for w in vector])
stringify_prog = lambda vector: ' '.join([ind2word_prog[w] for w in vector])
# Get html related info
page = html.HTML(len(titles))
page.set_title(titles)
template = 'Q%d: %s\nA [GT]: %s\nP [GT]: %s\nP: %s'
pred_template = 'GT Rank: %d\nTop-5: \n%s'
# saving intermediate outputs
end_prog_token = word2ind_prog['<eos>']
server_save = './attention/%d_%d_%d_%d.png'
local_save = os.path.join(args.image_save_root, 'attention/%d_%d_%d_%d.png')
# Create folders.
os.makedirs(args.image_save_root, exist_ok=True)
os.makedirs(os.path.join(args.image_save_root, 'attention'), exist_ok=True)
for ii in progressbar(range(args.num_examples)):
# Read image.
img_name = '/'.join(batch[ii]['img_path'][0].split('/')[-2:])
image = io.imread(os.path.join(args.image_load_root, img_name))
# Deal with black and white images.
if len(image.shape) < 3:
image = np.expand_dims(image, -1)
image = np.tile(image, [1, 1, 3])
# Caption.
cap_len = batch[ii]['cap_len'][0]
cap_string = stringify(batch[ii]['cap'][0, :cap_len])
span_content = page.link_image('coco_images/' + img_name, cap_string, 400)
# decide length based on first appearance of 14 <eos>
if 'pred_tokens_cap' in outputs[ii]:
caption_prog = outputs[ii]['pred_tokens_cap']
prog_len = np.where(caption_prog[:, 0] == end_prog_token)[0][0]
cap_tokens = [ind2word[w] for w in batch[ii]['cap'][0, :cap_len]]
prog_tokens = [ind2word_prog[w] for w in caption_prog[:prog_len, 0]]
att = 100 * outputs[ii]['attention_cap'][:, :, 0, 0].transpose()
word_att_str = page.add_question_attention(cap_tokens, prog_tokens, att)
# caption module outputs
stack = outputs[ii]['intermediates'][0]
cap_stack = [datum for datum in stack if datum[0] == 'cap']
string = {'c_1':'', 'c_2':''}
for _, step, _, attention in cap_stack:
# reshape and renormalize
att = attention[:, :, 0]
att_image = support.get_blend_map(image, att)
att_image = Image.fromarray(np.uint8(att_image))
att_image = att_image.resize((200, 200))
att_image.save(local_save % (2, ii, 0, step), 'png')
# caption first row
string['c_1'] += page.link_image(server_save % (2, ii, 0, step))
att = attention[:, :, 0]
att_image = support.interpolate_attention(image, att)
#att_image = support.get_blend_map(image, att)
att_image = Image.fromarray(np.uint8(att_image))
att_image = att_image.resize((200, 200))
att_image.save(local_save % (3, ii, 0, step), 'png')
# caption second row
string['c_2'] += page.link_image(server_save % (3, ii, 0, step))
# add the neural module visualization for captions
span_content += '\n'.join(['', string['c_1'], string['c_2'], word_att_str])
ques_content = []
for jj in range(10):
row_content = []
# question
ques_len = batch[ii]['ques_len'][jj]
ques_string = stringify(batch[ii]['ques'][:ques_len, jj])
# answer
ans_len = batch[ii]['ans_len'][jj]
ans_in = stringify(batch[ii]['ans_in'][jj, :ans_len])
ans_out = stringify(batch[ii]['ans_out'][jj, :ans_len])
# program
gt_prog_str = stringify_prog(batch[ii]['gt_layout'][:, jj])
cur_prog = outputs[ii]['pred_tokens'][:, jj]
prog_pred = stringify_prog(outputs[ii]['pred_tokens'][:, jj])
print_slot = (jj, ques_string, ans_in, gt_prog_str, prog_pred)
row_content.append(template % print_slot)
# get predictions
sort_arg = np.argsort(outputs[ii]['scores'][jj])[::-1][:args.top_options]
gt_score = outputs[ii]['scores'][jj][batch[ii]['gt_ind'][jj]]
gt_rank = np.sum(outputs[ii]['scores'][jj] > gt_score) + 1
options = [stringify(batch[ii]['opt_in'][kk][jj]) for kk in sort_arg]
row_content.append(pred_template % (gt_rank, '\n'.join(options)))
# visualizing intermediate outputs for each question
stack = outputs[ii]['intermediates'][0]
ques_stack = [datum for datum in stack
if (datum[0] == 'ques') and (datum[2] == jj)]
string = {'q_1':'', 'q_2':''}
for _, step, _, attention in ques_stack:
# reshape and renormalize
att = attention[:, :, 0]
#att_image = support.interpolate_attention(image, att)
att_image = support.get_blend_map(image, att)
att_image = Image.fromarray(np.uint8(att_image))
att_image = att_image.resize((200, 200))
att_image.save(local_save % (0, ii, jj, step), 'png')
# string for first row
string['q_1'] += page.link_image(server_save % (0, ii, jj, step))
att = attention[:, :, 0]
att_image = support.interpolate_attention(image, att)
#att_image = support.get_blend_map(image, att)
att_image = Image.fromarray(np.uint8(att_image))
att_image = att_image.resize((200, 200))
att_image.save(local_save % (1, ii, jj, step), 'png')
# string for second row
string['q_2'] += page.link_image(server_save % (1, ii, jj, step))
# if refer module, add weights
if ind2word_prog[cur_prog[step]] == '_Refer':
wt_stack = outputs[ii]['intermediates'][1]
cur_wt = [datum for datum in wt_stack if datum[0] == jj]
assert (len(cur_wt) == 1), 'Expected exactly one history attention entry for this round'
wts = cur_wt[0][1]
wt_labels = cur_wt[0][2]
if len(wts) > 0:
hist_att_str = page.add_history_attention(wts, wt_labels)
string['q_1'] = hist_att_str + '\n' + string['q_1']
row_content.append('\n'.join(['', string['q_1'], string['q_2']]))
# decide length based on first appearance of 14 <eos>
ques_prog = outputs[ii]['pred_tokens'][:, jj]
prog_len = np.where(ques_prog == end_prog_token)[0][0]
ques_tokens = [ind2word[w] for w in batch[ii]['ques'][:ques_len, jj]]
prog_tokens = [ind2word_prog[w] for w in ques_prog[:prog_len]]
att = 100 * outputs[ii]['attention'][:, :, jj, 0].transpose()
string = page.add_question_attention(ques_tokens, prog_tokens, att)
row_content.append(string)
ques_content.append(row_content)
# Add the span row
page.add_spanning_row(span_content, ques_content)
# render page and save
page.save_page(args.save_path)
if __name__ == '__main__':
# read command line arguments
title = 'Visualizing dialog by creating a HTML page.'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--batch_path', default='logs/sample_run_batches.npy',
help='Path to batches saved by visualize_sl.py')
parser.add_argument('--text_vocab_path', default='data/vocab_vd.txt',
help='Text vocabulary to decode sentence outputs')
parser.add_argument('--prog_vocab_path', default='data/vocab_layout.txt',
help='Program vocabulary to decode program outputs')
parser.add_argument('--save_path', default='vis/sample_run_examples.html',
help='Save the HTML file that visualizes examples')
parser.add_argument('--image_load_root', default='vis/coco_images/',
help='Path to the COCO images')
parser.add_argument('--image_save_root', default='vis/images/',
help='Path to the images to load in HTML')
parser.add_argument('--num_examples', default=50, type=int,
help='Number of examples to visualize')
parser.add_argument('--top_options', default=5, type=int,
help='Number of top ranked options to show')
args = parser.parse_args()
main(args)
|
corefnmn-main
|
vis/visualize_dialogs.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to decode and produce an answer.
Answer decoder for explicit visual coreference resolution model in visual
dialog using neural module networks, called CorefNMN.
Author: Satwik Kottur
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.nn import dropout
from tensorflow.contrib.layers import fully_connected as FC
from util import support
class AnswerDecoder:
def __init__(self, inputs, output_pool, params):
"""Initialize answer decoder.
Args:
inputs:
output_pool:
params:
"""
self.params = params
# keep track of inputs and outputs
used_inputs = []
outputs = {}
# alias for criterion
criterion = tf.nn.sparse_softmax_cross_entropy_with_logits
# decide the source based on train / evaluation
source = output_pool if params['train_mode'] else inputs
# a linear to number of choices
logits = FC(source['context'], params['num_choices'], activation_fn=None)
outputs['ans_logits'] = logits
# add program context vector, if not training
if not self.params['train_mode']:
used_inputs.append('context')
# softmax over the choices
answer_loss = criterion(logits=logits, labels=inputs['ans_ind'])
used_inputs.append('ans_ind')
outputs['ans_token_loss'] = tf.reduce_mean(answer_loss)
# setup the inputs and outputs
self.outputs = outputs
self.inputs = {ii: inputs[ii] for ii in used_inputs}
#----------------------------------------------------------------------------
# setters and getters
def get_outputs(self):
return self.outputs
def get_inputs(self):
return self.inputs
#----------------------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, output_pool=None):
"""Produces the feed dict for this subcomponent.
Args:
batch: Batch returned from dataloader
output_pool: Outputs from previous subcomponents, mostly when evaluating
Returns:
feed_dict: Returns the feed dictionary
"""
feed_dict = {}
feed_dict[self.inputs['ans_ind']] = batch['ans_ind']
# if not training, use previous outputs, else inputs
if not self.params['train_mode']:
feed_dict[self.inputs['context']] = output_pool['context']
return feed_dict
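# Minimal construction sketch (added; TF 1.x, hypothetical sizes): wires the
# decoder in train mode with a 64-d context vector and 10 answer choices.
if __name__ == '__main__':
    params = {'train_mode': True, 'num_choices': 10}
    inputs = {'ans_ind': tf.placeholder(tf.int32, [None])}
    output_pool = {'context': tf.placeholder(tf.float32, [None, 64])}
    decoder = AnswerDecoder(inputs, output_pool, params)
    print(decoder.get_outputs().keys())  # ans_logits, ans_token_loss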
|
corefnmn-main
|
models_mnist/decoder.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Attention-based sequence-to-sequence program generator for CorefNMN: encodes
the question (or the dialog memory) and decodes a program over neural modules.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import fc_layer as fc, conv_relu_layer as conv_relu
from tensorflow.contrib.layers import fully_connected as FC
from tensorflow.contrib.rnn import LSTMStateTuple
from util import support
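# Note (added for clarity): _get_valid_tokens masks out program tokens that
# would violate the layout constraints encoded in (W, b) given the current
# decoding state X (e.g. a module whose arguments are not yet available);
# _update_decoding_state advances X by the chosen token's row of P. Both are
# wrapped in stop_gradient since they only constrain the decoder's choices.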
def _get_valid_tokens(X, W, b):
constraints_validity = tf.greater_equal(tf.tensordot(X, W, axes=1) - b, 0)
token_validity = tf.reduce_all(constraints_validity, axis=2)
return tf.stop_gradient(token_validity)
#------------------------------------------------------------------------------
def _update_decoding_state(X, s, P):
X = X + tf.nn.embedding_lookup(P, s) # X = X + S P
return tf.stop_gradient(X)
#------------------------------------------------------------------------------
def _get_lstm_cell(num_layers, lstm_dim, apply_dropout):
if isinstance(lstm_dim, list): # Different layers have different dimensions
if not len(lstm_dim) == num_layers:
raise ValueError('the length of lstm_dim must be equal to num_layers')
cell_list = []
for l in range(num_layers):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim[l], state_is_tuple=True)
# Dropout is only applied on output of the 1st to second-last layer.
# The output of the last layer has no dropout
if apply_dropout and l < num_layers-1:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list.append(dropout_cell)
else: # All layers has the same dimension.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim, state_is_tuple=True)
# Dropout is only applied on output of the 1st to second-last layer.
# The output of the last layer has no dropout
if apply_dropout:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list = [dropout_cell] * (num_layers-1) + [lstm_cell]
cell = tf.contrib.rnn.MultiRNNCell(cell_list, state_is_tuple=True)
return cell
#------------------------------------------------------------------------------
# Sequence to Sequence with attention
class AttSeq2Seq:
def __init__(self, holders, use_gt_prog, assembler, params, reuse=None):
self.T_decoder = params['max_dec_len']
self.encoder_num_vocab = params['text_vocab_size']
self.encoder_embed_dim = params['text_embed_size']
self.decoder_num_vocab = params['prog_vocab_size']
self.decoder_embed_dim = params['prog_embed_size']
self.lstm_dim = params['lstm_size']
self.num_layers = params['num_layers']
self.EOS_token = assembler.EOS_idx
self.embed_scope = params['embed_scope']
self.temperature = params.get('temperature', 1)
# if word vectors need to be used or lstm outputs for attention
params['use_word_vectors'] = 'wv-att' in params['model']
params['generator'] = params.get('generator', 'ques')
self.params = params
# decoding transition variables
self.P = to_T(assembler.P, dtype=tf.int32)
self.W = to_T(assembler.W, dtype=tf.int32)
self.b = to_T(assembler.b, dtype=tf.int32)
self.encoder_dropout = params['enc_dropout']
self.decoder_dropout = params['dec_dropout']
self.decoder_sampling = params['dec_sampling']
# detect fake inputs
if 'fake' in holders: scope = 'enc_dec_cap'
else: scope = 'enc_dec'
with tf.variable_scope(scope, reuse=reuse):
# build a special encoder, if needed
if 'fake' not in holders and params['generator'] == 'mem':
self._build_memory_encoder(holders)
else:
# build a normal encoder
self._build_encoder(holders['ques'], holders['ques_len'])
self._build_decoder(use_gt_prog, holders['prog_gt'])
# build a usual encoder, ques based
def _build_encoder(self, input_seq_batch, seq_len_batch, scope='encoder',
reuse=None):
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.encoder_dropout
with tf.variable_scope(scope, reuse=reuse):
#T = tf.shape(input_seq_batch)[0]
T = input_seq_batch.shape.as_list()[0]
N = tf.shape(input_seq_batch)[1]
self.T_encoder = T
self.N = N
with tf.variable_scope(self.embed_scope, reuse=True):
embedding_mat = tf.get_variable('embed_mat', [self.encoder_num_vocab,
self.encoder_embed_dim])
# text_seq has shape [T, N] and embedded_seq has shape [T, N, D].
embedded_seq = tf.nn.embedding_lookup(embedding_mat, input_seq_batch)
self.embedded_input_seq = embedded_seq
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
# encoder_outputs has shape [T, N, lstm_dim]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell, embedded_seq,
seq_len_batch,
dtype=tf.float32,
time_major=True,
scope='lstm')
self.encoder_outputs = encoder_outputs
self.encoder_states = encoder_states
# check if wv flag is set
if self.params['use_word_vectors']:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(embedded_seq, [-1, self.encoder_embed_dim]),
output_dim=lstm_dim)
else:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(encoder_outputs, [-1, lstm_dim]), output_dim=lstm_dim)
encoder_h_transformed = tf.reshape(encoder_h_transformed,
to_T([T, N, lstm_dim]))
self.encoder_h_transformed = encoder_h_transformed
# seq_not_finished has shape [T, N, 1], where seq_not_finished[t, n]
# is 1 iff sequence n is not finished at time t, and 0 otherwise
seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
seq_len_batch[:, tf.newaxis])
seq_not_finished = tf.cast(seq_not_finished, tf.float32)
self.seq_not_finished = seq_not_finished
# build a special encoder
def _build_memory_encoder(self, holders, scope='encoder', reuse=None):
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.encoder_dropout
input_seq = holders['ques']
input_seq_len = holders['ques_len']
# facts/memories
hist_size = holders['hist'].shape.as_list()
hist_flat = tf.reshape(holders['hist'], [-1, hist_size[2]])
hist_len_flat = tf.reshape(holders['hist_len'], [-1])
with tf.variable_scope(scope, reuse=reuse):
T = input_seq.shape.as_list()[0]
N = tf.shape(input_seq)[1]
self.T_encoder = T
self.N = N
with tf.variable_scope(self.embed_scope, reuse=True):
embed_mat = tf.get_variable('embed_mat', [self.encoder_num_vocab,
self.encoder_embed_dim])
# text_seq has shape [T, N] and embedded_seq has shape [T, N, D].
embed_seq = tf.nn.embedding_lookup(embed_mat, input_seq)
self.embedded_input_seq = embed_seq
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
# encoder_outputs has shape [T, N, lstm_dim]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell,
embed_seq, input_seq_len, dtype=tf.float32,
time_major=True, scope='lstm')
self.encoder_outputs = encoder_outputs
# batch first encoder outputs
batch_encoder_outputs = tf.transpose(encoder_outputs, [1, 0, 2])
ques_enc = support.last_relevant(batch_encoder_outputs, input_seq_len)
size = [-1, self.params['num_rounds'], self.params['lstm_size']]
ques_enc = tf.reshape(ques_enc, size)
self.encoder_states = encoder_states
# similarly encode history
hist_out = tf.nn.embedding_lookup(embed_mat, hist_flat)
# rnns to encode history
cell = tf.contrib.rnn.BasicLSTMCell(self.params['lstm_size'])
for ii in range(0, self.params['num_layers']):
# dynamic rnn
hist_out, states = tf.nn.dynamic_rnn(cell, hist_out, \
sequence_length=hist_len_flat, \
dtype=tf.float32, scope='hist_layer_%d' % ii)
# get output from last timestep
hist_enc = support.last_relevant(hist_out, hist_len_flat)
# reshape back
size = [-1, hist_size[1], self.params['lstm_size']]
hist_enc = tf.reshape(hist_enc, size)
# concatenate, mlp and tanh
num_r = self.params['num_rounds']
# dot product
attention = tf.matmul(ques_enc, hist_enc, transpose_b=True)
# a large negative number used to suppress future rounds before the softmax
u_mat = np.full((num_r, num_r), -1e10)
suppress_mat = tf.constant(np.triu(u_mat, 1), dtype=tf.float32)
l_mat = np.full((num_r, num_r), 1)
mask_mat = tf.constant(np.tril(l_mat), dtype=tf.float32)
attention = tf.nn.softmax(tf.multiply(attention, mask_mat)
+ suppress_mat)
self.att_history = attention
att_hist_enc = tf.matmul(attention, hist_enc)
# flatten out
size = [-1, self.params['lstm_size']]
att_hist_flat = tf.reshape(att_hist_enc, size)
# concatenate attended history and encoder state for the last layer
concat = tf.concat([encoder_states[-1].h, att_hist_flat], -1)
new_state = LSTMStateTuple(encoder_states[-1].c,
FC(concat, self.params['lstm_size']))
# make it mutable
encoder_states = list(encoder_states)
encoder_states[-1] = new_state
self.encoder_states = tuple(encoder_states)
# check if wv flag is set
if self.params['use_word_vectors']:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(embed_seq, [-1, self.encoder_embed_dim]),
output_dim=lstm_dim)
else:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(encoder_outputs, [-1, lstm_dim]), output_dim=lstm_dim)
encoder_h_transformed = tf.reshape(encoder_h_transformed,
to_T([T, N, lstm_dim]))
self.encoder_h_transformed = encoder_h_transformed
# seq_not_finished is a shape [T, N, 1] tensor, where seq_not_finished[t, n]
# is 1 iff sequence n is not finished at time t, and 0 otherwise
seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
input_seq_len[:, tf.newaxis])
seq_not_finished = tf.cast(seq_not_finished, tf.float32)
self.seq_not_finished = seq_not_finished
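# Illustrative sketch (not part of the original model): the history attention
# above is made causal by a lower-triangular keep mask plus an additive,
# strictly upper-triangular -1e10 mask, so round r only attends to rounds
# 0..r. A NumPy analogue of the two masks (num_rounds is a placeholder):
def _example_history_masks(self, num_rounds=3):
  suppress_mat = np.triu(np.full((num_rounds, num_rounds), -1e10), 1)
  mask_mat = np.tril(np.ones((num_rounds, num_rounds)))
  return suppress_mat, mask_mat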
def _build_decoder(self, use_gt_layout, gt_layout_batch, scope='decoder',
reuse=None):
# The main difference from before is that the decoders now takes another
# input (the attention) when computing the next step
# T_max is the maximum length of decoded sequence (including <eos>)
#
# This function is for decoding only. It performs greedy search or sampling.
# the first input is <go> (its embedding vector) and the subsequent inputs
# are the outputs from previous time step
# num_vocab does not include <go>
#
# use_gt_layout is None or a bool tensor, and gt_layout_batch is a tensor
# with shape [T_max, N].
# If use_gt_layout is not None, then when use_gt_layout is true, predict
# exactly the tokens in gt_layout_batch, regardless of actual probability.
# Otherwise, if sampling is True, sample from the token probability
# If sampling is False, do greedy decoding (beam size 1)
N = self.N
encoder_states = self.encoder_states
T_max = self.T_decoder
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.decoder_dropout
EOS_token = self.EOS_token
sampling = self.decoder_sampling
with tf.variable_scope(scope, reuse=reuse):
embedding_mat = tf.get_variable('embedding_mat',
[self.decoder_num_vocab, self.decoder_embed_dim])
# we use a separate embedding for <go>, as it is only used in the
# beginning of the sequence
go_embedding = tf.get_variable('go_embedding', [1, self.decoder_embed_dim])
with tf.variable_scope('att_prediction'):
v = tf.get_variable('v', [lstm_dim])
W_a = tf.get_variable('weights', [lstm_dim, lstm_dim],
initializer=tf.contrib.layers.xavier_initializer())
b_a = tf.get_variable('biases', lstm_dim,
initializer=tf.constant_initializer(0.))
# The parameters to predict the next token
with tf.variable_scope('token_prediction'):
W_y = tf.get_variable('weights', [lstm_dim*2, self.decoder_num_vocab],
initializer=tf.contrib.layers.xavier_initializer())
b_y = tf.get_variable('biases', self.decoder_num_vocab,
initializer=tf.constant_initializer(0.))
# Attentional decoding
# Loop function is called at time t BEFORE the cell execution at time t,
# and its next_input is used as the input at time t (not t+1)
# c.f. https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn
mask_range = tf.reshape(tf.range(self.decoder_num_vocab, dtype=tf.int32),
[1, -1])
if use_gt_layout is not None:
gt_layout_mult = tf.cast(use_gt_layout, tf.int32)
pred_layout_mult = 1 - gt_layout_mult
def loop_fn(time, cell_output, cell_state, loop_state):
if cell_output is None: # time == 0
next_cell_state = encoder_states
next_input = tf.tile(go_embedding, to_T([N, 1]))
else: # time > 0
next_cell_state = cell_state
# compute the attention map over the input sequence
# a_raw has shape [T, N, 1]
att_raw = tf.reduce_sum(
tf.tanh(tf.nn.xw_plus_b(cell_output, W_a, b_a) +
self.encoder_h_transformed) * v,
axis=2, keep_dims=True)
# softmax along the first dimension (T) over not finished examples
# att has shape [T, N, 1]
att = tf.nn.softmax(att_raw, dim=0)*self.seq_not_finished
att = att / tf.reduce_sum(att + 1e-10, axis=0, keep_dims=True)
# d has shape [N, lstm_dim]
d2 = tf.reduce_sum(att*self.encoder_outputs, axis=0)
# token_scores has shape [N, num_vocab]
token_scores = tf.nn.xw_plus_b(
tf.concat([cell_output, d2], axis=1),
W_y, b_y)
decoding_state = loop_state[2]
# token_validity has shape [N, num_vocab]
token_validity = _get_valid_tokens(decoding_state, self.W, self.b)
token_validity.set_shape([None, self.decoder_num_vocab])
if use_gt_layout is not None:
# when there's ground-truth layout, do not re-normalize prob
# and treat all tokens as valid
token_validity = tf.logical_or(token_validity, use_gt_layout)
validity_mult = tf.cast(token_validity, tf.float32)
# predict the next token (behavior depending on parameters)
if sampling:
token_scores_valid = token_scores - (1-validity_mult) * 50
# TODO:debug
sampled_token = tf.cast(tf.reshape(
tf.multinomial(token_scores_valid/self.temperature, 1), [-1]), tf.int32)
# make sure that the predictions are ALWAYS valid
# (it can be invalid with very small prob)
# If not, just fall back to min cases
# pred_mask has shape [N, num_vocab]
sampled_mask = tf.equal(mask_range, tf.reshape(sampled_token, [-1, 1]))
is_sampled_valid = tf.reduce_any(
tf.logical_and(sampled_mask, token_validity),
axis=1)
# Fall back to max score (no sampling)
min_score = tf.reduce_min(token_scores)
token_scores_valid = tf.where(token_validity, token_scores,
tf.ones_like(token_scores)*(min_score-1))
max_score_token = tf.cast(tf.argmax(token_scores_valid, 1), tf.int32)
predicted_token = tf.where(is_sampled_valid, sampled_token, max_score_token)
else:
min_score = tf.reduce_min(token_scores)
token_scores_valid = tf.where(token_validity, token_scores,
tf.ones_like(token_scores)*(min_score-1))
# predicted_token has shape [N]
predicted_token = tf.cast(tf.argmax(token_scores_valid, 1), tf.int32)
if use_gt_layout is not None:
predicted_token = (gt_layout_batch[time-1] * gt_layout_mult
+ predicted_token * pred_layout_mult)
# a robust version of softmax
# all_token_probs has shape [N, num_vocab]
all_token_probs = tf.nn.softmax(token_scores) * validity_mult
# tf.check_numerics(all_token_probs, 'NaN/Inf before div')
all_token_probs = all_token_probs / tf.reduce_sum(all_token_probs + 1e-10, axis=1, keep_dims=True)
# tf.check_numerics(all_token_probs, 'NaN/Inf after div')
# mask has shape [N, num_vocab]
mask = tf.equal(mask_range, tf.reshape(predicted_token, [-1, 1]))
# token_prob has shape [N], the probability of the predicted token
# although token_prob is not needed for predicting the next token
# it is needed in output (for policy gradient training)
# [N, num_vocab]
token_prob = tf.reduce_sum(all_token_probs * tf.cast(mask, tf.float32), axis=1)
# tf.assert_positive(token_prob)
neg_entropy = tf.reduce_sum(
all_token_probs * tf.log(all_token_probs + (1-validity_mult) + 1e-10),
axis=1)
# update states
updated_decoding_state = _update_decoding_state(
decoding_state, predicted_token, self.P)
# the prediction comes from the cell output of the previous timestep (t-1);
# feed it as the input at timestep t
next_input = tf.nn.embedding_lookup(embedding_mat, predicted_token)
elements_finished = tf.greater_equal(time, T_max)
# loop_state is a 5-tuple, representing
# 1) the predicted_tokens
# 2) the prob of predicted_tokens
# 3) the decoding state (used for validity)
# 4) the negative entropy of policy (accumulated across timesteps)
# 5) the attention
if loop_state is None: # time == 0
# Write the predicted token into the output
predicted_token_array = tf.TensorArray(dtype=tf.int32, size=T_max,
infer_shape=False)
token_prob_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
init_decoding_state = tf.tile(to_T([[0, 0, T_max]], dtype=tf.int32), to_T([N, 1]))
att_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
next_loop_state = (predicted_token_array,
token_prob_array,
init_decoding_state,
tf.zeros(to_T([N]), dtype=tf.float32),
att_array)
else: # time > 0
t_write = time-1
next_loop_state = (loop_state[0].write(t_write, predicted_token),
loop_state[1].write(t_write, token_prob),
updated_decoding_state,
loop_state[3] + neg_entropy,
loop_state[4].write(t_write, att))
return (elements_finished, next_input, next_cell_state, cell_output,
next_loop_state)
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
_, _, decodes_ta = tf.nn.raw_rnn(cell, loop_fn, scope='lstm')
predicted_tokens = decodes_ta[0].stack()
token_probs = decodes_ta[1].stack()
neg_entropy = decodes_ta[3]
# atts has shape [T_decoder, T_encoder, N, 1]
atts = decodes_ta[4].stack()
# static dimension recast
atts = tf.reshape(atts, [self.T_decoder, self.T_encoder, -1, 1])
self.atts = atts
# word_vecs has shape [T_decoder, N, encoder_embed_dim]
word_vecs = tf.reduce_sum(atts*self.embedded_input_seq, axis=1)
predicted_tokens.set_shape([None, None])
token_probs.set_shape([None, None])
neg_entropy.set_shape([None])
#word_vecs.set_shape([None, None, self.encoder_embed_dim])
# static shapes
word_vecs.set_shape([self.T_decoder, None, self.encoder_embed_dim])
self.predicted_tokens = predicted_tokens
self.token_probs = token_probs
self.neg_entropy = neg_entropy
self.word_vecs = word_vecs
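# Illustrative sketch (not part of the original file): NumPy analogue of the
# attention masking used in loop_fn above -- softmax over encoder steps,
# zero out steps of already-finished sequences, then renormalize with a
# small epsilon. scores and not_finished are assumed to be [T, N] arrays.
def _example_masked_attention(scores, not_finished):
  att = np.exp(scores - scores.max(axis=0, keepdims=True))
  att = att / att.sum(axis=0, keepdims=True)
  att = att * not_finished
  return att / (att + 1e-10).sum(axis=0, keepdims=True)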
#------------------------------------------------------------------------------
|
corefnmn-main
|
models_mnist/generator_attnet.py
|
corefnmn-main
|
models_mnist/__init__.py
|
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
TODO(satwik): Write a description about what this file contains and what
it does.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow import convert_to_tensor as to_T
from models_mnist import modules as lm
# the number of attention input to each module
_module_input_num = {
'_Find': 0,
'_Refer': 0,
'_Exclude': 0,
'_Transform': 1,
'_Exist': 1,
'_Count': 1,
'_And': 2,
'_Diff': 2,
'_Not': 1,
'_Describe': 1
}
# output type of each module
_module_output_type = {
'_Find': 'att',
'_Refer': 'att',
'_Exclude': 'att',
'_Exist': 'ans',
'_Count': 'ans',
'_Transform': 'att',
'_And': 'att',
'_Diff': 'att',
'_Not': 'att',
'_Describe': 'ans'
}
INVALID_EXPR = 'INVALID_EXPR'
# decoding validity: maintain a state x of [#att, #ans, T_remain],
# where T_remain equals T_decoder when decoding the first module token.
# a token s can be predicted iff all(<x, w_s> - b_s >= 0), i.e. the batched
# validity check is X W - b >= 0.
# the state transition matrix is P, so the state update is X += S P,
# where S is the predicted tokens (one-hot vectors)
def _build_validity_mats(module_names):
state_size = 3
num_vocab_nmn = len(module_names)
num_constraints = 4
P = np.zeros((num_vocab_nmn, state_size), np.int32)
W = np.zeros((state_size, num_vocab_nmn, num_constraints), np.int32)
b = np.zeros((num_vocab_nmn, num_constraints), np.int32)
# collect the input and output numbers of each module
att_in_nums = np.zeros(num_vocab_nmn)
att_out_nums = np.zeros(num_vocab_nmn)
ans_out_nums = np.zeros(num_vocab_nmn)
for n_s, s in enumerate(module_names):
if s != '<eos>':
att_in_nums[n_s] = _module_input_num[s]
att_out_nums[n_s] = _module_output_type[s] == 'att'
ans_out_nums[n_s] = _module_output_type[s] == 'ans'
# construct the transition matrix P
for n_s, s in enumerate(module_names):
P[n_s, 0] = att_out_nums[n_s] - att_in_nums[n_s]
P[n_s, 1] = ans_out_nums[n_s]
P[n_s, 2] = -1
# construct the validity W and b
att_absorb_nums = (att_in_nums - att_out_nums)
max_att_absorb_nonans = np.max(att_absorb_nums * (ans_out_nums == 0))
max_att_absorb_ans = np.max(att_absorb_nums * (ans_out_nums != 0))
for n_s, s in enumerate(module_names):
if s != '<eos>':
# constraint: a non-<eos> module can be outputted iff all the following
# hold:
# * 0) there's enough att in the stack
# #att >= att_in_nums[n_s]
W[0, n_s, 0] = 1
b[n_s, 0] = att_in_nums[n_s]
# * 1) for answer modules, there's no extra att in the stack
# #att <= att_in_nums[n_s]
# -#att >= -att_in_nums[n_s]
# for non-answer modules, T_remain >= 3
# (the last two has to be AnswerType and <eos>)
if ans_out_nums[n_s] != 0:
W[0, n_s, 1] = -1
b[n_s, 1] = -att_in_nums[n_s]
else:
W[2, n_s, 1] = 1
b[n_s, 1] = 3
# * 2) there's no answer in the stack (otherwise <eos> only)
# #ans <= 0
# -#ans >= 0
W[1, n_s, 2] = -1
# * 3) there's enough time to consume the all attentions, output answer
# plus <eos>
# 3.1) for non-answer modules, we already have T_remain>= 3 from
# constraint 2
# In maximum (T_remain-3) further steps
# (plus 3 steps for this, ans, <eos>) to consume atts
# (T_remain-3) * max_att_absorb_nonans + max_att_absorb_ans +
# att_absorb_nums[n_s] >= #att
# T_remain*MANA - #att >= 3*MANA - MAA - A[s]
# - #att + MANA * T_remain >= 3*MANA - MAA - A[s]
# 3.2) for answer modules, if it can be decoded then constraint 0&1
# ensures that there'll be no att left in stack after decoding
# this answer, hence no further constraints here
if ans_out_nums[n_s] == 0:
W[0, n_s, 3] = -1
W[2, n_s, 3] = max_att_absorb_nonans
b[n_s, 3] = (3 * max_att_absorb_nonans - max_att_absorb_ans -
att_absorb_nums[n_s])
else: # <eos>-case
# constraint: a <eos> token can be outputted iff all the following holds
# * 0) there's ans in the stack
# #ans >= 1
W[1, n_s, 0] = 1
b[n_s, 0] = 1
return P, W, b
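# Illustrative sketch (not part of the original file): with the matrices
# built above, checking whether token s may be emitted from decoding state
# x = [#att, #ans, T_remain] is a plain inequality test, mirroring
# _get_valid_tokens in generator_attnet.py. The helper name is hypothetical.
def _example_is_token_valid(x, s, W, b):
  # x: [state_size] int vector, s: token index
  # valid iff every constraint <x, W[:, s, c]> - b[s, c] >= 0 holds
  return bool(np.all(np.asarray(x).dot(W[:, s]) - b[s] >= 0))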
#------------------------------------------------------------------------------
class Assembler:
def __init__(self, module_vocab_file):
# read the module list, and record the index of each module and <eos>
with open(module_vocab_file) as f:
self.module_names = [s.strip() for s in f.readlines()]
# find the index of <eos>
for n_s in range(len(self.module_names)):
if self.module_names[n_s] == '<eos>':
self.EOS_idx = n_s
break
# build a dictionary from module name to token index
self.name2idx_dict = {name: n_s for n_s, name in enumerate(self.module_names)}
self.num_vocab_nmn = len(self.module_names)
self.P, self.W, self.b = _build_validity_mats(self.module_names)
def module_list2tokens(self, module_list, T=None):
layout_tokens = [self.name2idx_dict[name] for name in module_list]
if T is not None:
if len(module_list) >= T:
raise ValueError('Not enough time steps to add <eos>')
layout_tokens += [self.EOS_idx]*(T-len(module_list))
return layout_tokens
def _layout_tokens2str(self, layout_tokens):
return ' '.join([self.module_names[idx] for idx in layout_tokens])
def assemble_refer(self, text_att, round_id, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# compute the scores
logits = []
for find_arg in reuse_stack:
# compute the weights for each of the attention map
inputs = (text_att, find_arg[1], round_id, find_arg[2])
logits.append(weaver.align_text(*inputs))
# exponential each logit
weights = []
for ii in logits: weights.append(weaver.exp(ii))
# normalize the weights
if len(weights) < 2:
norm = weights[0]
else:
norm = weaver.add(weights[0], weights[1])
for ii in weights[2:]: norm = weaver.add(norm, ii)
for index, ii in enumerate(weights):
weights[index] = weaver.divide(ii, norm)
# multiply the attention with softmax weight
prev_att = []
for (att, _, _, _, _), weight in zip(reuse_stack, weights):
prev_att.append(weaver.weight_attention(att, weight))
# add all attentions to get the result
if len(prev_att) < 2: out = prev_att[0]
else:
out = weaver.add_attention(prev_att[0], prev_att[1])
for ii in prev_att[2:]:
out = weaver.add_attention(out, ii)
return out, weights, logits
def assemble_exclude(self, text_att, round_id, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# compute the scores
weights = []
exclude_att = reuse_stack[0][0]
if len(reuse_stack) > 1:
for find_arg in reuse_stack:
exclude_att = weaver.max_attention(exclude_att, find_arg[0])
return weaver.normalize_exclude(exclude_att)
# code to check if the program makes sense
# typically contains all the checks from the _assemble_program method
def sanity_check_program(self, layout):
decode_stack = []
for t_id, cur_op_id in enumerate(layout):
cur_op_name = self.module_names[cur_op_id]
# <eos> would mean stop
if cur_op_id == self.EOS_idx: break
# insufficient number of inputs
num_inputs = _module_input_num[cur_op_name]
if len(decode_stack) < num_inputs:
return False, 'Insufficient inputs'
# read the inputs
inputs = []
for ii in range(num_inputs):
arg_type = decode_stack.pop()
# cannot consume anything but attention
if arg_type != 'att':
return False, 'Intermediate not attention'
decode_stack.append(_module_output_type[cur_op_name])
# Check if only one element is left
if len(decode_stack) != 1:
return False, 'Left with more than one output'
# final output is not answer type
elif decode_stack[0] != 'ans':
return False, 'Final output not an answer'
return True, 'Valid program'
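# Illustrative sketch (not part of the original class): sanity-checking a toy
# Reverse Polish layout (_Find -> _Describe -> <eos>). Assumes both module
# names exist in the vocabulary file used to build this assembler.
def _example_sanity_check(self):
  layout = [self.name2idx_dict['_Find'],
            self.name2idx_dict['_Describe'],
            self.EOS_idx]
  return self.sanity_check_program(layout)  # expected: (True, 'Valid program')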
def assemble(self, layout_tokens, executor, visualize=False):
# layout_tokens_batch is a numpy array with shape [T, N],
# containing module tokens and <eos>, in Reverse Polish Notation.
# internalize executor and weaver
self.executor = executor
# build a weaver
weaver = executor.create_weaver()
self.weaver = weaver
# visualize flag
self.visualize = visualize
# get extent of layout tokens
max_time, batch_size = layout_tokens['ques'].shape
num_rounds = executor.params['num_rounds']
batch_size = batch_size // num_rounds
outputs = []
reuse = [None] * batch_size
ques_invalid_prog = []
# program on questions and captions, if needed
ques_tokens = layout_tokens['ques']
for b_id in range(batch_size):
image = weaver.batch_input(executor._loom_types['image'], b_id)
if executor.params['use_fact']:
fact = weaver.batch_input(executor._loom_types['fact'], b_id)
else: fact = None
# Now run program on questions
text = weaver.batch_input(executor._loom_types['text'], b_id)
text_feat = weaver.batch_input(executor._loom_types['text_feat'], b_id)
# collect root node outputs for down the rounds
# tuples are immutable, recreate to ensure caption is round 0
round_zero = weaver.batch_input(executor._loom_types['round'], 0)
tokens = ques_tokens[:, num_rounds*b_id : num_rounds*(b_id+1)]
inputs = (image, text, fact, text_feat, tokens, [])
out, _, invalid_prog = self._assemble_program(*inputs)
ques_invalid_prog.extend(invalid_prog)
outputs.extend(out['comp'])
if visualize:
outputs.extend([ii for ii, _ in out['vis']['att']])
outputs.extend(out['vis']['weights'])
invalid_prog = {'ques': ques_invalid_prog}
return weaver, outputs, invalid_prog
def _assemble_program(self, image, text, fact, text_feat, tokens, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# get extent of layout tokens
max_time, batch_size = tokens.shape
num_rounds = executor.params['num_rounds']
outputs = []
validity = []
# for visualizing internal nodes
vis_outputs = {'att': [], 'weights': []}
for r_id in range(num_rounds):
layout = tokens[:, r_id]
invalid_prog = False
round_id = weaver.batch_input(executor._loom_types['round'], r_id)
if fact is not None: fact_slice = weaver.slice_fact(fact, round_id)
# valid layout must contain <eos>. Assembly fails if it doesn't.
if not np.any(layout == self.EOS_idx): invalid_prog = True
decode_stack = []
penult_out = None # penultimate output
for t_id in range(len(layout)):
weights = None
time = weaver.batch_input(executor._loom_types['time'], t_id)
text_att = weaver.slice_text(text, round_id, time)
# slice the text feature
text_feat_slice = weaver.slice_text_feat(text_feat, round_id, time)
cur_op_id = layout[t_id]
cur_op_name = self.module_names[cur_op_id]
# <eos> would mean stop
if cur_op_id == self.EOS_idx: break
# insufficient number of inputs
num_inputs = _module_input_num[cur_op_name]
if len(decode_stack) < num_inputs:
invalid_prog = True
break
# read the inputs
inputs = []
for ii in range(num_inputs):
arg, arg_type = decode_stack.pop()
# cannot consume anything but attention
if arg_type != 'att':
invalid_prog = True
break
inputs.append(arg)
# switch cases
if cur_op_name == '_Find':
out = weaver.find(image, text_att)
elif cur_op_name == '_Refer':
# nothing to refer to, wrong program
if len(reuse_stack) == 0:
invalid_prog = True
break
# if baseline is in the model, take the last output
if 'baseline' in self.executor.params['model']:
out = reuse_stack[-1][0]
else:
inputs = (text_feat_slice, round_id, reuse_stack)
out, weights, logits = self.assemble_refer(*inputs)
elif cur_op_name == '_Exclude':
# clean up reuse stack to avoid current finds
neat_stack = reuse_stack.copy()
for prev_time in range(t_id - 1, 0, -1):
if neat_stack[-1][-2] == prev_time: neat_stack.pop(-1)
# nothing to exclude to, wrong program
if len(neat_stack) == 0:
invalid_prog = True
break
inputs = (text_att, round_id, neat_stack)
out = self.assemble_exclude(*inputs)
# collect in reuse stack
#reuse_stack.append((out, text_att, round_id, r_id, t_id))
elif cur_op_name == '_Transform':
out = weaver.transform(inputs[0], image, text_att)
elif cur_op_name == '_Describe':
out = weaver.describe(inputs[0], image, text_att)
# TODO: Do this more carefully!
penult_out = arg
elif cur_op_name == '_Exist':
out = weaver.exist(inputs[0], image, text_att)
# TODO: Do this more carefully!
penult_out = arg
elif cur_op_name == '_Count':
out = weaver.count(inputs[0], image, text_att)
# TODO: Do this more carefully!
penult_out = arg
elif cur_op_name == '_And':
out = weaver.and_op(inputs[0], inputs[1])
elif cur_op_name == '_Diff':
out = weaver.diff_op(inputs[0], inputs[1])
# just invert the attention
elif cur_op_name == '_Not':
out = weaver.normalize_exclude(inputs[0])
else:
print('Current operand not defined: ' + cur_op_name)
invalid_prog = True
# collect outputs from all modules (visualize)
if self.visualize:
if _module_output_type[cur_op_name] == 'att':
vis_outputs['att'].append((out, r_id))
if weights is not None:
vis_outputs['weights'].extend(weights)
decode_stack.append((out, _module_output_type[cur_op_name]))
# Check if only one element is left
if len(decode_stack) != 1: invalid_prog = True
# final output is not answer type
elif decode_stack[0][1] != 'ans': invalid_prog = True
# record program validity
validity.append(invalid_prog)
# if program is invalid, return zeros
if invalid_prog: outputs.append(weaver.invalid(image))
else:
outputs.append(decode_stack[-1][0])
# if fact is to be used, take the penultimate output
if executor.params['use_fact']:
reuse_stack.append((penult_out, fact_slice, round_id, r_id, -1))
return {'comp': outputs, 'vis': vis_outputs}, reuse_stack, validity
|
corefnmn-main
|
models_mnist/assembler.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main CorefNMN model class.
Explicit visual coreference resolution in visual dialog using neural module
networks. Takes parameters and assemblers as input.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from models_mnist.generator import ProgramGenerator
from models_mnist.executor import ProgramExecutor
from models_mnist.decoder import AnswerDecoder
from util import support
class CorefNMN:
def __init__(self, params, assemblers, reuse=None):
# train mode
params['train_mode'] = 'test_split' not in params
print('Building model with train_mode as: ' + str(params['train_mode']))
self.params = params
self.assemblers = assemblers
# module phases
self.phases = ['generate_program', 'execute_program', 'generate_answer']
# initializing input and output placeholders
self.inputs = {ii: {} for ii in self.phases}
self.outputs = self.inputs.copy()
# build place holders for inputs and outputs in the tensorflow graph
holders = self._build_placeholders(params)
self.holders = holders
with tf.variable_scope(params['model'], reuse=reuse):
# keep track of all outputs
output_pool = {}
# Part 1: Seq2seq RNN to generate module layout tokens
with tf.variable_scope('generate_program'):
self.generator = ProgramGenerator(holders, assemblers['ques'], params)
self.inputs['generate_program'] = self.generator.get_inputs()
self.outputs['generate_program'] = self.generator.get_outputs()
# add outputs to pool
output_pool.update(self.outputs['generate_program'])
# Part 2: Neural Module Network
with tf.variable_scope('execute_program'):
self.executor = ProgramExecutor(holders, output_pool,
assemblers['copy'], params)
self.inputs['execute_program'] = self.executor.get_inputs()
self.outputs['execute_program'] = self.executor.get_outputs()
# add outputs to pool
output_pool.update(self.outputs['execute_program'])
# Part 3: Seq2Seq decoding of the answer
with tf.variable_scope('generate_answer'):
self.decoder = AnswerDecoder(holders, output_pool, params)
self.inputs['generate_answer'] = self.decoder.get_inputs()
self.outputs['generate_answer'] = self.decoder.get_outputs()
# pool up all the outputs
pooled_dict = []
outputs = self.outputs.copy()
for ii in outputs:
pooled_dict += outputs[ii].items()
self.pooled_outputs = dict(pooled_dict)
self.run_outputs = [ii for _, jj in self.outputs.items()
for _, ii in jj.items()]
self.run_inputs = list(set([ii for _, jj in self.inputs.items()
for _, ii in jj.items()]))
# add additional input tensorflow fold
if 'prog' in params['model']:
self.run_inputs.append(self.executor._loom._loom_input_tensor)
#---------------------------------------------------------------------------
def _build_placeholders(self, params):
inputs = {}
# Phase 1 - program generation
size = [params['max_enc_len'], None]
inputs['ques'] = tf.placeholder(tf.int32, size, 'ques')
inputs['ques_len'] = tf.placeholder(tf.int32, [None], 'ques_len')
inputs['prog_gt'] = tf.placeholder(tf.int32, [None, None], 'prog')
# place holders for fact
size = [None, params['max_enc_len'] + 1]
inputs['fact'] = tf.placeholder(tf.int32, size, 'fact')
inputs['fact_len'] = tf.placeholder(tf.int32, [None], 'fact_len')
# tie encoder and decoder
size = [params['num_layers'], None, params['lstm_size']]
inputs['enc_dec_h'] = tf.placeholder(tf.float32, size, 'enc_dec_h')
inputs['enc_dec_c'] = tf.placeholder(tf.float32, size, 'enc_dec_c')
# Phase 2 - program execution
size = [None, 112, 112, 3]
inputs['image'] = tf.placeholder(tf.float32, size, 'image')
inputs['prog_validity'] = tf.placeholder(tf.bool, [None])
# for the answer indices
inputs['ans_ind'] = tf.placeholder(tf.int32, [None], 'ans_ind')
# history
size = [None, params['num_rounds'], params['max_enc_len'] + 1]
inputs['hist'] = tf.placeholder(tf.int32, size, 'history')
size = [None, params['num_rounds']]
inputs['hist_len'] = tf.placeholder(tf.int32, size, 'hist_len')
if not self.params['train_mode']:
# additional placeholders during evaluation
size = [None, params['lstm_size']]
inputs['context'] = tf.placeholder(tf.float32, size, 'context')
size = [None, None, None, params['lstm_size']]
inputs['ques_enc'] = tf.placeholder(tf.float32, size, 'ques_enc')
size = [None, params['lstm_size']]
inputs['hist_enc'] = tf.placeholder(tf.float32, size, 'hist_enc')
size = [params['max_dec_len'], None, params['text_embed_size']]
inputs['ques_attended'] = tf.placeholder(tf.float32, size, 'ques_att')
return inputs
#---------------------------------------------------------------------------
# method to initialize training related attributes
def setup_training(self):
# answer prediction loss
total_loss = self.outputs['generate_answer']['ans_token_loss']
# supervised sequence prediction loss
total_loss += self.outputs['generate_program']['prog_pred_loss']
# add the total loss to the list of outputs
self.pooled_outputs['total_loss'] = total_loss
self.total_loss = total_loss
self.run_outputs.append(self.total_loss)
# setters and getters
def get_total_loss(self):
return self.total_loss
# return self.pooled_outputs['total_loss']
def set_train_step(self, step):
if hasattr(self, 'train_step'):
self.train_step.append(step)
else:
self.train_step = [step]
self.run_outputs.append(step)
def add_solver_op(self, op):
self.pooled_outputs['solver'] = op
#---------------------------------------------------------------------------
def run_train_iteration(self, batch, sess):
iter_loss = {}
h = sess.partial_run_setup(self.run_outputs, self.run_inputs)
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output = sess.partial_run(h, self.outputs['generate_program'], feeder)
iter_loss['prog'] = output['prog_pred_loss'] # record loss
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output)
output.update(sess.partial_run(h, self.outputs['execute_program'], feeder))
# Part 3: Run the answer generation language model
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.partial_run(h, self.outputs['generate_answer'], feeder))
iter_loss['ans'] = output['ans_token_loss'] # record loss
# End: perform the gradient steps
output = sess.partial_run(h, self.train_step + [self.total_loss])
iter_loss['total'] = output[-1] # record loss
return iter_loss, None
#---------------------------------------------------------------------------
def run_train_iteration_legacy(self, batch, sess):
iter_loss = {}
# collect feeds from all subcomponents
feeder = self.generator.produce_feed_dict(batch)
feeder.update(self.executor.produce_feed_dict(batch))
feeder.update(self.decoder.produce_feed_dict(batch))
# run all subcomponents together
output = sess.run(self.pooled_outputs, feed_dict=feeder)
# record all the loss values
iter_loss['prog'] = output['prog_pred_loss']
iter_loss['ans'] = output['ans_token_loss']
iter_loss['total'] = output['total_loss']
return iter_loss, None
#---------------------------------------------------------------------------
def run_evaluate_iteration(self, batch, sess, eval_options=True):
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output = sess.run(self.outputs['generate_program'], feed_dict=feeder)
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['execute_program'], feed_dict=feeder))
if 'pred_tokens' in output:
prog_matches = []
prog_matches.append(batch['gt_layout'] == output['pred_tokens'])
output['matches'] = prog_matches
# Part 3: Run the answer generation language model
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['generate_answer'], feeder))
# use the logits and get the prediction
matches = np.argmax(output['ans_logits'], 1) == batch['ans_ind']
return matches, output
#---------------------------------------------------------------------------
def run_visualize_iteration(self, batch, sess, eval_options=True):
output = batch.copy()
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output.update(sess.run(self.outputs['generate_program'], feeder))
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output, True)
output.update(sess.run(self.outputs['execute_program'], feeder))
# Part 3: Run the answer generation language model
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['generate_answer'], feeder))
# segregate weights and attention maps
output['intermediates'] = self.executor.segregrate_outputs(output)
return None, output
#-------------------------------------------------------------------------
|
corefnmn-main
|
models_mnist/model.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to generate programs for questions and captions.
Program generator for explicit visual coreference resolution model in visual
dialog using neural module networks, called CorefNMN.
This subcomponent uses memory network augmentation to figure out if an entity
has been seen before and/or if it needs resolution using history.
Author: Satwik Kottur
"""
import numpy as np
import tensorflow as tf
from models_mnist.generator_attnet import AttSeq2Seq
from util import support
# alias
linear = tf.contrib.layers.fully_connected
# behavior based on type of model
class ProgramGenerator:
def __init__(self, inputs, assembler, params):
"""Initialize program generator.
Args:
inputs:
assembler:
params:
"""
self.params = params
outputs = {}
used_inputs = []
# create embedding matrix
with tf.variable_scope('embed', reuse=None) as embed_scope:
size = [params['text_vocab_size'], params['text_embed_size']]
embed_mat = tf.get_variable('embed_mat', size)
# remember the scope for further use
params['embed_scope'] = embed_scope
cell = tf.contrib.rnn.BasicLSTMCell(params['lstm_size'])
#--------------------------------------------------------
# if program is to be predicted
if 'prog' in params['model']:
# define a constant for internal use
use_gt_prog = tf.constant(params['use_gt_prog'], dtype=tf.bool)
# use a low level model and construct internals
self.rnn = AttSeq2Seq(inputs, use_gt_prog, assembler, params)
# if memory based generator is used
if params['generator'] == 'mem':
used_inputs.extend(['hist', 'hist_len'])
outputs['encoder_output'] = self.rnn.encoder_outputs
outputs['pred_tokens'] = self.rnn.predicted_tokens
outputs['neg_entropy'] = tf.reduce_mean(self.rnn.neg_entropy)
# check if attHistory exists
if hasattr(self.rnn, 'att_history'):
outputs['att_history'] = self.rnn.att_history
# also add the encoder states (based on the flag)
concat_list = [ii.h for ii in self.rnn.encoder_states]
outputs['enc_dec_h'] = tf.stack(concat_list)
concat_list = [ii.c for ii in self.rnn.encoder_states]
outputs['enc_dec_c'] = tf.stack(concat_list)
# alias
attention = self.rnn.atts
# compute attended questions here
# word_vecs has shape [T_decoder, N, text_embed_size]
word_vecs = tf.reduce_sum(attention * self.rnn.embedded_input_seq, axis=1)
size = [params['max_dec_len'], None, params['text_embed_size']]
word_vecs.set_shape(size)
outputs['attention'] = attention
outputs['ques_attended'] = word_vecs
#outputs['ques_attended'] = self.rnn.word_vecs
# log probability of each generated sequence
outputs['log_seq_prob'] = tf.reduce_sum(
tf.log(self.rnn.token_probs + 1e-10), axis=0)
outputs['ques_prog_loss'] = tf.reduce_mean(-outputs['log_seq_prob'])
q_output = tf.transpose(self.rnn.encoder_outputs, perm=[1, 0, 2])
q_output = support.last_relevant(q_output, inputs['ques_len'])
# bloat the first two dimensions
q_output = tf.expand_dims(q_output, axis=0)
outputs['ques_enc'] = tf.expand_dims(q_output, axis=0)
# keep track of inputs actually used
used_inputs.extend(['ques', 'ques_len', 'prog_gt'])
#------------------------------------------------------------------
#------------------------------------------------------------------
# setup the inputs and outputs
# should have at least one loss
total_loss = outputs.get('ques_prog_loss', tf.constant(0.0))
outputs['prog_pred_loss'] = total_loss
self.outputs = outputs
self.inputs = {ii: inputs[ii] for ii in used_inputs}
#------------------------------------------------------------
# setters and getters
def get_outputs(self):
return self.outputs
def get_inputs(self):
return self.inputs
#------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, prev_output=None):
feed_dict = {}
feed_dict[self.inputs['ques']] = batch['ques']
feed_dict[self.inputs['ques_len']] = batch['ques_len']
# add program
if 'prog' in self.params['model']:
feed_dict[self.inputs['prog_gt']] = batch['gt_layout']
# add history
if self.params['generator'] == 'mem':
feed_dict[self.inputs['hist']] = batch['hist']
feed_dict[self.inputs['hist_len']] = batch['hist_len']
return feed_dict
|
corefnmn-main
|
models_mnist/generator.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Module definitions for Loom API.
Explicit visual coreference resolution in visual dialog using neural module
networks. Neural module definitions.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from tensorflow_fold.public import loom
from util.cnn import fc_layer as fc, conv_layer as conv
from util.empty_safe_conv import empty_safe_1x1_conv as _1x1_conv
from util.empty_safe_conv import empty_safe_conv as _conv
def add_spatial_coord_map(image_feat_grid):
image_feat_shape = tf.shape(image_feat_grid)
N = image_feat_shape[0]
# static dimensions
#H = image_feat_shape[1]
#W = image_feat_shape[2]
H, W = image_feat_grid.shape.as_list()[1:3]
x_map = tf.tile(
tf.reshape(tf.linspace(-1., 1., W), [1, 1, -1, 1]),
to_T([N, H, 1, 1]))
y_map = tf.tile(
tf.reshape(tf.linspace(-1., 1., H), [1, -1, 1, 1]),
to_T([N, 1, W, 1]))
# stop gradient on coords_map (needed to fix the tile grad error on TF 1.0.0)
coords_map = tf.stop_gradient(tf.concat([x_map, y_map], axis=3))
image_feat_with_coords = tf.concat([image_feat_grid, coords_map], axis=3)
# set shapes of the new feature maps
image_feat_static_shape = image_feat_grid.get_shape().as_list()
image_feat_static_shape[3] += 2
image_feat_with_coords.set_shape(image_feat_static_shape)
image_feat_static_shape[3] = 2
coords_map.set_shape(image_feat_static_shape)
return image_feat_with_coords, coords_map
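# Illustrative sketch (not part of the original file): NumPy analogue of the
# x/y coordinate channels appended above, for a single example. For H=2, W=3
# the x rows are [-1, 0, 1] and the y columns are [-1, 1].
def _example_coord_maps(H=2, W=3):
  x_map = np.tile(np.linspace(-1., 1., W).reshape(1, W), (H, 1))
  y_map = np.tile(np.linspace(-1., 1., H).reshape(H, 1), (1, W))
  return x_map, y_map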
#------------------------------------------------------------------------------
# Simple tensorflow ops as loom ops
class BinaryLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types, op):
self._op = op
super(BinaryLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
arg1, arg2 = inputs
return [self._op(arg1, arg2)]
#------------------------------------------------------------------------------
class UnaryLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types, op):
self._op = op
super(UnaryLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, arg):
return [self._op(arg[0])]
#------------------------------------------------------------------------------
# slice text attention
class SliceTextLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(SliceTextLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
text, round_id, time = inputs
round_squeeze = tf.squeeze(round_id, -1)
time_squeeze = tf.squeeze(time, -1)
# select the right round
shape = text.shape.as_list()
B = tf.shape(text)[0]
num_rounds, T, text_dim = shape[1], shape[2], shape[3]
indices = round_squeeze + num_rounds * tf.range(B)
# flatten
result = tf.gather(tf.reshape(text, [-1, T, text_dim]), indices)
# select the right time
indices = time_squeeze + T * tf.range(B)
# flatten
result = tf.gather(tf.reshape(result, [-1, text_dim]), indices)
return [result]
#------------------------------------------------------------------------------
# slice answer embedding
class SliceAnswerLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(SliceAnswerLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
answer, round_id = inputs
round_squeeze = tf.squeeze(round_id, -1)
# select the right round
shape = answer.shape.as_list()
B = tf.shape(answer)[0]
num_rounds, text_dim = shape[1], shape[2]
indices = round_squeeze + num_rounds * tf.range(B)
result = tf.gather(tf.reshape(answer, [-1, text_dim]), indices)
return [result]
#--------------------------------------------------------------------
# attention weighting
class AttentionWeightLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(AttentionWeightLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
vis_att, scalar = inputs
# simple weighting
scalar = tf.expand_dims(tf.expand_dims(scalar, -1), -1)
att_grid = tf.multiply(vis_att, scalar)
return [att_grid]
#--------------------------------------------------------------------
# identity op to convert types
class IdentityLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(IdentityLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
return inputs
#--------------------------------------------------------------------
# normalize and complementary attention
class NormalizeExcludeLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(NormalizeExcludeLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
att_grid = inputs[0]
# complement the attention
max_entry = tf.reduce_max(tf.reduce_max(att_grid, 1), 1)
max_entry = tf.expand_dims(tf.expand_dims(max_entry, 1), 1)
att_grid = att_grid / max_entry
att_grid = 1 - att_grid
# normalize
norms = tf.reduce_sum(tf.reduce_sum(att_grid, 1), 1)
norms = tf.expand_dims(tf.expand_dims(norms, 1), 1)
# cutoff
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
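# Illustrative sketch (not part of the original op): NumPy version of the
# complement-and-renormalize step above for a single [H, W] attention map.
def _example_normalize_exclude(att):
  att = 1.0 - att / att.max()           # complement the attention
  norm = np.clip(att.sum(), 1e-6, 1e6)  # cutoff, as in the op above
  return att / norm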
#-------------------------------------------------------------------
class AlignTextLoomOp(loom.LoomOp):
"""
Takes in two text attentions and computes the alignment between them
Mapping: text_param x text_param -> scalar
Input:
text_param: [N, D_txt]
text_param: [N, D_txt]
Output:
scalar: [N, 1]
Implementation:
Parameters typically contain:
map_dim = 1024
module_scope = alignTextOp
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'alignTextOp')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(AlignTextLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
"""
text_att1, text_att2, round_id1, round_id2 = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att1.shape.as_list()[-1]
map_dim = self._params['map_dim']
embed_dim = self._params['text_embed_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# concat both text attentions, along with round diff (if need be)
concat_list = [text_att1, text_att2]
# additional weight for the distance to the past
if self._params['amalgam_text_feats']:
round_diff = tf.cast(round_id1 - round_id2, tf.float32)
concat_list.append(round_diff)
concat = tf.concat(concat_list, axis=-1)
# deeper 2 layer align network
weights = tf.contrib.layers.fully_connected(concat, embed_dim)
weights = tf.contrib.layers.fully_connected(weights, 1,
activation_fn=None)
return [weights]
#--------------------------------------------------------------------
# Modules as Loom Ops
class FindLoomOp(loom.LoomOp):
"""
Mapping: image_feat_grid x text_param -> att_grid
Input:
image_feat_grid: [N, H, W, D_im]
text_param: [N, D_txt]
Output:
att_grid: [N, H, W, 1]
Implementation:
1. Elementwise multiplication between image_feat_grid and text_param
2. L2-normalization
3. Linear classification
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'find_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(FindLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
"""
img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# image_feat_mapped has shape [N, H, W, map_dim]
img_map = _1x1_conv('conv_image', img_feat, output_dim=map_dim)
# nonlinearity
img_map = tf.nn.relu(img_map)
text_map = fc('fc_text', text_att, output_dim=map_dim)
# nonlinearity
text_map = tf.nn.relu(text_map)
text_map = tf.reshape(text_map, [-1, 1, 1, map_dim])
# interact via element wise map
eltwise_mult = tf.nn.l2_normalize(img_map * text_map, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
# softmax
att_grid_soft = tf.nn.softmax(tf.reshape(att_grid, [-1, H*W]))
att_grid = tf.reshape(att_grid_soft, [-1, H, W, 1])
return [att_grid]
#------------------------------------------------------------------------------
class AndLoomOp(loom.LoomOp):
"""
Mapping: att_grid x att_grid -> att_grid
Input:
input_0: [N, H, W, 1]
input_1: [N, H, W, 1]
Output:
att_grid: [N, H, W, 1]
Implementation:
Take the elementwise-min
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'and_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(AndLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
visual attention outputs
time id for current module
"""
input1, input2 = inputs
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
att_grid = tf.minimum(input1, input2)
# now L1 normalize
norms = tf.einsum('ijkl->i', att_grid)
norms = tf.reshape(norms, [-1, 1, 1, 1])
#norms = tf.tile(tf.reshape(norms, [-1, 1, 1, 1]), [1, H, W, 1])
# NOTE: if norm is too low, then clip it
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
#------------------------------------------------------------------------------
class CountLoomOp(loom.LoomOp):
"""
Mapping: att_grid -> answer probs
Input:
input_0: [N, H, W, 1]
Output:
answer_scores: [N, self.num_choices]
Implementation:
1. linear transform of the attention map (also including max and min)
Parameters typically contain:
map_dim = 1024
module_scope = count_module
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'count_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(CountLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
"""
vis_att, img_feat, _ = inputs
encode_size = self._params['encode_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
H, W = img_feat.shape.as_list()[1:3]
att_all = tf.reshape(vis_att, to_T([-1, H * W]))
att_min = tf.reduce_min(vis_att, axis=[1, 2])
att_max = tf.reduce_max(vis_att, axis=[1, 2])
# att_reduced has shape [N, 3]
att_concat = tf.concat([att_all, att_min, att_max], axis=1)
context = fc('fc_scores', att_concat, output_dim=encode_size)
return [context]
#------------------------------------------------------------------------------
class ExistLoomOp(loom.LoomOp):
'''
Mapping: att_grid -> answer probs
Input:
att_grid: [N, H, W, 1]
Output:
answer_scores: [N, self.num_choices]
Implementation:
1. Max-pool over att_grid
2. a linear mapping layer (without ReLU)
Parameters typically contain:
map_dim = 1024
module_scope = find_module
reuse = True
scope
'''
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'exist_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(ExistLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
'''
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
'''
vis_att, _, _ = inputs
encode_size = self._params['encode_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
att_min = tf.reduce_min(vis_att, axis=[1, 2])
att_avg = tf.reduce_mean(vis_att, axis=[1, 2])
att_max = tf.reduce_max(vis_att, axis=[1, 2])
# att_reduced has shape [N, 3]
att_reduced = tf.concat([att_min, att_avg, att_max], axis=1)
context = fc('fc_scores', att_reduced, output_dim=encode_size)
return [context]
#------------------------------------------------------------------------------
class DiffLoomOp(loom.LoomOp):
'''
Mapping: att_grid x att_grid -> att_grid
Input:
input_0: [N, H, W, 1]
input_1: [N, H, W, 1]
Output:
att_grid: [N, H, W, 1]
Implementation:
Take the elementwise diff and lower caps it to zero
Parameters typically contain:
map_dim = 1024
module_scope = find_module
reuse = True
scope
'''
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'diff_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(DiffLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
'''
Inputs:
visual attention outputs
time id for current module
'''
input1, input2 = inputs
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
att_grid = tf.maximum(input1 - input2, 0.)
# now L1 normalize
norms = tf.einsum('ijkl->i', att_grid)
norms = tf.reshape(norms, [-1, 1, 1, 1])
#norms = tf.tile(tf.reshape(norms, [-1, 1, 1, 1]), [1, H, W, 1])
# NOTE: if norm is too low, then clip it
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
#------------------------------------------------------------------------------
class InvalidLoomOp(loom.LoomOp):
"""
Mapping: returns a context of zeros
Output:
context: [N, encode_size] of zeros
Implementation:
Return an all-zero context vector (used for invalid programs)
Parameters typically contain:
map_dim = 1024
module_scope = find_module
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'invalid_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(InvalidLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
visual attention outputs
time id for current module
"""
img_feat = inputs
encode_size = self._params['encode_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
N = tf.shape(img_feat)[0]
context = tf.zeros([N, encode_size], tf.float32)
return [context]
#------------------------------------------------------------------------------
class DescribeLoomOp(loom.LoomOp):
"""
Mapping: att_grid -> context vector
Input:
input_0: [N, H, W, 1]
Output:
answer_scores: [N, outputSize]
Implementation:
1. Extract visual features using the input attention map, and
linear transform to map_dim
2. linear transform language features to map_dim
3. Element-wise multiplication of the two, l2_normalize, linear transform.
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'describe_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(DescribeLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
output from the previous modules
image feature for the example
text attention for all modules for the example
time id for current module
"""
vis_att, img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
encode_size = self._params['encode_size']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
text_map = fc('fc_text', text_att, output_dim=map_dim)
# nonlinearity
text_map = tf.nn.relu(text_map)
# att_feat, att_feat_1 has shape [N, D_vis]
att_feats = tf.reduce_sum(img_feat * vis_att, axis=[1, 2])
img_map = tf.reshape(fc('fc_att', att_feats, output_dim=map_dim),
[N, map_dim])
# nonlinearity
img_map = tf.nn.relu(img_map)
eltwise_mult = tf.nn.l2_normalize(img_map * text_map, 1)
context = fc('fc_eltwise', eltwise_mult, output_dim=encode_size)
return [context]
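# Shape sketch for the attention pooling above (sizes are generic, not from a
# specific run): img_feat is [N, H, W, D] and vis_att is [N, H, W, 1]; their
# broadcast product summed over H and W gives att_feats of shape [N, D], which
# is mapped to [N, map_dim], fused element-wise with the text mapping, and
# finally projected to [N, encode_size].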
#------------------------------------------------------------------------------
class TransformLoomOp(loom.LoomOp):
"""
Mapping: att_grid x text_param -> att_grid
Input:
input_0: [N, H, W, 1]
text_param: [N, D_txt]
Output:
att_grid: [N, H, W, 1]
Implementation:
1. Extract visual features using the input attention map, and
linear transform to map_dim
2. linear transform language features to map_dim
3. Convolve image features to map_dim
4. Element-wise multiplication of the three, l2_normalize, linear transform.
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'transform_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(TransformLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
output from the previous modules
image feature for the example
text attention for all modules for the example
time id for current module
"""
vis_att, img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
encode_size = self._params['encode_size']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# image_feat_mapped has shape [N, H, W, map_dim]
img_map = _1x1_conv('conv_image', img_feat, output_dim=map_dim)
# nonlinearity
img_map = tf.nn.relu(img_map)
text_map = fc('fc_text', text_att, output_dim=map_dim)
text_map = tf.reshape(text_map, [-1, 1, 1, map_dim])
# nonlinearity
text_map = tf.nn.relu(text_map)
att_feats = tf.reduce_sum(img_feat * vis_att, axis=[1, 2])
att_map = tf.reshape(fc('fc_att', att_feats, output_dim=map_dim),
[N, 1, 1, map_dim])
# interact via element wise map
eltwise_mult = tf.nn.l2_normalize(img_map * text_map * att_map, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
# softmax
att_grid_soft = tf.nn.softmax(tf.reshape(att_grid, [-1, H*W]))
att_grid = tf.reshape(att_grid_soft, [-1, H, W, 1])
return [att_grid]
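# Note on the softmax above (derivable from the code): the raw [N, H, W, 1]
# scores are flattened to [N, H*W], softmax-normalized so that each example's
# attention sums to 1 over all spatial locations, and reshaped back to
# [N, H, W, 1].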
#------------------------------------------------------------------------------
|
corefnmn-main
|
models_mnist/modules.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, whose notice is
below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to execute programs using tensorflow fold loom API.
Program execution for explicit visual coreference resolution model in visual
dialog using neural module networks. Uses low-level loom API in tensorflow
fold:
https://github.com/tensorflow/fold/blob/master/tensorflow_fold/g3doc/py/loom.md
for dynamic creation and execution of computation graphs.
Author: Satwik Kottur
"""
import math
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow_fold.public import loom
import models_mnist.modules as lm
from models_mnist.assembler import INVALID_EXPR, _module_output_type
class ProgramExecutor:
def __init__(self, inputs, output_pool, assembler, params):
"""Initialize program execution subcomponent.
Args:
inputs: dictionary of input placeholders/tensors to the model
output_pool: outputs from other components (e.g. the program generator)
assembler: assembler that dynamically builds the module graph at run time
params: dictionary of model options and hyperparameters
"""
self.params = params
# assembler dynamically assembles the graph at run time
self._assembler = assembler
#--------------------------------------------------------------------------
# A. Create loom data inputs
loom_inputs, used_inputs = self._build_loom_inputs(inputs, output_pool)
# B. Create loom data types
types = self._build_loom_types()
self._loom_types = types
# C. Create loom operations
loom_ops_dict = self._build_loom_ops()
self._loom_ops = loom_ops_dict
# create a loom object
keys = ['text', 'image', 'fact', 'time', 'round', 'text_feat']
batch_ins = {types[k]: loom_inputs[k] for k in keys if k in loom_inputs}
self._loom = loom.Loom(batch_inputs=batch_ins, named_ops=loom_ops_dict)
# setup the inputs and outputs
self.outputs = {'context': self.get_loom_output(),
'att': self.get_loom_output(types['attention']),
'logits': self.get_loom_output(types['float'])}
# add invalid prog to used inputs
used_inputs.extend(['prog_validity'])
self.inputs = {ii: inputs[ii] for ii in used_inputs}
# time/round place holder
self.inputs['time'] = loom_inputs['time']
self.inputs['round'] = loom_inputs['round']
def create_weaver(self):
"""Creates a weaver object within the current loom object.
"""
self._weaver = self._loom.make_weaver()
return self._weaver
def get_loom_output(self, type_shape=None):
"""Return the loom output given the type and shape.
"""
# default output is the context vector
if type_shape is None:
type_shape = self._loom_types['context']
return self._loom.output_tensor(type_shape)
#---------------------------------------------------------
def _adjust_text(self, text):
"""
takes text attention output from generator
modifies it to have certain dimensions
"""
params = self.params
# transpose text to have batch first dimension
text_mod = tf.transpose(text, [1, 0, 2])
# split across rounds
shape = text_mod.shape.as_list()
new_size = [-1, params['num_rounds'], shape[1], shape[2]]
return tf.reshape(text_mod, new_size)
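# Illustrative shape walk-through (example sizes only): with T=14 decoding
# steps, D=300 text features, num_rounds=10 and 2 dialogs in the batch, text
# arrives as [14, 20, 300]; the transpose gives [20, 14, 300] and the reshape
# splits out rounds to [2, 10, 14, 300].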
def _build_image_feature_network(self, image):
"""
Takes in images and build features for the program
"""
output = image
# local aliases
BN = tf.contrib.layers.batch_norm
max_pool = tf.layers.max_pooling2d
# Four convolutional blocks, each followed by batch norm, ReLU and 2x2 max pooling
for ii in range(2):
# Convolutional Layer
output = tf.layers.conv2d(inputs=output, filters=32,
kernel_size=[3, 3], padding="same",
activation=None)
# batch normalization
output = BN(output, center=True, scale=True,
is_training=self.params['train_mode'])
# ReLU
output = tf.nn.relu(output, 'relu')
# Pooling Layer
output = max_pool(output, pool_size=[2, 2], strides=2)
for ii in range(2):
# Convolutional Layer
output = tf.layers.conv2d(inputs=output, filters=64,
kernel_size=[3, 3], padding="same",
activation=None)
# batch normalization
output = BN(output, center=True, scale=True,
is_training=self.params['train_mode'])
# ReLU
output = tf.nn.relu(output, 'relu')
# Pooling Layer
output = max_pool(output, pool_size=[2, 2], strides=2)
return output
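# Shape sketch (the input resolution is an assumption; it is decided by the
# data pipeline, not here): each of the four conv blocks above halves H and W
# through its 2x2, stride-2 max pool, so e.g. a 112x112 input would come out
# as a 7x7 feature map with 64 channels.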
def _build_fact_encoder(self, inputs):
"""
"""
# local alias
params = self.params
with tf.variable_scope(self.params['embed_scope'], reuse=True):
embed_mat = tf.get_variable('embed_mat')
# embed the words
output = tf.nn.embedding_lookup(embed_mat, inputs['fact'])
# pass through encoder
cell = tf.contrib.rnn.BasicLSTMCell(params['text_embed_size'])
# run the LSTM encoder layers
for ii in range(0, params['num_layers']):
# dynamic rnn
output, states = tf.nn.dynamic_rnn(cell, output,
sequence_length=inputs['fact_len'],
dtype=tf.float32,
scope='fact_layer_%d' % ii)
# split roundwise
fact_embed = states[1]
text_dim = fact_embed.shape.as_list()[-1]
fact_embed = tf.reshape(fact_embed, [-1, params['num_rounds'], text_dim])
return fact_embed
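# Shape sketch for the fact encoder (derived from the code above): states is
# the final (cell, hidden) LSTM state pair, so states[1] is the last hidden
# state of shape [N * num_rounds, text_embed_size]; the reshape regroups it
# round-wise into [N, num_rounds, text_embed_size].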
def _build_loom_inputs(self, inputs, output_pool):
'''
Subroutine to build the inputs to loom.
'''
# --------- grab required inputs -------------
loom_inputs = {}
params = self.params
# A. image
# build image feature network
image_feat = self._build_image_feature_network(inputs['image'])
loom_inputs['image'], _ = lm.add_spatial_coord_map(image_feat)
used_inputs = ['image']
# B. text -- both question and caption
key = 'ques_attended'
if params['train_mode']:
text = output_pool[key]
else:
text = inputs[key]
used_inputs.append(key)
adjusted_text = self._adjust_text(text)
loom_inputs['text'] = adjusted_text
batch_size = tf.shape(adjusted_text)[0]
# C. Facts
if params['use_fact']:
loom_inputs['fact'] = self._build_fact_encoder(inputs)
used_inputs.extend(['fact', 'fact_len'])
concat_list = [adjusted_text]
loom_inputs['text_feat'] = tf.concat(concat_list, -1)
# D. time steps (internal placeholder)
loom_inputs['time'] = tf.placeholder(tf.int32, (None, 1), 'time')
loom_inputs['round'] = tf.placeholder(tf.int32, (None, 1), 'round')
return loom_inputs, used_inputs
def _build_loom_types(self):
"""Method to build loom types for given setting.
"""
params = self.params
encode_size = params['lstm_size']
# create and save loom types
types = {}
types['time'] = loom.TypeShape('int32', (1,), 'time')
types['round'] = loom.TypeShape('int32', (1,), 'round')
types['float'] = loom.TypeShape('float32', (1,))
types['context'] = loom.TypeShape('float32', (encode_size,), 'context')
types['align'] = loom.TypeShape('float32', (encode_size,), 'align')
size = (params['num_rounds'], params['text_embed_size'])
types['fact'] = loom.TypeShape('float32', size, 'fact')
size = (params['num_rounds'], params['max_dec_len'],
params['text_embed_size'])
types['text'] = loom.TypeShape('float32', size, 'text')
size = (params['text_embed_size'],)
types['text_slice'] = loom.TypeShape('float32', size, 'text_slice')
# this depends on whether we want all features
concat_dim = params['text_embed_size']
size = (params['num_rounds'], params['max_dec_len'], concat_dim)
types['text_feat'] = loom.TypeShape('float32', size, 'text_feat')
size = (concat_dim,)
types['text_feat_slice'] = loom.TypeShape('float32', size, 'text_feat_slice')
# TODO: cleaner way to include spatial dimensions for img_feat
size = (params['h_feat'], params['w_feat'], params['d_feat'] + 2)
types['image'] = loom.TypeShape('float32', size, 'image')
size = (params['h_feat'], params['w_feat'], 1)
types['attention'] = loom.TypeShape('float32', size, 'att')
return types
def _build_loom_ops(self):
"""TODO(satwik): Some helper text here
"""
params = self.params
types = self._loom_types
# create all modules under the same scope
op_params = {'map_dim': params['map_size']}
with tf.variable_scope('loom_modules') as module_scope:
op_params['module_scope'] = module_scope
# creating ops
loom_ops_dict = {}
in_types = [types['float'], types['float']]
out_types = [types['float']]
loom_ops_dict['add'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
loom_ops_dict['divide'] = lm.BinaryLoomOp(in_types, out_types, tf.divide)
in_types = [types['float']]
loom_ops_dict['exp'] = lm.UnaryLoomOp(in_types, out_types, tf.exp)
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['add_attention'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['max_attention'] = lm.BinaryLoomOp(in_types, out_types,
tf.maximum)
# basic attention manipulation ops
in_types = [types['attention'], types['float']]
out_types = [types['attention']]
loom_ops_dict['weight_attention'] = lm.AttentionWeightLoomOp(in_types,
out_types)
in_types = [types['text_feat_slice'], types['text_feat_slice'],
types['round'], types['round']]
out_types = [types['float']]
op_params['amalgam_text_feats'] = params['amalgam_text_feats']
op_params['text_embed_size'] = params['text_embed_size']
loom_ops_dict['align_text'] = lm.AlignTextLoomOp(in_types, out_types, op_params)
# slicing ops
in_types = [types['text'], types['round'], types['time']]
out_types = [types['text_slice']]
loom_ops_dict['slice_text'] = lm.SliceTextLoomOp(in_types, out_types)
in_types = [types['text_feat'], types['round'], types['time']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_text_feat'] = lm.SliceTextLoomOp(in_types, out_types)
# slice_answer_embedding
in_types = [types['fact'], types['round']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_fact'] = lm.SliceAnswerLoomOp(in_types, out_types)
# normalize and complement
in_types = [types['attention']]
out_types = [types['attention']]
loom_ops_dict['normalize_exclude']= lm.NormalizeExcludeLoomOp(in_types,
out_types)
#------------------------------------------------------------------
# find module
in_types = [types['image'], types['text_slice']]
out_types = [types['attention']]
loom_ops_dict['find'] = lm.FindLoomOp(in_types, out_types, op_params)
# and module
in_types = [types['attention'], types['attention']]
loom_ops_dict['and_op'] = lm.AndLoomOp(in_types, out_types, op_params)
# diff module
loom_ops_dict['diff_op'] = lm.DiffLoomOp(in_types, out_types, op_params)
# transform module
in_types = [types['attention'], types['image'], types['text_slice']]
loom_ops_dict['transform'] = lm.TransformLoomOp(in_types, out_types, op_params)
# describe module
out_types = [types['context']]
op_params['encode_size'] = params['lstm_size']
loom_ops_dict['describe'] = lm.DescribeLoomOp(in_types, out_types, op_params)
# exist module
loom_ops_dict['exist'] = lm.ExistLoomOp(in_types, out_types, op_params)
# count module
loom_ops_dict['count'] = lm.CountLoomOp(in_types, out_types, op_params)
# invalid Module
in_types = [types['image']]
loom_ops_dict['invalid'] = lm.InvalidLoomOp(in_types, out_types, op_params)
return loom_ops_dict
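# Illustrative composition (a sketch of how the assembler typically weaves
# these ops together, not code that runs here):
#   att = find(image, slice_text(text, round, time))
#   att = transform(att, image, slice_text(text, round, time))
#   context = describe(att, image, slice_text(text, round, time))
# where each call is a weaver op invocation and the final context is read
# back through the loom output of type 'context'.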
#---------------------------------------------------------
# setters and getters
def get_outputs(self): return self.outputs
def get_inputs(self): return self.inputs
#------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, output_pool=None, visualize=False):
if 'prog' not in self.params['model']: return
# dynamically assemble the graph, based on predicted tokens
if self.params['train_mode']:
ques_programs = batch['gt_layout']
else:
ques_programs = output_pool['pred_tokens']
tokens = {'ques': ques_programs}
weaver, loom_outputs, invalid_prog \
= self._assembler.assemble(tokens, self, visualize)
# build feed dict from loom
feed_dict = weaver.build_feed_dict(loom_outputs)
# feed invalid Prog
feed_dict[self.inputs['prog_validity']] = np.array(invalid_prog['ques'])
# additional feeds
feed_dict[self.inputs['image']] = batch['imgs']
max_time = self.params['max_dec_len']
feed_dict[self.inputs['time']] = np.arange(max_time).reshape([-1, 1])
round_ranges = np.arange(self.params['num_rounds']).reshape([-1, 1])
feed_dict[self.inputs['round']] = round_ranges
# fact is needed
if self.params['use_fact']:
feed_dict[self.inputs['fact']] = batch['fact']
feed_dict[self.inputs['fact_len']] = batch['fact_len']
if not self.params['train_mode']:
# list of labels to read from output pool conditionally
labels = ['ques_attended', 'ques_enc']
for label in labels:
if label in self.inputs:
feed_dict[self.inputs[label]] = output_pool[label]
return feed_dict
#------------------------------------------------------------
# segregating the outputs
def segregrate_outputs(self, output):
'''
Go over the outputs and separate the per-module attention maps and the
reference weights for each question program.
'''
ques_tokens = output['pred_tokens']
mod_out_type = _module_output_type
mod_dict = self._assembler.module_names
att = output['att']
weights = output['weight']
# segregated outputs
sep_att = []
sep_wts = []
wt_labels = []
num_reuse = 0
att_ind = 0
weight_ind = 0
# assume a batch size of 1
for r_id in range(self.params['num_rounds']):
#refer_seen = False
for t_id in range(self.params['max_dec_len']):
cur_module = mod_dict[ques_tokens[t_id, r_id]]
if cur_module == '<eos>':
# even answer has a weight now
if self.params['use_answer'] or self.params['use_fact']:
wt_labels.append('A%d' % r_id)
num_reuse += 1
break
if mod_out_type[cur_module] == 'att':
sep_att.append(('ques', t_id, r_id, att[att_ind]))
att_ind += 1
if cur_module == '_Refer':
refer_seen = True
st = weight_ind
end = weight_ind + num_reuse
sep_wts.append((r_id, weights[st:end], wt_labels))
weight_ind += num_reuse
'''
if self.params['reuse_refer'] and cur_module == '_Refer':
wt_labels.append('Q%d_%d' % (r_id, t_id))
num_reuse += 1
if cur_module == '_Find':
if refer_seen and self.params['remove_aux_find']: continue
wt_labels.append('Q%d_%d' % (r_id, t_id))
num_reuse += 1
'''
for arg in sep_wts: assert(abs(np.sum(arg[1]) - 1.0) < 1e-5)
assert(weight_ind == weights.shape[0])
#assert(att_ind == att.shape[0])
return sep_att, sep_wts
|
corefnmn-main
|
models_mnist/executor.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, whose notice is
below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Dataloader file for Visual Dialog experiments.
Explicit visual coreference resolution in visual dialog using neural module
networks.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import h5py
import json
import os
import threading
import queue
import numpy as np
from tqdm import tqdm as progressbar
from util import text_processing, support
class BatchLoaderVD:
"""Subclass to DataReader that serves batches during training.
"""
# adjust for current directory
def _adjust_image_dir(self, path):
# split before data, and append with pwd
return os.path.join(os.getcwd(), 'data', path.split('data/')[-1])
def __init__(self, imdb, params):
"""Initialize by reading the data and pre-processing it.
"""
self.imdb = imdb
self.params = params
self.fetch_options = self.params.get('fetch_options', False)
self.preload_features = params['preload_features']
self.num_inst = len(self.imdb['data'])
self.num_rounds = len(self.imdb['data'][0]['question_ind'])
# check if vgg features are to be used
self.use_vgg = 'vgg' in self.params['feature_path']
# load vocabulary
vocab_path = params['text_vocab_path']
self.vocab_dict = text_processing.VocabDict(vocab_path)
self.T_encoder = params['max_enc_len']
# record special token ids
self.start_token_id = self.vocab_dict.word2idx('<start>')
self.end_token_id = self.vocab_dict.word2idx('<end>')
self.pad_token_id = self.vocab_dict.word2idx('<pad>')
# peek one example to see whether answer and gt_layout are in the data
test_data = self.imdb['data'][0]
self.load_gt_layout = test_data.get('gt_layout_tokens', False)
if 'load_gt_layout' in params:
self.load_gt_layout = params['load_gt_layout']
# decide whether or not to load gt textatt
self.supervise_attention = params['supervise_attention']
self.T_decoder = params['max_dec_len']
self.assembler = params['assembler']
# load one feature map to peek its size
feats = np.load(self._adjust_image_dir(test_data['feature_path']))
self.feat_H, self.feat_W, self.feat_D = feats.shape[1:]
# convert to tokens
self.digitizer = lambda x: [self.vocab_dict.word2idx(w) for w in x]
if 'prog' in self.params['model']:
# preload features
if self.preload_features:
img_paths = set([ii['feature_path'] for ii in self.imdb['data']])
self.img_feats = {ii:np.load(ii) for ii in progressbar(img_paths)}
# if VGG is to be used
if self.use_vgg:
# inform the dataloader to use self.img_feats
self.preload_features = True
img_paths = set([ii['feature_path'] for ii in self.imdb['data']])
# first read the index file
index_file = os.path.join(self.params['input_img'], 'img_id.json')
with open(index_file, 'r') as file_id:
index_data = json.load(file_id)
# get the split -- either train / val
ii = next(iter(img_paths))  # peek any one feature path
split = ii.split('/')[-2][:-4]
# read the features for that particular split
self.img_index = {img_id: index for index, img_id
in enumerate(index_data[split])}
feature_file = os.path.join(self.params['input_img'],
'data_img_%s.h5' % split)
key = 'images_test' if split == 'val' else 'images_train'
self.img_feats = h5py.File(feature_file)[key]
# check if all the images in img_paths are in img_index
count = 0
for ii in img_paths:
img_id = '/'.join(ii.split('/')[-2:])
if img_id.replace('npy', 'jpg') not in self.img_index:
count += 1
print('Missing: %d image features' % count)
# adjust the feature sizes
self.feat_H, self.feat_W, self.feat_D = self.img_feats.shape[1:]
self.zero_feature = np.zeros((1,) + self.img_feats.shape[1:])
# use history if needed by the program generator
self.use_history = self.params['generator'] == 'mem'
if self.use_history:
self._construct_history()
# if fact is to be used
if self.params['use_fact']:
self._construct_fact()
#--------------------------------------------------------------------------
def _construct_fact(self):
"""Method to construct facts.
Facts are previous question and answers strings concatenated as one. These
serve as memory units that the model can refer back to.
For example, 'Q: What is the man wearing? A: Sweater.' will have a fact
'What is the man wearing? Sweater.' so that the model can address follow-up
questions like 'What color is it?' by referring to this fact.
"""
print('Constructing facts..')
num_diags = len(self.imdb['data'])
max_len = self.T_encoder # question + answer appended
num_rounds = len(self.imdb['data'][0]['question_ind'])
fact = np.zeros((num_diags, num_rounds, max_len))
fact_len = np.zeros((num_diags, num_rounds))
fact.fill(self.pad_token_id)
for diag_id, datum in enumerate(self.imdb['data']):
for r_id in range(num_rounds - 1):
q_id = datum['question_ind'][r_id]
a_id = datum['answer_ind'][r_id]
ques, q_len = self.imdb['ques'][q_id], self.imdb['ques_len'][q_id]
ans, a_len = self.imdb['ans'][a_id], self.imdb['ans_len'][a_id]
# handle overflow
bound = min(q_len, max_len)
fact[diag_id, r_id, :bound] = ques[:bound]
if bound < max_len:
bound = min(q_len + a_len, max_len)
fact[diag_id, r_id, q_len:bound] = ans[:bound-q_len]
fact_len[diag_id, r_id] = bound
# flatten
self.imdb['fact'] = fact
self.imdb['fact_len'] = fact_len
#--------------------------------------------------------------------------
def _construct_history(self):
"""Method to construct history, which concatenates entire dialogs so far.
"""
print('Constructing history..')
num_diags = len(self.imdb['data'])
max_len = self.T_encoder * 2 # question + answer appended
num_rounds = len(self.imdb['data'][0]['question_ind'])
history = np.zeros((num_diags, num_rounds, max_len))
hist_len = np.zeros((num_diags, num_rounds))
history.fill(self.pad_token_id)
for diag_id, datum in enumerate(self.imdb['data']):
# history for first round is caption
c_id = datum['caption_ind']
cap_len = self.imdb['cap_len'][c_id]
caption = self.imdb['cap'][c_id]
# handle overflow
bound = min(cap_len, max_len)
hist_len[diag_id, 0] = bound
history[diag_id, 0, :bound] = caption[:bound]
for r_id in range(num_rounds - 1):
q_id = datum['question_ind'][r_id]
a_id = datum['answer_ind'][r_id]
ques, q_len = self.imdb['ques'][q_id], self.imdb['ques_len'][q_id]
ans, a_len = self.imdb['ans'][a_id], self.imdb['ans_len'][a_id]
# handle overflow
bound = min(q_len, max_len)
history[diag_id, r_id + 1, :bound] = ques[:bound]
if bound < max_len:
bound = min(q_len + a_len, max_len)
history[diag_id, r_id + 1, q_len:bound] = ans[:bound-q_len]
hist_len[diag_id, r_id + 1] = bound
self.imdb['hist'] = history
self.imdb['hist_len'] = hist_len
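# Worked layout example (illustrative tokens): for a dialog with caption C
# and rounds (Q1, A1), (Q2, A2), the history rows for that dialog are
#   round 0: tokens of C
#   round 1: tokens of Q1 followed by A1
#   round 2: tokens of Q2 followed by A2
# each padded with <pad> up to 2 * max_enc_len, with hist_len recording the
# unpadded lengths.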
#--------------------------------------------------------------------------
def load_one_batch(self, sample_ids):
"""Load data given the sample ids.
"""
actual_batch_size = len(sample_ids)
batch = {}
# replace question _Find with _Refer
find_module_token = self.assembler.name2idx_dict['_Find']
#refer_module_token = self.assembler.name2idx_dict['_Refer']
eos_token = self.assembler.name2idx_dict['<eos>']
# whether to flatten or not
flatten = 'dial' not in self.params['model']
flatten = 'nmn-cap' not in self.params['model']
num_rounds = self.num_rounds
# captions
if flatten:
cap_inds = [self.imdb['data'][ii]['caption_ind'] for ii in sample_ids
for _ in range(num_rounds)]
else:
cap_inds = [self.imdb['data'][ii]['caption_ind'] for ii in sample_ids]
cap_batch = self.imdb['cap'][cap_inds][:, :self.T_encoder]
cap_len = self.imdb['cap_len'][cap_inds]
# get caption programs
cap_prog = None
cap_gt_att = None
if 'nmn-cap' in self.params['model']:
cap_prog = np.zeros((self.T_decoder, len(cap_inds)), np.int32)
cap_prog.fill(eos_token)
for spot, ii in enumerate(cap_inds):
layout = self.imdb['cap_prog'][ii]
cap_prog[:, spot] = \
self.assembler.module_list2tokens(layout, self.T_decoder)
# also get attention for supervision
if self.supervise_attention:
cap_gt_att = np.zeros((self.T_decoder, self.T_encoder, \
actual_batch_size, 1), np.float32)
for spot, ii in enumerate(cap_inds):
for t_id, att in enumerate(self.imdb['cap_prog_att'][ii]):
span = att[1] - att[0]
# NOTE: number of attention timesteps hardwired to be <= 4
if span > 4 or span == 0: continue
cap_gt_att[t_id, att[0]:att[1], spot] = 1/span
# questions
ques_inds = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['question_ind']]
ques_batch = self.imdb['ques'][ques_inds][:, :self.T_encoder].transpose()
ques_len = self.imdb['ques_len'][ques_inds]
ques_ids = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['question_id']]
gt_index = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['gt_ind']]
# answers
ans_inds = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['answer_ind']]
ans_batch_in = self.imdb['ans_in'][ans_inds][:, :self.T_encoder]
ans_batch_out = self.imdb['ans_out'][ans_inds][:, :self.T_encoder]
ans_batch = self.imdb['ans_in'][ans_inds][:, 1:self.T_encoder]
ans_len = self.imdb['ans_len'][ans_inds]
# getting history
if self.use_history:
history = self.imdb['hist'][sample_ids]
hist_len = self.imdb['hist_len'][sample_ids]
else:
history, hist_len = None, None
# image features
if 'prog' in self.params['model']:
# single copy per conversation
image_feats = np.zeros((actual_batch_size, self.feat_H,
self.feat_W, self.feat_D), np.float32)
else:
image_feats = None
image_path = [None] * actual_batch_size
# load fact
if self.params['use_fact']:
fact = self.imdb['fact'][sample_ids]
fact_len = self.imdb['fact_len'][sample_ids]
# flatten
fact = np.reshape(fact, [-1, fact.shape[-1]])
fact_len = np.reshape(fact_len, [-1])
else:
fact, fact_len = None, None
# programs
if self.load_gt_layout:
gt_layout_batch = np.zeros((self.T_decoder,
num_rounds * actual_batch_size), np.int32)
gt_layout_batch.fill(eos_token)
gt_attention = None
if self.supervise_attention:
gt_attention = np.zeros((self.T_decoder, self.T_encoder,
num_rounds * actual_batch_size, 1), np.float32)
# mask for weights, for history attention
weight_mask = []
for n in range(len(sample_ids)):
iminfo = self.imdb['data'][sample_ids[n]]
# image features
if 'prog' in self.params['model']:
# if VGG features are to be used
if self.use_vgg:
img_id = '/'.join(iminfo['feature_path'].split('/')[-2:])
img_id = img_id.replace('npy', 'jpg')
if img_id in self.img_index:
f_ind = self.img_index[img_id]
cur_feat = self.img_feats[f_ind]
else:
cur_feat = self.zero_feature
else:
# use preloaded image features
feat_path = self._adjust_image_dir(iminfo['feature_path'])
if not self.preload_features: cur_feat = np.load(feat_path)
else: cur_feat = self.img_feats[feat_path]
# single copy per conversation
image_feats[n] = cur_feat
image_path[n] = iminfo['image_path']
# programs
if self.load_gt_layout:
# go over all the questions
for r_id, layout in enumerate(iminfo['gt_layout_tokens']):
gt_layout_batch[:, num_rounds * n + r_id] = \
self.assembler.module_list2tokens(layout, self.T_decoder)
if self.supervise_attention:
num_refers = 0
for r_id, att in enumerate(iminfo['gt_layout_att']):
for t_id in range(att.shape[0]):
index = num_rounds * n + r_id
span = att[t_id, 1] - att[t_id, 0]
# NOTE: number of attention timesteps hardwired to be <= 4
if span > 4 or span == 0: continue
gt_attention[t_id, att[t_id,0]:att[t_id,1], index] = 1/span
# if options are not needed, continue
if not self.fetch_options: continue
#------------------------------------------------------------------
# get options
opt_inds = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['option_ind']]
num_options = len(opt_inds[0])
opt_batch_in = [None] * num_options
opt_batch_out = [None] * num_options
opt_len = [None] * num_options
for ii in range(num_options):
cur_inds = [jj[ii] for jj in opt_inds]
opt_batch_in[ii] = self.imdb['ans_in'][cur_inds][:, :self.T_encoder]
opt_batch_out[ii] = self.imdb['ans_out'][cur_inds][:, :self.T_encoder]
opt_len[ii] = self.imdb['ans_len'][cur_inds]
#------------------------------------------------------------------
batch = {'ques': ques_batch, 'ques_len': ques_len,
'ques_id': ques_ids, 'gt_layout': gt_layout_batch,
'gt_att' : gt_attention,
'cap': cap_batch, 'cap_len': cap_len, 'cap_prog': cap_prog,
'cap_att': cap_gt_att,
'hist': history, 'hist_len': hist_len, 'ans_in': ans_batch_in,
'ans_out': ans_batch_out, 'ans_len':ans_len, 'ans': ans_batch,
'fact': fact, 'fact_len': fact_len,
'img_feat': image_feats, 'img_path': image_path}
#------------------------------------------------------------------
# further add options
if self.fetch_options:
options = {'opt_in': opt_batch_in, 'opt_out': opt_batch_out,\
'opt_len': opt_len, 'gt_ind': gt_index}
batch.update(options)
#------------------------------------------------------------------
if 'nmn-cap' not in self.params['model']:
return batch
# getting data for training alignment on caption
if actual_batch_size > 1:
info = [batch['cap'], batch['cap_len'],
batch['cap_prog'].transpose()]
if batch['cap_att'] is not None:
info.append(batch['cap_att'].transpose((2, 0, 1, 3)))
shuffled = support.shuffle(info, actual_batch_size)
batch['sh_cap'], batch['sh_cap_len'] = shuffled[:2]
batch['sh_cap_prog'] = shuffled[2].transpose()
batch['align_gt'] = np.ones(num_rounds*actual_batch_size).astype('int32')
if batch['cap_att'] is not None:
batch['sh_cap_att'] = shuffled[3].transpose((1, 2, 0, 3))
for ii in range(actual_batch_size):
start = num_rounds * ii + num_rounds // 2
end = num_rounds * (ii+1)
batch['align_gt'][start:end] = 0
else:
batch['sh_cap'] = np.tile(batch['cap'], [num_rounds, 1])
batch['sh_cap_len'] = np.tile(batch['cap_len'], [num_rounds])
batch['sh_cap_prog'] = np.tile(batch['cap_prog'], [1, num_rounds])
batch['sh_cap_att'] = np.tile(batch['cap_att'], [1, 1, num_rounds, 1])
batch['align_gt'] = np.ones(num_rounds*actual_batch_size).astype('int32')
return batch
class DataReader:
"""Main dataloader class for experiments on Visual Dialog.
"""
def __init__(self, params):
imdb_path = params['path']
print('Loading imdb from: %s' % imdb_path)
if imdb_path.endswith('.npy'): imdb = np.load(imdb_path)
else: raise TypeError('unknown imdb format.')
self.imdb = imdb[()]
self.shuffle = params.get('shuffle', True)
self.one_pass = params.get('one_pass', False)
self.prefetch_num = params.get('num_prefetch', 8)
self.params = params
copy_args = {'max_enc_len', 'max_dec_len', 'text_vocab_path', 'model',
'batch_size', 'use_fact', 'preload_features',
'supervise_attention','generator', 'feature_path'}
self.params.update({ii: params['args'][ii] for ii in copy_args
if ii in params['args'] and
params['args'][ii] is not None})
# VD data loader
self.batch_loader = BatchLoaderVD(self.imdb, self.params)
# Start prefetching thread
self.prefetch_queue = queue.Queue(maxsize=self.prefetch_num)
self.prefetch_thread = threading.Thread(target=_run_prefetch,
args=(self.prefetch_queue, self.batch_loader, self.imdb,
self.shuffle, self.one_pass, self.params))
self.prefetch_thread.daemon = True
self.prefetch_thread.start()
def batches(self):
while True:
# Get a batch from the prefetching queue
if self.prefetch_queue.empty(): pass
#print('data reader: waiting for data loading (IO is slow)...')
batch = self.prefetch_queue.get(block=True)
if batch is None:
assert(self.one_pass)
print('data reader: one pass finished')
return  # PEP 479: end the generator with return instead of StopIteration
yield batch
def _run_prefetch(prefetch_queue, batch_loader, imdb, shuffle,
one_pass, params):
num_samples = len(imdb['data'])
batch_size = params['batch_size']
n_sample = 0
fetch_order = np.arange(num_samples)
while True:
# Shuffle the sample order for every epoch
if n_sample == 0 and shuffle:
fetch_order = np.random.permutation(num_samples)
# Load batch from file
# note that len(sample_ids) <= batch_size, not necessarily equal
sample_ids = fetch_order[n_sample:n_sample+batch_size]
batch = batch_loader.load_one_batch(sample_ids)
prefetch_queue.put(batch, block=True)
n_sample += len(sample_ids)
if n_sample >= num_samples:
# Put in a None batch to indicate a whole pass is over
if one_pass:
prefetch_queue.put(None, block=True)
n_sample = 0
|
corefnmn-main
|
loader_vd/data_reader.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, whose notice is
below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to read command line flags using the argparse library.
Author: Satwik Kottur
"""
import argparse
import os
import pdb
from util import support
# read command line arguments
def read_command_line():
title = 'Train explicit coreference resolution visual dialog model'
parser = argparse.ArgumentParser(description=title)
#-------------------------------------------------------------------------
# data input settings
parser.add_argument('--dataset', default='visdial_v0.9_tiny',
help='Visdial dataset type')
parser.add_argument('--data_root', default='data/',
help='Root to the data')
parser.add_argument('--feature_path', default='data/resnet_res5c/',
help='Path to the image features')
parser.add_argument('--text_vocab_path', default='',
help='Path to the vocabulary for text')
parser.add_argument('--prog_vocab_path', default='',
help='Path to the vocabulary for programs')
parser.add_argument('--snapshot_path', default='checkpoints/',
help='Path to save checkpoints')
#--------------------------------------------------------------------------
# specify encoder/decoder
parser.add_argument('--model', default='nmn-cap-prog-only',
help='Name of the model, will be changed later')
parser.add_argument('--generator', default='ques',
help='Name of the generator to use (ques | mem)')
parser.add_argument('--decoder', default='gen',
help='Name of the decoder to use (gen | disc)')
parser.add_argument('--preload_features', default=False, type=bool,
help='Preload visual features on RAM')
#-------------------------------------------------------------------------
# model hyperparameters
parser.add_argument('--h_feat', default=14, type=int,
help='Height of visual conv feature')
parser.add_argument('--w_feat', default=14, type=int,
help='Width of visual conv feature')
parser.add_argument('--d_feat', default=2048, type=int,
help='Size of visual conv feature')
parser.add_argument('--text_embed_size', default=300, type=int,
help='Size of embedding for text')
parser.add_argument('--map_size', default=1024, type=int,
help='Size of the final mapping')
parser.add_argument('--prog_embed_size', default=300, type=int,
help='Size of embedding for program tokens')
parser.add_argument('--lstm_size', default=1000, type=int,
help='Size of hidden state in LSTM')
parser.add_argument('--enc_dropout', default=True, type=bool,
help='Dropout in encoder')
parser.add_argument('--dec_dropout', default=True, type=bool,
help='Dropout in decoder')
parser.add_argument('--num_layers', default=2, type=int,
help='Number of layers in LSTM')
parser.add_argument('--max_enc_len', default=24, type=int,
help='Maximum encoding length for sentences (ques|cap)')
parser.add_argument('--max_dec_len', default=14, type=int,
help='Maximum decoding length for programs (ques|cap)')
parser.add_argument('--dec_sampling', default=False, type=bool,
help='Sample while decoding programs vs argmax')
#---------------------------------------------------------------------------
parser.add_argument('--use_refer', dest='use_refer', action='store_true',
help='Flag to use Refer for coreference resolution')
parser.set_defaults(use_refer=False)
parser.add_argument('--use_fact', dest='use_fact', action='store_true',
help='Flag to use the fact in coreference pool')
parser.set_defaults(use_fact=False)
parser.add_argument('--supervise_attention', dest='supervise_attention',
action='store_true',
help='Flag to supervise attention for the modules')
parser.set_defaults(supervise_attention=False)
parser.add_argument('--amalgam_text_feats', dest='amalgam_text_feats',
action='store_true',
help='Flag to amalgamate text features')
parser.set_defaults(amalgam_text_feats=False)
parser.add_argument('--no_cap_alignment', dest='cap_alignment',
action='store_false',
help='Use the auxiliary caption alignment loss')
parser.set_defaults(cap_alignment=True)
#-------------------------------------------------------------------------
# optimization params
parser.add_argument('--batch_size', default=20, type=int,
help='Training batch size (adjust based on GPU memory)')
parser.add_argument('--learning_rate', default=1e-3, type=float,
help='Learning rate for training')
parser.add_argument('--dropout', default=0.5, type=float, help='Dropout')
parser.add_argument('--num_epochs', default=20, type=int,
help='Maximum number of epochs to run training')
parser.add_argument('--gpu_id', type=int, default=0,
help='GPU id to use for training, -1 for CPU')
#-------------------------------------------------------------------------
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# set the cuda environment variable for the gpu to use
gpu_id = '' if parsed_args['gpu_id'] < 0 else str(parsed_args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# pretty print arguments and return
support.pretty_print_dict(parsed_args)
return parsed_args
|
corefnmn-main
|
exp_vd/options.py
|
r"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, whose notice is
below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to visualize trained Visual Dialog model using supervised learning.
Visualizes visual dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
Usage:
python -u exp_vd/visualize_sl.py --gpu_id=0 --test_split='val' \
--checkpoint='checkpoints/model_epoch_005.tmodel' --batch_size 1
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_vd import options
# Read command line options.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', required=True, help="Checkpoint to load")
parser.add_argument('--batch_size', type=int, default=10,
help='Batch size for visualization')
parser.add_argument('--test_split', default='val',
help='Split to run visualization')
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--num_instances', type=int, default=50)
try:
args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# Set the cuda environment variable for the gpu to use.
gpu_id = '' if args['gpu_id'] < 0 else str(args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_vd.assembler import Assembler
from models_vd.model import CorefNMN
from loader_vd.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# read the train args from checkpoint
param_path = args['checkpoint'].replace('.tmodel', '_params.json')
with open(param_path, 'r') as file_id:
saved_args = json.load(file_id)
saved_args.update(args)
args = saved_args
args['preload_features'] = False
args['supervise_attention'] = False
print('Current model: ' + args['model'])
support.pretty_print_dict(args)
# Data files
root = args['data_root']
imdb_path_val = os.path.join(root, 'imdb_%s.npy' % args['test_split'])
# assemblers for question and caption programs
question_assembler = Assembler(args['prog_vocab_path'])
caption_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'cap': caption_assembler}
# dataloader for val
input_dict = {'path': imdb_path_val, 'shuffle': False, 'one_pass': True,
'args': args, 'assembler': question_assembler,
'fetch_options': True}
val_loader = DataReader(input_dict)
# model params for evaluation
eval_params = args.copy()
eval_params['use_gt_prog'] = False  # do not use ground truth programs
eval_params['enc_dropout'] = False
eval_params['dec_dropout'] = False
eval_params['dec_sampling'] = False # do not sample, take argmax
# for models trained later
if 'num_rounds' not in eval_params:
eval_params['num_rounds'] = val_loader.batch_loader.num_rounds
# model for evaluation
# a separate assembler is created for captions
model = CorefNMN(eval_params, assemblers)
# Load snapshot
print('Loading checkpoint from: %s' % args['checkpoint'])
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
snapshot_saver.restore(sess, args['checkpoint'])
print('Evaluating on %s' % args['test_split'])
ranks = []
matches = []
cur_iter = 0
to_save = {'output': [], 'batch': []}
for batch in progressbar(val_loader.batches(), total=args['num_instances']):
_, outputs = model.run_visualize_iteration(batch, sess)
to_save['output'].append(outputs)
to_save['batch'].append(batch)
cur_iter += 1
if cur_iter >= args['num_instances']:
break
# Save the output + batch
batch_path = '{0}.{1}_batches.npy'.format(args['checkpoint'],
args['num_instances'])
support.save_batch(to_save, batch_path)
|
corefnmn-main
|
exp_vd/visualize_sl.py
|
r"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, whose notice is
below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to evaluate trained Visual Dialog model using supervised learning.
Evaluates visual dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
Usage:
python -u exp_vd/eval_sl.py --gpu_id=0 --test_split='val' \
--checkpoint='checkpoints/model_epoch_005.tmodel'
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_vd import options
# Read command line options.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', required=True, help="Checkpoint to load")
parser.add_argument('--test_split', default='val',
help='Split to run evaluation')
parser.add_argument('--gpu_id', type=int, default=0)
try:
args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# set the cuda environment variable for the gpu to use
gpu_id = '' if args['gpu_id'] < 0 else str(args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_vd.assembler import Assembler
from models_vd.model import CorefNMN
from loader_vd.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# read the train args from checkpoint
param_path = args['checkpoint'].replace('.tmodel', '_params.json')
with open(param_path, 'r') as file_id:
saved_args = json.load(file_id)
saved_args.update(args)
args = saved_args
args['preload_features'] = False
# no supervision is needed
args['supervise_attention'] = False
# adjust for complex models
args['batch_size'] = min(args['batch_size'], 10)
support.pretty_print_dict(args)
# Data files
root = args['data_root']
imdb_path_val = os.path.join(root, 'imdb_%s.npy' % args['test_split'])
# assemblers for question and caption programs
question_assembler = Assembler(args['prog_vocab_path'])
caption_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'cap': caption_assembler}
# dataloader for val
input_dict = {'path': imdb_path_val, 'shuffle': False, 'one_pass': True,
'args': args, 'assembler': question_assembler,
'fetch_options': True}
val_loader = DataReader(input_dict)
# model params for evaluation
eval_params = args.copy()
eval_params['use_gt_prog'] = False  # do not use ground truth programs
eval_params['enc_dropout'] = False
eval_params['dec_dropout'] = False
eval_params['dec_sampling'] = False # do not sample, take argmax
# for models trained later
if 'num_rounds' not in eval_params:
eval_params['num_rounds'] = val_loader.batch_loader.num_rounds
# model for evaluation
# a separate assembler is created for captions
model = CorefNMN(eval_params, assemblers)
# Load snapshot
print('Loading checkpoint from: %s' % args['checkpoint'])
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
snapshot_saver.restore(sess, args['checkpoint'])
print('Evaluating on %s' % args['test_split'])
ranks = []
matches = []
total_iter = int(val_loader.batch_loader.num_inst / args['batch_size'])
num_iters = 0
# get confusion matrix only if using refer
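# rows index the ground-truth module (0: _Find, 1: _Refer) and columns index
# the predicted module (0: _Find, 1: _Refer), as filled in below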
confusion_mat = np.zeros((2, 2))
if args['use_refer']:
refer_token = question_assembler.name2idx_dict['_Refer']
find_token = question_assembler.name2idx_dict['_Find']
for batch in progressbar(val_loader.batches(), total=total_iter):
batch_ranks, outputs = model.run_evaluate_iteration(batch, sess)
ranks.append(batch_ranks)
if 'matches' in outputs: matches.append(outputs['matches'])
# debug, get confusion between find/refer
if args['use_refer']:
find_gt = batch['gt_layout'] == find_token
refer_gt = batch['gt_layout'] == refer_token
find_pred = outputs['pred_tokens'] == find_token
refer_pred = outputs['pred_tokens'] == refer_token
confusion_mat[0, 0] += np.sum(find_pred & find_gt)
confusion_mat[0, 1] += np.sum(refer_pred & find_gt)
confusion_mat[1, 0] += np.sum(find_pred & refer_gt)
confusion_mat[1, 1] += np.sum(refer_pred & refer_gt)
try:
if len(matches) > 0:
matches = np.concatenate(matches)
percent = 100*np.sum(matches) / matches.size
print('Program accuracy: %f percent\n' % percent)
except:
pass
# print confusion matrix
print(confusion_mat)
# save the ranks
param_path = args['checkpoint'].replace('.tmodel', '_rankdump.npy')
np.save(param_path, np.hstack(ranks))
metrics = metrics.compute_metrics(np.hstack(ranks))
|
corefnmn-main
|
exp_vd/eval_sl.py
|
r"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, whose notice is
below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to train Visual Dialog model using supervised learning.
Trains visual dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
Usage (check scripts/run_train.sh):
python -u exp_vd/train_sl.py --gpu_id=0 --dataset='visdial_v0.9' \
--data_root='data/' --model='nmn-cap-prog-only' --batch_size=5 \
--use_refer --use_fact --generator='mem' --feature_path='data/' \
--learning_rate=0.0001 --amalgam_text_feats \
--decoder='disc' --lstm_size 512
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_vd import options
# read command line options
args = options.read_command_line()
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_vd.assembler import Assembler
from models_vd.model import CorefNMN
from loader_vd.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# Data files
glove_mat_path = args['data_root'] + 'vocabulary_vd_glove.npy'
args['data_root'] = os.path.join(args['data_root'], args['dataset'])
args['text_vocab_path'] = os.path.join(args['data_root'], 'vocabulary_vd.txt')
root = args['data_root']
if args['use_refer']:
# use refer module
args['prog_vocab_path'] = os.path.join(root, 'vocabulary_layout_5.txt')
else:
# no explicit refer module
args['prog_vocab_path'] = os.path.join(root, 'vocabulary_layout_4.txt')
imdb_path_train = os.path.join(root, 'imdb_train.npy')
# assemblers for question and caption programs
question_assembler = Assembler(args['prog_vocab_path'])
caption_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'cap': caption_assembler}
# Dataloader for train
input_dict = {'path': imdb_path_train, 'shuffle': True, 'one_pass': False,
'assembler': question_assembler, 'use_count': False,
'args': args}
if args['decoder'] == 'disc':
input_dict['fetch_options'] = True
train_loader = DataReader(input_dict)
# model params for training
train_params = args.copy()
# use the ground truth program for training
train_params['use_gt_prog'] = True
train_params['text_vocab_size'] = train_loader.batch_loader.vocab_dict.num_vocab
train_params['prog_vocab_size'] = len(question_assembler.module_names)
train_params['pad_id'] = train_loader.batch_loader.vocab_dict.word2idx('<pad>')
train_params['num_rounds'] = train_loader.batch_loader.num_rounds
print('Using a vocab size: %d' % train_params['text_vocab_size'])
# model for training
model = CorefNMN(train_params, assemblers)
model.setup_training()
# train with Adam, optimization ops
solver = tf.train.AdamOptimizer(learning_rate=train_params['learning_rate'])
gradients = solver.compute_gradients(model.get_total_loss())
# clip gradients based on value
gradients = [(tf.clip_by_value(g, -2.0, 2.0), v) if g is not None else (g, v)
for g, v in gradients]
solver_op = solver.apply_gradients(gradients)
# add it to the output
model.add_solver_op(solver_op)
# adjust snapshot to have a time stamp folder
cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())
args['snapshot_path'] = os.path.join(args['snapshot_path'], cur_time)
os.makedirs(args['snapshot_path'], exist_ok=True)
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
print('Saving checkpoints at: %s' % args['snapshot_path'])
# initialize all variables
sess.run(tf.global_variables_initializer())
# load glove vectors for embedding
glove_mat_path = os.path.join(args['data_root'], 'vocabulary_vd_glove.npy')
glove_mat = np.load(glove_mat_path)
with tf.variable_scope(train_params['embed_scope'], reuse=True):
embed_mat = tf.get_variable('embed_mat')
sess.run(tf.assign(embed_mat, glove_mat))
#------------------------------------------------------------------------------
# forget about embed and module scopes
del train_params['embed_scope']
if 'module_scope' in train_params:
del train_params['module_scope']
#-------------------------------------------------------------------------
print('Running training iteration..')
num_iter_per_epoch = int(train_loader.batch_loader.num_inst/args['batch_size'])
print('Number of iterations per epoch: %d' % num_iter_per_epoch)
# exponential smoothing for loss
smoother = metrics.ExponentialSmoothing()
for n_iter, batch in enumerate(train_loader.batches()):
# add epoch and iteration
epoch = float(n_iter) / num_iter_per_epoch
batch['epoch'] = epoch
batch['n_iter'] = n_iter
if n_iter >= args['num_epochs'] * num_iter_per_epoch:
break
# perform training iteration
losses, _ = model.run_train_iteration(batch, sess)
losses = smoother.report(losses)
# printing log
if n_iter % 10 == 0:
cur_time = time.strftime('%a %d%b%y %X', time.gmtime())
print_format = ('[%s][It: %d][Ep: %.2f][Loss: %.3f ' +
'Prog: %.3f Ans: %.3f Align: %.3f]')
print_info = (cur_time, n_iter, epoch, losses['total'], losses['prog'],
losses['ans'], losses['align'])
print(print_format % print_info)
# save snapshot after every epoch
if n_iter % num_iter_per_epoch == 0:
epoch = float(n_iter) / num_iter_per_epoch
# Save snapshot at every epoch
file_name = 'model_epoch_%03d.tmodel' % epoch
snapshot_path = os.path.join(args['snapshot_path'], file_name)
snapshot_saver.save(sess, snapshot_path, write_meta_graph=False)
# also save the arguments
params_path = snapshot_path.replace('.tmodel', '_params.json')
with open(params_path, 'w') as file_id:
json.dump(train_params, file_id)
print('Snapshot saved to: ' + snapshot_path)
#-------------------------------------------------------------------------
|
corefnmn-main
|
exp_vd/train_sl.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import setuptools
setuptools.setup(
name="cop3d",
version="1.0.0",
author="Meta AI",
author_email="romansh@meta.com",
packages=setuptools.find_packages(),
license="LICENSE",
description="Common Pets in 3D tools",
long_description=open("README.md").read(),
install_requires=[
"co3d @ git+ssh://git@github.com/facebookresearch/co3d.git#egg=co3d-2.1.0"
],
)
|
cop3d-main
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
from co3d.dataset.download_dataset_impl import build_arg_parser, download_dataset
REPO_ROOT = __file__.rsplit(os.sep, 2)[0]
DEFAULT_LINK_LIST_FILE = os.path.join(REPO_ROOT, "links", "links.json")
DEFAULT_SHA256S_FILE = os.path.join(REPO_ROOT, "links", "cop3d_sha256.json")
if __name__ == "__main__":
parser = build_arg_parser("COP3D", DEFAULT_LINK_LIST_FILE, DEFAULT_SHA256S_FILE)
args = parser.parse_args()
download_dataset(
str(args.link_list_file),
str(args.download_folder),
n_download_workers=int(args.n_download_workers),
n_extract_workers=int(args.n_extract_workers),
download_categories=args.download_categories,
checksum_check=bool(args.checksum_check),
single_sequence_subset=False,
clear_archives_after_unpacking=bool(args.clear_archives_after_unpacking),
sha256s_file=str(args.sha256_file),
skip_downloaded_archives=not bool(args.redownload_existing_archives),
)
|
cop3d-main
|
cop3d/download_dataset.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import re
from distutils.core import setup
from setuptools import find_packages
def find_version() -> str:
    with open('bisk/__init__.py', 'r') as f:
        version_file = f.read()
    version_match = re.search(
        r"^__version__ = ['\"]([^'\"]*)['\"]", version_file, re.M
    )
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string.")
setup(
    name='bipedal-skills',
    version=find_version(),
    author='Meta AI Research',
    author_email='jgehring@meta.com',
    url='https://facebookresearch.github.io/hsd3',
    license='MIT License',
    description='Bipedal Skills RL Benchmark',
    python_requires='>=3.7',
    install_requires=[
        'dm-control>=0.0.32',
        'gym>=0.26',
        'numpy>=1.9.0',
    ],
    packages=find_packages(),
    package_data={'bisk': ['assets/*.xml', 'assets/*.png']},
    long_description=open('README.md').read(),
    long_description_content_type="text/markdown",
)
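# Illustration (assumption about bisk/__init__.py): find_version() above looks
# for a module-level assignment matching the regex, i.e. a line of the form
#   __version__ = '1.0.0'
# where the concrete version string is hypothetical here.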
|
bipedal-skills-main
|
setup.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
    env = gym.make('BiskGoalWall-v1', robot='testcube')
    obs, _ = env.reset(seed=0)
    yield env
    env.close()
@pytest.fixture
def env2d():
    env = gym.make('BiskGoalWall-v1', robot='testcube2d')
    obs, _ = env.reset(seed=0)
    yield env
    env.close()
def test_eoe_if_touched_wall(env):
    upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
        env.p.named.model.actuator_gear['robot/z'][0]
    )
    terminated = False
    step = 0
    while not terminated:
        obs, reward, terminated, truncated, info = env.step([1, 0, upf])
        step += 1
        assert reward == 0
    assert step < 250
def test_reward_goal1(env):
    env.p.named.data.qvel['ball'][0:3] = [10, -3, 2.5]
    terminated = False
    ret = 0
    step = 0
    while not terminated:
        obs, reward, terminated, truncated, info = env.step([0, 0, 0])
        ret += reward
        step += 1
    assert step < 250
    assert ret == 1
def test_reward_goal2(env):
    env.p.named.data.qvel['ball'][0:3] = [10, 3, 4]
    terminated = False
    ret = 0
    step = 0
    while not terminated:
        obs, reward, terminated, truncated, info = env.step([0, 0, 0])
        ret += reward
        step += 1
    assert step < 250
    assert ret == 2
def test_reward_nogoal(env):
    env.p.named.data.qvel['ball'][0:3] = [10, 0, 2]
    terminated = False
    ret = 0
    step = 0
    while not terminated:
        obs, reward, terminated, truncated, info = env.step([0, 0, 0])
        ret += reward
        step += 1
    assert step < 250
    assert ret == 0
def test_no_reward_if_beyond_line(env):
    upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
        env.p.named.model.actuator_gear['robot/z'][0]
    )
    # Move beyond line without touching the ball
    for _ in range(2):
        obs, reward, terminated, truncated, info = env.step([0, 1, upf])
    assert terminated == False
    for _ in range(18):
        obs, reward, terminated, truncated, info = env.step([1, 0, upf])
    assert terminated == False
    env.p.named.data.qvel['ball'][0:3] = [10, 0, 2]
    terminated = False
    ret = 0
    step = 0
    while not terminated:
        obs, reward, terminated, truncated, info = env.step([0, 0, 0])
        ret += reward
        step += 1
    assert step < 250
    assert ret == 0
    env.p.named.data.qvel['ball'][0:3] = [10, -3, 2.5]
    terminated = False
    ret = 0
    step = 0
    while not terminated:
        obs, reward, terminated, truncated, info = env.step([0, 0, 0])
        ret += reward
        step += 1
    assert step < 250
    assert ret == 0
def test_reward_goal2d(env2d):
    env = env2d
    env.p.named.data.qvel['ball-x'] = 10
    env.p.named.data.qvel['ball-z'] = 4
    terminated = False
    ret = 0
    step = 0
    while not terminated:
        obs, reward, terminated, truncated, info = env.step([0, 0])
        ret += reward
        step += 1
    assert step < 250
    assert ret == 1
def test_reward_nogoal2d_1(env2d):
    env = env2d
    env.p.named.data.qvel['ball-x'] = 10
    env.p.named.data.qvel['ball-z'] = 0
    terminated = False
    ret = 0
    step = 0
    while not terminated:
        obs, reward, terminated, truncated, info = env.step([0, 0])
        ret += reward
        step += 1
    assert step < 250
    assert ret == 0
def test_reward_nogoal2d_2(env2d):
    env = env2d
    env.p.named.data.qvel['ball-x'] = 10
    env.p.named.data.qvel['ball-z'] = 10
    terminated = False
    ret = 0
    step = 0
    while not terminated:
        obs, reward, terminated, truncated, info = env.step([0, 0])
        ret += reward
        step += 1
    assert step < 250
    assert ret == 0
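# --- Hedged refactoring sketch (not part of the original test file) ---------
# The tests above repeat the same "step with a fixed action until the episode
# terminates, accumulating reward" loop. One way to factor it out:
def _rollout_until_done(env, action, max_steps=250):
    """Steps `env` with a constant action until termination; returns the
    accumulated reward and the number of steps taken."""
    terminated, ret, step = False, 0, 0
    while not terminated and step < max_steps:
        obs, reward, terminated, truncated, info = env.step(action)
        ret += reward
        step += 1
    return ret, step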
|
bipedal-skills-main
|
tests/test_goalwall.py
|
# Copyright (c) 2022-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import numpy as np
import bisk
@pytest.fixture
def env():
    env = gym.make('BiskGoToSphere-v1', robot='testcube')
    obs, _ = env.reset(seed=0)
    yield env
    env.close()
def test_scripted_policy(env):
    for i in range(4):
        obs, reward, terminated, truncated, info = env.step([0, 0, 0])
    retrn = 0
    while not (terminated or truncated):
        target = obs['targets'][:2]
        dir = target / np.linalg.norm(target)
        dx, dy = 0, 0
        if np.abs(target[0]) > np.abs(target[1]):
            dx = np.sign(target[0])
        else:
            dy = np.sign(target[1])
        obs, reward, terminated, truncated, info = env.step([dx, dy, 0])
        retrn += reward
    assert terminated
    assert not truncated
    assert retrn == 1
|
bipedal-skills-main
|
tests/test_gotosphere.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
@pytest.fixture
def env():
    env = gym.make('BiskHurdles-v1', robot='testcube')
    obs, _ = env.reset(seed=0)
    yield env
    env.close()
def test_reward_clear(env):
    upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
        env.p.named.model.actuator_gear['robot/z'][0]
    )
    obs, reward, terminated, truncated, info = env.step([0, 0, 1])
    # Cross hurdle
    for _ in range(64):
        next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
        if next_obs['next_hurdle'][0] > obs['next_hurdle'][0]:
            assert reward == 1
            break
        else:
            assert reward == 0
        obs = next_obs
    # Go back
    for _ in range(64):
        obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
        if obs['next_hurdle'][2] == 0:
            break
    obs, reward, terminated, truncated, info = env.step([-1, 0, upf])
    for _ in range(4):
        obs, reward, terminated, truncated, info = env.step([0, 0, upf])
    # Cross hurdle again, receive no reward
    for _ in range(64):
        next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
        if next_obs['next_hurdle'][0] > obs['next_hurdle'][0]:
            assert reward == 0
            break
        else:
            assert reward == 0
        obs = next_obs
    obs = next_obs
    # Cross next hurdle, receive reward
    for _ in range(64):
        next_obs, reward, terminated, truncated, info = env.step([1, 0, upf])
        if next_obs['next_hurdle'][0] > obs['next_hurdle'][0]:
            assert reward == 1
            break
        else:
            assert reward == 0
        obs = next_obs
def test_reward_stuck(env):
    upf = (env.p.named.model.body_mass['robot/torso'] * 9.81) / (
        env.p.named.model.actuator_gear['robot/z'][0]
    )
    # Go forward -- should be stuck at first hurdle
    for _ in range(64):
        obs, reward, terminated, truncated, info = env.step([1, 0, upf])
        assert reward == 0
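# Note (illustrative comment, not from the original file): `upf` in both tests
# is the z-actuator command that exactly balances gravity for the test cube,
# i.e. torso mass * 9.81 divided by the z actuator gear, so the cube hovers
# while the x command pushes it forward or backward.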
|
bipedal-skills-main
|
tests/test_hurdles.py
|
# Copyright (c) 2021-present, Facebook, Inc.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
#
import gym
import pytest
import bisk
# Simple test whether we can instantiate the env with a robot and do a single
# step.
def _create_helper(env_name: str, robot: str):
    env = gym.make(env_name, robot=robot)
    env.reset(seed=0)
    env.step(env.action_space.sample())
    env.close()
    env = gym.make(env_name.replace('-v', f'{robot}-v'))
    env.reset(seed=0)
    env.step(env.action_space.sample())
    env.close()
def _envs_helper(robot: str):
    _create_helper('BiskHurdles-v1', robot)
    _create_helper('BiskLimbo-v1', robot)
    _create_helper('BiskHurdlesLimbo-v1', robot)
    _create_helper('BiskGaps-v1', robot)
    _create_helper('BiskStairs-v1', robot)
    _create_helper('BiskGoalWall-v1', robot)
    _create_helper('BiskGoToTargets-v1', robot)
def test_halfcheetah_create():
    _envs_helper('HalfCheetah')
def test_walker_create():
    _envs_helper('Walker')
def test_humanoid_create():
    _envs_helper('Humanoid')
def test_humanoidpc_create():
    _envs_helper('HumanoidPC')
def test_humanoidamasspc_create():
    _envs_helper('HumanoidAMASSPC')
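# Hedged alternative (not in the original file): the per-robot tests above
# could equivalently be written with pytest parametrization, relying on the
# `pytest` import at the top of this file:
@pytest.mark.parametrize(
    'robot',
    ['HalfCheetah', 'Walker', 'Humanoid', 'HumanoidPC', 'HumanoidAMASSPC'],
)
def test_create_parametrized(robot):
    _envs_helper(robot)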
|
bipedal-skills-main
|
tests/test_robots.py
|