python_code | repo_name | file_path
---|---|---|
# Download from https://cs.nyu.edu/~ylclab/data/norb-v1.0/
import sys
import pickle as pkl
# sys.path.insert(0, '../../')
import numpy as np
from sklearn.preprocessing import OneHotEncoder
from norb import NORBDataset
from scipy.misc import imresize
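# Note: scipy.misc.imresize was removed in SciPy 1.3; this script requires
# scipy<1.3 (with Pillow installed).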
from data_utils import normalize_data, apply_normalization
MAX_VAL = 255.0
DS_SIZE = (32, 32)
N_CATEGORIES = 6
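# Each processed example: X row = flattened 32x32 left-camera image
# (1024 values), Y row = one-hot vector over the 6 NORB categories.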
"""
Downsamples.
"""
def process_image(image):
# Downsample
ds = imresize(image, DS_SIZE, 'nearest')
# Flatten
return ds.flatten()
"""
Downsamples, stores only left stereo pair, converts to one-hot label.
"""
def process_data(data):
X = []
Y = []
for ex in data:
this_image = ex.image_lt
this_category = ex.category
X.append(process_image(this_image))
Y.append(this_category)
X = np.array(X)
Y = np.array(Y)
Y = np.expand_dims(Y, 1)
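    # Passing N_CATEGORIES positionally relies on the legacy sklearn
    # `n_values` parameter (removed in scikit-learn 0.22).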
enc = OneHotEncoder(N_CATEGORIES)
Y = enc.fit_transform(Y).todense()
return X,Y
def process_images(names, out_loc, mean=None, sd=None):
print('Names: ', names)
dataset = NORBDataset(dataset_root='/dfs/scratch1/thomasat/datasets/norb', names=names)
Xs = []
Ys = []
print('Dataset names: ', dataset.data.keys())
for name in names:
X, Y = process_data(dataset.data[name])
print('X,Y shape: ', X.shape, Y.shape)
Xs.append(X)
Ys.append(Y)
X = np.vstack(Xs)
Y = np.vstack(Ys)
# Shuffle
idx = np.arange(0, X.shape[0])
np.random.shuffle(idx)
X = X[idx,:]
Y = Y[idx,:]
if mean is None and sd is None:
X, mean, sd = normalize_data(X)
print('X, Y: ', X.shape, Y.shape)
else:
X = apply_normalization(X,mean,sd)
# Save
data_dict = {'X': X, 'Y': Y}
pkl.dump(data_dict, open(out_loc, 'wb'), protocol=2)
return mean,sd
train_names = ['train' + str(i+1) for i in np.arange(10)]
train_out_loc = '/dfs/scratch1/thomasat/datasets/norb_full/processed_py2_train_' + str(DS_SIZE[0]) + '.pkl'
test_names = ['test' + str(i+1) for i in range(2)]
test_out_loc = '/dfs/scratch1/thomasat/datasets/norb_full/processed_py2_test_' + str(DS_SIZE[0]) + '.pkl'
mean, sd = process_images(train_names, train_out_loc)
process_images(test_names, test_out_loc, mean, sd)
|
structured-nets-master
|
scripts/data/preprocess_norb.py
|
# Download from http://www.iro.umontreal.ca/~lisa/twiki/bin/view.cgi/Public/DeepVsShallowComparisonICML2007
import numpy as np
import pickle as pkl
from sklearn.preprocessing import OneHotEncoder
from data_utils import normalize_data, apply_normalization
def process_data(data):
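    # Each .amat row holds the flattened pixel values followed by the class
    # label in its last column.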
X = data[:, :-1]
Y = np.expand_dims(data[:, -1], 1)
# Y must be one-hot
enc = OneHotEncoder()
Y = enc.fit_transform(Y).todense()
return X,Y
train_loc = '/dfs/scratch1/thomasat/datasets/mnist_bg_rot/mnist_all_background_images_rotation_normalized_train_valid.amat'
test_loc = '/dfs/scratch1/thomasat/datasets/mnist_bg_rot/mnist_all_background_images_rotation_normalized_test.amat'
train_out = '/dfs/scratch1/thomasat/datasets/mnist_bg_rot/train_normalized'
test_out = '/dfs/scratch1/thomasat/datasets/mnist_bg_rot/test_normalized'
train_data = np.genfromtxt(train_loc)
train_X, train_Y = process_data(train_data)
test_data = np.genfromtxt(test_loc)
test_X, test_Y = process_data(test_data)
# Normalize
train_X, mean, sd = normalize_data(train_X)
test_X = apply_normalization(test_X, mean, sd)
# Save
print('test_X, test_Y shape: ', test_X.shape, test_Y.shape)
print('train_X, train_Y shape: ', train_X.shape, train_Y.shape)
train = {'X': train_X, 'Y': train_Y}
test = {'X': test_X, 'Y': test_Y}
pkl.dump(train, open(train_out, 'wb'), protocol=2)
pkl.dump(test, open(test_out, 'wb'), protocol=2)
print('Saved train to: ', train_out)
print('Saved test to: ', test_out)
|
structured-nets-master
|
scripts/data/preprocess_mnist_bg_rot.py
|
import numpy as np
import os,sys,h5py
import scipy.io as sio
from scipy.linalg import solve_sylvester
import pickle as pkl
from sklearn.preprocessing import OneHotEncoder
import torch
from torchvision import datasets, transforms
import utils
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def get_dataset(dataset_name, data_dir, transform):
"""
Get paths of datasets.
"""
if dataset_name == 'mnist':
train_loc = os.path.join(data_dir, 'mnist/train_normalized')
test_loc = os.path.join(data_dir, 'mnist/test_normalized')
elif dataset_name == 'cifar10':
train_loc = os.path.join(data_dir, 'cifar10_combined/train')
test_loc = os.path.join(data_dir, 'cifar10_combined/test')
elif dataset_name == 'cifar10mono':
train_loc = os.path.join(data_dir, 'cifar10_combined/train_grayscale')
test_loc = os.path.join(data_dir, 'cifar10_combined/test_grayscale')
elif dataset_name.startswith('mnist_noise'):
idx = dataset_name[-1]
train_loc = os.path.join(data_dir,'mnist_noise/train_' + str(idx))
test_loc = os.path.join(data_dir,'mnist_noise/test_' + str(idx))
elif dataset_name == 'norb':
train_loc = os.path.join(data_dir,'norb_full/processed_py2_train_32.pkl')
test_loc = os.path.join(data_dir,'norb_full/processed_py2_test_32.pkl')
elif dataset_name == 'rect_images': #TODO
train_loc = os.path.join(data_dir, 'rect_images/rectangles_im_train.amat')
test_loc = os.path.join(data_dir, 'rect_images/rectangles_im_test.amat')
elif dataset_name == 'rect':
train_loc = os.path.join(data_dir,'rect/train_normalized')
test_loc = os.path.join(data_dir, 'rect/test_normalized')
elif dataset_name == 'convex':
train_loc = os.path.join(data_dir, 'convex/train_normalized')
test_loc = os.path.join(data_dir, 'convex/test_normalized')
elif dataset_name == 'mnist_rand_bg': #TODO
train_loc = os.path.join(data_dir, 'mnist_rand_bg/mnist_background_random_train.amat')
test_loc = os.path.join(data_dir, 'mnist_rand_bg/mnist_background_random_test.amat')
elif dataset_name == 'mnist_bg_rot':
train_loc = os.path.join(data_dir, 'mnist_bg_rot/train_normalized')
test_loc = os.path.join(data_dir, 'mnist_bg_rot/test_normalized')
elif dataset_name == 'mnist_bg_rot_swap':
train_loc = os.path.join(data_dir, 'mnist_bg_rot/test_normalized')
test_loc = os.path.join(data_dir, 'mnist_bg_rot/train_normalized')
#TODO handle iwslt, copy tasks
# TODO smallnorb, timit
    else:
        raise ValueError('dataset.py: unknown dataset name ' + dataset_name)
# TODO maybe want the .amat if that's standard and do postprocessing in a uniform way instead of having a separate script per dataset
train_data = pkl.load(open(train_loc, 'rb'))
train_X = train_data['X']
train_Y = train_data['Y']
test_data = pkl.load(open(test_loc, 'rb'))
test_X = test_data['X']
test_Y = test_data['Y']
train_X, train_Y = postprocess(transform, train_X, train_Y)
test_X, test_Y = postprocess(transform, test_X, test_Y)
in_size = train_X.shape[1]
out_size = train_Y.shape[1]
print("Train dataset size: ", train_X.shape[0])
print("Test dataset size: ", test_X.shape[0])
print("In size: ", in_size)
print("Out size: ", out_size)
return torch.FloatTensor(train_X), torch.FloatTensor(train_Y), torch.FloatTensor(test_X), torch.FloatTensor(test_Y), in_size, out_size
def split_train_val(train_X, train_Y, val_fraction, train_fraction=None):
"""
Input: training data as a torch.Tensor
"""
# Shuffle
idx = np.arange(train_X.shape[0])
np.random.shuffle(idx)
train_X = train_X[idx,:]
train_Y = train_Y[idx,:]
# Compute validation set size
val_size = int(val_fraction*train_X.shape[0])
# Downsample for sample complexity experiments
if train_fraction is not None:
train_size = int(train_fraction*train_X.shape[0])
assert val_size + train_size <= train_X.shape[0]
else:
train_size = train_X.shape[0] - val_size
# Shuffle X
idx = np.arange(0, train_X.shape[0])
np.random.shuffle(idx)
train_idx = idx[0:train_size]
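    # (assumes val_size >= 1: with val_size == 0, idx[-0:] selects all rows)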
val_idx = idx[-val_size:]
val_X = train_X[val_idx, :]
val_Y = train_Y[val_idx, :]
train_X = train_X[train_idx, :]
train_Y = train_Y[train_idx, :]
print('train_X: ', train_X.shape)
print('train_Y: ', train_Y.shape)
print('val_X: ', val_X.shape)
print('val_Y: ', val_Y.shape)
return train_X, train_Y, val_X, val_Y
def create_data_loaders(dataset_name, data_dir, transform, train_fraction, val_fraction, batch_size):
if device.type == 'cuda':
loader_args = {'num_workers': 16, 'pin_memory': True}
else:
loader_args = {'num_workers': 4, 'pin_memory': False}
train_X, train_Y, test_X, test_Y, in_size, out_size = get_dataset(dataset_name, data_dir, transform) # train/test data, input/output size
# train_X, train_Y = postprocess(transform, train_X, train_Y)
# test_X, test_Y = postprocess(transform, test_X, test_Y)
# TODO: use torch.utils.data.random_split instead
# however, this requires creating the dataset, then splitting, then applying transformations
train_X, train_Y, val_X, val_Y = split_train_val(train_X, train_Y, val_fraction, train_fraction)
# TODO: use pytorch transforms to postprocess
train_dataset = torch.utils.data.TensorDataset(train_X, train_Y)
val_dataset = torch.utils.data.TensorDataset(val_X, val_Y)
test_dataset = torch.utils.data.TensorDataset(test_X, test_Y)
# create dataloaders
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, **loader_args)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batch_size, shuffle=True, **loader_args)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=True, **loader_args)
return train_loader, val_loader, test_loader, in_size, out_size
class DatasetLoaders:
def __init__(self, name, data_dir, val_fraction, transform=None, train_fraction=None, batch_size=50):
if name.startswith('true'):
# TODO: Add support for synthetic datasets back. Possibly should be split into separate class
self.loss = utils.mse_loss
else:
self.train_loader, self.val_loader, self.test_loader, self.in_size, self.out_size = create_data_loaders(name,
data_dir, transform, train_fraction, val_fraction, batch_size)
self.loss = utils.cross_entropy_loss
### Utilities for processing data arrays in numpy
def postprocess(transform, X, Y=None):
# pad from 784 to 1024
if 'pad' in transform:
assert X.shape[1] == 784
print(X.shape, type(X))
X = np.pad(X.reshape((-1,28,28)), ((0,0),(2,2),(2,2)), 'constant').reshape(-1,1024)
if 'randomize' in transform:
assert Y is not None
np.random.shuffle(Y)
return X, Y
def augment(transform, X, Y=None):
    if 'contrast' in transform:
def scale_patch(X):
patch = ((9, 19), (9, 19))
X_ = X.copy()
X_[:, patch[0][0]:patch[0][1], patch[1][0]:patch[1][1]] *= 2
return X_
# subsample
idx = np.arange(X.shape[0])
np.random.shuffle(idx)
X = X[idx,...]
Y = Y[idx,...]
X1 = X.reshape((-1,28,28))
X2 = scale_patch(X1)
X3 = scale_patch(X2)
X4 = scale_patch(X3)
# X5 = scale_patch(X4)
X = np.concatenate([X1, X2, X3, X4], axis=0).reshape(-1, 28*28)
Y = np.concatenate([Y, Y, Y, Y], axis=0)
    if 'patch' in transform:
def add_patch(X):
patch = ((0, 4), (10, 18))
X_ = X.copy()
X_[:, patch[0][0]:patch[0][1], patch[1][0]:patch[1][1]] += 3.0
return X_
X1 = X.reshape((-1,28,28))
X2 = add_patch(X1)
X3 = add_patch(X2)
X4 = add_patch(X3)
X = np.concatenate([X1, X2, X3, X4], axis=0).reshape(-1, 28*28)
Y = np.concatenate([Y, Y, Y, Y], axis=0)
return X, Y
|
structured-nets-master
|
pytorch/dataset.py
|
import subprocess
import torch
import torch.nn as nn
def mse_loss(pred, true):
loss_fn = nn.MSELoss()
mse = loss_fn(pred, true)
accuracy = torch.FloatTensor([0])
return mse, accuracy
def cross_entropy_loss(pred, true):
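    # `true` arrives one-hot; convert it to class indices, since
    # nn.CrossEntropyLoss expects raw logits and integer targets.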
loss_fn = nn.CrossEntropyLoss()
_, true_argmax = torch.max(true, 1)
cross_entropy = loss_fn(pred, true_argmax)
_, pred_argmax = torch.max(pred, 1)
correct_prediction = torch.eq(true_argmax, pred_argmax)
accuracy = torch.mean(correct_prediction.float())
return cross_entropy, accuracy
def get_commit_id():
return subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD'])
def descendants(cls):
"""
Get all subclasses (recursively) of class cls, not including itself
Assumes no multiple inheritance
"""
desc = []
for subcls in cls.__subclasses__():
desc.append(subcls)
desc.extend(descendants(subcls))
return desc
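# e.g. given class A(Base) and class B(A), descendants(Base) == [A, B]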
|
structured-nets-master
|
pytorch/utils.py
|
import sys, os, datetime, subprocess
import pickle as pkl
import itertools
import argparse, argh
import threading
import logging
import pprint
import numpy as np
import torch
from torch.optim.lr_scheduler import StepLR
from inspect import signature
# Add PyTorch root to path
pytorch_root = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.insert(0, pytorch_root)
from dataset import DatasetLoaders
from models.nets import ArghModel, construct_model
from learning import train, prune
from utils import descendants
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s %(message)s',
datefmt='%FT%T',)
# Command line params
parser = argparse.ArgumentParser()
parser.add_argument("--name", default='', help='Name of run')
parser.add_argument("--dataset", help='Dataset')
parser.add_argument('--transform', default='none', help='Any transform of dataset, e.g. padding')
parser.add_argument('--train-frac', type=float, nargs='+', default=[None])
parser.add_argument('--val-frac', type=float, default=0.15)
parser.add_argument("--result-dir", help='Where to save results')
parser.add_argument('--trials', type=int, default=1, help='Number of independent runs')
parser.add_argument('--trial-id', type=int, nargs='+', help='Specify trial numbers; alternate to --trials')
parser.add_argument('--batch-size', type=int, default=50, help='Batch size')
parser.add_argument("--epochs", type=int, default=1, help='Number of passes through the training data')
parser.add_argument('--optim', default='sgd', help='Optimizer')
parser.add_argument('--lr', nargs='+', type=float, default=[1e-3], help='Learning rates')
parser.add_argument('--mom', nargs='+', type=float, default=[0.9], help='Momentums')
parser.add_argument('--lr-decay', type=float, default=1.0)
parser.add_argument('--log-freq', type=int, default=100)
parser.add_argument('--test', action='store_false', help='Disable evaluation on the test set (enabled by default)')
parser.add_argument('--prune', action='store_true', help='Whether to do pruning')
parser.add_argument('--prune-lr-decay', type=float, default=0.1, help='LR decay factor in each pruning iter')
parser.add_argument('--prune-factor', type=float, default=1, help='Factor by which to prune')
parser.add_argument('--prune-iters', type=int, default=1, help='Number of pruning iters')
parser.add_argument('--save-model', action='store_false', help='Disable saving the best model (enabled by default)')
parser.add_argument('--data-dir', default='../../datasets/', help='Data directory')
out_dir = os.path.dirname(pytorch_root) # Repo root
# seed = 0
# np.random.seed(seed)
# torch.manual_seed(seed)
def save_args(args, results_dir):
commit_id = subprocess.check_output(['git', 'rev-parse', '--short', 'HEAD']).strip()
command = ' '.join(sys.argv)
param_str = str(commit_id) + '\n' + command + '\n' + pprint.pformat(vars(args))
print(param_str)
# Make new dir with timestamp
if not os.path.exists(results_dir):
os.makedirs(results_dir)
# Save the parameters in readable form
text_file = open(os.path.join(results_dir, 'params.txt'), "w")
text_file.write(param_str)
text_file.close()
# Save the Namespace object
pkl.dump(args, open(os.path.join(results_dir, 'params.p'), "wb"))
def mlp(args):
for train_frac in args.train_frac:
dataset = DatasetLoaders(args.dataset, args.data_dir, args.val_frac, args.transform, train_frac, args.batch_size)
model = construct_model(nets[args.model], dataset.in_size, dataset.out_size, args)
for lr, mom in itertools.product(args.lr, args.mom):
run_name = args.name + '_' + model.name() \
+ '_lr' + str(lr) \
+ '_lrd' + str(args.lr_decay) \
+ '_mom' + str(mom) \
+ '_bs' + str(args.batch_size) \
+ '_ep' + str(args.epochs) \
+ '_' + str(args.dataset) \
+ '_vf' + str(args.val_frac) \
+ '_m' + str(args.model) \
+ '_hs' + str(args.hidden_size)
#+ '_nl' + str(args.num_layers)
if train_frac is not None:
run_name += '_tf' + str(train_frac)
if args.prune:
run_name += '_pf' + str(args.prune_factor)
results_dir = os.path.join(out_dir,
'results',
args.result_dir,
run_name + '_' + str(datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")))
save_args(args, results_dir)
trial_ids = args.trial_id if args.trial_id is not None else range(args.trials)
for trial_iter in trial_ids:
log_path = os.path.join(out_dir, 'tensorboard', args.result_dir, run_name, str(trial_iter))
checkpoint_path = os.path.join(out_dir, 'checkpoints', args.result_dir, run_name, str(trial_iter))
result_path = os.path.join(results_dir, str(trial_iter))
model.reset_parameters()
if args.optim == 'sgd':
optimizer = torch.optim.SGD(model.parameters(), lr=lr, momentum=mom)
elif args.optim == 'adam':
optimizer = torch.optim.Adam(model.parameters(), lr=lr, amsgrad=False)
elif args.optim == 'ams':
optimizer = torch.optim.Adam(model.parameters(), lr=lr, amsgrad=True)
else:
assert False, "invalid optimizer"
lr_scheduler = StepLR(optimizer, step_size=1, gamma=args.lr_decay)
if args.prune:
# Is there a better way to enforce pruning only for unconstrained and MLP?
assert model.class_type in ['unconstrained', 'u'] and args.model in ['MLP','CNN']
prune.prune(dataset, model, optimizer, lr_scheduler, args.epochs, args.log_freq, log_path,
checkpoint_path, result_path, args.test, args.save_model, args.prune_lr_decay, args.prune_factor,
args.prune_iters)
else:
train.train(dataset, model, optimizer, lr_scheduler, args.epochs, args.log_freq,
log_path, checkpoint_path, result_path, args.test, args.save_model)
## Parse
parser.set_defaults(task=mlp)
# subparsers = parser.add_subparsers()
# mlp_parser = subparsers.add_parser('MLP')
# mlp_parser.set_defaults(task=mlp)
# MLP models
model_options = []
nets = {}
for model in descendants(ArghModel):
# Change the names so argh can create parsers
model.args.__name__ = model.__name__
model_options.append(model.args)
nets[model.__name__] = model
argh.add_commands(parser, model_options, namespace='model', namespace_kwargs={'dest': 'model'})
for model in ArghModel.__subclasses__():
# Change names back
model.args.__name__ = 'args'
args = parser.parse_args()
args.task(args)
|
structured-nets-master
|
pytorch/main.py
|
import numpy as np
import os, time, logging
import pickle as pkl
import torch
import torch.optim as optim
from torch.optim.lr_scheduler import StepLR
from tensorboardX import SummaryWriter
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def test_split(net, dataloader, loss_fn):
    # Average loss/accuracy over a full split; no gradients are needed here.
    n = len(dataloader.dataset)
    total_loss = 0.0
    total_acc = 0.0
    with torch.no_grad():
        for data in dataloader:
            batch_X, batch_Y = data
            batch_X, batch_Y = batch_X.to(device), batch_Y.to(device)
            output = net(batch_X)
            loss_batch, acc_batch = loss_fn(output, batch_Y)
            total_loss += len(batch_X)*loss_batch.item()
            total_acc += len(batch_X)*acc_batch.item()
    return total_loss/n, total_acc/n
# Epoch_offset: to ensure stats are not overwritten when called during pruning
def train(dataset, net, optimizer, lr_scheduler, epochs, log_freq, log_path, checkpoint_path, result_path,
test, save_model, epoch_offset=0):
logging.debug('Tensorboard log path: ' + log_path)
logging.debug('Tensorboard checkpoint path: ' + checkpoint_path)
logging.debug('Results directory: ' + result_path)
os.makedirs(checkpoint_path, exist_ok=True)
writer = SummaryWriter(log_path)
net.to(device)
    if device.type == 'cuda':
        logging.debug(torch.cuda.get_device_name(0))
for name, param in net.named_parameters():
if param.requires_grad:
logging.debug(('Parameter name, shape: ', name, param.data.shape))
losses = {'Train': [], 'Val': [], 'DR': [], 'ratio': [], 'Test':[]}
accuracies = {'Train': [], 'Val': [], 'Test':[]}
best_val_acc = 0.0
best_val_save = None
# If not saving models, then keep updating test accuracy of best validation model
test_acc_of_best_val = 0.0
test_loss_of_best_val = 0.0
def log_stats(name, split, loss, acc, step):
losses[split].append(loss)
accuracies[split].append(acc)
writer.add_scalar(split+'/Loss', loss, step)
writer.add_scalar(split+'/Accuracy', acc, step)
logging.debug(f"{name} loss, accuracy: {loss:.6f}, {acc:.6f}")
# Compute initial stats
t1 = time.time()
init_loss, init_accuracy = test_split(net, dataset.val_loader, dataset.loss)
log_stats('Initial', 'Val', init_loss, init_accuracy, epoch_offset)
for epoch in range(epochs):
logging.debug('Starting epoch ' + str(epoch+epoch_offset))
for step, data in enumerate(dataset.train_loader, 0):
# Get the inputs
batch_xs, batch_ys = data
batch_xs, batch_ys = batch_xs.to(device), batch_ys.to(device)
optimizer.zero_grad() # Zero the gradient buffers
output = net(batch_xs)
train_loss, train_accuracy = dataset.loss(output, batch_ys)
train_loss += net.loss()
train_loss.backward()
optimizer.step()
# Log training every log_freq steps
total_step = (epoch + epoch_offset)*len(dataset.train_loader) + step+1
if total_step % log_freq == 0:
logging.debug(('Time: ', time.time() - t1))
t1 = time.time()
logging.debug(('Training step: ', total_step))
log_stats('Train', 'Train', train_loss.data.item(), train_accuracy.data.item(), total_step)
# Validate and checkpoint by epoch
# Test on validation set
val_loss, val_accuracy = test_split(net, dataset.val_loader, dataset.loss)
log_stats('Validation', 'Val', val_loss, val_accuracy, epoch+epoch_offset+1)
# Update LR
lr_scheduler.step()
for param_group in optimizer.param_groups:
logging.debug('Current LR: ' + str(param_group['lr']))
# Record best model
if val_accuracy > best_val_acc:
if save_model:
save_path = os.path.join(checkpoint_path, 'best')
with open(save_path, 'wb') as f:
torch.save(net.state_dict(), f)
logging.debug(("Best model saved in file: %s" % save_path))
best_val_save = save_path
else:
test_loss, test_accuracy = test_split(net, dataset.test_loader, dataset.loss)
test_loss_of_best_val = test_loss
test_acc_of_best_val = test_accuracy
best_val_acc = val_accuracy
# Save last checkpoint
if save_model:
save_path = os.path.join(checkpoint_path, 'last')
with open(save_path, 'wb') as f:
torch.save(net.state_dict(), f)
logging.debug(("Last model saved in file: %s" % save_path))
# Test trained model
if test:
if save_model:
# Load net from best validation
            if best_val_save is not None:
                net.load_state_dict(torch.load(best_val_save))
                logging.debug(f'Loaded best validation checkpoint from: {best_val_save}')
test_loss, test_accuracy = test_split(net, dataset.test_loader, dataset.loss)
log_stats('Test', 'Test', test_loss, test_accuracy, 0)
else:
log_stats('Test', 'Test', test_loss_of_best_val, test_acc_of_best_val, 0)
train_loss, train_accuracy = test_split(net, dataset.train_loader, dataset.loss)
# Log best validation accuracy and training acc for that model
writer.add_scalar('MaxAcc/Val', best_val_acc)
writer.add_scalar('MaxAcc/Train', train_accuracy)
writer.export_scalars_to_json(os.path.join(log_path, "all_scalars.json"))
writer.close()
pkl.dump(losses, open(result_path + '_losses.p', 'wb'), protocol=2)
pkl.dump(accuracies, open(result_path + '_accuracies.p', 'wb'), protocol=2)
logging.debug('Saved losses and accuracies to: ' + result_path)
return losses, accuracies
|
structured-nets-master
|
pytorch/learning/train.py
|
import numpy as np
from learning import train
import torch
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
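# Builds a 0/1 mask keeping the N = size/prune_factor largest-magnitude
# weights (e.g. prune_factor=4 keeps the top quarter). Assumes each layer
# exposes its dense weight as `.W` and provides set_mask(mask, device).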
def generate_mask(W, prune_factor):
weights = W.W.cpu().data.numpy()
N = int(weights.size/prune_factor)
# Get indices of N highest magnitude weights
idx = np.abs(weights.flatten()).argsort()[-N:][::-1]
Z = np.zeros(weights.size)
Z[idx] = 1
Z = Z.reshape(weights.shape)
return Z
def set_masks(net, prune_factor, device):
for layer in net.layers:
mask = generate_mask(layer, prune_factor)
layer.set_mask(mask, device)
def prune(dataset, net, optimizer, lr_scheduler, epochs, log_freq, log_path, checkpoint_path, result_path, test, save, prune_lr_decay, prune_factor, prune_iters):
# Initial training
train.train(dataset, net, optimizer, lr_scheduler, epochs, log_freq, log_path, checkpoint_path, result_path, 0, save)
for i in range(prune_iters):
set_masks(net, prune_factor, device)
# Update learning rate
for param_group in optimizer.param_groups:
param_group['lr'] = prune_lr_decay*param_group['lr']
# Retrain
train.train(dataset, net, optimizer, lr_scheduler, epochs, log_freq, log_path, checkpoint_path, result_path, test, save, (i+1)*epochs)
|
structured-nets-master
|
pytorch/learning/prune.py
|
import numpy as np
import os, sys
sys.path.insert(0, '../../pytorch/')
import torch
from torch_utils import *
from torch.autograd import Variable
import torch.optim as optim
from torchtext import data, datasets
import spacy
from tensorboardX import SummaryWriter
sys.path.insert(0, '../../pytorch/attention/')
from attention import *
sys.path.insert(0, '../../')
from dataset import *
import pickle as pkl
import time
spacy_de = spacy.load('de')
spacy_en = spacy.load('en')
def tokenize_de(text):
return [tok.text for tok in spacy_de.tokenizer(text)]
def tokenize_en(text):
return [tok.text for tok in spacy_en.tokenizer(text)]
def create_src_tgt():
BOS_WORD = '<s>'
EOS_WORD = '</s>'
BLANK_WORD = "<blank>"
SRC = data.Field(tokenize=tokenize_de, pad_token=BLANK_WORD)
TGT = data.Field(tokenize=tokenize_en, init_token = BOS_WORD,
eos_token = EOS_WORD, pad_token=BLANK_WORD)
MAX_LEN = 100
train, val, test = datasets.IWSLT.splits(
exts=('.de', '.en'), fields=(SRC, TGT),
filter_pred=lambda x: len(vars(x)['src']) <= MAX_LEN and
len(vars(x)['trg']) <= MAX_LEN)
MIN_FREQ = 2
SRC.build_vocab(train.src, min_freq=MIN_FREQ)
TGT.build_vocab(train.trg, min_freq=MIN_FREQ)
return SRC, TGT, train, val, test
# Duplicated
def run_epoch(data_iter, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = 0
tokens = 0
losses = []
for i, batch in enumerate(data_iter):
batch.src, batch.trg, batch.src_mask, batch.trg_mask = batch.src.cuda(), batch.trg.cuda(), batch.src_mask.cuda(), batch.trg_mask.cuda()
#print('batch.src:', batch.src)
#print('batch.trg: ', batch.trg)
#print('batch.src_mask: ', batch.src_mask)
#print('batch.trg_mask: ', batch.trg_mask)
#quit()
out = model.forward(batch.src, batch.trg,
batch.src_mask, batch.trg_mask)
loss = loss_compute(out, batch.trg_y, batch.ntokens)
total_loss += loss
total_tokens += batch.ntokens
tokens += batch.ntokens
if i % 50 == 1:
elapsed = time.time() - start
print(("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss / batch.ntokens, tokens / elapsed)))
start = time.time()
tokens = 0
losses.append(loss/batch.ntokens)
return total_loss / total_tokens, losses
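# Note: compute_bleu relies on module-level valid_iter, SRC, TGT, and a
# greedy_decode helper that are not defined in this file.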
def compute_bleu(model):
for i, batch in enumerate(valid_iter):
src = batch.src.transpose(0, 1)[:1]
src_mask = (src != SRC.vocab.stoi["<blank>"]).unsqueeze(-2)
out = greedy_decode(model, src, src_mask,max_len=60, start_symbol=TGT.vocab.stoi["<s>"])
print("Translation:", end="\t")
for i in range(1, out.size(1)):
sym = TGT.vocab.itos[out[0, i]]
if sym == "</s>": break
print(sym, end =" ")
print()
print("Target:", end="\t")
for i in range(1, batch.trg.size(0)):
sym = TGT.vocab.itos[batch.trg.data[i, 0]]
if sym == "</s>": break
print(sym, end =" ")
print()
break
def optimize_iwslt(dataset, params):
SRC, TGT, train, val, test = create_src_tgt()
pad_idx = TGT.vocab.stoi["<blank>"]
model = make_model(params, len(SRC.vocab), len(TGT.vocab), N=1)
model.cuda()
print((torch.cuda.get_device_name(0)))
for name, param in model.named_parameters():
if param.requires_grad:
print(('Parameter name, shape: ', name, param.data.shape))
writer = SummaryWriter(params.log_path)
losses = {'train': [], 'val': [], 'DR': [], 'ratio': []}
accuracies = {'train': [], 'val': []}
criterion = LabelSmoothing(size=len(TGT.vocab), padding_idx=pad_idx, smoothing=0.1)
criterion.cuda()
train_iter = MyIterator(train, batch_size=params.batch_size, device=0,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=True)
valid_iter = MyIterator(val, batch_size=params.batch_size, device=0,
repeat=False, sort_key=lambda x: (len(x.src), len(x.trg)),
batch_size_fn=batch_size_fn, train=False)
model_opt = NoamOpt(model.src_embed[0].d_model, 1, 2000,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
for epoch in range(params.steps):
model.train()
train_total_loss_fraction, train_losses = run_epoch((rebatch(pad_idx, b) for b in train_iter),
model,
SimpleLossCompute(model.generator, criterion, model_opt))
losses['train'] += train_losses
print('Train, epoch: ', train_total_loss_fraction, epoch)
writer.add_scalar('Train/Loss', train_total_loss_fraction, epoch)
model.eval()
val_total_loss_fraction, val_losses = run_epoch((rebatch(pad_idx, b) for b in valid_iter),
model,
SimpleLossCompute(model.generator, criterion, None))
losses['val'] += val_losses
print('Val, epoch: ', val_total_loss_fraction, epoch)
writer.add_scalar('Val/Loss', val_total_loss_fraction, epoch)
"""
losses['train'] += train_losses
print('Train, epoch: ', train_total_loss_fraction, epoch)
writer.add_scalar('Train/Loss', train_total_loss_fraction, epoch)
val_total_loss_fraction, val_losses = run_epoch(writer, data_gen(V, 30, 5), model,
SimpleLossCompute(model.generator, criterion, None))
losses['val'] += val_losses
print('Val, epoch: ', val_total_loss_fraction, epoch)
writer.add_scalar('Val/Loss', val_total_loss_fraction, epoch)
# Checkpoint
save_path = os.path.join(params.checkpoint_path, str(epoch))
with open(save_path, 'wb') as f:
torch.save(model.state_dict(), f)
print(("Model saved in file: %s" % save_path))
"""
writer.export_scalars_to_json(os.path.join(params.log_path, "all_scalars.json"))
writer.close()
return losses, accuracies
|
structured-nets-master
|
pytorch/old/misc/attention/optimize_iwslt.py
|
"""
http://nlp.seas.harvard.edu/2018/04/03/attention.html
"""
import sys
sys.path.insert(0, '../')
from structured_layer import *
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import copy, math
import numpy as np
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Take in and process masked src and target sequences."
return self.decode(self.encode(src, src_mask), src_mask,
tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask):
return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask):
for layer in self.layers:
x = layer(x, memory, src_mask, tgt_mask)
return self.norm(x)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
        print('self_attn: ', self_attn)
print('src_attn:', src_attn)
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask, tgt_mask):
"Follow Figure 1 (right) for connections."
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, -1e9)
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class MultiHeadedAttention(nn.Module):
def __init__(self, params, h, d_model, structured, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
print(('d_model, h, d_k: ', d_model, h, self.d_k))
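        # With structured=True, the final output projection (linears[-1]) is a
        # StructuredLinear layer; the Q/K/V projections remain dense.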
if structured:
self.linears = clones(nn.Linear(d_model, d_model), 3)
self.linears.append(StructuredLinear(params))
else:
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
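        # PE(pos, 2i)   = sin(pos / 10000^(2i / d_model))
        # PE(pos, 2i+1) = cos(pos / 10000^(2i / d_model))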
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2) *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + Variable(self.pe[:, :x.size(1)],
requires_grad=False)
return self.dropout(x)
def make_model(params, src_vocab, tgt_vocab, N=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(params, h, d_model, False)
structured_attn = MultiHeadedAttention(params, h, d_model, True)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(structured_attn), c(ff), dropout), N),
Decoder(DecoderLayer(d_model, c(attn), c(attn),
c(ff), dropout), N),
nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
Generator(d_model, tgt_vocab))
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform(p)
return model
if __name__ == '__main__':
    # Smoke test. This make_model takes a StructuredLinear params object as
    # its first argument (the upstream annotated-transformer call was
    # make_model(10, 10, 2)), so a valid config must be supplied here.
    tmp_model = make_model(None, 10, 10, N=2)
    print(tmp_model)
|
structured-nets-master
|
pytorch/old/misc/attention/attention.py
|
import numpy as np
import os, sys, time
sys.path.insert(0, '../../pytorch/')
import torch
from torch_utils import *
from torch.autograd import Variable
import torch.optim as optim
from tensorboardX import SummaryWriter
sys.path.insert(0, '../../pytorch/attention/')
from attention import *
sys.path.insert(0, '../../')
from dataset import *
def run_epoch(writer, data_iter, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = 0
tokens = 0
losses = []
for i, batch in enumerate(data_iter):
batch.src, batch.trg, batch.src_mask, batch.trg_mask = batch.src.cuda(), batch.trg.cuda(), batch.src_mask.cuda(), batch.trg_mask.cuda()
#print('batch.src:', batch.src)
#print('batch.trg: ', batch.trg)
#print('batch.src_mask: ', batch.src_mask)
#print('batch.trg_mask: ', batch.trg_mask)
#quit()
out = model.forward(batch.src, batch.trg,
batch.src_mask, batch.trg_mask)
loss = loss_compute(out, batch.trg_y, batch.ntokens)
total_loss += loss
total_tokens += batch.ntokens
tokens += batch.ntokens
if i % 50 == 1:
elapsed = time.time() - start
print(("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss / batch.ntokens, tokens / elapsed)))
start = time.time()
tokens = 0
losses.append(loss/batch.ntokens)
return total_loss / total_tokens, losses
def optimize_nmt(dataset, params):
V = 11
writer = SummaryWriter(params.log_path)
losses = {'train': [], 'val': [], 'DR': [], 'ratio': []}
accuracies = {'train': [], 'val': []}
criterion = LabelSmoothing(size=V, padding_idx=0, smoothing=0.0)
model = make_model(params, V, V, N=1)
for name, param in model.named_parameters():
if param.requires_grad:
print(('Parameter name, shape: ', name, param.data.shape))
model_opt = NoamOpt(model.src_embed[0].d_model, 1, 400,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
model.cuda()
for epoch in range(params.steps):
model.train()
train_total_loss_fraction, train_losses = run_epoch(writer, data_gen(V, 30, 20), model,
SimpleLossCompute(model.generator, criterion, model_opt))
losses['train'] += train_losses
print('Train, epoch: ', train_total_loss_fraction, epoch)
writer.add_scalar('Train/Loss', train_total_loss_fraction, epoch)
model.eval()
val_total_loss_fraction, val_losses = run_epoch(writer, data_gen(V, 30, 5), model,
SimpleLossCompute(model.generator, criterion, None))
losses['val'] += val_losses
print('Val, epoch: ', val_total_loss_fraction, epoch)
writer.add_scalar('Val/Loss', val_total_loss_fraction, epoch)
# Checkpoint
save_path = os.path.join(params.checkpoint_path, str(epoch))
with open(save_path, 'wb') as f:
torch.save(model.state_dict(), f)
print(("Model saved in file: %s" % save_path))
writer.export_scalars_to_json(os.path.join(params.log_path, "all_scalars.json"))
writer.close()
return losses, accuracies
|
structured-nets-master
|
pytorch/old/misc/attention/optimize_nmt.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math, copy, time
from torch.autograd import Variable
import matplotlib.pyplot as plt
import numpy as np
from attention import *
from torchtext import data, datasets
# Skip if not interested in multigpu.
class MultiGPULossCompute:
"A multi-gpu loss compute and train function."
def __init__(self, generator, criterion, devices, opt=None, chunk_size=5):
# Send out to different gpus.
self.generator = generator
self.criterion = nn.parallel.replicate(criterion,
devices=devices)
self.opt = opt
self.devices = devices
self.chunk_size = chunk_size
def __call__(self, out, targets, normalize):
total = 0.0
generator = nn.parallel.replicate(self.generator,
devices=self.devices)
out_scatter = nn.parallel.scatter(out,
target_gpus=self.devices)
out_grad = [[] for _ in out_scatter]
targets = nn.parallel.scatter(targets,
target_gpus=self.devices)
# Divide generating into chunks.
chunk_size = self.chunk_size
for i in range(0, out_scatter[0].size(1), chunk_size):
# Predict distributions
out_column = [[Variable(o[:, i:i+chunk_size].data,
requires_grad=self.opt is not None)]
for o in out_scatter]
gen = nn.parallel.parallel_apply(generator, out_column)
# Compute loss.
y = [(g.contiguous().view(-1, g.size(-1)),
t[:, i:i+chunk_size].contiguous().view(-1))
for g, t in zip(gen, targets)]
loss = nn.parallel.parallel_apply(self.criterion, y)
# Sum and normalize loss
l = nn.parallel.gather(loss,
target_device=self.devices[0])
l = l.sum()[0] / normalize
total += l.data[0]
# Backprop loss to output of transformer
if self.opt is not None:
l.backward()
for j, l in enumerate(loss):
out_grad[j].append(out_column[j][0].grad.data.clone())
# Backprop all loss through transformer.
if self.opt is not None:
out_grad = [Variable(torch.cat(og, dim=1)) for og in out_grad]
o1 = out
o2 = nn.parallel.gather(out_grad,
target_device=self.devices[0])
o1.backward(gradient=o2)
self.opt.step()
self.opt.optimizer.zero_grad()
return total * normalize
class MyIterator(data.Iterator):
def create_batches(self):
if self.train:
def pool(d, random_shuffler):
for p in data.batch(d, self.batch_size * 100):
p_batch = data.batch(
sorted(p, key=self.sort_key),
self.batch_size, self.batch_size_fn)
for b in random_shuffler(list(p_batch)):
yield b
self.batches = pool(self.data(), self.random_shuffler)
else:
self.batches = []
for b in data.batch(self.data(), self.batch_size,
self.batch_size_fn):
self.batches.append(sorted(b, key=self.sort_key))
def rebatch(pad_idx, batch):
"Fix order in torchtext to match ours"
src, trg = batch.src.transpose(0, 1), batch.trg.transpose(0, 1)
return Batch(src, trg, pad_idx)
class LabelSmoothing(nn.Module):
"Implement label smoothing."
def __init__(self, size, padding_idx, smoothing=0.0):
super(LabelSmoothing, self).__init__()
self.criterion = nn.KLDivLoss(size_average=False)
self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.size = size
self.true_dist = None
def forward(self, x, target):
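        # Target distribution: `confidence` mass on the gold token, `smoothing`
        # spread uniformly over the other non-pad tokens (size - 2 excludes the
        # gold token and the padding index).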
assert x.size(1) == self.size
true_dist = x.data.clone()
true_dist.fill_(self.smoothing / (self.size - 2))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
true_dist[:, self.padding_idx] = 0
mask = torch.nonzero(target.data == self.padding_idx)
if mask.dim() > 0:
true_dist.index_fill_(0, mask.squeeze(), 0.0)
self.true_dist = true_dist
return self.criterion(x, Variable(true_dist, requires_grad=False))
class Batch:
"Object for holding a batch of data with mask during training."
def __init__(self, src, trg=None, pad=0):
self.src = src
self.src_mask = (src != pad).unsqueeze(-2)
if trg is not None:
self.trg = trg[:, :-1]
self.trg_y = trg[:, 1:]
self.trg_mask = \
self.make_std_mask(self.trg, pad)
self.ntokens = (self.trg_y != pad).data.sum()
@staticmethod
def make_std_mask(tgt, pad):
"Create a mask to hide padding and future words."
tgt_mask = (tgt != pad).unsqueeze(-2)
tgt_mask = tgt_mask & Variable(
subsequent_mask(tgt.size(-1)).type_as(tgt_mask.data))
return tgt_mask
def run_epoch(data_iter, model, loss_compute):
"Standard Training and Logging Function"
start = time.time()
total_tokens = 0
total_loss = 0
tokens = 0
for i, batch in enumerate(data_iter):
out = model.forward(batch.src, batch.trg,
batch.src_mask, batch.trg_mask)
loss = loss_compute(out, batch.trg_y, batch.ntokens)
total_loss += loss
total_tokens += batch.ntokens
tokens += batch.ntokens
if i % 50 == 1:
elapsed = time.time() - start
print(("Epoch Step: %d Loss: %f Tokens per Sec: %f" %
(i, loss / batch.ntokens, tokens / elapsed)))
start = time.time()
tokens = 0
return total_loss / total_tokens
global max_src_in_batch, max_tgt_in_batch
def batch_size_fn(new, count, sofar):
"Keep augmenting batch and calculate total number of tokens + padding."
global max_src_in_batch, max_tgt_in_batch
if count == 1:
max_src_in_batch = 0
max_tgt_in_batch = 0
max_src_in_batch = max(max_src_in_batch, len(new.src))
max_tgt_in_batch = max(max_tgt_in_batch, len(new.trg) + 2)
src_elements = count * max_src_in_batch
tgt_elements = count * max_tgt_in_batch
return max(src_elements, tgt_elements)
class NoamOpt:
"Optim wrapper that implements rate."
def __init__(self, model_size, factor, warmup, optimizer):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def step(self):
"Update parameters and rate"
self._step += 1
rate = self.rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def rate(self, step = None):
"Implement `lrate` above"
if step is None:
step = self._step
return self.factor * \
(self.model_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
def get_std_opt(model):
return NoamOpt(model.src_embed[0].d_model, 2, 4000,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
def data_gen(V, batch, nbatches):
"Generate random data for a src-tgt copy task."
for i in range(nbatches):
data = torch.from_numpy(np.random.randint(1, V, size=(batch, 10)))
data[:, 0] = 1
src = Variable(data, requires_grad=False)
tgt = Variable(data, requires_grad=False)
yield Batch(src, tgt, 0)
class SimpleLossCompute:
"A simple loss compute and train function."
def __init__(self, generator, criterion, opt=None):
self.generator = generator
self.criterion = criterion
self.opt = opt
def __call__(self, x, y, norm):
x = self.generator(x)
loss = self.criterion(x.contiguous().view(-1, x.size(-1)),
y.contiguous().view(-1)) / norm
loss.backward()
if self.opt is not None:
self.opt.step()
self.opt.optimizer.zero_grad()
return loss.data[0] * norm
if __name__ == '__main__':
"""
opts = [NoamOpt(512, 1, 4000, None),
NoamOpt(512, 1, 8000, None),
NoamOpt(256, 1, 4000, None)]
plt.plot(np.arange(1, 20000), [[opt.rate(i) for opt in opts] for i in range(1, 20000)])
plt.legend(["512:4000", "512:8000", "256:4000"])
plt.show()
crit = LabelSmoothing(5, 0, 0.4)
predict = torch.FloatTensor([[0, 0.2, 0.7, 0.1, 0],
[0, 0.2, 0.7, 0.1, 0],
[0, 0.2, 0.7, 0.1, 0]])
v = crit(Variable(predict.log()),
Variable(torch.LongTensor([2, 1, 0])))
# Show the target distributions expected by the system.
plt.imshow(crit.true_dist)
plt.show()
crit = LabelSmoothing(5, 0, 0.1)
def loss(x):
d = x + 3 * 1
predict = torch.FloatTensor([[0, x / d, 1 / d, 1 / d, 1 / d],
])
#print(predict)
return crit(Variable(predict.log()),
Variable(torch.LongTensor([1]))).data[0]
plt.plot(np.arange(1, 100), [loss(x) for x in range(1, 100)])
plt.show()
"""
# Train the simple copy task.
V = 11
criterion = LabelSmoothing(size=V, padding_idx=0, smoothing=0.0)
    model = make_model(None, V, V, N=2)  # needs a valid StructuredLinear params config
    for name, param in model.named_parameters():
        if param.requires_grad:
            print(name, param.data.shape)
model_opt = NoamOpt(model.src_embed[0].d_model, 1, 400,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
for epoch in range(10):
model.train()
run_epoch(data_gen(V, 30, 20), model,
SimpleLossCompute(model.generator, criterion, model_opt))
model.eval()
print((run_epoch(data_gen(V, 30, 5), model,
SimpleLossCompute(model.generator, criterion, None))))
|
structured-nets-master
|
pytorch/old/misc/attention/train.py
|
import copy
import numpy as np
import torch
import torch.nn.functional as F
from torchvision import transforms
from torch.autograd import Variable
use_cuda = torch.cuda.is_available()
def get_train_valid_datasets(dataset,
valid_size=0.1,
random_seed=None,
shuffle=True):
"""
Utility function for loading and returning train and validation
datasets.
Parameters:
------
- dataset: the dataset, need to have train_data and train_labels attributes.
- valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
- random_seed: fix seed for reproducibility.
- shuffle: whether to shuffle the train/validation indices.
Returns:
-------
- train_dataset: training set.
- valid_dataset: validation set.
"""
error_msg = "[!] valid_size should be in the range [0, 1]."
assert ((valid_size >= 0) and (valid_size <= 1)), error_msg
num_train = len(dataset)
indices = list(range(num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
split = int(np.floor(valid_size * num_train))
train_idx, valid_idx = indices[split:], indices[:split]
train_dataset, valid_dataset = copy.copy(dataset), copy.copy(dataset)
train_dataset.train_data = train_dataset.train_data[train_idx]
train_dataset.train_labels = train_dataset.train_labels[train_idx]
valid_dataset.train_data = valid_dataset.train_data[valid_idx]
valid_dataset.train_labels = valid_dataset.train_labels[valid_idx]
return train_dataset, valid_dataset
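# Usage sketch (assumes a dataset exposing train_data / train_labels, as
# torchvision's MNIST did at the time):
#   train_ds, valid_ds = get_train_valid_datasets(mnist_train, valid_size=0.1)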
def copy_with_new_transform(dataset, transform):
"""A copy of @dataset with its transform set to @transform.
"""
new_dataset = copy.copy(dataset)
new_dataset.transform = transform
return new_dataset
def augment_transforms(augmentations, base_transform, add_id_transform=True):
"""Construct a new transform that stack all the augmentations.
Parameters:
augmentations: list of transforms (e.g. image rotations)
base_transform: transform to be applied after augmentation (e.g. ToTensor)
add_id_transform: whether to include the original image (i.e. identity transform) in the new transform.
Return:
a new transform that takes in a data point and applies all the
augmentations, then stack the result.
"""
if add_id_transform:
fn = lambda x: torch.stack([base_transform(x)] + [base_transform(aug(x))
for aug in augmentations])
else:
fn = lambda x: torch.stack([base_transform(aug(x)) for aug in augmentations])
return transforms.Lambda(fn)
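# Usage sketch (hypothetical PIL-based rotation augmentation):
#   aug_tf = augment_transforms([lambda im: im.rotate(15)], transforms.ToTensor())
#   # each sample then yields a stacked tensor: [original, rotated]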
def train(data_loader, model, optimizer):
model.train()
train_loss, train_acc = [], []
for data, target in data_loader:
if use_cuda:
data, target = Variable(data.cuda()), Variable(target.cuda())
else:
data, target = Variable(data), Variable(target)
optimizer.zero_grad()
output = model(data)
pred = model.predict(output)
loss = model.loss(output, target)
loss.backward()
optimizer.step()
acc = (pred == target.data).sum() / target.data.size(0)
train_loss.append(loss.data)
train_acc.append(acc)
return train_loss, train_acc
def train_models_compute_agreement(data_loader, models, optimizers):
train_agreement = []
for model in models:
model.train()
for data, target in data_loader:
if use_cuda:
data, target = Variable(data.cuda()), Variable(target.cuda())
else:
data, target = Variable(data), Variable(target)
pred, loss = [], []
for model, optimizer in zip(models[:8], optimizers[:8]):
optimizer.zero_grad()
output = model(data)
pred.append(model.predict(output))
loss_minibatch = model.loss(output, target)
loss_minibatch.backward()
optimizer.step()
loss.append(loss_minibatch.data)
loss = np.array(loss)
pred = np.array([p.cpu().numpy() for p in pred])
train_agreement.append((pred == pred[0]).mean(axis=1))
return train_agreement
def train_all_epochs(train_loader,
valid_loader,
model,
optimizer,
n_epochs,
verbose=True):
model.train()
train_loss, train_acc, valid_acc = [], [], []
for epoch in range(n_epochs):
if verbose:
print(f'Train Epoch: {epoch}')
loss, acc = train(train_loader, model, optimizer)
train_loss += loss
train_acc += acc
correct, total = accuracy(valid_loader, model)
valid_acc.append(correct / total)
if verbose:
print(
f'Validation set: Accuracy: {correct}/{total} ({correct/total*100:.4f}%)'
)
return train_loss, train_acc, valid_acc
def accuracy(data_loader, model):
"""Accuracy over all mini-batches.
"""
training = model.training
model.eval()
correct, total = 0, 0
for data, target in data_loader:
if use_cuda:
data, target = Variable(
data.cuda(), volatile=True), Variable(target.cuda())
else:
data, target = Variable(data, volatile=True), Variable(target)
output = model(data)
pred = model.predict(output)
correct += (pred == target.data).sum()
total += target.size(0)
model.train(training)
return correct, total
def all_losses(data_loader, model):
"""All losses over all mini-batches.
"""
training = model.training
model.eval()
losses = []
for data, target in data_loader:
if use_cuda:
data, target = Variable(
data.cuda(), volatile=True), Variable(target.cuda())
else:
data, target = Variable(data, volatile=True), Variable(target)
losses.append([l.data[0] for l in model.all_losses(data, target)])
model.train(training)
return np.array(losses)
def agreement_kl_accuracy(data_loader, models):
training = [model.training for model in models]
for model in models:
model.eval()
valid_agreement, valid_acc, valid_kl = [], [], []
for data, target in data_loader:
if use_cuda:
data, target = Variable(data.cuda()), Variable(target.cuda())
else:
data, target = Variable(data), Variable(target)
pred, out = [], []
for model in models:
output = model(data)
out.append(output)
pred.append(model.predict(output))
pred = torch.stack(pred)
out = torch.stack(out)
log_prob = F.log_softmax(out, dim=-1)
prob = F.softmax(out[0], dim=-1).detach()
valid_kl.append([F.kl_div(lp, prob, size_average=False).data.cpu() / prob.size(0) for lp in log_prob])
valid_acc.append((pred == target.data).float().mean(dim=1).cpu().numpy())
valid_agreement.append((pred == pred[0]).float().mean(dim=1).cpu().numpy())
for model, training_ in zip(models, training):
model.train(training_)
return valid_agreement, valid_kl, valid_acc
|
structured-nets-master
|
pytorch/old/misc/circtest/utils.py
|
import numpy as np
import math
import torch
import torch.optim as optim
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torchvision import datasets, transforms
from torch import autograd
from torch.autograd import Variable
from utils import get_train_valid_datasets, train, train_all_epochs, accuracy, all_losses
use_cuda = torch.cuda.is_available()
class TwoLayerNet(nn.Module):
def __init__(self, n_features, n_classes):
super().__init__()
self.fc1 = nn.Linear(n_features, n_features)
self.fc2 = nn.Linear(n_features, n_classes)
def forward(self, x):
feat = F.relu(self.fc1(x.view(x.size(0), -1)))
return self.fc2(feat)
@staticmethod
def loss(output, target, reduce=True):
return F.cross_entropy(output, target, reduce=reduce)
@staticmethod
def predict(output):
return output.data.max(1)[1]
class Circulant(nn.Module):
def __init__(self, in_features, bias=True):
super().__init__()
self.in_features = in_features
self.weight = Parameter(torch.Tensor(in_features))
if bias:
self.bias = Parameter(torch.Tensor(in_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(0))
self.weight.data.uniform_(-stdv, stdv)
if self.bias is not None:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
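        # Krylov(shift, w) stacks the cyclic shifts [w, Sw, S^2 w, ...] as
        # columns, i.e. the circulant matrix whose first column is w.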
return F.linear(input, Krylov(shift, self.weight).t(), self.bias)
class TwoLayerCirculant(nn.Module):
def __init__(self, n_features, n_classes):
super().__init__()
self.fc1 = Circulant(n_features, bias=True)
self.fc2 = nn.Linear(n_features, n_classes)
def forward(self, x):
feat = F.relu(self.fc1(x.view(x.size(0), -1)))
return self.fc2(feat)
@staticmethod
def loss(output, target, reduce=True):
return F.cross_entropy(output, target, reduce=reduce)
@staticmethod
def predict(output):
return output.data.max(1)[1]
def shift(v, f=1):
return torch.cat((f * v[[v.size(0) - 1]], v[:-1]))
def Krylov(linear_map, v, n=None):
if n is None:
n = v.size(0)
cols = [v]
for _ in range(n - 1):
v = linear_map(v)
cols.append(v)
return torch.stack(cols, dim=-1)
batch_size = 256
if use_cuda:
loader_args = {'num_workers': 8, 'pin_memory': True}
else:
loader_args = {'num_workers': 1, 'pin_memory': False}
def loader_from_dataset(dataset):
return torch.utils.data.DataLoader(dataset, batch_size=batch_size,
shuffle=True, **loader_args)
mnist_normalize = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.1307, ), (0.3081, ))
])
mnist_train = datasets.MNIST(
'../data', train=True, download=True, transform=mnist_normalize)
# Just for faster training on CPU
# mnist_train.train_data = mnist_train.train_data[:5000]
mnist_test = datasets.MNIST(
    '../data', train=False, download=True, transform=mnist_normalize)
mnist_train, mnist_valid = get_train_valid_datasets(mnist_train)
train_loader = loader_from_dataset(mnist_train)
valid_loader = loader_from_dataset(mnist_valid)
test_loader = loader_from_dataset(mnist_test)
n_features = 28 * 28
n_classes = 10
gamma = 0.003 # Best gamma is around 0.001--0.003
n_components = 10000
sgd_n_epochs = 15
def sgd_opt_from_model(model, learning_rate=0.01, momentum=0.9, weight_decay=0.001):
return optim.SGD((p for p in model.parameters() if p.requires_grad),
lr=learning_rate, momentum=momentum,
weight_decay=weight_decay)
# model = TwoLayerNet(n_features, n_classes)
model = TwoLayerCirculant(n_features, n_classes)
optimizer = sgd_opt_from_model(model)
train_loss, train_acc, valid_acc = train_all_epochs(train_loader, valid_loader, model, optimizer, sgd_n_epochs)
correct, total = accuracy(test_loader, model)
print(f'Test set: Accuracy: {correct}/{total} ({correct/total*100:.4f}%)\n')
|
structured-nets-master
|
pytorch/old/misc/circtest/circulant.py
|
# -*- coding: utf-8 -*-
"""
Classifying Names with a Character-Level RNN
*********************************************
**Author**: `Sean Robertson <https://github.com/spro/practical-pytorch>`_
We will be building and training a basic character-level RNN to classify
words. A character-level RNN reads words as a series of characters -
outputting a prediction and "hidden state" at each step, feeding its
previous hidden state into each next step. We take the final prediction
to be the output, i.e. which class the word belongs to.
Specifically, we'll train on a few thousand surnames from 18 languages
of origin, and predict which language a name is from based on the
spelling:
::
$ python predict.py Hinton
(-0.47) Scottish
(-1.52) English
(-3.57) Irish
$ python predict.py Schmidhuber
(-0.19) German
(-2.48) Czech
(-2.68) Dutch
**Recommended Reading:**
I assume you have at least installed PyTorch, know Python, and
understand Tensors:
- http://pytorch.org/ For installation instructions
- :doc:`/beginner/deep_learning_60min_blitz` to get started with PyTorch in general
- :doc:`/beginner/pytorch_with_examples` for a wide and deep overview
- :doc:`/beginner/former_torchies_tutorial` if you are former Lua Torch user
It would also be useful to know about RNNs and how they work:
- `The Unreasonable Effectiveness of Recurrent Neural
Networks <http://karpathy.github.io/2015/05/21/rnn-effectiveness/>`__
shows a bunch of real life examples
- `Understanding LSTM
Networks <http://colah.github.io/posts/2015-08-Understanding-LSTMs/>`__
is about LSTMs specifically but also informative about RNNs in
general
Preparing the Data
==================
.. Note::
Download the data from
`here <https://download.pytorch.org/tutorial/data.zip>`_
and extract it to the current directory.
Included in the ``data/names`` directory are 18 text files named as
"[Language].txt". Each file contains a bunch of names, one name per
line, mostly romanized (but we still need to convert from Unicode to
ASCII).
We'll end up with a dictionary of lists of names per language,
``{language: [names ...]}``. The generic variables "category" and "line"
(for language and name in our case) are used for later extensibility.
"""
from __future__ import unicode_literals, print_function, division
from io import open
import glob
def findFiles(path): return glob.glob(path)
print(findFiles('data/names/*.txt'))
import unicodedata
import string
all_letters = string.ascii_letters + " .,;'"
n_letters = len(all_letters)
# Turn a Unicode string to plain ASCII, thanks to http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
return ''.join(
c for c in unicodedata.normalize('NFD', s)
if unicodedata.category(c) != 'Mn'
and c in all_letters
)
print(unicodeToAscii('Ślusàrski'))
# Build the category_lines dictionary, a list of names per language
category_lines = {}
all_categories = []
# Read a file and split into lines
def readLines(filename):
lines = open(filename, encoding='utf-8').read().strip().split('\n')
return [unicodeToAscii(line) for line in lines]
import os
for filename in findFiles('data/names/*.txt'):
    category = os.path.splitext(os.path.basename(filename))[0]
all_categories.append(category)
lines = readLines(filename)
category_lines[category] = lines
n_categories = len(all_categories)
######################################################################
# Now we have ``category_lines``, a dictionary mapping each category
# (language) to a list of lines (names). We also kept track of
# ``all_categories`` (just a list of languages) and ``n_categories`` for
# later reference.
#
print(category_lines['Italian'][:5])
######################################################################
# Turning Names into Tensors
# --------------------------
#
# Now that we have all the names organized, we need to turn them into
# Tensors to make any use of them.
#
# To represent a single letter, we use a "one-hot vector" of size
# ``<1 x n_letters>``. A one-hot vector is filled with 0s except for a 1
# at index of the current letter, e.g. ``"b" = <0 1 0 0 0 ...>``.
#
# To make a word we join a bunch of those into a 2D matrix
# ``<line_length x 1 x n_letters>``.
#
# That extra 1 dimension is because PyTorch assumes everything is in
# batches - we're just using a batch size of 1 here.
#
import torch
# Find letter index from all_letters, e.g. "a" = 0
def letterToIndex(letter):
return all_letters.find(letter)
# Just for demonstration, turn a letter into a <1 x n_letters> Tensor
def letterToTensor(letter):
tensor = torch.zeros(1, n_letters)
tensor[0][letterToIndex(letter)] = 1
return tensor
# Turn a line into a <line_length x 1 x n_letters>,
# or an array of one-hot letter vectors
def lineToTensor(line):
tensor = torch.zeros(len(line), 1, n_letters)
for li, letter in enumerate(line):
tensor[li][0][letterToIndex(letter)] = 1
return tensor
print(letterToTensor('J'))
print(lineToTensor('Jones').size())
######################################################################
# Creating the Network
# ====================
#
# Before autograd, creating a recurrent neural network in Torch involved
# cloning the parameters of a layer over several timesteps. The layers
# held hidden state and gradients which are now entirely handled by the
# graph itself. This means you can implement a RNN in a very "pure" way,
# as regular feed-forward layers.
#
# This RNN module (mostly copied from `the PyTorch for Torch users
# tutorial <http://pytorch.org/tutorials/beginner/former_torchies/
# nn_tutorial.html#example-2-recurrent-net>`__)
# is just 2 linear layers which operate on an input and hidden state, with
# a LogSoftmax layer after the output.
#
# .. figure:: https://i.imgur.com/Z2xbySO.png
# :alt:
#
#
import torch.nn as nn
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
combined = torch.cat((input, hidden), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return torch.zeros(1, self.hidden_size)
n_hidden = 128
rnn = RNN(n_letters, n_hidden, n_categories)
######################################################################
# To run a step of this network we need to pass an input (in our case, the
# Tensor for the current letter) and a previous hidden state (which we
# initialize as zeros at first). We'll get back the output (probability of
# each language) and a next hidden state (which we keep for the next
# step).
#
input = letterToTensor('A')
hidden = torch.zeros(1, n_hidden)
output, next_hidden = rnn(input, hidden)
######################################################################
# For the sake of efficiency we don't want to be creating a new Tensor for
# every step, so we will use ``lineToTensor`` instead of
# ``letterToTensor`` and use slices. This could be further optimized by
# pre-computing batches of Tensors.
#
input = lineToTensor('Albert')
hidden = torch.zeros(1, n_hidden)
output, next_hidden = rnn(input[0], hidden)
print(output)
######################################################################
# As you can see the output is a ``<1 x n_categories>`` Tensor, where
# every item is the likelihood of that category (higher is more likely).
#
######################################################################
#
# Training
# ========
# Preparing for Training
# ----------------------
#
# Before going into training we should make a few helper functions. The
# first is to interpret the output of the network, which we know to be a
# likelihood of each category. We can use ``Tensor.topk`` to get the index
# of the greatest value:
#
def categoryFromOutput(output):
top_n, top_i = output.topk(1)
category_i = top_i[0].item()
return all_categories[category_i], category_i
print(categoryFromOutput(output))
######################################################################
# We will also want a quick way to get a training example (a name and its
# language):
#
import random
def randomChoice(l):
return l[random.randint(0, len(l) - 1)]
def randomTrainingExample():
category = randomChoice(all_categories)
line = randomChoice(category_lines[category])
category_tensor = torch.tensor([all_categories.index(category)], dtype=torch.long)
line_tensor = lineToTensor(line)
return category, line, category_tensor, line_tensor
for i in range(10):
category, line, category_tensor, line_tensor = randomTrainingExample()
print('category =', category, '/ line =', line)
######################################################################
# Training the Network
# --------------------
#
# Now all it takes to train this network is show it a bunch of examples,
# have it make guesses, and tell it if it's wrong.
#
# For the loss function ``nn.NLLLoss`` is appropriate, since the last
# layer of the RNN is ``nn.LogSoftmax``.
#
criterion = nn.NLLLoss()
######################################################################
# Each loop of training will:
#
# - Create input and target tensors
# - Create a zeroed initial hidden state
# - Read each letter in and
#
# - Keep hidden state for next letter
#
# - Compare final output to target
# - Back-propagate
# - Return the output and loss
#
learning_rate = 0.005 # If you set this too high, it might explode. If too low, it might not learn
def train(category_tensor, line_tensor):
hidden = rnn.initHidden()
rnn.zero_grad()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
loss = criterion(output, category_tensor)
loss.backward()
# Add parameters' gradients to their values, multiplied by learning rate
for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)
return output, loss.item()
######################################################################
# Now we just have to run that with a bunch of examples. Since the
# ``train`` function returns both the output and loss we can print its
# guesses and also keep track of loss for plotting. Since there are 1000s
# of examples we print only every ``print_every`` examples, and take an
# average of the loss.
#
import time
import math
n_iters = 100000
print_every = 5000
plot_every = 1000
# Keep track of losses for plotting
current_loss = 0
all_losses = []
def timeSince(since):
now = time.time()
s = now - since
m = math.floor(s / 60)
s -= m * 60
return '%dm %ds' % (m, s)
start = time.time()
for iter in range(1, n_iters + 1):
category, line, category_tensor, line_tensor = randomTrainingExample()
output, loss = train(category_tensor, line_tensor)
current_loss += loss
# Print iter number, loss, name and guess
if iter % print_every == 0:
guess, guess_i = categoryFromOutput(output)
correct = '✓' if guess == category else '✗ (%s)' % category
print('%d %d%% (%s) %.4f %s / %s %s' % (iter, iter / n_iters * 100, timeSince(start), loss, line, guess, correct))
# Add current loss avg to list of losses
if iter % plot_every == 0:
all_losses.append(current_loss / plot_every)
current_loss = 0
######################################################################
# Plotting the Results
# --------------------
#
# Plotting the historical loss from ``all_losses`` shows the network
# learning:
#
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.figure()
plt.plot(all_losses)
######################################################################
# Evaluating the Results
# ======================
#
# To see how well the network performs on different categories, we will
# create a confusion matrix, indicating for every actual language (rows)
# which language the network guesses (columns). To calculate the confusion
# matrix a bunch of samples are run through the network with
# ``evaluate()``, which is the same as ``train()`` minus the backprop.
#
# Keep track of correct guesses in a confusion matrix
confusion = torch.zeros(n_categories, n_categories)
n_confusion = 10000
# Just return an output given a line
def evaluate(line_tensor):
hidden = rnn.initHidden()
for i in range(line_tensor.size()[0]):
output, hidden = rnn(line_tensor[i], hidden)
return output
# Go through a bunch of examples and record which are correctly guessed
correct = 0
total = 0
for i in range(n_confusion):
category, line, category_tensor, line_tensor = randomTrainingExample()
output = evaluate(line_tensor)
guess, guess_i = categoryFromOutput(output)
category_i = all_categories.index(category)
if guess_i == category_i:
correct += 1
total += 1
confusion[category_i][guess_i] += 1
# Normalize by dividing every row by its sum
for i in range(n_categories):
confusion[i] = confusion[i] / confusion[i].sum()
# Print overall accuracy
print('Total accuracy: ', float(correct)/total)
# Set up plot
fig = plt.figure()
ax = fig.add_subplot(111)
cax = ax.matshow(confusion.numpy())
fig.colorbar(cax)
# Set up axes
ax.set_xticklabels([''] + all_categories, rotation=90)
ax.set_yticklabels([''] + all_categories)
# Force label at every tick
ax.xaxis.set_major_locator(ticker.MultipleLocator(1))
ax.yaxis.set_major_locator(ticker.MultipleLocator(1))
# sphinx_gallery_thumbnail_number = 2
plt.show()
######################################################################
# You can pick out bright spots off the main axis that show which
# languages it guesses incorrectly, e.g. Chinese for Korean, and Spanish
# for Italian. It seems to do very well with Greek, and very poorly with
# English (perhaps because of overlap with other languages).
#
######################################################################
# Running on User Input
# ---------------------
#
def predict(input_line, n_predictions=3):
print('\n> %s' % input_line)
with torch.no_grad():
output = evaluate(lineToTensor(input_line))
# Get top N categories
topv, topi = output.topk(n_predictions, 1, True)
predictions = []
for i in range(n_predictions):
value = topv[0][i].item()
category_index = topi[0][i].item()
print('(%.2f) %s' % (value, all_categories[category_index]))
predictions.append([value, all_categories[category_index]])
predict('Dovesky')
predict('Jackson')
predict('Satoshi')
######################################################################
# The final versions of the scripts `in the Practical PyTorch
# repo <https://github.com/spro/practical-pytorch/tree/master/char-rnn-classification>`__
# split the above code into a few files:
#
# - ``data.py`` (loads files)
# - ``model.py`` (defines the RNN)
# - ``train.py`` (runs training)
# - ``predict.py`` (runs ``predict()`` with command line arguments)
# - ``server.py`` (serve prediction as a JSON API with bottle.py)
#
# Run ``train.py`` to train and save the network.
#
# Run ``predict.py`` with a name to view predictions:
#
# ::
#
# $ python predict.py Hazaki
# (-0.42) Japanese
# (-1.39) Polish
# (-3.51) Czech
#
# Run ``server.py`` and visit http://localhost:5533/Yourname to get JSON
# output of predictions.
#
######################################################################
# Exercises
# =========
#
# - Try with a different dataset of line -> category, for example:
#
# - Any word -> language
# - First name -> gender
# - Character name -> writer
# - Page title -> blog or subreddit
#
# - Get better results with a bigger and/or better shaped network
#
# - Add more linear layers
# - Try the ``nn.LSTM`` and ``nn.GRU`` layers
# - Combine multiple of these RNNs as a higher level network
#
|
structured-nets-master
|
pytorch/old/misc/charRNN/char_rnn_classification_tutorial.py
|
import torch
import functools
import numpy as np
from torch.autograd import Variable
from torch.nn.parameter import Parameter  # needed for the learnable operators below
import time
# Down shift
def Z_mult_fn(f, x):
    return torch.cat((f * x[-1:], x[:-1]))
# Up shift
def Z_transpose_mult_fn(f, x):
    return torch.cat((x[1:], f * x[:1]))
# Diagonal multiplication
def diag_mult_fn(diag, x):
return diag * x
# Circulant sparsity pattern operators
def circ_mult_fn(subdiag_f, x):
# note: f corresponds to last element instead of first
    y = torch.cat((x[-1:], x[:-1]))
return y * subdiag_f
def circ_transpose_mult_fn(subdiag_f, x):
# Circular shift
    y = torch.cat((x[1:], x[:1]))
# Scale by [v f]
return y * subdiag_f
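# Note (added): circ_mult_fn(s, x) applies the operator whose cyclic
# subdiagonal holds s -- i.e. gen_Z_f(n, f=s[0], v=s[1:]) from torch_utils --
# by cyclically shifting x down and scaling elementwise, in O(n) rather than
# via an explicit n x n matrix multiply.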
# Tridiagonal + corner operators
# TODO NEEDS FIX
def tridiag_transpose_mult_fn(subdiag_f, diag, supdiag, x):
    y = torch.cat((x[1:], x[:1]))
sub_result = y * subdiag_f
z = Variable(torch.zeros(1).cuda())
sup_result = torch.cat((z, x[:-1] * supdiag))
diag_result = x*diag
return sup_result + sub_result + diag_result
# Assumes Stein displacement.
def set_mult_fns(self, params):
# assert params.disp_type == 'stein'
if params.class_type in ['toeplitz_like', 'toep_corner', 'toep_nocorn']:
fn_A = functools.partial(Z_mult_fn, 1)
fn_B_T = functools.partial(Z_mult_fn, -1)
# TODO: operators for hankel and vandermonde have not been checked for transpose consistency
elif params.class_type == 'hankel_like':
fn_A = functools.partial(Z_transpose_mult_fn, 1)
fn_B_T = functools.partial(Z_mult_fn, 0)
elif params.class_type == 'vandermonde_like':
v = Parameter(torch.Tensor(params.layer_size))
torch.nn.init.normal_(v,std=params.init_stddev)
self.v = v
fn_A = functools.partial(diag_mult_fn, self.v)
fn_B_T = functools.partial(Z_transpose_mult_fn, 0)
elif params.class_type == 'circulant_sparsity':
self.subdiag_f_A = Parameter(torch.Tensor(params.layer_size))
self.subdiag_f_B = Parameter(torch.Tensor(params.layer_size))
torch.nn.init.normal_(self.subdiag_f_A,std=params.init_stddev)
torch.nn.init.normal_(self.subdiag_f_B,std=params.init_stddev)
fn_A = functools.partial(circ_mult_fn, self.subdiag_f_A)
fn_B_T = functools.partial(circ_mult_fn, self.subdiag_f_B)
elif params.class_type == 'tridiagonal_corner':
self.subdiag_f_A = Parameter(torch.Tensor(params.layer_size))
self.subdiag_f_B = Parameter(torch.Tensor(params.layer_size))
self.diag_A = Parameter(torch.Tensor(params.layer_size))
self.diag_B = Parameter(torch.Tensor(params.layer_size))
self.supdiag_A = Parameter(torch.Tensor(params.layer_size-1))
self.supdiag_B = Parameter(torch.Tensor(params.layer_size-1))
torch.nn.init.normal_(self.subdiag_f_A,std=params.init_stddev)
torch.nn.init.normal_(self.subdiag_f_B,std=params.init_stddev)
torch.nn.init.normal_(self.diag_A,std=params.init_stddev)
torch.nn.init.normal_(self.diag_B,std=params.init_stddev)
torch.nn.init.normal_(self.supdiag_A,std=params.init_stddev)
torch.nn.init.normal_(self.supdiag_B,std=params.init_stddev)
        # only the transpose variant is implemented above (see TODO)
        fn_A = functools.partial(tridiag_transpose_mult_fn, self.subdiag_f_A, self.diag_A, self.supdiag_A)
        fn_B_T = functools.partial(tridiag_transpose_mult_fn, self.subdiag_f_B, self.diag_B, self.supdiag_B)
else:
print(('Not supported: ', params.class_type))
assert 0
return fn_A, fn_B_T
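# Usage sketch (hypothetical `params`; not in the original file): `params`
# must provide class_type, layer_size and init_stddev. The returned closures
# compute x -> A x and x -> B^T x for the Krylov reconstruction in
# torch_reconstruction.py, registering any learnable operator entries
# (e.g. subdiag_f_A/B) as Parameters on `self`.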
|
structured-nets-master
|
pytorch/old/utils/torch_krylov.py
|
import torch
from torch.autograd import Variable
import time
from torch_utils import *
from torch_krylov import *
from scipy.linalg import toeplitz
import numpy as np
import functools
def krylov(fn, v, n):
cols = [v]
for _ in range(n - 1):
v = fn(v)
cols.append(v)
return torch.stack(cols, dim=-1)
def krylov_recon(r, n, G, H, fn_A, fn_B_T):
"G, H: (r, n)"
W1 = Variable(torch.zeros(n, n).cuda())
for i in range(r):
K_A = krylov(fn_A, G[i], n)
K_B = krylov(fn_B_T, H[i], n).t()
prod = torch.matmul(K_A, K_B).cuda()
W1 = torch.add(W1, prod)
return W1
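# (Added note) For Stein displacement M - A M B = G H^T with A, B
# f-unit-circulant operators, M can be recovered as c * sum_i K_A(g_i) K_{B^T}(h_i)^T,
# where K_A(v) = [v, Av, A^2 v, ...]; recon() below supplies the scalar c
# (e.g. 1/2 for Toeplitz-like, 1/(1 - a*b) for circulant sparsity).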
def recon(net):
W = krylov_recon(net.params.r, net.params.layer_size, net.G, net.H, net.fn_A, net.fn_B_T)
# Normalize
if net.params.class_type in ['vandermonde_like', 'hankel_like']:
return W
if net.params.class_type == 'toeplitz_like':
return 0.5*W
elif net.params.class_type in ['circulant_sparsity', 'tridiagonal_corner']:
# Compute a and b
a = torch.prod(net.subdiag_f_A)
b = torch.prod(net.subdiag_f_B)
coeff = 1.0/(1 - a*b)
return coeff*W
else:
print(('Class_type not supported: ', net.params.class_type))
assert 0
if __name__ == '__main__':
# Tests
# Toeplitz matrix
n = 10
disp_rank = 2
c = np.random.random(n)
r = np.random.random(n)
T = toeplitz(c,r)
A = gen_Z_f(n, 1).T
B = gen_Z_f(n, -1)
E = T - np.dot(A,np.dot(T,B))
print(np.linalg.matrix_rank(E))
U, S, V = np.linalg.svd(E, full_matrices=False)
SV = np.dot(np.diag(S), V)
G = torch.FloatTensor(U[:, 0:disp_rank])
H = torch.FloatTensor(SV[0:disp_rank, :].T)
fn_A = functools.partial(Z_transpose_mult_fn, 1)
fn_B_T = functools.partial(Z_transpose_mult_fn, -1)
W = 0.5*krylov_recon(disp_rank, n, G, H, fn_A, fn_B_T)
print('W: ', W)
print('T: ', T)
|
structured-nets-master
|
pytorch/old/utils/torch_reconstruction.py
|
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
# Circulant sparsity pattern
def gen_Z_f(m, f, v=None):
if v is not None:
assert v.size <= m-1
I_m = np.eye(m-1, m-1)
Z_f = np.hstack((I_m, np.zeros((m-1, 1))))
Z_f = np.vstack((np.zeros((1, m)), Z_f))
Z_f[0, -1] = f
if v is not None:
for i in range(v.size):
Z_f[i+1, i] = v[i]
return Z_f
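# For example (a sketch of the expected output of the code above):
# gen_Z_f(4, f) =
# [[0 0 0 f]
#  [1 0 0 0]
#  [0 1 0 0]
#  [0 0 1 0]]
# with the optional v overwriting the subdiagonal ones.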
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
class NoamOpt:
"Optim wrapper that implements rate."
def __init__(self, model_size, factor, warmup, optimizer):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def step(self):
"Update parameters and rate"
self._step += 1
rate = self.rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def rate(self, step = None):
"Implement `lrate` above"
if step is None:
step = self._step
return self.factor * \
(self.model_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
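# (Added note) This is the "Noam" schedule from Vaswani et al. (2017):
# lrate = factor * d_model^{-0.5} * min(step^{-0.5}, step * warmup^{-1.5}),
# i.e. linear warmup for `warmup` steps, then inverse-square-root decay.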
def get_std_opt(model):
return NoamOpt(model.src_embed[0].d_model, 2, 4000,
torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
# TODO AG: are the non cross-entropy cases even used?
def get_loss(params, generator=None, model_opt=None):
loss_fn = None
if params.dataset_name.startswith('true'):
assert params.loss == 'mse'
loss_fn = nn.MSELoss()
elif params.dataset_name.startswith('copy'):
assert params.loss == 'label_smoothing'
ls = LabelSmoothing(params.ls_size, params.ls_padding_idx, params.ls_smoothing)
loss_fn = SimpleLossCompute(generator, ls, model_opt)
else:
assert params.loss == 'cross_entropy'
loss_fn = nn.CrossEntropyLoss()
return params.loss, loss_fn
class SimpleLossCompute:
"A simple loss compute and train function."
def __init__(self, generator, criterion, opt=None):
self.generator = generator
self.criterion = criterion
self.opt = opt
def __call__(self, x, y, norm):
x = self.generator(x)
loss = self.criterion(x.contiguous().view(-1, x.size(-1)),
y.contiguous().view(-1)) / norm
loss.backward()
if self.opt is not None:
self.opt.step()
self.opt.optimizer.zero_grad()
        return loss.item() * norm
class LabelSmoothing(nn.Module):
"Implement label smoothing."
def __init__(self, size, padding_idx, smoothing=0.0):
super(LabelSmoothing, self).__init__()
self.criterion = nn.KLDivLoss(size_average=False)
self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
self.size = size
self.true_dist = None
def forward(self, x, target):
assert x.size(1) == self.size
true_dist = x.data.clone()
true_dist.fill_(self.smoothing / (self.size - 2))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
true_dist[:, self.padding_idx] = 0
mask = torch.nonzero(target.data == self.padding_idx)
if mask.dim() > 0:
true_dist.index_fill_(0, mask.squeeze(), 0.0)
self.true_dist = true_dist
return self.criterion(x, Variable(true_dist, requires_grad=False))
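# (Added example) With size=5, padding_idx=0, smoothing=0.4 and target class 2,
# the smoothed target row is [0.0, 0.1333, 0.6, 0.1333, 0.1333]:
# confidence 0.6 at the true class, smoothing/(size-2) elsewhere, and
# probability 0 at the padding index.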
# pred: predicted class scores. true: one-hot labels.
def compute_loss_and_accuracy(pred, true, loss_name):
if loss_name == 'mse':
loss_fn = nn.MSELoss()
mse = loss_fn(pred, true)
accuracy = torch.FloatTensor([0])
return mse, accuracy
elif loss_name == 'cross_entropy':
loss_fn = nn.CrossEntropyLoss()
_, true_argmax = torch.max(true, 1)
cross_entropy = loss_fn(pred, true_argmax)
_, pred_argmax = torch.max(pred, 1)
correct_prediction = torch.eq(true_argmax, pred_argmax)
accuracy = torch.mean(correct_prediction.float())
return cross_entropy, accuracy
else:
print(('Not supported: ', loss_name))
assert 0
|
structured-nets-master
|
pytorch/old/utils/torch_utils.py
|
from inspect import signature
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import structure.LDR as ldr
import structure.layer as sl
def construct_model(cls, in_size, out_size, args):
args_fn = cls.args
options = {param: vars(args)[param]
for param in signature(args_fn).parameters}
return cls(in_size=in_size, out_size=out_size, **options)
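# Usage sketch (hypothetical argparse namespace; not in the original file):
#   args = argparse.Namespace(class_type='toeplitz', layer_size=-1, r=4,
#                             bias=True, hidden_size=-1)
#   model = construct_model(SHL, in_size=784, out_size=10, args=args)
# construct_model reads exactly the options named in SHL.args() from `args`.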
class ArghModel(nn.Module):
def __init__(self, in_size, out_size, **options):
""""
options: dictionary of options/params that args() accepts
If the model if constructed with construct_model(), the options will contain defaults based on its args() function
"""
super().__init__()
self.in_size = in_size
self.out_size = out_size
self.__dict__.update(**options)
self.reset_parameters()
def args():
"""
Empty function whose signature contains parameters and defaults for the class
"""
pass
def reset_parameters(self):
pass
def name(self):
"""
Short string summarizing the main parameters of the class
Used to construct a unique identifier for an experiment
"""
return ''
def loss(self):
"""
Model-specific loss function (e.g. per-parameter regularization)
"""
return 0
# Pytorch tutorial lenet variant
class Lenet(ArghModel):
def reset_parameters(self):
# in_channels, out_channels, kernel_size, stride=1, padding=0, dilation=1, groups=1, bias=True
self.conv1 = nn.Conv2d(3, 6, 5)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5)
self.fc1 = nn.Linear(16 * 5 * 5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = x.view(-1, 3, 32, 32)
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, 16 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class CNN(ArghModel):
"""
Single channel net where the dense last layer has same dimensions as input
"""
def name(self):
return self.layers[0].name()
    def args(class_type='unconstrained', layer_size=-1, r=1, bias=True, hidden_size=-1): pass
def reset_parameters(self):
if self.layer_size == -1:
self.layer_size = self.in_size
if self.hidden_size == -1:
self.hidden_size = self.layer_size
assert self.layer_size == self.in_size
self.d = int(np.sqrt(self.layer_size))
self.conv1 = nn.Conv2d(1, 6, 5, padding=2)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5, padding=2)
layers = []
layers.append(sl.StructuredLinear(self.class_type, layer_size=self.layer_size, r=self.r, bias=self.bias,
hidden_size=self.hidden_size))
self.layers = nn.ModuleList(layers)
self.logits = nn.Linear(self.hidden_size, self.out_size)
def forward(self, x):
x = x.view(-1, 1, self.d, self.d)
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, self.layer_size)
x = F.relu(self.layers[0](x))
x = self.logits(x)
return x
class CNNColor(ArghModel):
"""
3 channel net where the dense last layer has same dimensions as input
"""
    def args(class_type='unconstrained', layer_size=-1, r=1, bias=True, hidden_size=-1): pass
def reset_parameters(self):
self.layer_size = int(self.in_size/3)
if self.hidden_size == -1:
self.hidden_size = self.layer_size
self.d = int(np.sqrt(self.layer_size))
self.conv1 = nn.Conv2d(3, 6, 5, padding=2)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5, padding=2)
self.W = sl.StructuredLinear(self.class_type, layer_size=self.layer_size, r=self.r, bias=self.bias,
hidden_size=self.hidden_size)
self.logits = nn.Linear(self.hidden_size, self.out_size)
def name(self):
return self.W.name()
def forward(self, x):
x = x.view(-1, 3, self.d, self.d)
x = self.pool(F.relu(self.conv1(x)))
x = self.pool(F.relu(self.conv2(x)))
x = x.view(-1, self.layer_size)
x = F.relu(self.W(x))
x = self.logits(x)
return x
class CNNPool(ArghModel):
"""
Simple 3 layer CNN with pooling for cifar10
"""
def name(self):
return str(self.channels) + 'pool'
def args(channels=3, fc_size=512): pass
def reset_parameters(self):
self.conv1 = nn.Conv2d(3, self.channels, 5, padding=2)
self.pool = nn.MaxPool2d(2, 2)
self.fc = nn.Linear(self.channels*1024//4, self.fc_size)
self.logits = nn.Linear(self.fc_size,10)
def forward(self, x):
x = x.view(-1, 3, 32, 32)
x = F.relu(self.conv1(x))
x = self.pool(x)
x = x.view(-1, self.channels*1024//4)
x = F.relu(self.fc(x))
x = self.logits(x)
return x
class TwoLayer(ArghModel):
"""
Simple 2 hidden layer network: convolution channels or FC, FC, softmax
"""
def name(self):
return "3conv"
def args(conv=True): pass
def reset_parameters(self):
if self.conv:
self.conv1 = nn.Conv2d(3, 3, 5, padding=2)
else:
self.conv1 = nn.Linear(3*1024, 3*1024)
self.fc = nn.Linear(3*1024, 512)
self.logits = nn.Linear(512, 10)
def forward(self, x):
if self.conv:
x = x.view(-1, 3, 32, 32)
x = F.relu(self.conv1(x))
x = x.view(-1, 3*1024)
else:
x = F.relu(self.conv1(x))
x = F.relu(self.fc(x))
x = self.logits(x)
return x
def loss(self):
return 0
class WLDRFC(ArghModel):
"""
LDR layer (single weight matrix), followed by FC and softmax
"""
def name(self):
return self.W.name()+'u'
def args(class_type='unconstrained', layer_size=-1, r=1, fc_size = 512): pass
def reset_parameters(self):
if self.layer_size == -1: self.layer_size = self.in_size
self.W = sl.StructuredLinear(self.class_type, layer_size=self.layer_size, r=self.r)
self.fc = nn.Linear(3*1024, self.fc_size)
self.logits = nn.Linear(self.fc_size, 10)
def forward(self, x):
x = self.W(x)
x = F.relu(self.fc(x))
x = self.logits(x)
return x
def loss(self):
return self.W.loss()
class LDRFC(ArghModel):
"""
LDR layer with channels, followed by FC and softmax
"""
def name(self):
return self.W.name()+'u'
def args(class_type='t', r=1, channels=3, fc_size=512): pass
def reset_parameters(self):
self.n = 1024
self.LDR1 = ldr.LDR(self.class_type, 3, self.channels, self.r, self.n, bias=True)
self.fc = nn.Linear(self.channels*self.n, self.fc_size)
self.logits = nn.Linear(self.fc_size, 10)
def forward(self, x):
x = x.view(-1, 3, 1024)
x = x.transpose(0,1).contiguous().view(3, -1, self.n)
x = F.relu(self.LDR1(x))
x = x.transpose(0,1) # swap batches and channels axis
x = x.contiguous().view(-1, self.channels*self.n)
x = F.relu(self.fc(x))
x = self.logits(x)
return x
def loss(self):
return self.LDR1.loss()
class LDRLDR(ArghModel):
"""
LDR layer (either 3 channels or one wide matrix), followed by another LDR layer, then softmax
intended for 3-channel images of size 1024 (e.g. CIFAR-10)
"""
def name(self):
# w = 'wide' if not self.channels else ''
return self.LDR1.name() + self.LDR211.name()
def args(class1='toeplitz', class2='toeplitz', channels=False, rank1=48, rank2=16): pass
def reset_parameters(self):
self.n = 1024
self.fc_size = self.n // 2
if self.channels:
self.LDR1 = ldr.LDR(self.class1, 3, 3, self.rank1, self.n)
else:
self.LDR1 = sl.StructuredLinear(self.class1, layer_size=3*self.n, r=self.rank1)
self.LDR211 = sl.StructuredLinear(self.class2, layer_size=self.fc_size, r=self.rank2)
self.LDR212 = sl.StructuredLinear(self.class2, layer_size=self.fc_size, r=self.rank2)
self.LDR221 = sl.StructuredLinear(self.class2, layer_size=self.fc_size, r=self.rank2)
self.LDR222 = sl.StructuredLinear(self.class2, layer_size=self.fc_size, r=self.rank2)
self.LDR231 = sl.StructuredLinear(self.class2, layer_size=self.fc_size, r=self.rank2)
self.LDR232 = sl.StructuredLinear(self.class2, layer_size=self.fc_size, r=self.rank2)
self.b = Parameter(torch.zeros(self.fc_size))
self.logits = nn.Linear(self.fc_size, 10)
def forward(self, x):
if self.channels:
x = x.view(-1, 3, self.n)
x = x.transpose(0,1).contiguous().view(3, -1, self.n)
x = F.relu(self.LDR1(x))
else:
x = F.relu(self.LDR1(x))
x = x.view(-1, 3, self.n)
x = x.transpose(0,1).contiguous().view(3, -1, self.n)
x11 = x[0][:,:self.fc_size]
x12 = x[0][:,self.fc_size:]
x21 = x[1][:,:self.fc_size]
x22 = x[1][:,self.fc_size:]
x31 = x[2][:,:self.fc_size]
x32 = x[2][:,self.fc_size:]
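        # (Added note) The six square LDR blocks below act together as one
        # rectangular (fc_size x 3n) structured map: each channel is split
        # into two halves of size fc_size = n/2, sent through its own block,
        # and the block outputs are summed (a block-row [W11 ... W32] times x).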
x = F.relu(self.LDR211(x11) + self.LDR212(x12) + self.LDR221(x21) + self.LDR222(x22) + self.LDR231(x31) + self.LDR232(x32) + self.b)
x = self.logits(x)
return x
def loss(self):
return self.LDR1.loss()
class LDRLDR2(ArghModel):
"""
Same as LDRLDR but use larger matrix to represent rectangular LDR
"""
def name(self):
return self.LDR1.name() + self.LDR2.name()
def args(class1='toeplitz', class2='toeplitz', layer_size=-1, channels=3, fc_size=512, rank1=48, rank2=16): pass
def reset_parameters(self):
if self.layer_size == -1:
self.layer_size = self.in_size
self.n = self.layer_size
self.LDR1 = sl.StructuredLinear(self.class1, layer_size=self.channels*self.n, r=self.rank1, bias=True)
self.LDR2 = sl.StructuredLinear(self.class2,layer_size=self.channels*self.n, r=self.rank2, bias=True)
self.logits = nn.Linear(self.fc_size, 10)
def forward(self, x):
batch_size, n = x.shape[0], x.shape[1]
x = torch.cat((x, torch.zeros(batch_size, self.channels*self.n-n).cuda()), dim=-1)
x = F.relu(self.LDR1(x))
x = F.relu(self.LDR2(x))
x = x[:,:self.fc_size]
x = self.logits(x)
return x
def loss(self):
return self.LDR1.loss()
class SL(ArghModel):
"""
Single layer linear model (for synthetic regression tests)
"""
def name(self):
return self.W.name()
def args(class_type='unconstrained', layer_size=-1, r=1, bias=False, hidden_size=-1): pass
def reset_parameters(self):
if self.layer_size == -1:
self.layer_size = self.in_size
if self.hidden_size == -1:
self.hidden_size = self.in_size
self.W = sl.StructuredLinear(self.class_type, layer_size=self.layer_size, r=self.r, bias=self.bias,
hidden_size=self.hidden_size)
def forward(self, x):
return self.W(x)
class SHL(SL):
"""
Single hidden layer
"""
def args(class_type='unconstrained', layer_size=-1, r=1, bias=True, hidden_size=-1): pass
def reset_parameters(self):
super().reset_parameters()
self.W2 = nn.Linear(self.hidden_size, self.out_size)
def forward(self, x):
return self.W2(F.relu(self.W(x)))
class MLP(ArghModel):
"""
Multi layer fully connected net.
"""
def name(self):
return self.layers[0].name()
def args(class_type='unconstrained', layer_size=-1, r=1, bias=True, num_layers=1): pass
def reset_parameters(self):
if self.layer_size == -1:
self.layer_size = self.in_size
layers = []
for layer in range(self.num_layers):
layers.append(sl.StructuredLinear(self.class_type,layer_size=self.layer_size, r=self.r, bias=self.bias))
self.layers = nn.ModuleList(layers)
self.W2 = nn.Linear(self.layer_size, self.out_size)
def forward(self, x):
output = F.relu(self.layers[0](x))
for i in range(self.num_layers-1):
output = F.relu(self.layers[i+1](output))
return self.W2(output)
|
structured-nets-master
|
pytorch/models/nets.py
|
"""
Modified from pytorch/examples/word_language_model to demonstrate 'StructuredLinear' usage.
"""
###############################################################################
# Language Modeling on Penn Tree Bank
#
# This file generates new sentences sampled from the language model
#
###############################################################################
import argparse
import torch
from torch.autograd import Variable
import data
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 Language Model')
# Model parameters.
parser.add_argument('--data', type=str, default='./data/wikitext-2',
help='location of the data corpus')
parser.add_argument('--checkpoint', type=str, default='./model.pt',
help='model checkpoint to use')
parser.add_argument('--outf', type=str, default='generated.txt',
help='output file for generated text')
parser.add_argument('--words', type=int, default=1000,
help='number of words to generate')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature - higher will increase diversity')
parser.add_argument('--log-interval', type=int, default=100,
help='reporting interval')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
if args.temperature < 1e-3:
parser.error("--temperature has to be greater or equal 1e-3")
with open(args.checkpoint, 'rb') as f:
model = torch.load(f).to(device)
model.eval()
corpus = data.Corpus(args.data)
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(1)
input = torch.randint(ntokens, (1, 1), dtype=torch.long).to(device)
with open(args.outf, 'w') as outf:
with torch.no_grad(): # no tracking history
for i in range(args.words):
output, hidden = model(input, hidden)
word_weights = output.squeeze().div(args.temperature).exp().cpu()
word_idx = torch.multinomial(word_weights, 1)[0]
input.fill_(word_idx)
word = corpus.dictionary.idx2word[word_idx]
outf.write(word + ('\n' if i % 20 == 19 else ' '))
if i % args.log_interval == 0:
print('| Generated {}/{} words'.format(i, args.words))
|
structured-nets-master
|
pytorch/examples/word_language_model/generate.py
|
"""
Modified from pytorch/examples/word_language_model to demonstrate 'StructuredLinear' usage.
"""
import torch.nn as nn
from torch.nn import Parameter
import torch
import numpy as np
import sys
from lstm import SingleLayerLSTM, LSTMCell
class RNNModel(nn.Module):
"""Container module with an encoder, a recurrent module, and a decoder."""
def __init__(self, class_type, r, rnn_type, ntoken, ninp, nhid, nlayers, dropout=0.5, tie_weights=False):
super(RNNModel, self).__init__()
self.drop = nn.Dropout(dropout)
self.encoder = nn.Embedding(ntoken, ninp)
if rnn_type in ['LSTM', 'GRU']:
print('ninp, nhid, nlayers: ', ninp, nhid, nlayers)
if rnn_type == 'LSTM':
self.rnn = SingleLayerLSTM(class_type, r, input_size=ninp, hidden_size=nhid, dropout=dropout)
else:
self.rnn = getattr(nn, rnn_type)(ninp, nhid, nlayers, dropout=dropout)
else:
try:
nonlinearity = {'RNN_TANH': 'tanh', 'RNN_RELU': 'relu'}[rnn_type]
except KeyError:
raise ValueError( """An invalid option for `--model` was supplied,
options are ['LSTM', 'GRU', 'RNN_TANH' or 'RNN_RELU']""")
self.rnn = nn.RNN(ninp, nhid, nlayers, nonlinearity=nonlinearity, dropout=dropout)
self.decoder = nn.Linear(nhid, ntoken)
# Optionally tie weights as in:
# "Using the Output Embedding to Improve Language Models" (Press & Wolf 2016)
# https://arxiv.org/abs/1608.05859
# and
# "Tying Word Vectors and Word Classifiers: A Loss Framework for Language Modeling" (Inan et al. 2016)
# https://arxiv.org/abs/1611.01462
if tie_weights:
if nhid != ninp:
raise ValueError('When using the tied flag, nhid must be equal to emsize')
self.decoder.weight = self.encoder.weight
self.init_weights()
self.rnn_type = rnn_type
self.nhid = nhid
self.nlayers = nlayers
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, input, hidden):
emb = self.drop(self.encoder(input))
output, hidden = self.rnn(emb, hx=hidden)
output = output.squeeze()
hidden = (hidden[0].squeeze(0), hidden[1].squeeze(0))
output = self.drop(output)
decoded = self.decoder(output.view(output.size(0)*output.size(1), output.size(2)))
return decoded.view(output.size(0), output.size(1), decoded.size(1)), hidden
def init_hidden(self, bsz):
weight = next(self.parameters())
if self.rnn_type == 'LSTM':
return (weight.new_zeros(self.nlayers, bsz, self.nhid),
weight.new_zeros(self.nlayers, bsz, self.nhid))
else:
return weight.new_zeros(self.nlayers, bsz, self.nhid)
|
structured-nets-master
|
pytorch/examples/word_language_model/model.py
|
"""
Some parts modified from https://github.com/jihunchoi/recurrent-batch-normalization-pytorch/blob/master/bnlstm.py
"""
import torch
from torch import nn
from torch.nn import init
from torch.autograd import Variable
import sys
sys.path.insert(0, '../../../pytorch/')
import structure.layer as sl
class LSTMCell(nn.Module):
def __init__(self, class_type, r, input_size, hidden_size, use_bias=True):
super(LSTMCell, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
self.class_type = class_type
self.r = r
# Replace W_ih with structured matrices
self.W_ih = sl.StructuredLinear(class_type, layer_size=4*hidden_size, r=r, bias=False)
self.W_hh = nn.Parameter(
torch.FloatTensor(hidden_size, 4 * hidden_size))
if use_bias:
self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
else:
self.bias = None
self.reset_parameters()
def reset_parameters(self):
W_hh_data = torch.eye(self.hidden_size).repeat(1,4)
self.W_hh.data.set_(W_hh_data)
if self.use_bias:
            init.constant_(self.bias.data, val=0)
def forward(self, input_, hx):
h_0, c_0 = hx
h_0 = h_0.squeeze()
c_0 = c_0.squeeze()
batch_size = h_0.size(0)
bias_batch = (self.bias.unsqueeze(0)
.expand(batch_size, *self.bias.size()))
wh_b = torch.addmm(bias_batch, h_0, self.W_hh)
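        # (Added note) The structured W_ih is square of size 4*hidden_size,
        # so the input (this assumes input_size == hidden_size) is zero-padded
        # up to 4*hidden_size before the structured multiply below.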
z = torch.zeros(input_.size(0),3*self.hidden_size).cuda()
input_padded = torch.cat((input_, z), dim=1)
wi = self.W_ih(input_padded)
f, i, o, g = torch.split(wh_b + wi,
split_size_or_sections=self.hidden_size, dim=1)
c_1 = torch.sigmoid(f)*c_0 + torch.sigmoid(i)*torch.tanh(g)
h_1 = torch.sigmoid(o) * torch.tanh(c_1)
return h_1, c_1
class SingleLayerLSTM(nn.Module):
def __init__(self, class_type, r, input_size, hidden_size,use_bias=True,dropout=0):
super(SingleLayerLSTM, self).__init__()
self.input_size = input_size
self.hidden_size = hidden_size
self.use_bias = use_bias
self.dropout = dropout
# Initialize LSTMCell
self.cell = LSTMCell(class_type=class_type, r=r, input_size=input_size,
hidden_size=hidden_size, use_bias=use_bias)
self.dropout_layer = nn.Dropout(dropout)
self.reset_parameters()
def reset_parameters(self):
self.cell.reset_parameters()
@staticmethod
def _forward_rnn(cell, input_, length, hx):
max_time = input_.size(0)
output = []
for time in range(max_time):
h_next, c_next = cell(input_=input_[time], hx=hx)
mask = (time < length).float().unsqueeze(1).expand_as(h_next)
h_next = h_next*mask + hx[0]*(1 - mask)
c_next = c_next*mask + hx[1]*(1 - mask)
hx_next = (h_next, c_next)
output.append(h_next)
hx = hx_next
output = torch.stack(output, 0)
return output, hx
def forward(self, input_, hx):
max_time, batch_size, _ = input_.size()
length = Variable(torch.LongTensor([max_time] * batch_size))
if input_.is_cuda:
device = input_.get_device()
length = length.cuda(device)
output, (h_n, c_n) = SingleLayerLSTM._forward_rnn(
cell=self.cell, input_=input_, length=length, hx=hx)
input_ = self.dropout_layer(output)
return output, (h_n, c_n)
|
structured-nets-master
|
pytorch/examples/word_language_model/lstm.py
|
"""
Modified from pytorch/examples/word_language_model to demonstrate 'StructuredLinear' usage.
"""
# coding: utf-8
import argparse, os
import time
import math
import torch
import torch.nn as nn
import pickle as pkl
import data
import model
parser = argparse.ArgumentParser(description='PyTorch Wikitext-2 RNN/LSTM Language Model')
parser.add_argument('--name', type=str, default='',
help='name of the experiment')
parser.add_argument('--class_type', type=str, default='unconstrained',
help='structured class')
parser.add_argument('--r', type=int, default=1,
help='displacement rank')
parser.add_argument('--data', type=str, default='./data/wikitext-2',
help='location of the data corpus')
parser.add_argument('--model', type=str, default='LSTM',
help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)')
parser.add_argument('--emsize', type=int, default=200,
help='size of word embeddings')
parser.add_argument('--nhid', type=int, default=200,
help='number of hidden units per layer')
parser.add_argument('--nlayers', type=int, default=1,
help='number of layers')
parser.add_argument('--lr', type=float, default=20,
help='initial learning rate')
parser.add_argument('--clip', type=float, default=0.25,
help='gradient clipping')
parser.add_argument('--epochs', type=int, default=40,
help='upper epoch limit')
parser.add_argument('--batch_size', type=int, default=20, metavar='N',
help='batch size')
parser.add_argument('--bptt', type=int, default=35,
help='sequence length')
parser.add_argument('--dropout', type=float, default=0.2,
help='dropout applied to layers (0 = no dropout)')
parser.add_argument('--tied', action='store_true',
help='tie the word embedding and softmax weights')
parser.add_argument('--seed', type=int, default=1111,
help='random seed')
parser.add_argument('--cuda', action='store_true',
help='use CUDA')
parser.add_argument('--log-interval', type=int, default=200, metavar='N',
help='report interval')
parser.add_argument('--result-dir', default='../../../results/language/')
parser.add_argument('--test', type=int, default=0,
help='Flag to test on test set')
args = parser.parse_args()
# Set the random seed manually for reproducibility.
torch.manual_seed(args.seed)
if torch.cuda.is_available():
if not args.cuda:
print("WARNING: You have a CUDA device, so you should probably run with --cuda")
device = torch.device("cuda" if args.cuda else "cpu")
###############################################################################
# Load data
###############################################################################
corpus = data.Corpus(args.data)
# Starting from sequential data, batchify arranges the dataset into columns.
# For instance, with the alphabet as the sequence and batch size 4, we'd get
# ┌ a g m s ┐
# │ b h n t │
# │ c i o u │
# │ d j p v │
# │ e k q w │
# └ f l r x ┘.
# These columns are treated as independent by the model, which means that the
# dependence of e.g. 'g' on 'f' cannot be learned, but allows more efficient
# batch processing.
def batchify(data, bsz):
# Work out how cleanly we can divide the dataset into bsz parts.
nbatch = data.size(0) // bsz
# Trim off any extra elements that wouldn't cleanly fit (remainders).
data = data.narrow(0, 0, nbatch * bsz)
# Evenly divide the data across the bsz batches.
data = data.view(bsz, -1).t().contiguous()
return data.to(device)
eval_batch_size = 10
train_data = batchify(corpus.train, args.batch_size)
val_data = batchify(corpus.valid, eval_batch_size)
test_data = batchify(corpus.test, eval_batch_size)
# Make results dir
out_dir = os.path.join(args.result_dir, args.name)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
###############################################################################
# Build the model
###############################################################################
ntokens = len(corpus.dictionary)
model = model.RNNModel(args.class_type, args.r, args.model, ntokens, args.emsize, args.nhid, args.nlayers, args.dropout, args.tied).to(device)
# Print params
for name, param in model.named_parameters():
if param.requires_grad:
print(('Parameter name, shape: ', name, param.data.shape))
criterion = nn.CrossEntropyLoss()
###############################################################################
# Training code
###############################################################################
def repackage_hidden(h):
"""Wraps hidden states in new Tensors, to detach them from their history."""
if isinstance(h, torch.Tensor):
return h.detach()
else:
return tuple(repackage_hidden(v) for v in h)
# get_batch subdivides the source data into chunks of length args.bptt.
# If source is equal to the example output of the batchify function, with
# a bptt-limit of 2, we'd get the following two Variables for i = 0:
# ┌ a g m s ┐ ┌ b h n t ┐
# └ b h n t ┘ └ c i o u ┘
# Note that despite the name of the function, the subdivision of data is not
# done along the batch dimension (i.e. dimension 1), since that was handled
# by the batchify function. The chunks are along dimension 0, corresponding
# to the seq_len dimension in the LSTM.
def get_batch(source, i):
seq_len = min(args.bptt, len(source) - 1 - i)
data = source[i:i+seq_len]
target = source[i+1:i+1+seq_len].view(-1)
return data, target
def evaluate(data_source):
# Turn on evaluation mode which disables dropout.
model.eval()
total_loss = 0.
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(eval_batch_size)
with torch.no_grad():
for i in range(0, data_source.size(0) - 1, args.bptt):
data, targets = get_batch(data_source, i)
output, hidden = model(data, hidden)
output_flat = output.view(-1, ntokens)
total_loss += len(data) * criterion(output_flat, targets).item()
hidden = repackage_hidden(hidden)
return total_loss / len(data_source)
def train():
# Turn on training mode which enables dropout.
model.train()
total_loss = 0.
start_time = time.time()
ntokens = len(corpus.dictionary)
hidden = model.init_hidden(args.batch_size)
for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
data, targets = get_batch(train_data, i)
# Starting each batch, we detach the hidden state from how it was previously produced.
# If we didn't, the model would try backpropagating all the way to start of the dataset.
hidden = repackage_hidden(hidden)
model.zero_grad()
output, hidden = model(data, hidden)
loss = criterion(output.view(-1, ntokens), targets)
loss.backward()
        # `clip_grad_norm_` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)
        for p in model.parameters():
            if p.grad is None:
                continue
            p.data.add_(p.grad.data, alpha=-lr)
total_loss += loss.item()
if batch % args.log_interval == 0 and batch > 0:
cur_loss = total_loss / args.log_interval
elapsed = time.time() - start_time
print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
'loss {:5.2f} | ppl {:8.2f}'.format(
epoch, batch, len(train_data) // args.bptt, lr,
elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
total_loss = 0
start_time = time.time()
# Loop over epochs.
lr = args.lr
best_val_loss = None
val_losses = []
val_perps = []
# At any point you can hit Ctrl + C to break out of training early.
try:
for epoch in range(1, args.epochs+1):
epoch_start_time = time.time()
train()
val_loss = evaluate(val_data)
val_losses.append(val_loss)
val_perps.append(math.exp(val_loss))
print('-' * 89)
print('| end of epoch {:3d} | time: {:5.2f}s | valid loss {:5.2f} | '
'valid ppl {:8.2f}'.format(epoch, (time.time() - epoch_start_time),
val_loss, math.exp(val_loss)))
print('-' * 89)
# Save the model if the validation loss is the best we've seen so far.
if not best_val_loss or val_loss < best_val_loss:
with open(os.path.join(out_dir, 'model.pt'), 'wb') as f:
torch.save(model, f)
best_val_loss = val_loss
else:
# Anneal the learning rate if no improvement has been seen in the validation dataset.
lr /= 4.0
except KeyboardInterrupt:
print('-' * 89)
print('Exiting from training early')
# Load the best saved model.
with open(os.path.join(out_dir, 'model.pt'), 'rb') as f:
model = torch.load(f)
# after load the rnn params are not a continuous chunk of memory
# this makes them a continuous chunk, and will speed up forward pass
#model.rnn.flatten_parameters()
results = {}
results['val_losses'] = val_losses
results['val_perps'] = val_perps
if args.test:
# Run on test data.
test_loss = evaluate(test_data)
print('=' * 89)
print('| End of training | test loss {:5.2f} | test ppl {:8.2f}'.format(
test_loss, math.exp(test_loss)))
print('=' * 89)
results['test_loss'] = test_loss
results['test_perp'] = math.exp(test_loss)
# Save
out = os.path.join(out_dir, args.class_type + '_' + str(args.r) + '.p')
print('Saving losses to: ', out)
pkl.dump(results, open(out, 'wb'))
|
structured-nets-master
|
pytorch/examples/word_language_model/main.py
|
"""
Modified from pytorch/examples/word_language_model to demonstrate 'StructuredLinear' usage.
"""
import os
import torch
class Dictionary(object):
def __init__(self):
self.word2idx = {}
self.idx2word = []
def add_word(self, word):
if word not in self.word2idx:
self.idx2word.append(word)
self.word2idx[word] = len(self.idx2word) - 1
return self.word2idx[word]
def __len__(self):
return len(self.idx2word)
class Corpus(object):
def __init__(self, path):
self.dictionary = Dictionary()
self.train = self.tokenize(os.path.join(path, 'train.txt'))
self.valid = self.tokenize(os.path.join(path, 'valid.txt'))
self.test = self.tokenize(os.path.join(path, 'test.txt'))
def tokenize(self, path):
"""Tokenizes a text file."""
assert os.path.exists(path)
# Add words to the dictionary
with open(path, 'r') as f:
tokens = 0
for line in f:
words = line.split() + ['<eos>']
tokens += len(words)
for word in words:
self.dictionary.add_word(word)
# Tokenize file content
with open(path, 'r') as f:
ids = torch.LongTensor(tokens)
token = 0
for line in f:
words = line.split() + ['<eos>']
for word in words:
ids[token] = self.dictionary.word2idx[word]
token += 1
return ids
|
structured-nets-master
|
pytorch/examples/word_language_model/data.py
|
"""
Modified from pytorch/examples/vae to demonstrate 'StructuredLinear' usage.
"""
from __future__ import print_function
import argparse, sys, os
import torch
import torch.utils.data
from torch import nn, optim
from torch.nn import functional as F
from torchvision import datasets, transforms
from torchvision.utils import save_image
sys.path.insert(0, '../../../pytorch/')
import structure.layer as sl
import pickle as pkl
parser = argparse.ArgumentParser(description='VAE MNIST Example')
parser.add_argument('--batch-size', type=int, default=50, metavar='N',
                    help='input batch size for training (default: 50)')
parser.add_argument('--name', default='')
parser.add_argument('--result-dir', default='../../../results/vae/')
parser.add_argument('--data-dir', default='../../../../datasets/mnist/')
parser.add_argument('--layer-size',type=int, default=784)
parser.add_argument('--class-type', default='unconstrained')
parser.add_argument('--lr',type=float, default=1e-3)
parser.add_argument('--r',type=int, default=1)
parser.add_argument('--epochs', type=int, default=10, metavar='N',
help='number of epochs to train (default: 10)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='enables CUDA training')
parser.add_argument('--seed', type=int, default=1, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
device = torch.device("cuda" if args.cuda else "cpu")
# Make results dir
out_dir = os.path.join(args.result_dir, args.name)
if not os.path.exists(out_dir):
os.makedirs(out_dir)
kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
train_loader = torch.utils.data.DataLoader(
datasets.MNIST(args.data_dir, train=True, download=True,
transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=True, **kwargs)
test_loader = torch.utils.data.DataLoader(
datasets.MNIST(args.data_dir, train=False, transform=transforms.ToTensor()),
batch_size=args.batch_size, shuffle=True, **kwargs)
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = sl.StructuredLinear(class_type=args.class_type, layer_size=args.layer_size, r=args.r, bias=False)
self.fc21 = nn.Linear(784, 20)
self.fc22 = nn.Linear(784, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = F.relu(self.fc3(z))
return F.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
model = VAE().to(device)
for name, param in model.named_parameters():
if param.requires_grad:
print('Parameter name, shape: ', name, param.data.shape)
optimizer = optim.Adam(model.parameters(), lr=args.lr)
# Reconstruction + KL divergence losses summed over all elements and batch
def loss_function(recon_x, x, mu, logvar):
BCE = F.binary_cross_entropy(recon_x, x.view(-1, 784), size_average=False)
# see Appendix B from VAE paper:
# Kingma and Welling. Auto-Encoding Variational Bayes. ICLR, 2014
# https://arxiv.org/abs/1312.6114
# 0.5 * sum(1 + log(sigma^2) - mu^2 - sigma^2)
KLD = -0.5 * torch.sum(1 + logvar - mu.pow(2) - logvar.exp())
return BCE + KLD
def train(epoch):
model.train()
train_loss = 0
for batch_idx, (data, _) in enumerate(train_loader):
data = data.to(device)
optimizer.zero_grad()
recon_batch, mu, logvar = model(data)
loss = loss_function(recon_batch, data, mu, logvar)
loss.backward()
train_loss += loss.item()
optimizer.step()
if batch_idx % args.log_interval == 0:
print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
epoch, batch_idx * len(data), len(train_loader.dataset),
100. * batch_idx / len(train_loader),
loss.item() / len(data)))
avg_loss = train_loss / len(train_loader.dataset)
print('====> Epoch: {} Average loss: {:.4f}'.format(epoch, avg_loss))
return avg_loss
def test(epoch):
model.eval()
test_loss = 0
with torch.no_grad():
for i, (data, _) in enumerate(test_loader):
data = data.to(device)
recon_batch, mu, logvar = model(data)
test_loss += loss_function(recon_batch, data, mu, logvar).item()
if i == 0:
n = min(data.size(0), 8)
comparison = torch.cat([data[:n],
recon_batch.view(args.batch_size, 1, 28, 28)[:n]])
save_image(comparison.cpu(),
args.result_dir + 'reconstruction_' + str(epoch) + '.png', nrow=n)
test_loss /= len(test_loader.dataset)
print('====> Test set loss: {:.4f}'.format(test_loss))
return test_loss
train_losses = []
for epoch in range(1, args.epochs + 1):
train_losses.append(train(epoch))
test_loss = test(epoch)
with torch.no_grad():
sample = torch.randn(64, 20).to(device)
sample = model.decode(sample).cpu()
save_image(sample.view(64, 1, 28, 28),
args.result_dir + '/sample_' + str(epoch) + '.png')
results = {}
results['train'] = train_losses
results['test'] = test_loss
out_filename = os.path.join(out_dir, args.class_type + '_r' + str(args.r) + '_e' + str(args.epochs))
print('Saving to: ', out_filename)
pkl.dump(results, open(out_filename, 'wb'))
|
structured-nets-master
|
pytorch/examples/vae/main.py
|
''' Utility functions for handling complex tensors: conjugate and complex_mult.
Pytorch (as of 0.4.0) does not support complex tensors, so we store them as
float tensors where the last dimension is 2 (real and imaginary parts).
'''
import torch
def conjugate(X):
assert X.shape[-1] == 2, 'Last dimension must be 2'
return X * torch.tensor((1, -1), dtype=X.dtype, device=X.device)
def complex_mult(X, Y):
assert X.shape[-1] == 2 and Y.shape[-1] == 2, 'Last dimension must be 2'
return torch.stack(
(X[..., 0] * Y[..., 0] - X[..., 1] * Y[..., 1],
X[..., 0] * Y[..., 1] + X[..., 1] * Y[..., 0]),
dim=-1)
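# Editor's sketch: sanity-check these helpers against numpy's native complex
# arithmetic (numpy is imported locally; the module itself needs only torch).
if __name__ == '__main__':
    import numpy as np
    X, Y = torch.randn(3, 2), torch.randn(3, 2)
    Xc = X[..., 0].numpy() + 1j * X[..., 1].numpy()
    Yc = Y[..., 0].numpy() + 1j * Y[..., 1].numpy()
    prod = complex_mult(X, Y).numpy()
    assert np.allclose(prod[..., 0] + 1j * prod[..., 1], Xc * Yc)
    conj = conjugate(X).numpy()
    assert np.allclose(conj[..., 0] + 1j * conj[..., 1], Xc.conj())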
|
structured-nets-master
|
pytorch/structure/complex_utils.py
|
import numpy as np
class KT_Toeplitz():
"""Multiply Krylov(A, v)^T @ u when A is zero except on the subdiagonal.
"""
def __init__(self, n, f=0, batch_size=1, rank=1):
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
self.n = n
self.m = m
self.batch_size = batch_size
self.rank = rank
self.eta = None
if f != 0:
mod = np.power(np.abs(f), np.arange(n)/n)
if f > 0:
arg = np.ones(n)
else:
arg = np.fft.fft(np.eye(1,2*n,2*n-1))[0,:n]
self.eta = mod * arg
def __call__(self, v, u):
"""
Multiply Krylov(Z_f, v)^T @ u
v: (rank, n)
u: (batch, n)
out: (batch, rank, n)
"""
n, m, batch_size, rank = self.n, self.m, self.batch_size, self.rank
if self.eta is not None: # cycle version
u_ = np.fft.ifft(1/self.eta * u)
v_ = np.fft.fft(self.eta * v)
uv_ = u_.reshape(batch_size, 1, n) * v_.reshape(1, rank, n)
ans = self.eta * np.fft.fft(uv_)
return np.real(ans)
else:
u_ = np.fft.rfft(np.concatenate((u[...,::-1], np.zeros_like(u)), axis=-1))
v_ = np.fft.rfft(np.concatenate((v, np.zeros_like(v)), axis=-1))
uv_ = u_.reshape(batch_size, 1, -1) * v_.reshape(1, rank, -1)
ans = np.fft.irfft(uv_)[..., n-1::-1]
return ans
class K_Toeplitz():
"""Multiply Krylov(A, v) @ w when A is zero except on the subdiagonal.
"""
def __init__(self, n, f, batch_size=1, rank=1):
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
self.n = n
self.m = m
self.batch_size = batch_size
self.rank = rank
self.eta = None
if f != 0:
mod = np.power(np.abs(f), np.arange(n)/n)
if f > 0:
arg = np.ones(n)
else:
arg = np.fft.fft(np.eye(1,2*n,2*n-1))[0,:n]
# arg = np.exp(np.arange(n) * 1j * np.pi / n)
self.eta = mod * arg
def __call__(self, v, w):
"""
v: (rank, n)
w: (batch_size, rank, n)
out: (batch_size, n)
"""
n, m, batch_size, rank = self.n, self.m, self.batch_size, self.rank
if self.eta is not None:
w_ = np.fft.fft(self.eta * w)
v_ = np.fft.fft(self.eta * v)
wv_ = w_ * v_.reshape((1, rank, n))
ans = 1/self.eta * np.fft.ifft(np.sum(wv_, axis=1))
ans = np.real(ans)
else:
w_ = np.fft.rfft(np.concatenate((w, np.zeros_like(w)), axis=-1))
v_ = np.fft.rfft(np.concatenate((v, np.zeros_like(v)), axis=-1))
wv_ = w_ * v_.reshape((1, rank, -1))
ans = np.fft.irfft(np.sum(wv_, axis=1))[..., :n]
return ans
def toeplitz_mult(G, H, x, cycle=True):
rank, n = G.shape
batch_size = x.shape[0]
f = (1,-1) if cycle else (0,0)
transpose_out = KT_Toeplitz(n, f[1], batch_size, rank)(H, x)
krylov_out = K_Toeplitz(n, f[0], batch_size, rank)(G, transpose_out)
return krylov_out/2 if cycle else krylov_out
##### Slow mult
def krylov_construct(f, v, m):
n = v.shape[0]
K = np.zeros(shape=(m,n))
K[0,:] = v
for i in range(1,m):
K[i,1:] = K[i-1,:-1]
K[i,0] = f*K[i-1,-1]
return K.T
def toeplitz_mult_slow(G, H, x, cycle=True):
assert G.shape == H.shape
rank, n = G.shape
f = (1,-1) if cycle else (0,0)
krylovs = [(krylov_construct(f[0], G[i], n), krylov_construct(f[1], H[i], n).T) for i in range(rank)]
prods = [K[0] @ K[1] @ x.T for K in krylovs]
return np.sum(np.array(prods), axis=0).T
if __name__ == '__main__':
v = np.array([[0,1,0,-1],[0,1,2,3]])
u = np.array([[1,1,1,1],[0,1,2,3]])
w = KT_Toeplitz(4, -1, 2, 2)(v, u)
# output:
# [[[ 0 2 2 0]
# [ 6 0 -4 -6]]
# [[ -2 2 4 2]
# [ 14 8 0 -8]]]
w = KT_Toeplitz(4, 0, 2, 2)(v, u)
# [[[ 0 1 1 0]
# [ 6 3 1 0]]
# [[ -2 2 3 0]
# [ 14 8 3 0]]]
print(toeplitz_mult(v, v, u))
print(toeplitz_mult_slow(v, v, u))
# output:
# array([[-16., -20., -4., 16.],
# [ 16., -8., 12., 64.]])
print(toeplitz_mult(v, v, u, cycle=False))
print(toeplitz_mult_slow(v, v, u, cycle=False))
# output:
# array([[ 0., 6., 16., 26.],
# [ 0., 12., 38., 66.]])
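# Editor's addition to this demo: a randomized agreement check between the
# fast FFT-based multiply and the explicit Krylov construction (n a power of 2).
G = np.random.randn(3, 8)
H = np.random.randn(3, 8)
x = np.random.randn(5, 8)
assert np.allclose(toeplitz_mult(G, H, x), toeplitz_mult_slow(G, H, x))
assert np.allclose(toeplitz_mult(G, H, x, cycle=False), toeplitz_mult_slow(G, H, x, cycle=False))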
|
structured-nets-master
|
pytorch/structure/toeplitz_cpu.py
|
import torch
from torch.autograd import Variable
import torch.nn as nn
from torch.nn.parameter import Parameter
from . import toeplitz as toep
from . import krylov as kry
# TODO: rewrite with structure.layer
# TODO: subclass with each DR type
class LDR(nn.Module):
def name(self):
return str(self.in_channels) + str(self.out_channels) + self.displacement + str(self.r)
# TODO: support non-square multiplications
def __init__(self, displacement, in_channels, out_channels, rank, layer_size, bias=True):
super(LDR, self).__init__()
self.displacement = displacement
self.in_channels = in_channels
self.out_channels = out_channels
self.r = rank
self.n = layer_size
self.bias = None
self.G = Parameter(torch.Tensor(self.in_channels, self.out_channels, self.r, self.n))
self.H = Parameter(torch.Tensor(self.in_channels, self.out_channels, self.r, self.n))
torch.nn.init.normal_(self.G, std=0.01) #TODO
torch.nn.init.normal_(self.H, std=0.01)
if bias:
self.bias = Parameter(torch.zeros(self.out_channels, 1, self.n))
if self.displacement == 'toeplitz_corner' or self.displacement == 'tc':
self.corner = True
elif self.displacement == 'toeplitz' or self.displacement == 't':
self.corner = False
elif self.displacement == 'subdiagonal' or self.displacement == 'sd':
self.subd_A = Parameter(torch.ones((self.in_channels, self.out_channels, self.n-1)))
self.subd_B = Parameter(torch.ones((self.in_channels, self.out_channels, self.n-1)))
def forward(self, x):
"""
x: (in_channels, batch, n)
out: (out_channels, batch, n)
"""
_, b, n = x.shape
assert n == self.n
# print("shapes ", self.G[0,0].shape, self.H[0,0].shape, x[0].shape)
comps = Variable(torch.Tensor(self.in_channels, self.out_channels, b, self.n)).cuda()
for i in range(self.in_channels):
for j in range(self.out_channels):
if self.displacement in ['toeplitz_corner', 'toeplitz', 'tc', 't']:
comps[i,j] = toep.toeplitz_mult(self.G[i,j], self.H[i,j], x[i], self.corner)
elif self.displacement == 'subdiagonal' or self.displacement == 'sd':
comps[i,j] = kry.subdiag_mult_conv(self.subd_A[i,j], self.subd_B[i,j], self.G[i,j], self.H[i,j], x[i])
out = torch.sum(comps, dim=0)
if self.bias is not None:
out += self.bias
return out
def loss(self):
lamb = 0.0001
# lamb = 0
return lamb*torch.sum(torch.abs(self.G)) + lamb*torch.sum(torch.abs(self.H))
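# Editor's sketch of the shape contract (sizes are hypothetical; left commented
# out because forward() allocates directly on CUDA, so a GPU is assumed):
# layer = LDR('t', in_channels=1, out_channels=1, rank=2, layer_size=64).cuda()
# x = torch.randn(1, 8, 64).cuda() # (in_channels, batch, n)
# y = layer(x) # (out_channels, batch, n)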
|
structured-nets-master
|
pytorch/structure/LDR.py
|
import numpy as np
import torch
use_hadamard_transform_cuda = True
try:
import hadamard_cuda
# import torch.utils.cpp_extension
# hadamard_cuda = torch.utils.cpp_extension.load(
# name='hadamard_cuda',
# sources=[
# 'hadamard_cuda/hadamard_cuda.cpp',
# 'hadamard_cuda/hadamard_cuda_kernel.cu',
# ],
# extra_cuda_cflags=['-O2'],
# verbose=False
# )
except (ImportError, RuntimeError) as e:
print("CUDA version of Hadamard transform isn't installed. Will use Pytorch's version, which is much slower.")
use_hadamard_transform_cuda = False
from scipy.linalg import hadamard
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def hadamard_transform_torch(u, normalize=False):
"""Multiply H_n @ u where H_n is the Hadamard matrix of dimension n x n.
n must be a power of 2.
Parameters:
u: Tensor of shape (..., n)
normalize: if True, divide the result by 2^{m/2} where m = log_2(n).
Returns:
product: Tensor of shape (..., n)
"""
n = u.shape[-1] # supports arbitrary leading batch dimensions, per the docstring
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
x = u[..., np.newaxis]
for d in range(m)[::-1]:
x = torch.cat((x[..., ::2, :] + x[..., 1::2, :], x[..., ::2, :] - x[..., 1::2, :]), dim=-1)
return x.squeeze(-2) / 2**(m / 2) if normalize else x.squeeze(-2)
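# Editor's note: the smallest case is a single butterfly, H_2 @ [a, b] = [a + b, a - b];
# e.g. hadamard_transform_torch(torch.tensor([[1., 2.]])) returns tensor([[3., -1.]]).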
class HadamardTransformCuda(torch.autograd.Function):
'''The unnormalized Hadamard transform (i.e. without dividing by sqrt(2))
'''
@staticmethod
def forward(ctx, u):
return hadamard_cuda.hadamard_transform(u)
@staticmethod
def backward(ctx, grad):
return HadamardTransformCuda.apply(grad)
def hadamard_transform_cuda(u, normalize=False):
"""Multiply H_n @ u where H_n is the Hadamard matrix of dimension n x n.
n must be a power of 2.
Parameters:
u: Tensor of shape (..., n)
normalize: if True, divide the result by 2^{m/2} where m = log_2(n).
Returns:
product: Tensor of shape (..., n)
"""
_, n = u.shape
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
output = HadamardTransformCuda.apply(u)
return output / 2**(m / 2) if normalize else output
def test_hadamard_transform():
m = 15
n = 1 << m
batch_size = 50
u = torch.rand((batch_size, n), requires_grad=True, device=device)
result_cuda = hadamard_transform_cuda(u)
grad_cuda, = torch.autograd.grad(result_cuda.sum(), u, retain_graph=True)
result_torch = hadamard_transform_torch(u)
grad_torch, = torch.autograd.grad(result_torch.sum(), u, retain_graph=True)
# Explicit construction from scipy
H = torch.tensor(hadamard(n), dtype=torch.float, device=device)
result_explicit = u @ H.t()
print((result_cuda - result_explicit).abs().max().item())
print((result_cuda - result_explicit).abs().mean().item())
print((result_torch - result_explicit).abs().max().item())
print((result_torch - result_explicit).abs().mean().item())
print((grad_cuda - grad_torch).abs().max().item())
print((grad_cuda - grad_torch).abs().mean().item())
hadamard_transform = hadamard_transform_cuda if use_hadamard_transform_cuda else hadamard_transform_torch
if __name__ == '__main__':
test_hadamard_transform()
|
structured-nets-master
|
pytorch/structure/hadamard.py
|
import numpy as np
import torch
import torch.nn as nn
from torch.nn.parameter import Parameter
from torch.autograd import Variable
from . import toeplitz as toep
from . import krylov as kry
from . import circulant as circ
from . import fastfood as ff
from utils import descendants
class Layer(nn.Module):
class_type = None
abbrev = None
def name(self):
return self.__class__.abbrev
def __init__(self, layer_size=None, bias=True, **kwargs):
super().__init__()
self.layer_size = layer_size
self.bias = bias
self.__dict__.update(kwargs)
self.reset_parameters()
def reset_parameters(self):
assert self.layer_size is not None
self.b = None
if self.bias:
self.b = Parameter(torch.zeros(self.layer_size))
def apply_bias(self, out):
if self.b is not None:
return self.b + out
else:
return out
def loss(self):
return 0
class Unconstrained(Layer):
class_type = 'unconstrained'
abbrev = 'u'
def name(self):
return self.__class__.abbrev + str(self.hidden_size)
def __init__(self, layer_size, hidden_size=None, **kwargs):
if hidden_size is None:
hidden_size = layer_size
super().__init__(layer_size, hidden_size=hidden_size, **kwargs)
def reset_parameters(self):
super().reset_parameters()
self.W = Parameter(torch.Tensor(self.layer_size, self.hidden_size))
self.init_stddev = np.sqrt(1./self.layer_size)
torch.nn.init.normal_(self.W, std=self.init_stddev)
self.mask = None
if self.bias:
self.b = Parameter(torch.zeros(self.hidden_size))
def set_mask(self, mask, device):
self.mask = Variable(torch.FloatTensor(mask).to(device), requires_grad=False)
self.W.data *= self.mask.data
print('Num. nonzero entries after pruning: ', torch.nonzero(self.W).size(0))
def forward(self, x):
if self.mask is not None:
masked_W = self.W*self.mask
#print('NNZ, mask: ', torch.nonzero(self.mask).size(0))
#print('NNZ, masked_W: ', torch.nonzero(masked_W).size(0))
out = torch.matmul(x, masked_W)
else:
out = torch.matmul(x, self.W)
return self.apply_bias(out)
class Circulant(Layer):
class_type = 'circulant'
abbrev = 'c'
def reset_parameters(self):
super().reset_parameters()
self.c = Parameter(torch.Tensor(self.layer_size))
self.init_stddev = np.sqrt(1./self.layer_size)
torch.nn.init.normal_(self.c, std=self.init_stddev)
def forward(self, x):
return self.apply_bias(circ.circulant_multiply(self.c, x))
class FastFood(Layer):
class_type = 'fastfood'
abbrev = 'f'
def reset_parameters(self):
super().reset_parameters()
# Initialize as non adaptive Fastfood (Le et al. 2013)
# TODO: check initialization of S (scaling matrix) is correct
# S,G,B: diagonal, learnable parameters
# P: permutation, fixed
S = np.sqrt(np.random.chisquare(self.layer_size, size=self.layer_size))
G = np.random.randn(self.layer_size)
S /= np.linalg.norm(G)
B = np.random.choice((-1, 1), size=self.layer_size)
self.S = Parameter(torch.FloatTensor(S))
self.G = Parameter(torch.FloatTensor(G))
self.B = Parameter(torch.FloatTensor(B))
self.P = torch.LongTensor(np.random.permutation(self.layer_size))
#self.init_stddev = np.sqrt(1./self.layer_size)
#torch.nn.init.normal_(self.S, std=self.init_stddev)
#torch.nn.init.normal_(self.G, std=self.init_stddev)
#torch.nn.init.normal_(self.B, std=self.init_stddev)
def forward(self, x):
return self.apply_bias(ff.fastfood_multiply(self.S, self.G, self.B, self.P, x))
class LowRank(Layer):
class_type = 'low_rank'
abbrev = 'lr'
def name(self):
return self.__class__.abbrev + str(self.r)
def __init__(self, layer_size, r=1, **kwargs):
super().__init__(layer_size, r=r, **kwargs)
def reset_parameters(self):
super().reset_parameters()
self.G = Parameter(torch.Tensor(self.r, self.layer_size))
self.H = Parameter(torch.Tensor(self.r, self.layer_size))
# self.init_stddev = 0.01
self.init_stddev = np.power(1. / (self.r * self.layer_size), 1/2)
torch.nn.init.normal_(self.G, std=self.init_stddev)
torch.nn.init.normal_(self.H, std=self.init_stddev)
def forward(self, x):
xH = torch.matmul(x, self.H.t())
out = torch.matmul(xH, self.G)
return self.apply_bias(out)
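# Editor's note: equivalently out = x @ (H.t() @ G) + b, i.e. the effective
# weight matrix is the product H^T G, whose rank is at most r.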
def loss(self):
return 0
# lamb = 0.0001
# return lamb*torch.sum(torch.abs(self.G)) + lamb*torch.sum(torch.abs(self.H))
class ToeplitzLike(LowRank):
class_type = 'toeplitz'
abbrev = 't'
def reset_parameters(self):
super().reset_parameters()
self.corner = False
def forward(self, x):
out = toep.toeplitz_mult(self.G, self.H, x, self.corner)
return self.apply_bias(out)
class ToeplitzLikeC(ToeplitzLike):
class_type = 'toeplitz_corner'
abbrev = 'tc'
def reset_parameters(self):
super().reset_parameters()
self.corner = True
class HankelLike(LowRank):
class_type = 'hankel'
abbrev = 'h'
def forward(self, x):
out = toep.toeplitz_mult(self.G, self.H, x, True)
return self.apply_bias(out.flip(out.dim() - 1))
class VandermondeLike(LowRank):
class_type = 'vandermonde'
abbrev = 'v'
def reset_parameters(self):
super().reset_parameters()
self.diag = Parameter(torch.Tensor(self.layer_size))
torch.nn.init.uniform_(self.diag, -0.7, 0.7)
def forward(self, x):
# want: K_A[i,j,k] = g_i[j] * d[j] ** k
# K_A = kry.Krylov(lambda v: self.diag * v, self.G)
n = x.size(-1)
d_ = self.diag.unsqueeze(1) ** torch.arange(n, dtype=x.dtype, device=x.device)
K_A = self.G.unsqueeze(-1) * d_
# K_B = kry.Krylov(lambda v: torch.cat((v[...,1:],0*v[...,:1]),dim=-1), self.H)
# out = (x @ K_B) @ K_A.transpose(1,2)
out = toep.toeplitz_krylov_transpose_multiply(self.H, x)
out = out.transpose(0,1) @ K_A.transpose(1,2)
out = torch.sum(out, dim=0)
return self.apply_bias(out)
# transpose Vandermonde:
# K_H = kry.Krylov(lambda v: self.diag * v, self.H)
# out = toep.toeplitz_krylov_multiply(self.G, torch.transpose(x @ K_H, 0,1))
class LearnedOperator(LowRank):
"""
Abstract class for learned displacement operators
Contains parameters such as tie_operators
"""
class_type = None # abstract
abbrev = None
def __init__(self, tie_operators=False, corner=False, **kwargs):
super().__init__(tie_operators=tie_operators, corner=corner, **kwargs)
class LDRSubdiagonal(LearnedOperator):
class_type = 'subdiagonal'
abbrev = 'sd'
def reset_parameters(self):
super().reset_parameters()
self.subd_A = Parameter(torch.ones(self.layer_size-1))
if self.tie_operators:
self.subd_B = self.subd_A
else:
self.subd_B = Parameter(torch.ones(self.layer_size-1))
def forward(self, x):
out = kry.subdiag_mult(self.subd_A, self.subd_B, self.G, self.H, x)
#out = kry.subdiag_mult_conv(self.subd_A, self.subd_B, self.G, self.H, x)
return self.apply_bias(out)
class LDRSubdiagonalC(LDRSubdiagonal):
class_type = 'subdiagonal_corner'
abbrev = 'sdc'
def reset_parameters(self):
super().reset_parameters()
self.corner_A = Parameter(torch.tensor(0.0))
self.corner_B = Parameter(torch.tensor(0.0))
def forward(self, x):
out = kry.subdiag_mult_cuda(self.subd_A, self.subd_B, self.G, self.H, x, corner_A=self.corner_A, corner_B=self.corner_B)
return self.apply_bias(out)
class LDRTridiagonal(LearnedOperator):
class_type = 'tridiagonal'
abbrev = 'td'
def reset_parameters(self):
super().reset_parameters()
self.subd_A = Parameter(torch.ones(self.layer_size-1))
self.diag_A = Parameter(torch.zeros(self.layer_size))
self.supd_A = Parameter(torch.zeros(self.layer_size-1))
if self.tie_operators:
self.subd_B = self.subd_A
self.diag_B = self.diag_A
self.supd_B = self.supd_A
else:
self.subd_B = Parameter(torch.ones(self.layer_size-1))
self.diag_B = Parameter(torch.zeros(self.layer_size))
self.supd_B = Parameter(torch.zeros(self.layer_size-1))
self.corners_A = (0.0,0.0)
self.corners_B = (0.0,0.0)
def forward(self, x):
out = kry.tridiag_mult_slow(self.subd_A, self.diag_A, self.supd_A, self.subd_B, self.diag_B, self.supd_B, self.G, self.H, x, corners_A=self.corners_A, corners_B=self.corners_B)
return self.apply_bias(out)
class LDRTridiagonalC(LDRTridiagonal):
class_type = 'tridiagonal_corner'
abbrev = 'tdc'
def reset_parameters(self):
super().reset_parameters()
self.corners_A = (Parameter(torch.tensor(0.0)), Parameter(torch.tensor(0.0)))
self.corners_B = (Parameter(torch.tensor(0.0)), Parameter(torch.tensor(0.0)))
# create a map from class names to the Python class
class_map = {}
for cls in descendants(Layer):
if cls.class_type is None: continue
class_map[cls.class_type] = cls
class_map[cls.abbrev] = cls
def StructuredLinear(class_type, **kwargs):
return class_map[class_type](**kwargs)
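# Editor's sketch: the factory dispatches on either the full class_type or its
# abbreviation, so the following construct the same layer (sizes hypothetical):
# layer = StructuredLinear('toeplitz_corner', layer_size=64, r=2)
# layer = StructuredLinear('tc', layer_size=64, r=2)
# y = layer(torch.randn(8, 64)) # (batch, layer_size) -> (batch, layer_size)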
|
structured-nets-master
|
pytorch/structure/layer.py
|
'''Functions to multiply by an LDR matrix with subdiagonal and tridiagonal
operator matrices.
We implement the fast multiplication for the subdiagonal case.
This computes Krylov(g) @ (Krylov(h)^T @ u) in two steps: the Krylov
transpose multiply Krylov(h)^T @ u, then the Krylov multiply by Krylov(g).
For tridiagonal case, we implement the slow multiplication algorithm: construct
the Krylov matrix then call regular matrix multiply.
'''
import functools
import numpy as np
import torch
from torch.nn import functional as F
from .scratch.krylovslow import krylov_construct
from .complex_utils import complex_mult, conjugate
try:
import diag_mult_cuda
# import torch.utils.cpp_extension
# diag_mult_cuda = torch.utils.cpp_extension.load(
# name='diag_mult_cuda',
# sources=[
# 'diag_mult_cuda/diag_mult_cuda.cpp',
# 'diag_mult_cuda/diag_mult_cuda_kernel.cu',
# ],
# extra_cuda_cflags=['-O2'],
# verbose=False
# )
except (ImportError, RuntimeError) as e:
print("CUDA version of slow Krylov multiply isn't installed.")
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
##### Fast multiplication for the subdiagonal case
def poly_mult_sum_benchmark(p, q):
"""Multiply and sum two sets of polynomials.
Parameters:
p: (batch_size, n1, n2)
q: (rank, n1, n2)
Output:
o: (batch_size, rank, 2 * n2 - 1)
"""
print(p.shape[2])
import time
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(100):
y = F.conv1d(p, q.flip(q.dim() - 1), padding=p.shape[-1] -1)
g = torch.autograd.grad(y.sum(), (p, q), retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Elapsed time conv1d: {end - start}s.')
batch_size, rank = p.shape[0], q.shape[0]
n1, n2 = p.shape[1], p.shape[2]
start = time.perf_counter()
for _ in range(100):
S = torch.cat((torch.cat((q, p)),
torch.zeros((rank + batch_size, p.shape[1], p.shape[2]), dtype=q.dtype, device=q.device)), dim=-1)
S_f = torch.rfft(S, 1)
S0_10_f, S1_01_f = S_f[:rank], S_f[rank:rank+batch_size]
prod = (S1_01_f[:, np.newaxis, ..., np.newaxis] * S0_10_f[np.newaxis, ..., np.newaxis, :]).sum(dim=2)
T_00_f_sum = torch.stack((prod[..., 0, 0] - prod[..., 1, 1], prod[..., 0, 1] + prod[..., 1, 0]), dim=-1)
T_00_sum = torch.irfft(T_00_f_sum, 1, signal_sizes=(2 * n2, ))[..., :-1]
g = torch.autograd.grad(T_00_sum.sum(), (p, q), retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Elapsed time FFT: {end - start}s.\n')
return F.conv1d(p, q.flip(q.dim() - 1), padding=p.shape[-1] - 1)
def poly_mult_sum_backward_benchmark(grad, q):
"""Backward pass of multiplying and summing two sets of polynomials.
Parameters:
grad: (batch_size, rank, 2 * n2 - 1)
q: (rank, n1, n2)
Output:
dp: (batch_size, n1, n2)
"""
print(q.shape[2])
import time
torch.cuda.synchronize()
start = time.perf_counter()
for _ in range(100):
dp = F.conv_transpose1d(grad, q.flip(2), padding=q.shape[-1] - 1)
g = torch.autograd.grad(dp.sum(), (grad, q), retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Elapsed time conv1d: {end - start}s.')
batch_size, rank = grad.shape[0], q.shape[0]
n1, n2 = q.shape[1], q.shape[2]
start = time.perf_counter()
for _ in range(100):
dT_00_sum = torch.cat((grad, torch.zeros((batch_size, rank, 1), dtype=grad.dtype, device=grad.device)), dim=-1)
dT_00_sum_f = torch.rfft(dT_00_sum, 1)
S0_10_f = torch.rfft(torch.cat((q, torch.zeros_like(q)), dim=-1), 1)
# dS1_01_f = complex_mult(conjugate(S0_10_f), dT_00_sum_f[:, :, np.newaxis]).sum(dim=1)
# Manually doing complex multiply
prod = (S0_10_f[..., np.newaxis] * dT_00_sum_f[:, :, np.newaxis, :, np.newaxis, :]).sum(dim=1)
dS1_01_f = torch.stack((prod[..., 0, 0] + prod[..., 1, 1], prod[..., 0, 1] - prod[..., 1, 0]), dim=-1)
dp = torch.irfft(dS1_01_f, 1, signal_sizes=(2 * n2, ))[:, :, :n2]
g = torch.autograd.grad(dp.sum(), (grad, q), retain_graph=True)
torch.cuda.synchronize()
end = time.perf_counter()
print(f'Elapsed time FFT: {end - start}s.\n')
return F.conv_transpose1d(grad, q.flip(2), padding=q.shape[-1] - 1)
def krylov_transpose_multiply_conv(subdiag, v, u):
"""Multiply Krylov(A, v_i)^T @ u when A is zero except on the subdiagonal.
Use either Pytorch's conv1d or FFT for polynomial multiplication, depending
on polynomial degree. This is the fastest implementation.
Parameters:
subdiag: Tensor of shape (n - 1, )
v: Tensor of shape (rank, n)
u: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, rank, n)
"""
batch_size, n = u.shape
rank, n_ = v.shape
assert n == n_, 'u and v must have the same last dimension'
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
result = torch.zeros((batch_size, rank, n), dtype=u.dtype, device=u.device)
T_00_sum = u @ v.t()
result[:, :, 0] += T_00_sum
T_01 = u[..., np.newaxis]
T_10 = v[..., np.newaxis]
T_11 = torch.ones(n, device=T_00_sum.device)
for d in range(m)[::-1]:
n1, n2 = 1 << d, 1 << (m - d - 1)
S_00_sum, S_01, S_10, S_11 = T_00_sum, T_01, T_10, T_11
S0_10_mult_subdiag = S_10[:, ::2] * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
# polynomial multiplication
# T_00_sum = poly_mult_sum_benchmark(S_01[:, 1::2], S0_10_mult_subdiag)
if n2 <= 128: # Pick between 2 implementations based on polynomial degree n2
T_00_sum = F.conv1d(S_01[:, 1::2], S0_10_mult_subdiag.flip(2), padding=n2 - 1)
else:
S = torch.cat((torch.cat((S0_10_mult_subdiag, S_01[:, 1::2])),
torch.zeros((rank + batch_size, n1, n2), dtype=S_10.dtype, device=S_10.device)), dim=-1)
S_f = torch.rfft(S, 1)
S0_10_f, S1_01_f = S_f[:rank], S_f[rank:rank+batch_size]
# Different ways to compute the same expression, for speed vs readability
# Option 1: call complex_mult, slowest
# T_00_f_sum = complex_mult(S1_01_f[:, np.newaxis], S0_10_f[np.newaxis]).sum(dim=2)
# Option 2: multiply and sum
# prod = (S1_01_f[:, np.newaxis, ..., np.newaxis] * S0_10_f[np.newaxis, ..., np.newaxis, :]).sum(dim=2)
# Option 3: einsum
prod = torch.einsum('bnmo,rnmp->brmop', S1_01_f, S0_10_f)
# Option 4: manually doing permute and reshape and bmm, only 3% faster than einsum.
# temp1 = S1_01_f.permute(2, 0, 3, 1).reshape((-1, batch_size * 2, n1))
# temp2 = S0_10_f.permute(2, 1, 0, 3).reshape((-1, n1, rank * 2))
# prod = (temp1 @ temp2).reshape((-1, batch_size, 2, rank, 2)).permute(1, 3, 0, 2, 4)
T_00_f_sum = torch.stack((prod[..., 0, 0] - prod[..., 1, 1], prod[..., 0, 1] + prod[..., 1, 0]), dim=-1)
T_00_sum = torch.irfft(T_00_f_sum, 1, signal_sizes=(2 * n2, ))[..., :-1]
# polynomial additions
result[:, :, 1:2*n2] += T_00_sum
S0_11_mult_subdiag = S_11[::2] * subdiag[(n2 - 1)::(2 * n2)]
T_01 = torch.cat((S_01[:, ::2], S_01[:, 1::2] * S0_11_mult_subdiag[:, np.newaxis]), dim=-1)
T_10 = torch.cat((S_10[:, 1::2], S0_10_mult_subdiag * S_11[1::2][:, np.newaxis]), dim=-1)
T_11 = S0_11_mult_subdiag * S_11[1::2]
return result
def krylov_transpose_multiply(subdiag, v, u):
"""Multiply Krylov(A, v_i)^T @ u when A is zero except on the subdiagonal.
Parameters:
subdiag: Tensor of shape (n - 1, )
v: Tensor of shape (rank, n)
u: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, rank, n)
"""
batch_size, n = u.shape
rank, n_ = v.shape
assert n == n_, 'u and v must have the same last dimension'
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
result = torch.zeros((batch_size, rank, n), dtype=u.dtype, device=u.device)
# T_00_sum = (u[:, np.newaxis, ..., np.newaxis] * v[np.newaxis, ..., np.newaxis]).sum(dim=2)
T_00_sum = u @ v.t()
result[:, :, 0] = T_00_sum
T_01 = u[..., np.newaxis]
T_10 = v[..., np.newaxis]
T_11 = torch.ones(n, device=T_00_sum.device)
for d in range(m)[::-1]:
n1, n2 = 1 << d, 1 << (m - d - 1)
S_01, S_10, S_11 = T_01, T_10, T_11
# S0_10 = torch.cat((S_10[:, ::2], torch.zeros_like(S_10[:, ::2])), dim=-1)
# S1_01 = torch.cat((S_01[:, 1::2], torch.zeros_like(S_01[:, 1::2])), dim=-1)
# S = torch.cat((S0_10, S1_01))
S0_10_mult_subdiag = S_10[:, ::2] * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
S = torch.cat((torch.cat((S0_10_mult_subdiag, S_01[:, 1::2])),
torch.zeros((rank + batch_size, n1, n2), dtype=S_10.dtype, device=S_10.device)), dim=-1)
# polynomial multiplications
S_f = torch.rfft(S, 1)
S0_10_f, S1_01_f = S_f[:rank], S_f[rank:rank+batch_size]
# Different ways to compute the same expression, for speed vs readability
# Option 1: call complex_mult, slowest
# T_00_f_sum = complex_mult(S1_01_f[:, np.newaxis], S0_10_f[np.newaxis]).sum(dim=2)
# Option 2: multiply and sum
# Manually doing complex multiply, somehow this is faster than Cupy's complex mult
# prod = (S1_01_f[:, np.newaxis, ..., np.newaxis] * S0_10_f[np.newaxis, ..., np.newaxis, :]).sum(dim=2)
# Option 3: einsum
prod = torch.einsum('bnmo,rnmp->brmop', S1_01_f, S0_10_f)
# Option 4: manually doing permute and reshape and bmm, only 3% faster than einsum.
# temp1 = S1_01_f.permute(2, 0, 3, 1).reshape((-1, batch_size * 2, n1))
# temp2 = S0_10_f.permute(2, 1, 0, 3).reshape((-1, n1, rank * 2))
# prod = (temp1 @ temp2).reshape((-1, batch_size, 2, rank, 2)).permute(1, 3, 0, 2, 4)
# prod = (S1_01_f[:, np.newaxis, ..., np.newaxis] * S0_10_f[np.newaxis, ..., np.newaxis, :]).sum(dim=2)
T_00_f_sum = torch.stack((prod[..., 0, 0] - prod[..., 1, 1], prod[..., 0, 1] + prod[..., 1, 0]), dim=-1)
T_00_sum = torch.irfft(T_00_f_sum, 1, signal_sizes=(2 * n2, ))[..., :-1]
# polynomial additions
result[:, :, 1:2*n2] += T_00_sum
S0_11_mult_subdiag = S_11[::2] * subdiag[(n2 - 1)::(2 * n2)]
T_01 = torch.cat((S_01[:, ::2], S_01[:, 1::2] * S0_11_mult_subdiag[:, np.newaxis]), dim=-1)
T_10 = torch.cat((S_10[:, 1::2], S0_10_mult_subdiag * S_11[1::2][:, np.newaxis]), dim=-1)
T_11 = S0_11_mult_subdiag * S_11[1::2]
return result
def KTu_traceable(subdiag, v, u):
"""Multiply Krylov(A, v_i)^T @ u when A is zero except on the subdiagonal.
(WIP) Written to be traceable by Pytorch 1.0 JIT compiler.
Parameters:
subdiag: Tensor of shape (n - 1, )
v: Tensor of shape (rank, n)
u: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, rank, n)
"""
batch_size, n = u.shape
rank, n_ = v.shape
# assert n == n_, 'u and v must have the same last dimension'
m = int(np.log2(n))
# assert n == 1 << m, 'n must be a power of 2'
# T_00_sum = (u[:, np.newaxis, ..., np.newaxis] * v[np.newaxis, ..., np.newaxis]).sum(dim=2)
T_00_sum = u @ v.t()
result = T_00_sum.unsqueeze(-1)
T_01 = u[..., np.newaxis]
T_10 = v[..., np.newaxis]
T_11 = torch.ones(n, device=T_00_sum.device)
for d in range(m)[::-1]:
n1, n2 = 1 << d, 1 << (m - d - 1)
S_01, S_10, S_11 = T_01, T_10, T_11
# S0_10 = torch.cat((S_10[:, ::2], torch.zeros_like(S_10[:, ::2])), dim=-1)
# S1_01 = torch.cat((S_01[:, 1::2], torch.zeros_like(S_01[:, 1::2])), dim=-1)
# S = torch.cat((S0_10, S1_01))
S0_10_mult_subdiag = S_10[:, ::2] * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
S = torch.cat((torch.cat((S0_10_mult_subdiag, S_01[:, 1::2])),
torch.zeros((rank + batch_size, n1, n2), dtype=S_10.dtype, device=S_10.device)), dim=-1)
# polynomial multiplications
S_f = torch.rfft(S, 1)
S0_10_f, S1_01_f = S_f[:rank], S_f[rank:rank+batch_size]
# Different ways to compute the same expression, for speed vs readability
# Option 1: call complex_mult, slowest
# T_00_f_sum = complex_mult(S1_01_f[:, np.newaxis], S0_10_f[np.newaxis]).sum(dim=2)
# Option 2: multiply and sum
# Manually doing complex multiply, somehow this is faster than Cupy's complex mult
# prod = (S1_01_f[:, np.newaxis, ..., np.newaxis] * S0_10_f[np.newaxis, ..., np.newaxis, :]).sum(dim=2)
# Option 3: einsum
prod = torch.einsum('bnmo,rnmp->brmop', S1_01_f, S0_10_f)
# Option 4: manually doing permute and reshape and bmm, only 3% faster than einsum.
# temp1 = S1_01_f.permute(2, 0, 3, 1).reshape((-1, batch_size * 2, n1))
# temp2 = S0_10_f.permute(2, 1, 0, 3).reshape((-1, n1, rank * 2))
# prod = (temp1 @ temp2).reshape((-1, batch_size, 2, rank, 2)).permute(1, 3, 0, 2, 4)
# prod = (S1_01_f[:, np.newaxis, ..., np.newaxis] * S0_10_f[np.newaxis, ..., np.newaxis, :]).sum(dim=2)
T_00_f_sum = torch.stack((prod[..., 0, 0] - prod[..., 1, 1], prod[..., 0, 1] + prod[..., 1, 0]), dim=-1)
T_00_sum = torch.irfft(T_00_f_sum, 1, signal_sizes=(2 * n2, ))[..., :-1]
# polynomial additions
result = torch.cat((result[:, :, :1], result[:, :, 1:] + T_00_sum[:, :, :n2 - 1], T_00_sum[:, :, n2 - 1:]), dim=-1)
S0_11_mult_subdiag = S_11[::2] * subdiag[(n2 - 1)::(2 * n2)]
T_01 = torch.cat((S_01[:, ::2], S_01[:, 1::2] * S0_11_mult_subdiag[:, np.newaxis]), dim=-1)
T_10 = torch.cat((S_10[:, 1::2], S0_10_mult_subdiag * S_11[1::2][:, np.newaxis]), dim=-1)
T_11 = S0_11_mult_subdiag * S_11[1::2]
return result
def krylov_transpose_multiply_old(subdiag, v, u):
"""Multiply Krylov(A, v_i)^T @ u when A is zero except on the subdiagonal.
Uses the old algorithm that scales worse when batching.
Parameters:
subdiag: Tensor of shape (n - 1, )
v: Tensor of shape (rank, n)
u: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, rank, n)
"""
batch_size, n = u.shape
rank, n_ = v.shape
assert n == n_, 'u and v must have the same last dimension'
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
T_00 = u[:, np.newaxis, ..., np.newaxis] * v[np.newaxis, ..., np.newaxis]
T_01 = u[..., np.newaxis]
T_10 = v[..., np.newaxis]
T_11 = torch.ones((n, 1), device=T_00.device)
for d in range(m)[::-1]:
n1, n2 = 1 << d, 1 << (m - d - 1)
S_00, S_01, S_10, S_11 = T_00, T_01, T_10, T_11
S0_10 = torch.cat((S_10[:, ::2], torch.zeros_like(S_10[:, ::2])), dim=-1)
S1_01 = torch.cat((S_01[:, 1::2], torch.zeros_like(S_01[:, 1::2])), dim=-1)
S0_11 = torch.cat((S_11[::2], torch.zeros_like(S_11[::2])), dim=-1)
S1_11 = torch.cat((S_11[1::2], torch.zeros_like(S_11[1::2])), dim=-1)
S = torch.cat((S0_10, S0_11[np.newaxis], S1_01, S1_11[np.newaxis]))
# polynomial multiplications
S_f = torch.rfft(S, 1)
# S0_10_f, S0_11_f, S1_01_f, S1_11_f = S_f[:rank], S_f[rank], S_f[rank+1:rank+1+batch_size], S_f[-1]
# T_00_f = complex_mult(S1_01_f[:, np.newaxis], S0_10_f[np.newaxis])
# T_01_f = complex_mult(S1_01_f, S0_11_f)
# T_10_f = complex_mult(S1_11_f, S0_10_f)
# T_11_f = complex_mult(S1_11_f, S0_11_f)
# T_f = torch.cat((torch.cat((T_00_f, T_01_f[:, np.newaxis]), dim=1),
# torch.cat((T_10_f[np.newaxis], T_11_f[np.newaxis, np.newaxis]), dim=1)))
# I didn't realize you could just batch all 4 multiplications like this
T_f = complex_mult(S_f[rank+1:, np.newaxis], S_f[:rank+1])
T = torch.irfft(T_f, 1, signal_sizes=(2 * n2, )) * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
T_00, T_01, T_10, T_11 = T[:batch_size, :rank], T[:batch_size, -1], T[-1, :rank], T[-1, -1]
# polynomial additions
T_00 = torch.cat((T_00[:, :, :, :n2], T_00[:, :, :, n2:] + S_00[:, :, ::2] + S_00[:, :, 1::2]), dim=-1)
T_01 = torch.cat((T_01[:, :, :n2], T_01[:, :, n2:] + S_01[:, ::2]), dim=-1)
T_10 = torch.cat((T_10[:, :, :n2], T_10[:, :, n2:] + S_10[:, 1::2]), dim=-1)
return T_00.squeeze(dim=2).flip(2)
def krylov_multiply_conv(subdiag, v, w):
"""Multiply \sum_i Krylov(A, v_i) @ w_i when A is zero except on the subdiagonal.
Since K @ w can be computed by autodiffing K^T @ u, the algorithm is just
hand-differentiating the code of @krylov_transpose_multiply.
Use either Pytorch's conv1d or FFT for polynomial multiplication, depending
on polynomial degree. This is the fastest implementation.
Parameters:
subdiag: Tensor of shape (n - 1, )
v: Tensor of shape (rank, n)
w: Tensor of shape (batch_size, rank, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
batch_size, rank, n = w.shape
rank_, n_ = v.shape
assert n == n_, 'w and v must have the same last dimension'
assert rank == rank_, 'w and v must have the same rank'
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
# Forward pass. Since K @ w can be computed by autodiffing K^T @ u, we
# carry out the forward pass K^T @ u for u = 0 here to save the
# intermediate values. This code is exactly the same as the function
# @krylov_transpose_multiply, specialized to the case where u = 0.
save_for_backward = [None] * m
T_10 = v[..., np.newaxis]
T_11 = torch.ones((n), device=T_10.device)
for d in range(m)[::-1]:
n1, n2 = 1 << d, 1 << (m - d - 1)
S_10, S_11 = T_10, T_11
S0_10_mult_subdiag = S_10[:, ::2] * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
T_10 = torch.cat((S_10[:, 1::2], S0_10_mult_subdiag * S_11[1::2][:, np.newaxis]), dim=-1)
S0_11_mult_subdiag = S_11[::2] * subdiag[(n2 - 1)::(2 * n2)]
save_for_backward[d] = S0_10_mult_subdiag, S0_11_mult_subdiag
T_11 = S0_11_mult_subdiag * S_11[1::2]
# Backward pass
dT_01 = torch.zeros((batch_size, 1, n), dtype=w.dtype, device=w.device)
for d in range(m):
n1, n2 = 1 << d, 1 << (m - d - 1)
S0_10_mult_subdiag, S0_11_mult_subdiag = save_for_backward[d]
dS_01 = torch.empty((batch_size, 2 * n1, n2), dtype=w.dtype, device=w.device)
dS_01[:, ::2] = dT_01[:, :, :n2]
# dS1_01 = poly_mult_sum_backward_benchmark(w[:, :, 1:2*n2], S0_10_mult_subdiag)
if n2 <= 128:
dS1_01 = F.conv_transpose1d(w[:, :, 1:2*n2], S0_10_mult_subdiag.flip(2), padding=n2 - 1)
else:
dT_00_sum = torch.cat((w[:, :, 1:2*n2], torch.zeros((batch_size, rank, 1), dtype=w.dtype, device=w.device)), dim=-1)
dT_00_sum_f = torch.rfft(dT_00_sum, 1)
S0_10_f = torch.rfft(torch.cat((S0_10_mult_subdiag, torch.zeros_like(S0_10_mult_subdiag)), dim=-1), 1)
# dS1_01_f = complex_mult(conjugate(S0_10_f), dT_00_sum_f[:, :, np.newaxis]).sum(dim=1)
# Manually doing complex multiply
# prod = (S0_10_f[..., np.newaxis] * dT_00_sum_f[:, :, np.newaxis, :, np.newaxis, :]).sum(dim=1)
prod = torch.einsum('rnmo,brmp->bnmop', S0_10_f, dT_00_sum_f)
dS1_01_f = torch.stack((prod[..., 0, 0] + prod[..., 1, 1], prod[..., 0, 1] - prod[..., 1, 0]), dim=-1)
dS1_01 = torch.irfft(dS1_01_f, 1, signal_sizes=(2 * n2, ))[:, :, :n2]
dS_01[:, 1::2] = dT_01[:, :, n2:] * S0_11_mult_subdiag[:, np.newaxis] + dS1_01
dT_01 = dS_01
# du = ((dT_00_sum[:, :, np.newaxis] * v[np.newaxis, :, :, np.newaxis]).sum(dim=1) + dT_01).squeeze(dim=-1)
du = w[:, :, 0] @ v + dT_01.squeeze(dim=-1)
return du
def krylov_multiply(subdiag, v, w):
"""Multiply \sum_i Krylov(A, v_i) @ w_i when A is zero except on the subdiagonal.
Since K @ w can be computed by autodiffing K^T @ u, the algorithm is just
hand-differentiating the code of @krylov_transpose_multiply.
Parameters:
subdiag: Tensor of shape (n - 1, )
v: Tensor of shape (rank, n)
w: Tensor of shape (batch_size, rank, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
batch_size, rank, n = w.shape
rank_, n_ = v.shape
assert n == n_, 'w and v must have the same last dimension'
assert rank == rank_, 'w and v must have the same rank'
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
# Forward pass. Since K @ w can be computed by autodiffing K^T @ u, we
# carry out the forward pass K^T @ u for u = 0 here to save the
# intermediate values. This code is exactly the same as the function
# @krylov_transpose_multiply, specialized to the case where u = 0.
save_for_backward = [None] * m
T_10 = v[..., np.newaxis]
T_11 = torch.ones((n), device=T_10.device)
for d in range(m)[::-1]:
n1, n2 = 1 << d, 1 << (m - d - 1)
S_10, S_11 = T_10, T_11
S0_10_mult_subdiag = S_10[:, ::2] * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
T_10 = torch.cat((S_10[:, 1::2], S0_10_mult_subdiag * S_11[1::2][:, np.newaxis]), dim=-1)
S0_11_mult_subdiag = S_11[::2] * subdiag[(n2 - 1)::(2 * n2)]
save_for_backward[d] = S0_10_mult_subdiag, S0_11_mult_subdiag
T_11 = S0_11_mult_subdiag * S_11[1::2]
# Backward pass
dT_01 = torch.zeros((batch_size, 1, n), dtype=w.dtype, device=w.device)
for d in range(m):
n1, n2 = 1 << d, 1 << (m - d - 1)
S0_10_mult_subdiag, S0_11_mult_subdiag = save_for_backward[d]
dS_01 = torch.empty((batch_size, 2 * n1, n2), dtype=w.dtype, device=w.device)
dS_01[:, ::2] = dT_01[:, :, :n2]
dT_00_sum = torch.cat((w[:, :, 1:2*n2], torch.zeros((batch_size, rank, 1), dtype=w.dtype, device=w.device)), dim=-1)
dT_00_sum_f = torch.rfft(dT_00_sum, 1)
S0_10_f = torch.rfft(torch.cat((S0_10_mult_subdiag, torch.zeros_like(S0_10_mult_subdiag)), dim=-1), 1)
# dS1_01_f = complex_mult(conjugate(S0_10_f), dT_00_sum_f[:, :, np.newaxis]).sum(dim=1)
# Manually doing complex multiply
# prod = (S0_10_f[..., np.newaxis] * dT_00_sum_f[:, :, np.newaxis, :, np.newaxis, :]).sum(dim=1)
prod = torch.einsum('rnmo,brmp->bnmop', S0_10_f, dT_00_sum_f)
dS1_01_f = torch.stack((prod[..., 0, 0] + prod[..., 1, 1], prod[..., 0, 1] - prod[..., 1, 0]), dim=-1)
dS1_01 = torch.irfft(dS1_01_f, 1, signal_sizes=(2 * n2, ))[:, :, :n2]
dS_01[:, 1::2] = dT_01[:, :, n2:] * S0_11_mult_subdiag[:, np.newaxis] + dS1_01
dT_01 = dS_01
# du = ((dT_00_sum[:, :, np.newaxis] * v[np.newaxis, :, :, np.newaxis]).sum(dim=1) + dT_01).squeeze(dim=-1)
du = w[:, :, 0] @ v + dT_01.squeeze(dim=-1)
return du
def krylov_multiply_by_autodiff(subdiag, v, w):
"""Multiply \sum_i Krylov(A, v_i) @ w_i when A is zero except on the subdiagonal, using Pytorch's autodiff.
Parameters:
subdiag: Tensor of shape (n - 1, )
v: Tensor of shape (rank, n)
w: Tensor of shape (batch_size, rank, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
batch_size, rank, n = w.shape
rank_, n_ = v.shape
assert n == n_, 'w and v must have the same last dimension'
assert rank == rank_, 'w and v must have the same rank'
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
u = torch.zeros((batch_size, n), dtype=v.dtype, device=v.device, requires_grad=True)
prod = krylov_transpose_multiply(subdiag, v, u)
result, = torch.autograd.grad(prod, u, grad_outputs=w, create_graph=True)
return result
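# Editor's sketch: the hand-differentiated multiply and the autodiff version
# above should agree; a quick randomized check (sizes hypothetical):
# n, rank, batch = 16, 3, 4
# subdiag, v, w = torch.rand(n - 1), torch.rand(rank, n), torch.rand(batch, rank, n)
# assert torch.allclose(krylov_multiply(subdiag, v, w),
#                       krylov_multiply_by_autodiff(subdiag, v, w), atol=1e-5)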
def krylov_multiply_forward_old_(subdiag, v):
"""Forward pass of Krylov_multiply. Since K @ w can be computed by
autodiffing K^T @ u, we carry out the forward pass K^T @ u for u = 0 here
to save the intermediate values. This code is exactly the same as the
function @krylov_transpose_multiply_old, specialized to the case where u = 0.
Uses the old algorithm that scales worse when batching.
Parameters:
subdiag: Tensor of shape (n - 1, )
v: Tensor of shape (rank, n)
Returns:
save_for_backward: list of length log n, containing intermediate values
necessary for the backward pass K @ w.
"""
rank, n = v.shape
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
save_for_backward = [None] * m
T_10 = v[..., np.newaxis]
T_11 = torch.ones((n, 1), device=T_10.device)
for d in range(m)[::-1]:
n1, n2 = 1 << d, 1 << (m - d - 1)
S_10, S_11 = T_10, T_11
S0_10 = torch.cat((S_10[:, ::2], torch.zeros_like(S_10[:, ::2])), dim=-1)
S0_11 = torch.cat((S_11[::2], torch.zeros_like(S_11[::2])), dim=-1)
S1_11 = torch.cat((S_11[1::2], torch.zeros_like(S_11[1::2])), dim=-1)
S = torch.cat((S0_10, S0_11[np.newaxis], S1_11[np.newaxis]))
# polynomial multiplications
S_f = torch.rfft(S, 1)
# S0_10_f, S0_11_f, S1_11_f = S_f[:rank], S_f[-2], S_f[-1]
# save_for_backward[d] = (S0_10_f, S0_11_f)
# T_10_f = complex_mult(S1_11_f, S0_10_f)
# T_11_f = complex_mult(S1_11_f, S0_11_f)
# T_f = torch.cat((T_10_f, T_11_f[np.newaxis]))
save_for_backward[d] = S_f[:rank+1]
T_f = complex_mult(S_f[-1], S_f[:rank+1])
T = torch.irfft(T_f, 1, signal_sizes=(2 * n2, )) * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
T_10, T_11 = T[:rank], T[-1]
# polynomial additions
T_10 = torch.cat((T_10[:, :, :n2], T_10[:, :, n2:] + S_10[:, 1::2]), dim=-1)
return save_for_backward
def krylov_multiply_old(subdiag, v, w):
"""Multiply \sum_i Krylov(A, v_i) @ w_i when A is zero except on the subdiagonal.
Since K @ w can be computed by autodiffing K^T @ u, the algorithm is just
hand-differentiating the code of @krylov_transpose_multiply.
Uses the old algorithm that scales worse when batching.
Parameters:
subdiag: Tensor of shape (n - 1, )
v: Tensor of shape (rank, n)
w: Tensor of shape (batch_size, rank, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
batch_size, rank, n = w.shape
rank_, n_ = v.shape
assert n == n_, 'w and v must have the same last dimension'
assert rank == rank_, 'w and v must have the same rank'
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
save_for_backward = krylov_multiply_forward_old_(subdiag, v)
w = w[:, :, np.newaxis, :]
dT_00, dT_01 = w.flip(w.dim() - 1), torch.zeros((batch_size, 1, n), dtype=w.dtype, device=w.device)
for d in range(m):
n1, n2 = 1 << d, 1 << (m - d - 1)
dS_00 = torch.empty((batch_size, rank, 2 * n1, n2), dtype=w.dtype, device=w.device)
dS_00[:, :, ::2] = dT_00[:, :, :, n2:]
dS_00[:, :, 1::2] = dT_00[:, :, :, n2:]
dS_01 = torch.empty((batch_size, 2 * n1, n2), dtype=w.dtype, device=w.device)
dS_01[:, ::2] = dT_01[:, :, n2:]
dT = torch.cat((dT_00, dT_01[:, np.newaxis]), dim=1)
dT = dT * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
dT_f = torch.rfft(dT, 1) / (2 * n2)
# dT_00_f, dT_01_f = dT_f[:, :rank], dT_f[:, -1]
# S0_10_f, S0_11_f = save_for_backward[d]
# dS1_01_f = complex_mult(conjugate(S0_10_f)[np.newaxis], dT_00_f).sum(dim=1) + complex_mult(conjugate(S0_11_f), dT_01_f)
dS1_01_f = complex_mult(conjugate(save_for_backward[d]), dT_f).sum(dim=1)
dS1_01 = torch.irfft(dS1_01_f, 1, signal_sizes=(2 * n2, )) * (2 * n2)
dS_01[:, 1::2] = dS1_01[:, :, :n2]
dT_00, dT_01 = dS_00, dS_01
du = ((dT_00 * v[np.newaxis, :, :, np.newaxis]).sum(dim=1) + dT_01).squeeze(dim=-1)
return du
def subdiag_mult_conv(subdiag_A, subdiag_B, G, H, x):
"""Multiply \sum_i Krylov(A, G_i) @ Krylov(B, H_i) @ x when A and B are zero except on the subdiagonal.
Uses the fast algorithm.
Use either Pytorch's conv1d or FFT for polynomial multiplication, depending
on polynomial degree. This is the fastest implementation.
Parameters:
subdiag_A: Tensor of shape (n - 1, )
subdiag_B: Tensor of shape (n - 1, )
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
rank, n = G.shape
batch_size = x.shape[0]
# if not power of 2, round everything up
# TODO: this can maybe be handled better. also should benchmark how much speed non-po2 FFT loses
m = int(np.ceil(np.log2(n)))
n_extended = 1 << m
if n != n_extended:
x = torch.cat((x, torch.zeros(batch_size, n_extended - n, dtype=x.dtype, device=x.device)), dim=-1)
G = torch.cat((G, torch.zeros(rank, n_extended - n, dtype=G.dtype, device=G.device)), dim=-1)
H = torch.cat((H, torch.zeros(rank, n_extended - n, dtype=H.dtype, device=H.device)), dim=-1)
subdiag_A = torch.cat((subdiag_A, torch.zeros(n_extended - n, dtype=subdiag_A.dtype, device=subdiag_A.device)))
subdiag_B = torch.cat((subdiag_B, torch.zeros(n_extended - n, dtype=subdiag_B.dtype, device=subdiag_B.device)))
KT_out = krylov_transpose_multiply_conv(subdiag_B, H, x)
K_out = krylov_multiply_conv(subdiag_A, G, KT_out)
return K_out[:, :n] if n != n_extended else K_out
def subdiag_mult(subdiag_A, subdiag_B, G, H, x):
"""Multiply \sum_i Krylov(A, G_i) @ Krylov(B, H_i) @ x when A and B are zero except on the subdiagonal.
Uses the fast algorithm.
Parameters:
subdiag_A: Tensor of shape (n - 1, )
subdiag_B: Tensor of shape (n - 1, )
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
rank, n = G.shape
batch_size = x.shape[0]
# if not power of 2, round everything up
# TODO: this can maybe be handled better. also should benchmark how much speed non-po2 FFT loses
m = int(np.ceil(np.log2(n)))
n_extended = 1 << m
if n != n_extended:
x = torch.cat((x, torch.zeros(batch_size, n_extended - n, dtype=x.dtype, device=x.device)), dim=-1)
G = torch.cat((G, torch.zeros(rank, n_extended - n, dtype=G.dtype, device=G.device)), dim=-1)
H = torch.cat((H, torch.zeros(rank, n_extended - n, dtype=H.dtype, device=H.device)), dim=-1)
subdiag_A = torch.cat((subdiag_A, torch.zeros(n_extended - n, dtype=subdiag_A.dtype, device=subdiag_A.device)))
subdiag_B = torch.cat((subdiag_B, torch.zeros(n_extended - n, dtype=subdiag_B.dtype, device=subdiag_B.device)))
KT_out = krylov_transpose_multiply(subdiag_B, H, x)
K_out = krylov_multiply(subdiag_A, G, KT_out)
return K_out[:, :n] if n != n_extended else K_out
##### Slow multiplication for the subdiagonal case
def Krylov(linear_map, v, m=None):
"""Explicit construction of Krylov matrix [v A @ v A^2 @ v ... A^{m-1} @ v].
Parameters:
linear_map: a function v -> A @ v that takes a vector of size m and returns a vector of size m.
v: the starting vector of size m or (rank, m).
m: max power of A.
Returns:
K: Krylov matrix of size (m, m) or (rank, m, m).
"""
if m is None:
m = v.size(-1)
cols = [v]
for _ in range(m - 1):
v = linear_map(v)
cols.append(v)
return torch.stack(cols, dim=-1)
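# Editor's sketch: with a cyclic down-shift as the linear map, Krylov of the
# first standard basis vector is the identity (torch.roll needs Pytorch >= 1.0):
# shift = lambda v: torch.roll(v, 1, dims=-1)
# K = Krylov(shift, torch.tensor([1., 0., 0.])) # 3 x 3 identity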
def shift_subdiag(subdiag, v, upper_right_corner=0.0):
"""The linear map for multiplying with a subdiagonal matrix (possibly with an upper right corner).
This implementation is slow and not batched wrt rank, but easy to understand.
Parameters:
subdiag: (n - 1, )
v: (n, )
upper_right_corner: real number
Returns:
prod: (n, )
"""
return torch.cat((upper_right_corner * v[[-1]], subdiag * v[:-1]))
def subdiag_linear_map(subdiag, upper_right_corner=0.0):
"""Construct the linear map for multiplying with a subdiagonal matrix (possibly with an upper right corner).
This implementation is faster. The slowness of the Krylov construction is
from the kernel launch overhead in CUDA: we have n sequential steps, each
step having a few CUDA calls. To make it faster, we want to reduce the
number of CUDA operations. Here we reduce each step to 2 operations:
indexing, and pointwise multiplication.
Parameters:
subdiag: (n - 1, )
upper_right_corner: real number
Returns:
linear_map: v -> product, with v of shape either (n, ) or (rank, n)
"""
n = subdiag.size(0) + 1
shift_down = torch.arange(-1, n - 1, device=subdiag.device)
subdiag_extended = torch.cat((torch.tensor([upper_right_corner], dtype=subdiag.dtype, device=subdiag.device), subdiag))
# Pytorch 1.0 has torch.roll that should be much faster
# return lambda v: subdiag_extended * v.roll(1, dims=-1)
return lambda v: subdiag_extended * v[..., shift_down]
def krylov_subdiag_fast(subdiag, v, upper_right_corner=0.0):
"""Explicit construction of Krylov matrix [v A @ v A^2 @ v ... A^{n-1} @ v]
where A is a subdiagonal matrix (possibly with an upper right corner).
This uses vectorized indexing and cumprod so it's much faster than using
the Krylov function. However, the backward pass is slow because of the
inefficient implementation of cumprod_backward in Pytorch.
This should yield similar speed (forward + backward) to the fast
multiplication algorithm, but requires more memory.
Parameters:
subdiag: (n - 1, )
v: the starting vector of size n or (rank, n).
upper_right_corner: real number
Returns:
K: Krylov matrix of size (n, n) or (rank, n, n).
"""
rank, n = v.shape
a = torch.arange(n, dtype=torch.long, device=v.device)
b = -a
indices = a[:, np.newaxis] + b[np.newaxis]
v_circulant = v[:, indices]
subdiag_extended = torch.cat((torch.tensor([upper_right_corner], dtype=subdiag.dtype, device=subdiag.device), subdiag))
subdiag_circulant = subdiag_extended[indices]
subdiag_cumprod = subdiag_circulant.cumprod(dim=1)
K = v_circulant
K[:, :, 1:] *= subdiag_cumprod[:, :-1]
return K
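# Editor's sketch: the vectorized construction should agree with the
# step-by-step Krylov loop above (sizes hypothetical):
# subdiag, v = torch.rand(7), torch.rand(2, 8)
# assert torch.allclose(krylov_subdiag_fast(subdiag, v),
#                       Krylov(subdiag_linear_map(subdiag), v), atol=1e-6)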
def subdiag_mult_slow_old(subdiag_A, subdiag_B, G, H, x):
"""Multiply \sum_i Krylov(A, G_i) @ Krylov(B, H_i) @ x when A and B are zero except on the subdiagonal.
Uses the explicit Krylov construction with slow (and easy to understand)
linear map.
Parameters:
subdiag_A: Tensor of shape (n - 1, )
subdiag_B: Tensor of shape (n - 1, )
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
rank, n = G.shape
linear_map_A = functools.partial(shift_subdiag, subdiag_A)
linear_map_B = functools.partial(shift_subdiag, subdiag_B)
krylovs = [(Krylov(linear_map_A, G[i]), Krylov(linear_map_B, H[i]).t()) for i in range(rank)]
prods = [K[0] @ (K[1] @ x.t()) for K in krylovs]
return sum(prods).t()
def subdiag_mult_slow(subdiag_A, subdiag_B, G, H, x, corner_A=0.0, corner_B=0.0):
"""Multiply \sum_i Krylov(A, G_i) @ Krylov(B, H_i) @ x when A and B are zero except on the subdiagonal.
Uses the explicit Krylov construction with the more careful implementation of linear map.
Parameters:
subdiag_A: Tensor of shape (n - 1, )
subdiag_B: Tensor of shape (n - 1, )
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
if G.shape[0] == 1: # specialized code for rank=1, giving 2x speedup.
K_G = Krylov(subdiag_linear_map(subdiag_A, corner_A), G[0])
K_H = Krylov(subdiag_linear_map(subdiag_B, corner_B), H[0])
return (x @ K_H) @ K_G.t()
else:
K_G = Krylov(subdiag_linear_map(subdiag_A, corner_A), G)
K_H = Krylov(subdiag_linear_map(subdiag_B, corner_B), H)
return ((x @ K_H) @ K_G.transpose(1, 2)).sum(dim=0)
def subdiag_mult_slow_fast(subdiag_A, subdiag_B, G, H, x):
"""Multiply \sum_i Krylov(A, G_i) @ Krylov(B, H_i) @ x when A and B are zero except on the subdiagonal.
Uses the fast construction of Krylov matrix.
Parameters:
subdiag_A: Tensor of shape (n - 1, )
subdiag_B: Tensor of shape (n - 1, )
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
K_G, K_H = krylov_subdiag_fast(subdiag_A, G), krylov_subdiag_fast(subdiag_B, H)
return ((x @ K_H) @ K_G.transpose(1, 2)).sum(dim=0)
class CycleDownMultCuda(torch.autograd.Function):
'''Cycle v down and do pointwise multiplication with subdiag.
'''
@staticmethod
def forward(ctx, subdiag, v):
ctx.save_for_backward(subdiag, v)
return diag_mult_cuda.cycle_mult(subdiag, v, 0, -1)
@staticmethod
def backward(ctx, grad):
subdiag, v = ctx.saved_tensors
return diag_mult_cuda.cycle_mult(grad, v, 0, -1).sum(dim=0), diag_mult_cuda.cycle_mult(subdiag, grad, 1, 1)
cycle_down_mult = CycleDownMultCuda.apply
def test_cycle_down_mult():
n = 1 << 10
rank = 16
subdiag = torch.rand(n, requires_grad=True, device=device)
v = torch.rand((rank, n), requires_grad=True, device=device)
z = cycle_down_mult(subdiag, v)
y = torch.cat((subdiag[0] * v[..., -1:], subdiag[1:] * v[..., :-1]), dim=-1)
print((z - y).abs().max().item())
grad_output = torch.rand_like(y)
gs, gv = torch.autograd.grad(y, (subdiag, v), grad_output, retain_graph=True)
zs, zv = torch.autograd.grad(z, (subdiag, v), grad_output, retain_graph=True) # grad_outputs must match z's shape, so pass z rather than z.sum()
print((zs - gs).abs().max().item())
print((zv - gv).abs().max().item())
def subdiag_linear_map_cuda(subdiag, upper_right_corner=0.0):
"""Construct the linear map for multiplying with a subdiagonal matrix (possibly with an upper right corner).
Uses the construction in CUDA, so it's pretty fast.
Parameters:
subdiag: (n - 1, )
upper_right_corner: real number
Returns:
linear_map: v -> product, with v of shape either (n, ) or (rank, n)
"""
subdiag_extended = torch.cat((torch.tensor([upper_right_corner], dtype=subdiag.dtype, device=subdiag.device), subdiag))
return lambda v: cycle_down_mult(subdiag_extended, v)
def subdiag_mult_cuda(subdiag_A, subdiag_B, G, H, x, corner_A=0.0, corner_B=0.0):
"""Multiply \sum_i Krylov(A, G_i) @ Krylov(B, H_i) @ x when A and B are zero except on the subdiagonal.
Uses the explicit Krylov construction in CUDA.
Parameters:
subdiag_A: Tensor of shape (n - 1, )
subdiag_B: Tensor of shape (n - 1, )
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
Returns:
product: Tensor of shape (batch_size, n)
"""
K_G = Krylov(subdiag_linear_map_cuda(subdiag_A, corner_A), G)
K_H = Krylov(subdiag_linear_map_cuda(subdiag_B, corner_B), H)
return ((x @ K_H) @ K_G.transpose(1, 2)).sum(dim=0)
##### Slow multiplication for the tridiagonal case
def tridiag_linear_map(subdiag, diag, superdiag, upper_right_corner=0.0, lower_left_corner=0.0):
"""Construct the linear map for multiplying with a tridiagonal matrix
(possibly with upper right and lower left corners).
Similar to subdiag_linear_map, we want to reduce the number of CUDA
operations. Here we reduce each step to 3 operations: indexing,
pointwise multiplication, and summing.
Parameters:
subdiag: (n - 1, )
diag: (n, )
superdiag: (n - 1, )
upper_right_corner: real number
lower_left_corner: real number
Returns:
linear_map: v -> product, with v of shape either (n, ) or (rank, n)
"""
n = diag.size(0)
shift_none = torch.arange(n, device=diag.device)
shift_down = shift_none - 1
shift_up = (shift_none + 1) % n
shifts = torch.stack((shift_down, shift_none, shift_up))
subdiag_extended = torch.cat((torch.tensor([upper_right_corner], dtype=subdiag.dtype, device=subdiag.device), subdiag))
superdiag_extended = torch.cat((superdiag, torch.tensor([lower_left_corner], dtype=superdiag.dtype, device=superdiag.device)))
diags = torch.stack((subdiag_extended, diag, superdiag_extended))
return lambda v: (diags * v[..., shifts]).sum(dim=-2)
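# A minimal sketch (the function name here is illustration only, not part of
# the API): check tridiag_linear_map against an explicit 3x3 matrix
#     A = [[d0,   t0, c_ur],
#          [s0,   d1, t1  ],
#          [c_ll, s1, d2  ]]
# with both corners zero.
def example_tridiag_linear_map():
    sub = torch.tensor([2.0, 3.0], device=device)
    diag = torch.tensor([1.0, 1.0, 1.0], device=device)
    sup = torch.tensor([4.0, 5.0], device=device)
    A = torch.tensor([[1.0, 4.0, 0.0],
                      [2.0, 1.0, 5.0],
                      [0.0, 3.0, 1.0]], device=device)
    v = torch.tensor([1.0, 2.0, 3.0], device=device)
    # should print a value near 0
    print((tridiag_linear_map(sub, diag, sup)(v) - A @ v).abs().max().item())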
def tridiag_linear_map_slow(subdiag, diag, superdiag, upper_right_corner=0.0, lower_left_corner=0.0):
"""The linear map for multiplying with a tridiagonal matrix (possibly with
upper right and lower left corner).
This implementation is slow, but easy to understand.
Parameters:
subdiag: (n - 1, )
diag: (n, )
superdiag: (n - 1, )
upper_right_corner: real number
lower_left_corner: real number
Returns:
linear_map: v -> product, with v of shape either (n, ) or (rank, n)
"""
    return lambda v: (torch.cat((upper_right_corner * v[..., -1:], subdiag * v[..., :-1]), dim=-1)
                      + diag * v
                      + torch.cat((superdiag * v[..., 1:], lower_left_corner * v[..., :1]), dim=-1))
def tridiag_mult_slow(subdiag_A, diag_A, superdiag_A, subdiag_B, diag_B, superdiag_B, G, H, x, corners_A=(0.0, 0.0), corners_B=(0.0, 0.0)):
"""Multiply \sum_i Krylov(A, G_i) @ Krylov(B, H_i) @ x when A and B are zero except on the subdiagonal.
Uses the explicit Krylov construction with the more careful implementation of linear map.
Parameters:
subdiag_A: Tensor of shape (n - 1, )
diag_A: Tensor of shape (n, )
superdiag_A: Tensor of shape (n - 1, )
subdiag_B: Tensor of shape (n - 1, )
diag_B: Tensor of shape (n, )
superdiag_B: Tensor of shape (n - 1, )
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
corners_A: two real numbers, the upper right and lower left corners of A.
    corners_B: two real numbers, the upper right and lower left corners of B.
Returns:
product: Tensor of shape (batch_size, n)
"""
if G.shape[0] == 1: # specialized code for rank=1, giving 2x speedup.
K_G = Krylov(tridiag_linear_map(subdiag_A, diag_A, superdiag_A, *corners_A), G[0])
K_H = Krylov(tridiag_linear_map(subdiag_B, diag_B, superdiag_B, *corners_B), H[0])
return (x @ K_H) @ K_G.t()
else:
K_G = Krylov(tridiag_linear_map(subdiag_A, diag_A, superdiag_A, *corners_A), G)
K_H = Krylov(tridiag_linear_map(subdiag_B, diag_B, superdiag_B, *corners_B), H)
return ((x @ K_H) @ K_G.transpose(1, 2)).sum(dim=0)
def test_krylov_transpose_multiply():
m = 10
n = 1 << m
batch_size = 50
rank = 16
subdiag = torch.rand(n-1, requires_grad=True, device=device)
A = np.diag(subdiag.data.cpu().numpy(), -1)
u = torch.rand((batch_size, n), requires_grad=True, device=device)
v = torch.rand((rank, n), requires_grad=True, device=device)
# Fast algorithm on GPU
# KTu_traced = torch.jit.trace(KTu_traceable, (subdiag, v, u))
result = krylov_transpose_multiply(subdiag, v, u)
# result = krylov_transpose_multiply_conv(subdiag, v, u)
# result = krylov_transpose_multiply_old(subdiag, v, u)
grad, = torch.autograd.grad(result.sum(), subdiag, retain_graph=True)
# CPU dense multiply
Ks = [krylov_construct(A, v.data.cpu().numpy()[i], n) for i in range(rank)]
u_cpu = u.data.cpu().numpy()
result_cpu = np.stack([u_cpu @ K.T for K in Ks])
result_cpu = result_cpu.swapaxes(0, 1).squeeze()
result_cpu = torch.tensor(result_cpu, dtype=torch.float, device=device)
# GPU dense multiply
Ks_gpu_dense = [torch.tensor(K, dtype=torch.float, device=device) for K in Ks]
result_gpu_dense = torch.stack([u @ K.t() for K in Ks_gpu_dense])
result_gpu_dense = result_gpu_dense.transpose(0, 1).squeeze()
# Explicit construction on GPU
Ks_gpu = Krylov(subdiag_linear_map(subdiag), v)
result_gpu = (u @ Ks_gpu).transpose(0, 1)
grad_gpu, = torch.autograd.grad(result_gpu.sum(), subdiag, retain_graph=True)
# Explicit construction on GPU, but faster
Ks_gpu_fast = krylov_subdiag_fast(subdiag, v)
result_gpu_fast = (u @ Ks_gpu_fast).transpose(0, 1)
grad_gpu_fast, = torch.autograd.grad(result_gpu_fast.sum(), subdiag, retain_graph=True)
# These max and mean differences should be small
print((result - result_cpu).abs().max().item())
print((result - result_cpu).abs().mean().item())
print((result - result_gpu_dense).abs().max().item())
print((result - result_gpu_dense).abs().mean().item())
print((result - result_gpu).abs().max().item())
print((result - result_gpu).abs().mean().item())
print((grad - grad_gpu).abs().max().item())
print((grad - grad_gpu).abs().mean().item())
print((result - result_gpu_fast).abs().max().item())
print((result - result_gpu_fast).abs().mean().item())
print((grad - grad_gpu_fast).abs().max().item())
print((grad - grad_gpu_fast).abs().mean().item())
# with torch.autograd.profiler.profile(use_cuda=True) as prof:
# result = krylov_transpose_multiply_conv(subdiag, v, u)
# grad, = torch.autograd.grad(result.sum(), subdiag, retain_graph=True)
def test_krylov_multiply():
m = 10
n = 1 << m
batch_size = 50
rank = 16
subdiag = torch.rand(n-1, requires_grad=True, device=device)
A = np.diag(subdiag.data.cpu().numpy(), -1)
u = torch.rand((batch_size, n), requires_grad=True, device=device)
v = torch.rand((rank, n), requires_grad=True, device=device)
w = torch.rand((batch_size, rank, n), requires_grad=True, device=device)
# Fast algorithm on GPU
# result = krylov_multiply_conv(subdiag, v, w)
result = krylov_multiply(subdiag, v, w)
# result = krylov_multiply_old(subdiag, v, w)
grad, = torch.autograd.grad(result.sum(), subdiag, retain_graph=True)
# Using autodiff
result_autodiff = krylov_multiply_by_autodiff(subdiag, v, w)
grad_autodiff, = torch.autograd.grad(result_autodiff.sum(), subdiag, retain_graph=True)
# CPU dense multiply
Ks = [krylov_construct(A, v.data.cpu().numpy()[i], n) for i in range(rank)]
w_cpu = w.data.cpu().numpy()
result_cpu = np.stack([w_cpu[:, i] @ Ks[i] for i in range(rank)]).sum(axis=0).squeeze()
result_cpu = torch.tensor(result_cpu, dtype=torch.float, device=device)
# Explicit construction on GPU
Ks_gpu = Krylov(subdiag_linear_map(subdiag), v)
result_gpu = (w.transpose(0, 1) @ Ks_gpu.transpose(1, 2)).sum(dim=0)
grad_gpu, = torch.autograd.grad(result_gpu.sum(), subdiag, retain_graph=True)
# Explicit construction on GPU, but faster
Ks_gpu_fast = krylov_subdiag_fast(subdiag, v)
result_gpu_fast = (w.transpose(0, 1) @ Ks_gpu_fast.transpose(1, 2)).sum(dim=0)
grad_gpu_fast, = torch.autograd.grad(result_gpu_fast.sum(), subdiag, retain_graph=True)
# These max and mean differences should be small
print((result - result_autodiff).abs().max().item())
print((result - result_autodiff).abs().mean().item())
print((grad - grad_autodiff).abs().max().item())
print((grad - grad_autodiff).abs().mean().item())
print((result - result_cpu).abs().max().item())
print((result - result_cpu).abs().mean().item())
print((result - result_gpu).abs().max().item())
print((result - result_gpu).abs().mean().item())
print((grad - grad_gpu).abs().max().item())
print((grad - grad_gpu).abs().mean().item())
print((result - result_gpu_fast).abs().max().item())
print((result - result_gpu_fast).abs().mean().item())
print((grad - grad_gpu_fast).abs().max().item())
print((grad - grad_gpu_fast).abs().mean().item())
def test_subdiag_mult():
m = 10
n = 1 << m
batch_size = 50
rank = 16
subdiag = torch.rand(n-1, requires_grad=True, device=device)
diag = torch.rand(n, requires_grad=True, device=device)
superdiag = torch.rand(n-1, requires_grad=True, device=device)
u = torch.rand((batch_size, n), requires_grad=True, device=device)
v = torch.rand((rank, n), requires_grad=True, device=device)
K = Krylov(subdiag_linear_map(subdiag, 1.0), v)
K_fast = krylov_subdiag_fast(subdiag, v, upper_right_corner=1.0)
print((K - K_fast).abs().max().item())
result = subdiag_mult_conv(subdiag, subdiag, v, v, u)
# result = subdiag_mult(subdiag, subdiag, v, v, u)
grad, = torch.autograd.grad(result.sum(), subdiag, retain_graph=True)
result_slow_old = subdiag_mult_slow_old(subdiag, subdiag, v, v, u)
grad_slow_old, = torch.autograd.grad(result_slow_old.sum(), subdiag, retain_graph=True)
result_slow = subdiag_mult_slow(subdiag, subdiag, v, v, u)
grad_slow, = torch.autograd.grad(result_slow.sum(), subdiag, retain_graph=True)
result_slow_fast = subdiag_mult_slow_fast(subdiag, subdiag, v, v, u)
grad_slow_fast, = torch.autograd.grad(result_slow_fast.sum(), subdiag, retain_graph=True)
result_cuda = subdiag_mult_cuda(subdiag, subdiag, v, v, u)
grad_cuda, = torch.autograd.grad(result_cuda.sum(), subdiag, retain_graph=True)
# These max and mean differences should be small
print((result - result_slow_old).abs().max().item())
print((result - result_slow_old).abs().mean().item())
print((grad - grad_slow_old).abs().max().item())
print((grad - grad_slow_old).abs().mean().item())
print((result - result_slow).abs().max().item())
print((result - result_slow).abs().mean().item())
print((grad - grad_slow).abs().max().item())
print((grad - grad_slow).abs().mean().item())
print((result - result_slow_fast).abs().max().item())
print((result - result_slow_fast).abs().mean().item())
print((grad - grad_slow_fast).abs().max().item())
print((grad - grad_slow_fast).abs().mean().item())
print((result - result_cuda).abs().max().item())
print((result - result_cuda).abs().mean().item())
print((grad - grad_cuda).abs().max().item())
print((grad - grad_cuda).abs().mean().item())
def test_tridiag_mult():
m = 10
n = 1 << m
batch_size = 50
rank = 16
subdiag = torch.rand(n-1, requires_grad=True, device=device) / 2
diag = torch.rand(n, requires_grad=True, device=device) / 2
superdiag = torch.rand(n-1, requires_grad=True, device=device) / 2
u = torch.rand((batch_size, n), requires_grad=True, device=device)
v = torch.rand((rank, n), requires_grad=True, device=device)
K = Krylov(tridiag_linear_map(subdiag, diag, superdiag, 0.5, 0.5), v)
K_old = Krylov(tridiag_linear_map_slow(subdiag, diag, superdiag, 0.5, 0.5), v)
print((K - K_old).abs().max().item())
trid_slow = tridiag_mult_slow(subdiag, diag, superdiag, subdiag, diag, superdiag, v, v, u)
# TODO: broken, move test into subpackage
if __name__ == "__main__":
test_krylov_transpose_multiply()
test_krylov_multiply()
test_subdiag_mult()
test_tridiag_mult()
|
structured-nets-master
|
pytorch/structure/krylov.py
|
import torch
from scipy.linalg import circulant
from .complex_utils import complex_mult
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def circulant_multiply(c, x):
""" Multiply circulant matrix with first column c by x
Parameters:
c: (n, )
x: (batch_size, n) or (n, )
Return:
prod: (batch_size, n) or (n, )
"""
return torch.irfft(complex_mult(torch.rfft(c, 1), torch.rfft(x, 1)), 1, signal_sizes=(c.shape[-1], ))
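# Worked example (values computed by hand): with first column c = [1, 2, 3],
# scipy.linalg.circulant(c) is
#     [[1, 3, 2],
#      [2, 1, 3],
#      [3, 2, 1]]
# so circulant_multiply(c, x) returns c itself for x = [1, 0, 0], and
# [6, 6, 6] for x = [1, 1, 1] since every row sums to 6.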
def test_circulant_multiply(n):
c = torch.rand(n, device=device)
x = torch.rand((3, n), device=device)
C = torch.tensor(circulant(c.detach().cpu().numpy()), dtype=c.dtype, device=c.device)
slow = x @ C.t()
fast = circulant_multiply(c, x)
print('Error compared to slow multiply: ', (slow - fast).abs().max().item())
# TODO: move test into subpackage
if __name__ == '__main__':
test_circulant_multiply(100)
|
structured-nets-master
|
pytorch/structure/circulant.py
|
from .hadamard import hadamard_transform
import torch
import numpy as np
from scipy.linalg import hadamard
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# S,G,B: diagonal
# P: permutation
# x: batch_size x n_features
def fastfood_multiply(S,G,B,P,x):
HBx = hadamard_transform(B*x)
PHBx = HBx[:, P]
HGPHBx = hadamard_transform(G*PHBx)
return S*HGPHBx
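# The chain above is the Fastfood factorization V = S H G P H B of
# Le, Sarlos & Smola (ICML 2013): B, G, S are diagonal, P is a permutation and
# H is the fast Walsh-Hadamard transform, so a dense-looking n x n map is
# applied in O(n log n) time and O(n) memory. Sketch of a call (assuming n is
# a power of two, as the Hadamard transform requires):
#     y = fastfood_multiply(S, G, B, P, x)  # x: (batch_size, n)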
def test_fastfood_multiply(n, batch_size):
S = np.random.randn(n)
G = np.random.randn(n)
B = np.random.randn(n)
P = np.random.permutation(n)
x = np.random.randn(batch_size,n)
H = hadamard(n)
HBx = np.dot(H,(B*x).T).T
PHBx = HBx[:,P]
HGPHBx = np.dot(H,(G*PHBx).T).T
output_explicit = S*HGPHBx
S = torch.tensor(S, dtype=torch.float, device=device)
G = torch.tensor(G, dtype=torch.float, device=device)
B = torch.tensor(B, dtype=torch.float, device=device)
P = torch.tensor(P, dtype=torch.long, device=device)
x = torch.tensor(x, dtype=torch.float, device=device)
output = fastfood_multiply(S,G,B,P,x)
print(np.linalg.norm(output_explicit - output))
# TODO: move test into subpackage
if __name__ == '__main__':
test_fastfood_multiply(128,50)
|
structured-nets-master
|
pytorch/structure/fastfood.py
|
'''Functions to multiply by a Toeplitz-like matrix.
'''
import numpy as np
import torch
from .complex_utils import complex_mult, conjugate
from .krylov import Krylov
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
##### Fast multiplication for the Toeplitz-like case
def toeplitz_krylov_transpose_multiply(v, u, f=0.0):
"""Multiply Krylov(Z_f, v_i)^T @ u.
Parameters:
v: (rank, n)
u: (batch_size, n)
f: real number
Returns:
product: (batch, rank, n)
"""
_, n = u.shape
_, n_ = v.shape
assert n == n_, 'u and v must have the same last dimension'
if f != 0.0: # cycle version
# Computing the roots of f
mod = abs(f) ** (torch.arange(n, dtype=u.dtype, device=u.device) / n)
if f > 0:
arg = torch.stack((torch.ones(n, dtype=u.dtype, device=u.device),
torch.zeros(n, dtype=u.dtype, device=u.device)), dim=-1)
else: # Find primitive roots of -1
angles = torch.arange(n, dtype=u.dtype, device=u.device) / n * np.pi
arg = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1)
eta = mod[:, np.newaxis] * arg
eta_inverse = (1.0 / mod)[:, np.newaxis] * conjugate(arg)
u_f = torch.ifft(eta_inverse * u[..., np.newaxis], 1)
v_f = torch.fft(eta * v[..., np.newaxis], 1)
uv_f = complex_mult(u_f[:, np.newaxis], v_f[np.newaxis])
uv = torch.fft(uv_f, 1)
# We only need the real part of complex_mult(eta, uv)
return eta[..., 0] * uv[..., 0] - eta[..., 1] * uv[..., 1]
else:
u_f = torch.rfft(torch.cat((u.flip(1), torch.zeros_like(u)), dim=-1), 1)
v_f = torch.rfft(torch.cat((v, torch.zeros_like(v)), dim=-1), 1)
uv_f = complex_mult(u_f[:, np.newaxis], v_f[np.newaxis])
return torch.irfft(uv_f, 1, signal_sizes=(2 * n, ))[..., :n].flip(2)
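# Math note: Krylov(Z_f, v) is exactly the f-circulant matrix with first
# column v: entry (i, j) is v[(i - j) mod n], multiplied by f whenever i < j.
# f-circulants are diagonalized by the DFT after scaling coordinate j by
# eta_j = f^(j/n), which is what the cycle branch above exploits; the f = 0
# branch reduces to plain zero-padded FFT convolution.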
def toeplitz_krylov_multiply_by_autodiff(v, w, f=0.0):
"""Multiply \sum_i Krylov(Z_f, v_i) @ w_i, using Pytorch's autodiff.
This function is just to check the result of toeplitz_krylov_multiply.
Parameters:
v: (rank, n)
w: (batch_size, rank, n)
f: real number
Returns:
product: (batch, n)
"""
batch_size, rank, n = w.shape
rank_, n_ = v.shape
assert n == n_, 'w and v must have the same last dimension'
assert rank == rank_, 'w and v must have the same rank'
u = torch.zeros((batch_size, n), dtype=v.dtype, device=v.device, requires_grad=True)
prod = toeplitz_krylov_transpose_multiply(v, u, f)
result, = torch.autograd.grad(prod, u, grad_outputs=w, create_graph=True)
return result
def toeplitz_krylov_multiply(v, w, f=0.0):
"""Multiply \sum_i Krylov(Z_f, v_i) @ w_i.
Parameters:
v: (rank, n)
w: (batch_size, rank, n)
f: real number
Returns:
product: (batch, n)
"""
_, rank, n = w.shape
rank_, n_ = v.shape
assert n == n_, 'w and v must have the same last dimension'
assert rank == rank_, 'w and v must have the same rank'
if f != 0.0: # cycle version
# Computing the roots of f
mod = abs(f) ** (torch.arange(n, dtype=w.dtype, device=w.device) / n)
if f > 0:
arg = torch.stack((torch.ones(n, dtype=w.dtype, device=w.device),
torch.zeros(n, dtype=w.dtype, device=w.device)), dim=-1)
else: # Find primitive roots of -1
angles = torch.arange(n, dtype=w.dtype, device=w.device) / n * np.pi
arg = torch.stack((torch.cos(angles), torch.sin(angles)), dim=-1)
eta = mod[:, np.newaxis] * arg
eta_inverse = (1.0 / mod)[:, np.newaxis] * conjugate(arg)
w_f = torch.fft(eta * w[..., np.newaxis], 1)
v_f = torch.fft(eta * v[..., np.newaxis], 1)
wv_sum_f = complex_mult(w_f, v_f).sum(dim=1)
wv_sum = torch.ifft(wv_sum_f, 1)
# We only need the real part of complex_mult(eta_inverse, wv_sum)
        return eta_inverse[..., 0] * wv_sum[..., 0] - eta_inverse[..., 1] * wv_sum[..., 1]
else:
w_f = torch.rfft(torch.cat((w, torch.zeros_like(w)), dim=-1), 1)
v_f = torch.rfft(torch.cat((v, torch.zeros_like(v)), dim=-1), 1)
wv_sum_f = complex_mult(w_f, v_f).sum(dim=1)
return torch.irfft(wv_sum_f, 1, signal_sizes=(2 * n, ))[..., :n]
def toeplitz_mult(G, H, x, cycle=True):
"""Multiply \sum_i Krylov(Z_f, G_i) @ Krylov(Z_f, H_i) @ x.
Parameters:
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
cycle: whether to use f = (1, -1) or f = (0, 0)
Returns:
product: Tensor of shape (batch_size, n)
"""
# f = (1,-1) if cycle else (1,1)
f = (1, -1) if cycle else (0, 0)
transpose_out = toeplitz_krylov_transpose_multiply(H, x, f[1])
return toeplitz_krylov_multiply(G, transpose_out, f[0])
##### Slow multiplication for the Toeplitz-like case
def toeplitz_Z_f_linear_map(f=0.0):
"""The linear map for multiplying by Z_f.
This implementation is slow and not batched wrt rank, but easy to understand.
Parameters:
f: real number
Returns:
linear_map: v -> product, with v of shape (n, )
"""
return lambda v: torch.cat((f * v[[-1]], v[:-1]))
def krylov_toeplitz_fast(v, f=0.0):
"""Explicit construction of Krylov matrix [v A @ v A^2 @ v ... A^{n-1} @ v]
where A = Z_f. This uses vectorized indexing and cumprod so it's much
faster than using the Krylov function.
Parameters:
v: the starting vector of size n or (rank, n).
f: real number
Returns:
K: Krylov matrix of size (n, n) or (rank, n, n).
"""
rank, n = v.shape
a = torch.arange(n, device=v.device)
b = -a
indices = a[:, np.newaxis] + b[np.newaxis]
K = v[:, indices]
K[:, indices < 0] *= f
return K
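# Worked example (rank 1, n = 3): for v = [[v0, v1, v2]],
#     krylov_toeplitz_fast(v, f)[0] = [[v0,   f*v2, f*v1],
#                                      [v1,   v0,   f*v2],
#                                      [v2,   v1,   v0  ]]
# i.e. column j is Z_f^j @ v[0], matching Krylov(toeplitz_Z_f_linear_map(f), v[0]).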
def toeplitz_mult_slow(G, H, x, cycle=True):
"""Multiply \sum_i Krylov(Z_f, G_i) @ Krylov(Z_f, H_i) @ x.
Uses the explicit Krylov construction with slow (and easy to understand)
linear map.
Parameters:
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
cycle: whether to use f = (1, -1) or f = (0, 0)
Returns:
product: Tensor of shape (batch_size, n)
"""
assert G.shape == H.shape, 'G and H must have the same shape'
rank, n = G.shape
f = (1, -1) if cycle else (0, 0)
    krylovs = [(Krylov(toeplitz_Z_f_linear_map(f[0]), G[i]),
                Krylov(toeplitz_Z_f_linear_map(f[1]), H[i]).t())
               for i in range(rank)]
prods = [K[0] @ (K[1] @ x.t()) for K in krylovs]
return sum(prods).t()
def toeplitz_mult_slow_fast(G, H, x, cycle=True):
"""Multiply \sum_i Krylov(Z_f, G_i) @ Krylov(Z_f, H_i) @ x.
Uses the fast construction of Krylov matrix.
Parameters:
G: Tensor of shape (rank, n)
H: Tensor of shape (rank, n)
x: Tensor of shape (batch_size, n)
cycle: whether to use f = (1, -1) or f = (0, 0)
Returns:
product: Tensor of shape (batch_size, n)
"""
assert G.shape == H.shape
f_G, f_H = (1, -1) if cycle else (0, 0)
K_G, K_H = krylov_toeplitz_fast(G, f_G), krylov_toeplitz_fast(H, f_H)
return ((x @ K_H) @ K_G.transpose(1, 2)).sum(dim=0)
def test_toeplitz_mult():
v = torch.tensor([[0,1,0,-1],[0,1,2,3]], dtype=torch.float, device=device, requires_grad=True)
u = torch.tensor([[1,1,1,1],[0,1,2,3]], dtype=torch.float, device=device, requires_grad=True)
w = toeplitz_krylov_transpose_multiply(v, u, f=-1)
# output:
# [[[ 0 2 2 0]
# [ 6 0 -4 -6]]
# [[ -2 2 4 2]
# [ 14 8 0 -8]]]
toeplitz_mult(v, v, u)
toeplitz_mult_slow(v, v, u)
# output:
# array([[-16., -20., -4., 16.],
# [ 16., -8., 12., 64.]])
toeplitz_mult(v, v, u, cycle=False)
toeplitz_mult_slow(v, v, u, cycle=False)
# output:
# array([[ 0., 6., 16., 26.],
# [ 0., 12., 38., 66.]])
m = 10
n = 1<<m
batch_size = 50
rank = 16
u = torch.rand((batch_size, n), requires_grad=True, device=device)
v = torch.rand((rank, n), requires_grad=True, device=device)
result = toeplitz_mult(v, v, u, cycle=True)
grad, = torch.autograd.grad(result.sum(), v, retain_graph=True)
result_slow = toeplitz_mult_slow(v, v, u, cycle=True)
grad_slow, = torch.autograd.grad(result_slow.sum(), v, retain_graph=True)
result_slow_fast = toeplitz_mult_slow_fast(v, v, u, cycle=True)
grad_slow_fast, = torch.autograd.grad(result_slow_fast.sum(), v, retain_graph=True)
# These max and mean errors should be small
print((result - result_slow).abs().max().item())
print((result - result_slow).abs().mean().item())
print((grad - grad_slow).abs().max().item())
print((grad - grad_slow).abs().mean().item())
print((result - result_slow_fast).abs().max().item())
print((result - result_slow_fast).abs().mean().item())
print((grad - grad_slow_fast).abs().max().item())
print((grad - grad_slow_fast).abs().mean().item())
def test_memory():
"""Memory stress test to make sure there's no memory leak.
"""
for _ in range(10000):
a = torch.empty((2,4096), dtype=torch.float, device=device, requires_grad=True)
b = torch.empty((2,4096), dtype=torch.float, device=device, requires_grad=True)
c = toeplitz_mult(a, a, b)
g, = torch.autograd.grad(torch.sum(c), a, retain_graph=True)
# TODO: move test into subpackage
if __name__ == '__main__':
test_toeplitz_mult()
# test_memory()
|
structured-nets-master
|
pytorch/structure/toeplitz.py
|
import torch.cuda
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
ext_modules = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension(
'hadamard_cuda', [
'hadamard_cuda.cpp',
'hadamard_cuda_kernel.cu'
],
extra_compile_args={'cxx': ['-g'],
'nvcc': ['-O2']})
ext_modules.append(extension)
setup(
name='hadamard_cuda',
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension})
|
structured-nets-master
|
pytorch/structure/hadamard_cuda/setup.py
|
import numpy as np
import itertools
import pyfftw
import sys
sys.path.insert(0,'../../../pytorch/')
from structure.scratch.krylovslow import krylov_construct
# define fft calls
def _plan_ffts(in_shape, lib='numpy'):
out_shape = in_shape[:-1] + (in_shape[-1]//2 + 1,)
if lib == 'numpy':
x_for = np.zeros(shape=in_shape)
fft = lambda: np.fft.rfft(x_for)
y_bak = np.empty(shape=out_shape, dtype='complex128')
ifft = lambda: np.fft.irfft(y_bak)
return ((x_for, fft), (y_bak, ifft))
if lib == 'scipy':
pass
if lib == 'fftw':
out_shape = in_shape[:-1] + (in_shape[-1]//2 + 1,)
x_for = pyfftw.empty_aligned(in_shape, dtype='float64')
y_for = pyfftw.empty_aligned(out_shape, dtype='complex128')
fft_for = pyfftw.FFTW(x_for, y_for, direction='FFTW_FORWARD', flags=['FFTW_MEASURE']) # don't destroy input so 0s are preserved
x_for[:] = 0
x_bak = pyfftw.empty_aligned(in_shape, dtype='float64')
y_bak = pyfftw.empty_aligned(out_shape, dtype='complex128')
fft_bak = pyfftw.FFTW(y_bak, x_bak, direction='FFTW_BACKWARD', flags=['FFTW_MEASURE', 'FFTW_DESTROY_INPUT'])
return ((x_for, fft_for), (y_bak, fft_bak))
def plan_ffts(m, lib='numpy'):
fft_plans = [None] * m
for d in range(m-1,-1,-1):
n1, n2 = 1<<d, 1<<(m-d)
in_shape = (4,n1,n2)
fft_plans[d] = _plan_ffts(in_shape, lib)
return fft_plans
# TODO: put this as subfunction of main function
# this is specialized to subdiagonal for now
# @profile
def _resolvent_bilinear_flattened(fft_plans, subd, m, d, S):
# pass at depth d computes 4 arrays:
# each array is length n, indexed by x_{m-1}, ..., x_{m-d}, y_{m-d-1}, ..., y_0
# for convenience, store as x_{m-d}, ..., x_{m-1}, y_{m-d-1}, ..., y_0 (index is bit-reversed)
# assert d < m # assume leaf pass done in main function
S_00, S_01, S_10, S_11 = S # answers to previous layer: indexed by x_{m-d-1}, x_{m-d}, ..., x_{m-1}, y_{m-d-2}, ..., y_0
# these are the same as the S0[0,0],S1[0,0] in the recursive version
# assert S_00.shape == (1<<(d+1), 1<<(m-d-1))
n1, n2 = 1<<d, 1<<(m-d-1) # input shape 2n1 x n2, output shape n1 x 2n2
((S_, fft), (T_, ifft)) = fft_plans[d]
S0_10_mult_subdiag, S0_11, S1_01, S1_11 = S_ ## pass
S0_10_mult_subdiag[:,:n2] = S_10[:n1,:]
S1_01[:,:n2] = S_01[n1:,:]
S0_11[:,:n2] = S_11[:n1,:]
S1_11[:,:n2] = S_11[n1:,:] ## dS_11[...] = dS1_11[...]
# polynomial multiplications
S0_10_f, S0_11_f, S1_01_f, S1_11_f = fft() ## dS_ = fft(dS*_**_f)
# subproblem for branch x_{m-d}, ..., x_{m-1} is A[\overline{x_{m-1}...x_{m-d}} + 2^{m-d-1}]
T_[0] = S1_01_f * S0_10_f
T_[1] = S1_01_f * S0_11_f
T_[2] = S1_11_f * S0_10_f
T_[3] = S1_11_f * S0_11_f ## dS1_01_f += dT_[0] * S0_10_f; dS0_10_f += dT_[0] * S1_01_f
    ## note that the S*_**_f are the only things that need to be stored in the forward pass
## also note that there is an optimization here; should only need half
T__ = ifft() ## dT_ = ifft(dT__) (because DFT matrix symmetric)
T__ *= subd[n1:n1*2, np.newaxis] ## dT__ *= subd[...]
## for learning A, should get somethiign like dsubd[...] = T__
T_00, T_01, T_10, T_11 = T__
# polynomial additions
T_00[:,n2:] += S_00[:n1,:] ## dS_00[:n1,:] = T_00[:,n2:]
T_00[:,n2:] += S_00[n1:,:]
T_01[:,n2:] += S_01[:n1,:]
T_10[:,n2:] += S_10[n1:,:]
## autodiff correspondences annotated in with '##'
## this function takes in S and outputs T;
## the backwards pass calls these lines in reverse,
## taking dT and outputting dS where ## dx := \partial{L}/\partial{x},
## (L is the final output of the entire algorithm)
return (T_00, T_01, T_10, T_11)
def bitreversal_slow(x, n, m):
""" Compute the bit reversal permutation """
assert n == 1<<m # power of 2 for now
x_ = x.reshape([2]*m)
x_bf_ = np.empty(shape=[2]*m)
for i in itertools.product(*([[0,1]]*m)):
x_bf_[i[::-1]] = x_[i]
x_bf = x_bf_.reshape(n)
return x_bf
# note that this can be sped up by pre-allocating memory:
# %timeit np.hstack((x, np.zeros((32,32)))) : 9.16 µs ± 221 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
# %timeit y = np.zeros((32,64)); y[:,:32] = x : 3.63 µs ± 573 ns per loop (mean ± std. dev. of 7 runs, 100000 loops each)
def bitreversal_stack(x, n, m):
""" faster version in numpy """
assert n == 1<<m
n1, n2 = n, 1
x_ = x.reshape((n1,n2))
for i in range(m):
n1 //= 2
n2 *= 2
x_ = np.hstack((x_[:n1,:], x_[n1:,:]))
return x_.squeeze()
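# Worked example: for n = 8 (m = 3), bitreversal_stack(np.arange(8), 8, 3)
# returns [0, 4, 2, 6, 1, 5, 3, 7]: index 1 = 001b maps to 100b = 4,
# index 3 = 011b maps to 110b = 6, and so on.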
# another bit reversal algorithm that's asymptotically faster:
# http://www.lite.etsii.urjc.es/manuel.rubiosanchez/papers/Bit-Reversal-Article.pdf
# we don't need to implement any fast ones because we only need to calculate the permutation once and then index into it
# call with:
# resolvent_bilinear_flattened = create(n, m, 'numpy')
def create(n, m, lib='numpy'):
fft_plans = plan_ffts(m, lib)
bf_perm = bitreversal_stack(np.arange(n), n, m)
    # Shorter versions but much slower. Maybe we don't care about speed because
    # this will be done only once.
    # bf_perm_1 = np.array([int(np.binary_repr(i, width=m)[::-1], 2) for i in range(n)])
    # bf_perm_2 = np.array([int(f'{i:0{m}b}'[::-1], 2) for i in range(n)])
# bf_perm_3 = np.array([int(bin(i + n)[:2:-1], 2) for i in range(n)])
bitreversal = lambda x, n, m: x[bf_perm]
# @profile
def resolvent_bilinear_flattened(A, v, u, n, m):
assert n == 1<<m # power of 2 for now
# assume A is subdiagonal for now
subd = np.empty((n,))
subd[1:] = np.diagonal(A, -1)
subd = bitreversal(subd, n, m)
# reshape u,v to be indexed consistently with the above
# i.e. bit flip their indices
u_bf = bitreversal(u, n, m).reshape((n,1)) # tri says use [:,np.newaxis]
v_bf = bitreversal(v, n, m).reshape((n,1))
S = (u_bf*v_bf, u_bf, v_bf, np.ones((n,1)))
for d in range(m)[::-1]:
S = _resolvent_bilinear_flattened(fft_plans, subd, m, d, S)
# return np.flip(S[0], axis=-1)
return S[0].squeeze()[::-1]
return resolvent_bilinear_flattened
class KrylovTransposeMultiply():
"""Multiply Krylov(A, v)^T @ u when A is zero except on the subdiagonal.
"""
def __init__(self, n, batch_size=1, rank=1):
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
self.n = n
self.m = m
self.batch_size = batch_size
self.rank = rank
self.plan_ffts_forward_pass()
def plan_ffts_forward_pass(self):
n, m, batch_size, rank = self.n, self.m, self.batch_size, self.rank
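        # Note: [np.empty(...)] * m below aliases one shared buffer m times;
        # each level d reshapes and fully overwrites it, so the reuse is safe
        # and keeps memory at O((batch_size + rank) * n).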
self.S_storage = [np.empty((batch_size + rank, n))] * m
self.S_f_storage = [np.empty((batch_size + rank, 1 << d, (1 << (m - d - 1)) + 1), dtype='complex128') for d in range(m)]
self.T_f_storage = [np.empty((batch_size, rank, (1 << (m - d - 1)) + 1), dtype='complex128') for d in range(m)]
self.T_storage = [np.empty((batch_size, rank, 1 << (m - d))) for d in range(m)]
self.ffts_forward_pass = []
for d, (S, S_f, T_f, T) in enumerate(zip(self.S_storage, self.S_f_storage, self.T_f_storage, self.T_storage)):
S = S.reshape((batch_size + rank, 1 << d, 1 << (m - d)))
fft_time2freq = pyfftw.FFTW(S, S_f, direction='FFTW_FORWARD', flags=['FFTW_MEASURE', 'FFTW_DESTROY_INPUT'], threads=1)
fft_freq2time = pyfftw.FFTW(T_f, T, direction='FFTW_BACKWARD', flags=['FFTW_MEASURE', 'FFTW_DESTROY_INPUT'], threads=1)
self.ffts_forward_pass.append((fft_time2freq, fft_freq2time))
def __call__(self, subdiag, v, u):
"""Multiply Krylov(A, v)^T @ u when A is zero except on the subdiagonal.
We don't use bit reversal here.
"""
n, m, batch_size, rank = self.n, self.m, self.batch_size, self.rank
u, v = u.reshape(batch_size, n), v.reshape(rank, n)
result = np.zeros((batch_size, rank, n), dtype=u.dtype)
# T_00_sum = u @ v.T
T_00_sum = (u[:, np.newaxis] * v).sum(axis=-1)
result[:, :, 0] += T_00_sum
T_01 = u.reshape(batch_size, n, 1).copy() # Copy since we'll be changing this array directly
T_10 = v.reshape(rank, n, 1)
T_11 = np.ones(n)
for d in range(m)[::-1]:
n1, n2 = 1 << d, 1 << (m - d - 1)
S = self.S_storage[d].reshape((batch_size + rank, n1, 2 * n2))
S_f = self.S_f_storage[d]
T_f = self.T_f_storage[d]
T = self.T_storage[d]
fft_time2freq, fft_freq2time = self.ffts_forward_pass[d]
S_00_sum, S_01, S_10, S_11 = T_00_sum, T_01, T_10, T_11
S[:, :, n2:] = 0.0
S0_10_mult_subdiag, S1_01 = S[:rank, :, :n2], S[rank:rank + batch_size, :, :n2]
S0_10_mult_subdiag[:] = S_10[:, ::2] * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
S1_01[:] = S_01[:, 1::2]
# polynomial multiplications
S_f = fft_time2freq(S, output_array=S_f)
S0_10_f, S1_01_f = S_f[:rank], S_f[rank:rank + batch_size]
T_00_f_sum = T_f
# T_00_f_sum[:] = (S1_01_f[:, np.newaxis] * S0_10_f[np.newaxis]).sum(axis=-2)
np.einsum("bnm,rnm->brm", S1_01_f, S0_10_f, out=T_00_f_sum)
T = fft_freq2time(T_f, output_array=T)
T_00_sum = T
# polynomial additions
result[:, :, 1:2*n2] += T_00_sum[..., :-1]
S0_11_mult_subdiag = S_11[::2] * subdiag[(n2 - 1)::(2 * n2)]
# T_01 = np.concatenate((S_01[:, ::2], S_01[:, 1::2] * S0_11_mult_subdiag[:, np.newaxis]), axis=-1)
T_01 = S_01.reshape(batch_size, n1, 2 * n2)
T_01[:, :, n2:] *= S0_11_mult_subdiag[:, np.newaxis]
T_10 = np.concatenate((S_10[:, 1::2], S0_10_mult_subdiag * S_11[1::2][:, np.newaxis]), axis=-1)
T_11 = S0_11_mult_subdiag * S_11[1::2]
return result
class KrylovMultiply():
"""Multiply Krylov(A, v) @ w when A is zero except on the subdiagonal.
"""
def __init__(self, n, batch_size=1, rank=1):
m = int(np.log2(n))
assert n == 1 << m, 'n must be a power of 2'
self.n = n
self.m = m
self.batch_size = batch_size
self.rank = rank
self.plan_ffts_forward_pass_u_zero()
self.plan_ffts_backward_pass()
def plan_ffts_forward_pass_u_zero(self):
n, m, batch_size, rank = self.n, self.m, self.batch_size, self.rank
self.S_storage = [np.empty((rank, n))] * m
self.S_f_storage = [np.empty((rank, 1 << d, (1 << (m - d - 1)) + 1), dtype='complex128') for d in range(m)]
self.ffts_forward_pass = []
for d, (S, S_f) in enumerate(zip(self.S_storage, self.S_f_storage)):
S = S.reshape((rank, 1 << d, 1 << (m - d)))
fft_time2freq = pyfftw.FFTW(S, S_f, direction='FFTW_FORWARD', flags=['FFTW_MEASURE', 'FFTW_DESTROY_INPUT'], threads=1)
self.ffts_forward_pass.append(fft_time2freq)
def plan_ffts_backward_pass(self):
n, m, batch_size, rank = self.n, self.m, self.batch_size, self.rank
self.dT_storage = [np.empty((batch_size, rank, 1 << (m - d))) for d in range(m)]
self.dT_f_storage = [np.empty((batch_size, rank, (1 << (m - d - 1)) + 1), dtype='complex128') for d in range(m)]
self.dS_f_storage = [np.empty((batch_size, 1 << d, (1 << (m - d - 1)) + 1), dtype='complex128') for d in range(m)]
self.dS_storage = [np.empty((batch_size, n))] * m
self.ffts_backward_pass = []
for d, (dT, dT_f, dS_f, dS) in enumerate(zip(self.dT_storage, self.dT_f_storage, self.dS_f_storage, self.dS_storage)):
dS = dS.reshape((batch_size, 1 << d, 1 << (m - d)))
fft_time2freq = pyfftw.FFTW(dT, dT_f, direction='FFTW_FORWARD', flags=['FFTW_MEASURE', 'FFTW_DESTROY_INPUT'], threads=1)
fft_freq2time = pyfftw.FFTW(dS_f, dS, direction='FFTW_BACKWARD', flags=['FFTW_MEASURE', 'FFTW_DESTROY_INPUT'], threads=1)
self.ffts_backward_pass.append((fft_time2freq, fft_freq2time))
def __call__(self, subdiag, v, w):
n, m, batch_size, rank = self.n, self.m, self.batch_size, self.rank
# Forward pass. Since K @ w can be computed by autodiffing K^T @ u, we
# carry out the forward pass K^T @ u for u = 0 here to save the
# intermediate values. This code is exactly the same as the function
# @krylov_transpose_multiply, specialized to the case where u = 0.
save_for_backward = [None] * m
T_10 = v.reshape(rank, n, 1)
T_11 = np.ones(n)
for d in range(m)[::-1]:
n1, n2 = 1 << d, 1 << (m - d - 1)
S = self.S_storage[d].reshape((rank, n1, 2 * n2))
S_f = self.S_f_storage[d]
fft_time2freq = self.ffts_forward_pass[d]
S_10, S_11 = T_10, T_11
S0_10_mult_subdiag = S[:, :, :n2]
S0_10_mult_subdiag[:] = S_10[:, ::2] * subdiag[(n2 - 1)::(2 * n2), np.newaxis]
S[:, :, n2:] = 0.0
S0_10_mult_subdiag_f = fft_time2freq(S, output_array=S_f)
T_10 = np.concatenate((S_10[:, 1::2], S0_10_mult_subdiag * S_11[1::2][:, np.newaxis]), axis=-1)
S0_11_mult_subdiag = S_11[::2] * subdiag[(n2 - 1)::(2 * n2)]
save_for_backward[d] = S0_10_mult_subdiag_f, S0_11_mult_subdiag
T_11 = S0_11_mult_subdiag * S_11[1::2]
# Backward pass
w, v = w.reshape(batch_size, rank, n), v.reshape((rank, n))
dT_01 = np.zeros((batch_size, 1, n), dtype=w.dtype)
for d in range(m):
n1, n2 = 1 << d, 1 << (m - d - 1)
dT = self.dT_storage[d]
dT_f = self.dT_f_storage[d]
dS_f = self.dS_f_storage[d]
dS = self.dS_storage[d].reshape((batch_size, n1, 2 * n2))
fft_time2freq, fft_freq2time = self.ffts_backward_pass[d]
S0_10_mult_subdiag_f, S0_11_mult_subdiag = save_for_backward[d]
dS_01 = np.empty((batch_size, 2 * n1, n2), dtype=w.dtype)
dS_01[:, ::2] = dT_01[:, :, :n2]
dT_00_sum = dT
dT_00_sum[:, :, :2*n2 - 1] = w[:, :, 1:2*n2]
dT_00_sum[:, :, -1] = 0.0
dT_00_sum_f = fft_time2freq(dT, output_array=dT_f)
dS1_01_f = dS_f
# dS1_01_f[:] = (np.conjugate(S0_10_mult_subdiag_f, out=S0_10_mult_subdiag_f) * dT_00_sum_f[:, :, np.newaxis]).sum(axis=1)
np.einsum("brm,rnm->bnm", dT_00_sum_f, np.conjugate(S0_10_mult_subdiag_f, out=S0_10_mult_subdiag_f), out=dS1_01_f)
dS1_01 = fft_freq2time(dS_f, output_array=dS)
dS_01[:, 1::2] = dT_01[:, :, n2:] * S0_11_mult_subdiag[:, np.newaxis] + dS1_01[:, :, :n2]
dT_01 = dS_01
# du = ((dT_00_sum[:, :, np.newaxis] * v[np.newaxis, :, :, np.newaxis]).sum(dim=1) + dT_01).squeeze(axis=-1)
du = w[:, :, 0] @ v + dT_01.squeeze(axis=-1)
return du
def test_krylov_transpose_multiply():
m = 14
n = 1<<m
batch_size = 3
rank = 2
subdiag = np.random.random(n-1)
A = np.diag(subdiag, -1)
u = np.random.random((batch_size, n))
v = np.random.random((rank, n))
# k1 = krylov_mult_slow(A,v,u,n)
# k1_allocated = krylov_mult_slow_allocated(A,v,u,n)
# k11 = krylov_mult_slow_faster(A,v,u,n)
# k2 = krylov_mult(A,v,u,n)
resolvent_bilinear_flattened = create(n, m, lib='fftw')
krylov_transpose_multiply = KrylovTransposeMultiply(n, batch_size, rank)
k3 = resolvent_bilinear_flattened(A, v[0], u[0], n, m)
k3_nobf = krylov_transpose_multiply(subdiag, v, u)
assert np.allclose(k3, k3_nobf[0, 0])
def test_krylov_multiply():
m = 14
n = 1<<m
batch_size = 3
rank = 2
subdiag = np.random.random(n-1)
A = np.diag(subdiag, -1)
w = np.random.random((batch_size, rank, n))
v = np.random.random((rank, n))
krylov_multiply = KrylovMultiply(n, batch_size, rank)
result1 = krylov_multiply(subdiag, v, w)
Ks = [krylov_construct(A, v[i], n) for i in range(rank)]
result2 = np.stack([w[:, i] @ Ks[i] for i in range(rank)]).swapaxes(0, 1).sum(axis=1)
assert np.allclose(result1, result2)
def main():
test_krylov_transpose_multiply()
test_krylov_multiply()
if __name__ == '__main__':
main()
|
structured-nets-master
|
pytorch/structure/scratch/krylovfast.py
|
import os
os.environ['MKL_NUM_THREADS'] = '1'
os.environ['OMP_NUM_THREADS'] = '1'
os.environ['NUMEXPR_NUM_THREADS'] = '1'
os.environ['VECLIB_MAXIMUM_THREADS'] = '1'
import numpy as np
from krylovfast import *
from krylovslow import *
np.random.seed(0)
# n, m = 2, 1
# A = np.array([[0,0],[1,0]])
# u = np.array([1,1])
# v = np.array([1,1])
# print(resolvent_bilinear(A,v,u,n))
# ans: [2 1], [1, 1], [1 1], [0 1]
# n, m = 4, 2
# A = np.diag(np.arange(1,4),-1)
# u = np.ones(4)
# v = np.ones(4)
# print(resolvent_bilinear(A,v,u,4))
# print(krylov_mult(A,v,u,4))
# print(krylov_mult_slow(A,v,u,4))
# print(krylov_mult_slow_faster(A,v,u,4))
# print(resolvent_bilinear_flattened(A,v,u,4,2))
# ans: [4 6 8 6], [1 1 2 6], [1 3 6 6], [0 0 0 6]
m = 14
n = 1<<m
subdiag = np.random.random(n-1)
A = np.diag(subdiag, -1)
u = np.random.random(n)
v = np.random.random(n)
# k1 = krylov_mult_slow(A,v,u,n)
# k1_allocated = krylov_mult_slow_allocated(A,v,u,n)
# k11 = krylov_mult_slow_faster(A,v,u,n)
# k2 = krylov_mult(A,v,u,n)
resolvent_bilinear_flattened = create(n, m, lib='fftw')
krylov_transpose_multiply = KrylovTransposeMultiply(n)
k3 = resolvent_bilinear_flattened(A, v, u, n, m)
k3_nobf = krylov_transpose_multiply(subdiag, v, u)
np.allclose(k3, k3_nobf)
[resolvent_bilinear_flattened(A, v, u, n, m) for i in range(100)]
# print(np.linalg.norm(k1-k11))
# print(np.linalg.norm(k1-k2))
# print(np.linalg.norm(k1-k3))
# print(np.linalg.norm(k1-k3b))
|
structured-nets-master
|
pytorch/structure/scratch/tests_snippets.py
|
import numpy as np
import itertools
p=2
d=3
N=p << (d-1)
f = np.arange(N)
print(np.fft.fft(f))
def init(f):
x = np.zeros(d*[p], dtype=np.complex_)
idx = [list(range(p)) for i in range(d)]
powers = np.array([p**i for i in range(d)])
for t in itertools.product(*idx):
x[t] = f[np.sum(powers*np.array(t))]
return x
x = init(f)
print(x.shape)
def unshape(x):
f = np.zeros(p**d, dtype=np.complex_)
idx = [list(range(p)) for i in range(d)]
powers = np.array([p**i for i in range(d)])
for t in itertools.product(*idx):
f[np.sum(powers*np.array(t))] = x[t]
return f
# x = f.reshape([[p]*d]).astype(np.complex_)
# At pass r the layout is
#
# x_0,..., x_{d-r-1}, y_{r-1}, ..., y_{0}
# So at
# r = 0 => x_0,..., x_{d-1}
# r = 1 => x_0,..., x_{d-2}, y_0
# r = 2 => x_0,..., x_{d-3}, y_0, y_1
# r = d => y_0,..., y_{d-1}
#
def pass_it_(x,x_new,r, verbose=False):
# The index ranges
# (x_0,...,x_{d-r-2},x_{d-r-1}, y_{0}, .., y_{r-1}, y_r)
idx = [list(range(p)) for i in range(d+1)]
omega = -2*np.complex(0,1)*np.pi/(p**d)
powers = np.array([p**i for i in range(r+1)])
# powers = np.array([p**i for i in range(r,-1,-1)])
for t in itertools.product(*idx):
# The last index are the ys
x_base = t[0:d-r-1]
x_last = t[d-r-1] # this is xm
y_base = t[d-r:d]
y_last = t[d]
# marginalize out over xm, but keep the ys in the same order?
new_t = x_base + y_base + (y_last,)
old_t = x_base + (x_last,) + y_base
y_sum = np.sum(np.array(t[d-r:d+1]) * powers) * p**(d-r-1)
if verbose:
print(f"x={x_base},{x_last} -> y={y_base},{y_last} :: new={new_t} += old={old_t} y_sum={y_sum} {y_sum*x_last}")
q = omega*x_last*y_sum
x_new[new_t] += x[old_t]*np.exp(q)
if verbose: print("**")
return x_new
def pass_it(x,r,verbose=False):
x_new = np.zeros(d*[p], dtype=np.complex_)
return pass_it_(x,x_new,r,verbose=verbose)
def fft_pass(x):
_x = np.copy(x)
x_new = np.zeros(d*[p], dtype=np.complex_)
for r in range(d):
pass_it_(_x,x_new,r)
(_x,x_new) = (x_new,_x)
x_new[:] = 0
return _x
def slow_fft(x):
y = np.zeros(x.shape, dtype=np.complex_)
idx = [list(range(p)) for i in range(d)]
omega = -2*np.complex(0,1)*np.pi/(p**d)
powers = np.array([p**i for i in range(d)])
# powers = np.array([p**i for i in range(d-1,-1,-1)])
for t in itertools.product(*idx):
y_t = np.sum(powers*np.array(t))
for u in itertools.product(*idx):
x_t = np.sum(powers*np.array(u))
y[t] += x[u]*np.exp(omega*y_t*x_t)
return y
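# Sanity check (a sketch): slow_fft computes the same DFT as numpy under the
# digit encoding used by init/unshape, so
#     np.allclose(unshape(slow_fft(init(f))), np.fft.fft(f))
# should hold; fft_pass should agree with slow_fft up to the output digit
# ordering described in the layout comments above.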
|
structured-nets-master
|
pytorch/structure/scratch/fft.py
|
import numpy as np
import scipy.fftpack as fft
from scipy import signal
# should create a poly class later
p1 = np.full(5, 2)
p2 = np.full(10, 3)
def poly_add(p1, p2, n):
"""p1,p2 of degree exactly n-1"""
# TODO: change these to equals
assert p1.shape == (n,)
assert p2.shape == (n,)
# n = np.maximum(p1.shape[0], p2.shape[0])
# q1 = np.pad(p1, (0,n-p1.shape[0]), 'constant')
# q2 = np.pad(p2, (0,n-p2.shape[0]), 'constant')
# print(q1)
# print(q2)
return p1+p2
def poly_mult_slow(p1, p2):
d1 = p1.shape[0] - 1
d2 = p2.shape[0] - 1
n = d1 + d2
prod = np.zeros(n+1)
for i in range(d1+1):
for j in range(d2+1):
prod[i+j] += p1[i]*p2[j]
return prod
def poly_mult_fft(p1, p2):
d1 = p1.shape[0] - 1
d2 = p2.shape[0] - 1
# if d1 < 0:
# p1 = np.array([0])
# d1 = 0
# if d2 < 0:
# p2 = np.array([0])
# d2 = 0
    n = d1 + d2
    # numpy fft
    f1 = np.fft.rfft(p1, n+1)
    f2 = np.fft.rfft(p2, n+1)
    prod = np.fft.irfft(f1*f2, n+1)
    # scipy fft is buggy here: scipy.fftpack.rfft packs real and imaginary
    # parts into a single real array, so f1*f2 is not a complex multiplication.
    # f1 = fft.rfft(p1, n+1)
    # f2 = fft.rfft(p2, n+1)
    # prod = fft.irfft(f1*f2, n+1)
    # prod = signal.convolve(p1, p2, method='fft')
    return prod
# define an alias for easy testing
def poly_mult(p1, p2):
# return poly_mult_slow(p1, p2)
d1 = p1.shape[0] - 1
d2 = p2.shape[0] - 1
n = d1 + d2
# q1 = np.pad(p1, (0,d2), 'constant')
# q2 = np.pad(p2, (0,d1), 'constant')
# assert q1.shape[0] == n+1
# assert q2.shape[0] == n+1
if n >= 128:
prod = signal.fftconvolve(p1, p2, mode='full')
else:
prod = np.convolve(p1, p2)
# prod = np.convolve(p1, p2)
# if prod.shape[0] != n+1:
# print(d1, d2, p1.shape, p2.shape, prod.shape)
# assert false
# assert prod.shape[0] == n+1
return prod
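# Worked example: poly_mult(np.array([1, 1]), np.array([1, 1])) = [1, 2, 1],
# i.e. (1 + x)^2 = 1 + 2x + x^2; index i holds the coefficient of x^i.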
def poly_inv(p, n):
"""
invert p mod x^n
"""
assert n >= 1
if n == 1:
return np.array([1 / p[0]])
# represent p = p_low + x^k p_high, and its inverse q similarly
d = p.shape[0]
k = (n+1)//2
# invert the lower order terms
q_low = poly_inv(p[:min(d,k)], k)
# print(q_low)
# since 2k >= n, p q_l + x^k p_l q_h = 1 (mod x^n)
# so p_l q_h = (1 - p q_l)/x^k (mod x^{n-k})
r = poly_mult(p, q_low)
r[0] -= 1
# assert np.all(r[:min(r.shape[0],k)] == 0)
# but we know p_l^{-1} mod x^{n-k} since we already know it mod x^k
q_high = poly_mult(-r[k:min(r.shape[0],n)], q_low)
# q_low = np.pad(q_low, (0,k-q_low.shape[0]), 'constant')
q = np.concatenate((q_low, q_high))[:n]
# q = np.trim_zeros(q, 'b')
return q
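# poly_inv is a Newton-style iteration that doubles the number of correct
# coefficients per recursion level, solving for the high-order half from the
# residual r = p * q_low - 1. Worked example:
#     poly_inv(np.array([1.0, -1.0]), 4)  # -> [1., 1., 1., 1.]
# since 1 / (1 - x) = 1 + x + x^2 + x^3 (mod x^4).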
def resolvent_bilinear(A, v, u, n):
"""
Compute [u e_n]^T * (I-Ax)^{-1} * [v e_1]
(2x2 matrix of rational fractions)
output: array of shape (2, 2, n), array shape (n)
(numerator, denominator)
invariants:
numerator has degree n-1
denominator degree n
"""
if n == 1:
        # 2x2 outer product of [u[0], 1] and [v[0], 1], written out elementwise;
        # each entry is a degree-0 polynomial
return (np.array([[[ u[0]*v[0] ], [ u[0]*1 ]], [[ 1*v[0] ], [ 1*1 ]]]), np.array([1,-A[0,0]]))
k = n//2
# Let M00 = M[0:k, 0:k], M10 = M[k:n, 0:k], M11 = M[k:n,k:n]
# i.e. M = [M00 0 ; M10 M11] (where M = I-Ax)
# then M^{-1} = [M00^{-1} 0 ; -M11^{-1} M_10^{-1} M_00^{-1}]
S0, d0 = resolvent_bilinear(A[:k,:k], v[:k], u[:k], k)
S1, d1 = resolvent_bilinear(A[k:,k:], v[k:], u[k:], n-k)
# the part corresponding to bottom left corner is
# -A[k, k-1]x * u_1^T M_11^{-1} e_1 * e_k^T M_00^{-1} v_0
# or S1[:,1] * S0[1,:]
    L = np.array([[poly_mult(S1[0,1], S0[1,0]), poly_mult(S1[0,1], S0[1,1])],
                  [poly_mult(S1[1,1], S0[1,0]), poly_mult(S1[1,1], S0[1,1])]])
# print(L)
L = A[k,k-1] * np.pad(L, ((0,0),(0,0),(1,0)), 'constant') # multiply by X
# TODO: above padding should be able to be optimized away; when we allocate memory properly can store the coefficients directly in the right place
# print(L)
# clear denominators
# S0 = np.array([[ poly_mult(s, d1) for s in r ] for r in S0])
# S1 = np.array([[ poly_mult(s, d0) for s in r ] for r in S1])
# print(S0)
# really need to define poly matrix operations
# S = np.array([[poly_add(S0[i,j],S1[i,j]) for j in range(2)] for i in range(2)])
# S = np.array([[poly_add(S[i,j],L[i,j]) for j in range(2)] for i in range(2)])
# L[0,0] = poly_add(L[0,0], poly_mult(S0[0,0], d1), n)
# L[0,1] = poly_add(L[0,1], poly_mult(S0[0,1], d1), n)
# L[0,0] = poly_add(L[0,0], poly_mult(S1[0,0], d0), n)
# L[1,0] = poly_add(L[1,0], poly_mult(S1[1,0], d0), n)
L[0,0] += poly_mult(S0[0,0], d1) + poly_mult(S1[0,0], d0)
L[0,1] += poly_mult(S0[0,1], d1)
L[1,0] += poly_mult(S1[1,0], d0)
return (L, poly_mult(d0,d1))
def krylov_mult(A, v, u, m):
"""
Compute the matrix-vector product Kry(A, v)^T * u
A: R^{n \times n}, lower triangular and 2-banded
u: R^n
v: R^n
m: output dimension (i.e. width of K)
"""
n = v.shape[0]
assert A.shape == (n,n)
R, d = resolvent_bilinear(A,v,u,n)
ans = poly_mult(R[0,0], poly_inv(d, m))
return ans[:m]
def Amult(d, subd, v):
ans = d*v
ans[1:] += subd*v[:-1]
return ans
def krylov_mult_slow(A, v, u, m):
n = v.shape[0]
assert A.shape == (n,n)
cols = [v]
d = np.diagonal(A, 0)
subd = np.diagonal(A, -1)
for i in range(1,m):
cols.append(Amult(d, subd, cols[-1]))
K = np.stack(cols, axis=1)
return K.T @ u
def krylov_mult_slow_allocated(A, v, u, m):
n = v.shape[0]
assert A.shape == (n,n)
d = np.diagonal(A, 0)
subd = np.diagonal(A, -1)
# Allocate memory at once to K
K_T = np.empty((m, n))
K_T[0] = v
for i in range(1,m):
K_T[i] = Amult(d, subd, K_T[i-1])
return K_T @ u
def krylov_construct(A, v, m):
n = v.shape[0]
assert A.shape == (n,n)
d = np.diagonal(A, 0)
subd = np.diagonal(A, -1)
K = np.zeros(shape=(m,n))
K[0,:] = v
for i in range(1,m):
K[i,1:] = subd*K[i-1,:-1]
return K
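# Note: unlike krylov_mult_slow above, this construction uses only the
# subdiagonal of A (the diagonal d is read but unused), i.e. it assumes A is
# zero except on the subdiagonal. Worked example, subd = [2, 3], v = [1, 1, 1]:
#     K = [[1, 1, 1],
#          [0, 2, 3],
#          [0, 0, 6]]
# where row i is A^i @ v.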
def krylov_mult_slow_faster(A, v, u, m):
K = krylov_construct(A, v, m)
return K @ u
|
structured-nets-master
|
pytorch/structure/scratch/krylovslow.py
|
import torch.cuda
from setuptools import setup
from torch.utils.cpp_extension import CppExtension, CUDAExtension, BuildExtension
from torch.utils.cpp_extension import CUDA_HOME
ext_modules = []
if torch.cuda.is_available() and CUDA_HOME is not None:
extension = CUDAExtension(
'diag_mult_cuda', [
'diag_mult_cuda.cpp',
'diag_mult_cuda_kernel.cu'
],
extra_compile_args={'cxx': ['-g'],
'nvcc': ['-O2']})
ext_modules.append(extension)
setup(
name='diag_mult_cuda',
ext_modules=ext_modules,
cmdclass={'build_ext': BuildExtension})
|
structured-nets-master
|
pytorch/structure/diag_mult_cuda/setup.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from setuptools import setup
with open("README.md") as f:
readme = f.read()
setup(
name="BLINK",
version="0.1.0",
description="BLINK: Better entity LINKing",
url="", # TODO
classifiers=[
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
long_description=readme,
long_description_content_type="text/markdown",
setup_requires=["setuptools>=18.0",],
install_requires=[
"torch>=1.2.0",
"pysolr>=3.8.1",
"emoji>=0.5.3",
"regex>=2019.8.19",
"matplotlib>=3.1.0",
"tqdm>=4.32.1",
"nltk>=3.4.4",
"numpy>=1.17.2",
"segtok>=1.5.7",
"flair>=0.4.3",
"pytorch-transformers>=1.2.0",
"colorama>=0.4.3",
"termcolor>=1.1.0",
"faiss-cpu>=1.6.1",
],
)
|
BLINK-main
|
setup.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import os
import pysolr
import sys
import blink.candidate_retrieval.utils as utils
def get_model(params):
return BM45_Candidate_Generator(params)
class Candidate_Generator:
def __init__(self, parameters=None):
pass
def get_candidates(self, mention_data):
"""Given the mentions from the named entity recognition model, generates candidates for each mention and adds them as an additional field to the mention dictionary"""
pass
class BM45_Candidate_Generator(Candidate_Generator):
ESCAPE_CHARS_RE = re.compile(r'(?<!\\)(?P<char>[&|+\-!(){}[\]\/^"~*?:])')
def __init__(self, params):
self.solr_address = params["solr_address"]
self.raw_solr_fields = params["raw_solr_fields"]
self.solr = pysolr.Solr(self.solr_address, always_commit=True, timeout=100)
self.rows = params["rows"]
self.query = params["query"]
self.keys = [k.strip() for k in params["keys"].split(",")]
self.c = 0
self.query_arguments = {
"fl": "* score",
"rows": self.rows,
"defType": "edismax",
}
if params["boosting"] is not None:
self.query_arguments["bf"] = params["boosting"]
def _filter_result(self, cand, detailed=True):
wikidata_id = cand.get("wikidata_id", None)
res = {
"wikidata_id": wikidata_id,
"wikipedia_id": cand["id"],
"wikipedia_title": cand["title"],
}
if detailed:
res["aliases"] = cand.get("aliases", None)
sents = []
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
sents.append(cand.get(key, ""))
res["sentences"] = sents
return res
def get_candidates(self, mention_data):
solr = self.solr
# Build query
keys = self.keys
query = self.query
if not self.raw_solr_fields:
query = query.format(
*[
BM45_Candidate_Generator.solr_escape(mention_data[key])
if key in mention_data
else utils.get_sent_context(mention_data, key)
for key in keys
]
)
else:
query = query.format(
*[
mention_data[key]
if key in mention_data
else utils.get_sent_context(mention_data, key)
for key in keys
]
)
try:
results = solr.search(query, **self.query_arguments)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print("\nException:", exc_type, "- line", exc_tb.tb_lineno)
print(repr(e))
c = self.c
if c < 10:
print(
"Exception with: \naddress: {} \nquery: {} \nmention_data: {} \n".format(
self.solr_address, query, str(mention_data)
)
)
self.c = c + 1
return []
# Filter the data in the retrieved objects, while ignoring the ones without a wikidata_id (only a very small fraction in the dataset; they are noise)
filtered_results = [
self._filter_result(cand) for cand in results.docs if "wikidata_id" in cand
]
return filtered_results
@staticmethod
def process_mentions_for_candidate_generator(sentences, mentions):
for m in mentions:
m["context"] = sentences[m["sent_idx"]]
return mentions
@staticmethod
def solr_escape(string):
if (string == "OR") or (string == "AND"):
return string.lower()
interior = r"\s+(OR|AND)\s+"
start = r"^(OR|AND) "
end = r" (OR|AND)$"
string = re.sub(interior, lambda x: x.group(0).lower(), string)
string = re.sub(start, lambda x: x.group(0).lower(), string)
string = re.sub(end, lambda x: x.group(0).lower(), string)
return BM45_Candidate_Generator.ESCAPE_CHARS_RE.sub(r"\\\g<char>", string)
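# Example (a sketch): solr_escape('Apples AND Oranges (2+2)') lowercases the
# free-standing boolean operator and escapes the Solr metacharacters,
# returning 'Apples and Oranges \(2\+2\)'.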
|
BLINK-main
|
blink/candidate_generation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import prettytable
import blink.main_dense as main_dense
import blink.candidate_ranking.utils as utils
DATASETS = [
{
"name": "AIDA-YAGO2 testa",
"filename": "data/BLINK_benchmark/AIDA-YAGO2_testa.jsonl",
},
{
"name": "AIDA-YAGO2 testb",
"filename": "data/BLINK_benchmark/AIDA-YAGO2_testb.jsonl",
},
{"name": "ACE 2004", "filename": "data/BLINK_benchmark/ace2004_questions.jsonl"},
{"name": "aquaint", "filename": "data/BLINK_benchmark/aquaint_questions.jsonl"},
{
"name": "clueweb - WNED-CWEB (CWEB)",
"filename": "data/BLINK_benchmark/clueweb_questions.jsonl",
},
{"name": "msnbc", "filename": "data/BLINK_benchmark/msnbc_questions.jsonl"},
{
"name": "wikipedia - WNED-WIKI (WIKI)",
"filename": "data/BLINK_benchmark/wnedwiki_questions.jsonl",
},
]
PARAMETERS = {
"faiss_index": None,
"index_path": None,
"test_entities": None,
"test_mentions": None,
"interactive": False,
"biencoder_model": "models/biencoder_wiki_large.bin",
"biencoder_config": "models/biencoder_wiki_large.json",
"entity_catalogue": "models/entity.jsonl",
"entity_encoding": "models/all_entities_large.t7",
"crossencoder_model": "models/crossencoder_wiki_large.bin",
"crossencoder_config": "models/crossencoder_wiki_large.json",
"output_path": "output",
"fast": False,
"top_k": 100,
}
args = argparse.Namespace(**PARAMETERS)
logger = utils.get_logger(args.output_path)
models = main_dense.load_models(args, logger)
table = prettytable.PrettyTable(
[
"DATASET",
"biencoder accuracy",
"recall at 100",
"crossencoder normalized accuracy",
"overall unormalized accuracy",
"support",
]
)
for dataset in DATASETS:
logger.info(dataset["name"])
PARAMETERS["test_mentions"] = dataset["filename"]
args = argparse.Namespace(**PARAMETERS)
(
biencoder_accuracy,
recall_at,
crossencoder_normalized_accuracy,
overall_unormalized_accuracy,
num_datapoints,
predictions,
scores,
) = main_dense.run(args, logger, *models)
table.add_row(
[
dataset["name"],
round(biencoder_accuracy, 4),
round(recall_at, 4),
round(crossencoder_normalized_accuracy, 4),
round(overall_unormalized_accuracy, 4),
num_datapoints,
]
)
logger.info("\n{}".format(table))
|
BLINK-main
|
blink/run_benchmark.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from blink.candidate_ranking.bert_reranking import BertReranker
def get_model(params):
return BertReranker(params)
|
BLINK-main
|
blink/reranker.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pickle
import emoji
def get_model(parameters):
return Wikimedia_Data_Fetcher(parameters["path_to_candidate_data_dict"])
class Wikimedia_Data_Fetcher:
def __init__(self, path_to_data):
self.data = pickle.load(open(path_to_data, "rb"))
def get_data_for_entity(self, entity_data):
"""Given an entity data dictionary that contains some linking data (ex. title or ID), additional information (ex. description, aliases etc.) is added to the given entity dictionary"""
data = self.data
title = entity_data["wikipedia_title"]
if "wikidata_info" in data[title]:
if ("aliases" in data[title]["wikidata_info"]) and (
data[title]["wikidata_info"]["aliases"]
) is not None:
aliases = [
alias
for alias in data[title]["wikidata_info"]["aliases"]
if alias not in emoji.UNICODE_EMOJI
]
else:
aliases = None
else:
aliases = None
entity_data["aliases"] = aliases
sents = []
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
sents.append(data[title].get(key, ""))
entity_data["sentences"] = sents
return entity_data
|
BLINK-main
|
blink/candidate_data_fetcher.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import sys
from tqdm import tqdm
import logging
import torch
import numpy as np
from colorama import init
from termcolor import colored
import blink.ner as NER
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from blink.biencoder.biencoder import BiEncoderRanker, load_biencoder
from blink.crossencoder.crossencoder import CrossEncoderRanker, load_crossencoder
from blink.biencoder.data_process import (
process_mention_data,
get_candidate_representation,
)
import blink.candidate_ranking.utils as utils
from blink.crossencoder.train_cross import modify, evaluate
from blink.crossencoder.data_process import prepare_crossencoder_data
from blink.indexer.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer
HIGHLIGHTS = [
"on_red",
"on_green",
"on_yellow",
"on_blue",
"on_magenta",
"on_cyan",
]
def _print_colorful_text(input_sentence, samples):
init() # colorful output
msg = ""
if samples and (len(samples) > 0):
msg += input_sentence[0 : int(samples[0]["start_pos"])]
for idx, sample in enumerate(samples):
msg += colored(
input_sentence[int(sample["start_pos"]) : int(sample["end_pos"])],
"grey",
HIGHLIGHTS[idx % len(HIGHLIGHTS)],
)
if idx < len(samples) - 1:
msg += input_sentence[
int(sample["end_pos"]) : int(samples[idx + 1]["start_pos"])
]
else:
msg += input_sentence[int(sample["end_pos"]) :]
else:
msg = input_sentence
print("Failed to identify entity from text:")
print("\n" + str(msg) + "\n")
def _print_colorful_prediction(
idx, sample, e_id, e_title, e_text, e_url, show_url=False
):
print(colored(sample["mention"], "grey", HIGHLIGHTS[idx % len(HIGHLIGHTS)]))
to_print = "id:{}\ntitle:{}\ntext:{}\n".format(e_id, e_title, e_text[:256])
if show_url:
to_print += "url:{}\n".format(e_url)
print(to_print)
def _annotate(ner_model, input_sentences):
ner_output_data = ner_model.predict(input_sentences)
sentences = ner_output_data["sentences"]
mentions = ner_output_data["mentions"]
samples = []
for mention in mentions:
record = {}
record["label"] = "unknown"
record["label_id"] = -1
# LOWERCASE EVERYTHING !
record["context_left"] = sentences[mention["sent_idx"]][
: mention["start_pos"]
].lower()
record["context_right"] = sentences[mention["sent_idx"]][
mention["end_pos"] :
].lower()
record["mention"] = mention["text"].lower()
record["start_pos"] = int(mention["start_pos"])
record["end_pos"] = int(mention["end_pos"])
record["sent_idx"] = mention["sent_idx"]
samples.append(record)
return samples
def _load_candidates(
entity_catalogue, entity_encoding, faiss_index=None, index_path=None, logger=None
):
# only load candidate encoding if not using faiss index
if faiss_index is None:
candidate_encoding = torch.load(entity_encoding)
indexer = None
else:
if logger:
logger.info("Using faiss index to retrieve entities.")
candidate_encoding = None
assert index_path is not None, "Error! Empty indexer path."
if faiss_index == "flat":
indexer = DenseFlatIndexer(1)
elif faiss_index == "hnsw":
indexer = DenseHNSWFlatIndexer(1)
else:
raise ValueError("Error! Unsupported indexer type! Choose from flat,hnsw.")
indexer.deserialize_from(index_path)
# load all the 5903527 entities
title2id = {}
id2title = {}
id2text = {}
wikipedia_id2local_id = {}
local_idx = 0
with open(entity_catalogue, "r") as fin:
lines = fin.readlines()
for line in lines:
entity = json.loads(line)
if "idx" in entity:
split = entity["idx"].split("curid=")
if len(split) > 1:
wikipedia_id = int(split[-1].strip())
else:
wikipedia_id = entity["idx"].strip()
assert wikipedia_id not in wikipedia_id2local_id
wikipedia_id2local_id[wikipedia_id] = local_idx
title2id[entity["title"]] = local_idx
id2title[local_idx] = entity["title"]
id2text[local_idx] = entity["text"]
local_idx += 1
return (
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
indexer,
)
def __map_test_entities(test_entities_path, title2id, logger):
# load the 732859 tac_kbp_ref_know_base entities
kb2id = {}
missing_pages = 0
n = 0
with open(test_entities_path, "r") as fin:
lines = fin.readlines()
for line in lines:
entity = json.loads(line)
if entity["title"] not in title2id:
missing_pages += 1
else:
kb2id[entity["entity_id"]] = title2id[entity["title"]]
n += 1
if logger:
logger.info("missing {}/{} pages".format(missing_pages, n))
return kb2id
def __load_test(test_filename, kb2id, wikipedia_id2local_id, logger):
test_samples = []
with open(test_filename, "r") as fin:
lines = fin.readlines()
for line in lines:
record = json.loads(line)
record["label"] = str(record["label_id"])
# for tac kbp we should use a separate knowledge source to get the entity id (label_id)
if kb2id and len(kb2id) > 0:
if record["label"] in kb2id:
record["label_id"] = kb2id[record["label"]]
else:
continue
# check that each entity id (label_id) is in the entity collection
elif wikipedia_id2local_id and len(wikipedia_id2local_id) > 0:
try:
key = int(record["label"].strip())
if key in wikipedia_id2local_id:
record["label_id"] = wikipedia_id2local_id[key]
else:
continue
                except ValueError:
continue
# LOWERCASE EVERYTHING !
record["context_left"] = record["context_left"].lower()
record["context_right"] = record["context_right"].lower()
record["mention"] = record["mention"].lower()
test_samples.append(record)
if logger:
logger.info("{}/{} samples considered".format(len(test_samples), len(lines)))
return test_samples
def _get_test_samples(
test_filename, test_entities_path, title2id, wikipedia_id2local_id, logger
):
kb2id = None
if test_entities_path:
kb2id = __map_test_entities(test_entities_path, title2id, logger)
test_samples = __load_test(test_filename, kb2id, wikipedia_id2local_id, logger)
return test_samples
def _process_biencoder_dataloader(samples, tokenizer, biencoder_params):
_, tensor_data = process_mention_data(
samples,
tokenizer,
biencoder_params["max_context_length"],
biencoder_params["max_cand_length"],
silent=True,
logger=None,
debug=biencoder_params["debug"],
)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=biencoder_params["eval_batch_size"]
)
return dataloader
def _run_biencoder(biencoder, dataloader, candidate_encoding, top_k=100, indexer=None):
biencoder.model.eval()
labels = []
nns = []
all_scores = []
for batch in tqdm(dataloader):
context_input, _, label_ids = batch
with torch.no_grad():
if indexer is not None:
context_encoding = biencoder.encode_context(context_input).numpy()
context_encoding = np.ascontiguousarray(context_encoding)
                scores, indices = indexer.search_knn(context_encoding, top_k)
            else:
                scores = biencoder.score_candidate(
                    context_input, None, cand_encs=candidate_encoding  # .to(device)
                )
                scores, indices = scores.topk(top_k)
                scores = scores.data.numpy()
                indices = indices.data.numpy()
        labels.extend(label_ids.data.numpy())
        nns.extend(indices)
all_scores.extend(scores)
return labels, nns, all_scores
def _process_crossencoder_dataloader(context_input, label_input, crossencoder_params):
tensor_data = TensorDataset(context_input, label_input)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=crossencoder_params["eval_batch_size"]
)
return dataloader
def _run_crossencoder(crossencoder, dataloader, logger, context_len, device="cuda"):
crossencoder.model.eval()
accuracy = 0.0
crossencoder.to(device)
res = evaluate(crossencoder, dataloader, device, logger, context_len, zeshel=False, silent=False)
accuracy = res["normalized_accuracy"]
logits = res["logits"]
if accuracy > -1:
predictions = np.argsort(logits, axis=1)
else:
predictions = []
return accuracy, predictions, logits
def load_models(args, logger=None):
# load biencoder model
if logger:
logger.info("loading biencoder model")
with open(args.biencoder_config) as json_file:
biencoder_params = json.load(json_file)
biencoder_params["path_to_model"] = args.biencoder_model
biencoder = load_biencoder(biencoder_params)
crossencoder = None
crossencoder_params = None
if not args.fast:
# load crossencoder model
if logger:
logger.info("loading crossencoder model")
with open(args.crossencoder_config) as json_file:
crossencoder_params = json.load(json_file)
crossencoder_params["path_to_model"] = args.crossencoder_model
crossencoder = load_crossencoder(crossencoder_params)
# load candidate entities
if logger:
logger.info("loading candidate entities")
(
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
faiss_indexer,
) = _load_candidates(
args.entity_catalogue,
args.entity_encoding,
faiss_index=getattr(args, 'faiss_index', None),
index_path=getattr(args, 'index_path' , None),
logger=logger,
)
return (
biencoder,
biencoder_params,
crossencoder,
crossencoder_params,
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
faiss_indexer,
)
def run(
args,
logger,
biencoder,
biencoder_params,
crossencoder,
crossencoder_params,
candidate_encoding,
title2id,
id2title,
id2text,
wikipedia_id2local_id,
faiss_indexer=None,
test_data=None,
):
if not test_data and not args.test_mentions and not args.interactive:
msg = (
"ERROR: either you start BLINK with the "
"interactive option (-i) or you pass in input test mentions (--test_mentions)"
"and test entitied (--test_entities)"
)
raise ValueError(msg)
id2url = {
v: "https://en.wikipedia.org/wiki?curid=%s" % k
for k, v in wikipedia_id2local_id.items()
}
stopping_condition = False
while not stopping_condition:
samples = None
if args.interactive:
logger.info("interactive mode")
# biencoder_params["eval_batch_size"] = 1
# Load NER model
ner_model = NER.get_model()
# Interactive
text = input("insert text:")
# Identify mentions
samples = _annotate(ner_model, [text])
_print_colorful_text(text, samples)
else:
if logger:
logger.info("test dataset mode")
if test_data:
samples = test_data
else:
# Load test mentions
samples = _get_test_samples(
args.test_mentions,
args.test_entities,
title2id,
wikipedia_id2local_id,
logger,
)
stopping_condition = True
# don't look at labels
keep_all = (
args.interactive
or samples[0]["label"] == "unknown"
or samples[0]["label_id"] < 0
)
# prepare the data for biencoder
if logger:
logger.info("preparing data for biencoder")
dataloader = _process_biencoder_dataloader(
samples, biencoder.tokenizer, biencoder_params
)
# run biencoder
if logger:
logger.info("run biencoder")
top_k = args.top_k
labels, nns, scores = _run_biencoder(
biencoder, dataloader, candidate_encoding, top_k, faiss_indexer
)
if args.interactive:
print("\nfast (biencoder) predictions:")
_print_colorful_text(text, samples)
# print biencoder prediction
idx = 0
for entity_list, sample in zip(nns, samples):
e_id = entity_list[0]
e_title = id2title[e_id]
e_text = id2text[e_id]
e_url = id2url[e_id]
_print_colorful_prediction(
idx, sample, e_id, e_title, e_text, e_url, args.show_url
)
idx += 1
print()
if args.fast:
# use only biencoder
continue
else:
biencoder_accuracy = -1
recall_at = -1
if not keep_all:
# get recall values
top_k = args.top_k
x = []
y = []
                for i in range(1, top_k + 1):
temp_y = 0.0
for label, top in zip(labels, nns):
if label in top[:i]:
temp_y += 1
if len(labels) > 0:
temp_y /= len(labels)
x.append(i)
y.append(temp_y)
# plt.plot(x, y)
biencoder_accuracy = y[0]
recall_at = y[-1]
print("biencoder accuracy: %.4f" % biencoder_accuracy)
print("biencoder recall@%d: %.4f" % (top_k, y[-1]))
if args.fast:
predictions = []
for entity_list in nns:
sample_prediction = []
for e_id in entity_list:
e_title = id2title[e_id]
sample_prediction.append(e_title)
predictions.append(sample_prediction)
# use only biencoder
return (
biencoder_accuracy,
recall_at,
-1,
-1,
len(samples),
predictions,
scores,
)
# prepare crossencoder data
context_input, candidate_input, label_input = prepare_crossencoder_data(
crossencoder.tokenizer, samples, labels, nns, id2title, id2text, keep_all,
)
context_input = modify(
context_input, candidate_input, crossencoder_params["max_seq_length"]
)
dataloader = _process_crossencoder_dataloader(
context_input, label_input, crossencoder_params
)
# run crossencoder and get accuracy
accuracy, index_array, unsorted_scores = _run_crossencoder(
crossencoder,
dataloader,
logger,
context_len=biencoder_params["max_context_length"],
)
if args.interactive:
print("\naccurate (crossencoder) predictions:")
_print_colorful_text(text, samples)
# print crossencoder prediction
idx = 0
for entity_list, index_list, sample in zip(nns, index_array, samples):
e_id = entity_list[index_list[-1]]
e_title = id2title[e_id]
e_text = id2text[e_id]
e_url = id2url[e_id]
_print_colorful_prediction(
idx, sample, e_id, e_title, e_text, e_url, args.show_url
)
idx += 1
print()
else:
scores = []
predictions = []
for entity_list, index_list, scores_list in zip(
nns, index_array, unsorted_scores
):
index_list = index_list.tolist()
# descending order
index_list.reverse()
sample_prediction = []
sample_scores = []
for index in index_list:
e_id = entity_list[index]
e_title = id2title[e_id]
sample_prediction.append(e_title)
sample_scores.append(scores_list[index])
predictions.append(sample_prediction)
scores.append(sample_scores)
crossencoder_normalized_accuracy = -1
        overall_unnormalized_accuracy = -1
if not keep_all:
crossencoder_normalized_accuracy = accuracy
print(
"crossencoder normalized accuracy: %.4f"
% crossencoder_normalized_accuracy
)
if len(samples) > 0:
                overall_unnormalized_accuracy = (
crossencoder_normalized_accuracy * len(label_input) / len(samples)
)
print(
"overall unnormalized accuracy: %.4f" % overall_unormalized_accuracy
)
return (
biencoder_accuracy,
recall_at,
crossencoder_normalized_accuracy,
            overall_unnormalized_accuracy,
len(samples),
predictions,
scores,
)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--interactive", "-i", action="store_true", help="Interactive mode."
)
# test_data
parser.add_argument(
"--test_mentions", dest="test_mentions", type=str, help="Test Dataset."
)
parser.add_argument(
"--test_entities", dest="test_entities", type=str, help="Test Entities."
)
# biencoder
parser.add_argument(
"--biencoder_model",
dest="biencoder_model",
type=str,
default="models/biencoder_wiki_large.bin",
help="Path to the biencoder model.",
)
parser.add_argument(
"--biencoder_config",
dest="biencoder_config",
type=str,
default="models/biencoder_wiki_large.json",
help="Path to the biencoder configuration.",
)
parser.add_argument(
"--entity_catalogue",
dest="entity_catalogue",
type=str,
# default="models/tac_entity.jsonl", # TAC-KBP
default="models/entity.jsonl", # ALL WIKIPEDIA!
help="Path to the entity catalogue.",
)
parser.add_argument(
"--entity_encoding",
dest="entity_encoding",
type=str,
# default="models/tac_candidate_encode_large.t7", # TAC-KBP
default="models/all_entities_large.t7", # ALL WIKIPEDIA!
help="Path to the entity catalogue.",
)
# crossencoder
parser.add_argument(
"--crossencoder_model",
dest="crossencoder_model",
type=str,
default="models/crossencoder_wiki_large.bin",
help="Path to the crossencoder model.",
)
parser.add_argument(
"--crossencoder_config",
dest="crossencoder_config",
type=str,
default="models/crossencoder_wiki_large.json",
help="Path to the crossencoder configuration.",
)
parser.add_argument(
"--top_k",
dest="top_k",
type=int,
default=10,
help="Number of candidates retrieved by biencoder.",
)
# output folder
parser.add_argument(
"--output_path",
dest="output_path",
type=str,
default="output",
help="Path to the output.",
)
parser.add_argument(
"--fast", dest="fast", action="store_true", help="only biencoder mode"
)
parser.add_argument(
"--show_url",
dest="show_url",
action="store_true",
help="whether to show entity url in interactive mode",
)
parser.add_argument(
"--faiss_index", type=str, default=None, help="whether to use faiss index",
)
parser.add_argument(
"--index_path", type=str, default=None, help="path to load indexer",
)
args = parser.parse_args()
logger = utils.get_logger(args.output_path)
models = load_models(args, logger)
run(args, logger, *models)
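# Example invocations (a sketch; the model files are the defaults declared above
# and must be downloaded separately):
#   interactive mode:       python blink/main_dense.py -i
#   biencoder only (fast):  python blink/main_dense.py -i --fast --top_k 10
#   benchmark on a file:    python blink/main_dense.py --test_mentions <mentions.jsonl>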
|
BLINK-main
|
blink/main_dense.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
BLINK-main
|
blink/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import numpy
import os
import time
import torch
from blink.indexer.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer
import blink.candidate_ranking.utils as utils
logger = utils.get_logger()
def main(params):
output_path = params["output_path"]
output_dir, _ = os.path.split(output_path)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
logger = utils.get_logger(output_dir)
logger.info("Loading candidate encoding from path: %s" % params["candidate_encoding"])
candidate_encoding = torch.load(params["candidate_encoding"])
vector_size = candidate_encoding.size(1)
index_buffer = params["index_buffer"]
if params["hnsw"]:
logger.info("Using HNSW index in FAISS")
index = DenseHNSWFlatIndexer(vector_size, index_buffer)
else:
logger.info("Using Flat index in FAISS")
index = DenseFlatIndexer(vector_size, index_buffer)
logger.info("Building index.")
index.index_data(candidate_encoding.numpy())
logger.info("Done indexing data.")
if params.get("save_index", None):
index.serialize(output_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_path",
required=True,
type=str,
help="output file path",
)
parser.add_argument(
"--candidate_encoding",
default="models/all_entities_large.t7",
type=str,
help="file path for candidte encoding.",
)
parser.add_argument(
"--hnsw", action='store_true',
help='If enabled, use inference time efficient HNSW index',
)
parser.add_argument(
"--save_index", action='store_true',
help='If enabled, save index',
)
parser.add_argument(
'--index_buffer', type=int, default=50000,
help="Temporal memory data buffer size (in samples) for indexer",
)
params = parser.parse_args()
params = params.__dict__
main(params)
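# Example invocation (a sketch; paths are illustrative):
#   python blink/build_faiss_index.py --output_path output/faiss_hnsw_index.pkl \
#       --candidate_encoding models/all_entities_large.t7 --hnsw --save_index
# The serialized index can then be passed to blink/main_dense.py via
# --faiss_index hnsw --index_path output/faiss_hnsw_index.pkl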
|
BLINK-main
|
blink/build_faiss_index.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from flair.models import SequenceTagger
from flair.data import Sentence
def get_model(parameters=None):
return Flair(parameters)
class NER_model:
def __init__(self, parameters=None):
pass
def predict(self, sents):
"""Sents: List of plain text consequtive sentences.
Returns a dictionary consisting of a list of sentences and a list of mentions, where for each mention AT LEAST (it may give additional information) the following information is given:
sent_idx - the index of the sentence that contains the mention
text - the textual span that we hypothesise that represents an entity
start_pos - the character idx at which the textual mention starts
end_pos - the character idx at which the mention ends"""
pass
class Flair(NER_model):
def __init__(self, parameters=None):
self.model = SequenceTagger.load("ner")
def predict(self, sentences):
mentions = []
for sent_idx, sent in enumerate(sentences):
sent = Sentence(sent, use_tokenizer=True)
self.model.predict(sent)
sent_mentions = sent.to_dict(tag_type="ner")["entities"]
for mention in sent_mentions:
mention["sent_idx"] = sent_idx
mentions.extend(sent_mentions)
return {"sentences": sentences, "mentions": mentions}
|
BLINK-main
|
blink/ner.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import io
import json
import os
import pickle
from segtok.segmenter import split_multi
##### Reading helpers #####
def read_sentences_from_file(path_to_file, one_sentence_per_line=True):
lines = []
with io.open(path_to_file, mode="r", encoding="utf-8") as file:
for line in file:
line = line.strip()
if line != "":
lines.append(line.strip())
if one_sentence_per_line:
sentences = lines
else:
text = " ".join(lines)
sentences = list(split_multi(text))
sentences = [sentence for sentence in sentences if sentence != ""]
return sentences
##### Printing / writing helpers #####
def get_candidate_summary(candidate):
wikipedia_id = candidate["wikipedia_id"]
wikidata_id = candidate["wikidata_id"]
wikipedia_title = candidate["wikipedia_title"]
return "{}, {}, {}".format(wikipedia_id, wikidata_id, wikipedia_title)
def present_sentence_mentions(sentence, mentions, output_file):
    if output_file is not None:
f = io.open(output_file, mode="a", encoding="utf-8")
output = lambda s: f.write("{}\n".format(s))
else:
output = lambda s: print(s)
output("Sentence: {}".format(sentence))
mention_entity_pairs = []
for mention in mentions:
candidates = mention["candidates"]
# prediction = mention.get('predicted_candidate_idx', 0)
prediction = mention["predicted_candidate_idx"]
if prediction < len(candidates):
# print(type(mention['prob_assigned_to_candidate']))
# print(mention['prob_assigned_to_candidate'])
mention_rep = "{} ({}, {}) - {} (conf. {:.5f})".format(
mention["text"],
mention["start_pos"],
mention["end_pos"],
get_candidate_summary(candidates[prediction]),
mention["prob_assigned_to_candidate"],
)
else:
mention_rep = "{} ({}, {}) - {}".format(
mention["text"],
mention["start_pos"],
mention["end_pos"],
"No candidate selected",
)
mention_entity_pairs.append(mention_rep)
if len(mention_entity_pairs) != 0:
output("Mention-Entity pairs: \n{}".format("\n".join(mention_entity_pairs)))
else:
output("No detected mentions")
output("")
def sentence_mentions_pairs(sentences, mentions):
mentions_per_sent = {}
for m in mentions:
sent_idx = int(m["sent_idx"])
curr_ments = mentions_per_sent.get(sent_idx, [])
curr_ments.append(m)
mentions_per_sent[sent_idx] = curr_ments
pairs = []
for idx, sent in enumerate(sentences):
pairs.append((sent, mentions_per_sent.get(idx, [])))
return pairs
def present_annotated_sentences(sentences, mentions, output_file=None):
pairs = sentence_mentions_pairs(sentences, mentions)
for sent, ments in pairs:
present_sentence_mentions(sent, ments, output_file)
def write_dicts_as_json_per_line(list_of_dicts, txt_file_path):
with io.open(txt_file_path, mode="w", encoding="utf-8") as file:
for idx, mention in enumerate(list_of_dicts):
json_string = json.dumps(mention)
file.write(json_string)
if idx != (len(list_of_dicts) - 1):
file.write("\n")
def get_mentions_txt_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "mentions.jsonl"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
def get_sentences_txt_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "sentences.jsonl"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
def get_end2end_pickle_output_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "mentions_and_sentences.pickle"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
def write_end2end_pickle_output(sentences, mentions, output_file_id):
obj = {"sentences": sentences, "mentions": mentions}
with open(get_end2end_pickle_output_file_path(output_file_id), "wb") as file:
pickle.dump(obj, file)
def get_end2end_pretty_output_file_path(output_folder_path):
os.makedirs(output_folder_path, exist_ok=True)
file_name = "pretty.txt"
path_to_file = os.path.join(output_folder_path, file_name)
return path_to_file
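# Minimal round-trip sketch (not part of the original file): "input.txt" and
# "output" are hypothetical placeholders. Reads raw text, splits it into
# sentences with segtok, and writes one JSON object per line to
# output/sentences.jsonl.
if __name__ == "__main__":
    sentences = read_sentences_from_file("input.txt", one_sentence_per_line=False)
    dicts = [{"sent_idx": idx, "text": sent} for idx, sent in enumerate(sentences)]
    write_dicts_as_json_per_line(dicts, get_sentences_txt_file_path("output"))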
|
BLINK-main
|
blink/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import blink.utils as utils
import blink.ner as NER
import blink.candidate_generation as CG
import blink.candidate_data_fetcher as CDF
import blink.reranker as R
import argparse
import shutil
def main(parameters):
print("Parameters:", parameters)
# Read data
sentences = utils.read_sentences_from_file(
parameters["path_to_input_file"],
one_sentence_per_line=parameters["one_sentence_per_line"],
)
# Identify mentions
ner_model = NER.get_model(parameters)
ner_output_data = ner_model.predict(sentences)
sentences = ner_output_data["sentences"]
mentions = ner_output_data["mentions"]
output_folder_path = parameters["output_folder_path"]
if (
(output_folder_path is not None)
and os.path.exists(output_folder_path)
and os.listdir(output_folder_path)
):
print(
"The given output directory ({}) already exists and is not empty.".format(
output_folder_path
)
)
answer = input("Would you like to empty the existing directory? [Y/N]\n")
if answer.strip() == "Y":
print("Deleting {}...".format(output_folder_path))
shutil.rmtree(output_folder_path)
else:
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(
output_folder_path
)
)
if output_folder_path is not None:
utils.write_dicts_as_json_per_line(
sentences, utils.get_sentences_txt_file_path(output_folder_path)
)
utils.write_dicts_as_json_per_line(
mentions, utils.get_mentions_txt_file_path(output_folder_path)
)
# Generate candidates and get the data that describes the candidates
candidate_generator = CG.get_model(parameters)
candidate_generator.process_mentions_for_candidate_generator(
sentences=sentences, mentions=mentions
)
for mention in mentions:
mention["candidates"] = candidate_generator.get_candidates(mention)
if parameters["consider_additional_datafetcher"]:
data_fetcher = CDF.get_model(parameters)
for candidate in mention["candidates"]:
data_fetcher.get_data_for_entity(candidate)
if output_folder_path is not None:
utils.write_dicts_as_json_per_line(
mentions, utils.get_mentions_txt_file_path(output_folder_path)
)
# Reranking
reranking_model = R.get_model(parameters)
reranking_model.rerank(mentions, sentences)
if output_folder_path is not None:
utils.write_dicts_as_json_per_line(
mentions, utils.get_mentions_txt_file_path(output_folder_path)
)
utils.write_end2end_pickle_output(sentences, mentions, output_folder_path)
utils.present_annotated_sentences(
sentences,
mentions,
utils.get_end2end_pretty_output_file_path(output_folder_path),
)
# Showcase results
utils.present_annotated_sentences(sentences, mentions)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Input data
parser.add_argument(
"--path_to_input_file",
"--i",
dest="path_to_input_file",
type=str,
required=True,
)
parser.add_argument(
"--one_sentence_per_line",
action="store_true",
help="Set if the input file has one sentence per line",
)
# Candidate generation
parser.add_argument(
"--solr_address",
default="http://localhost:8983/solr/wikipedia",
type=str,
help="The address to the solr index.",
)
parser.add_argument(
"--query",
type=str,
default='title:( {} ) OR aliases:" {} " OR sent_desc_1:( {} )^0.5',
help="The query following the argument template of str.format",
)
parser.add_argument(
"--keys",
type=str,
default="text,text,context",
help="The comma separated list of keys to be feeded to str.format with the query as the formating string.",
)
parser.add_argument(
"--boosting",
default="log(sum(num_incoming_links,1))",
type=str,
help="The address to the solr index.",
)
parser.add_argument(
"--raw_solr_fields",
action="store_true",
help="Whether to escape the special characters in the solr queries.",
)
# Candidate descriptions and additional data
parser.add_argument(
"--consider_additional_datafetcher",
action="store_true",
help="Whether to include some additional data to the candidates using a datafetcher.",
)
parser.add_argument(
"--path_to_candidate_data_dict",
default="data/KB_data/title2enriched_parsed_obj_plus.p",
type=str,
help="The path to the data used by the data fetcher (the default path points to the wikipedia data).",
)
# Reranking
parser.add_argument(
"--path_to_model",
"--m",
dest="path_to_model",
type=str,
required=True,
help="The full path to the model.",
)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--evaluation_batch_size",
default=1,
type=int,
help="Total batch size for evaluation.",
)
parser.add_argument(
"--top_k",
type=int,
default=80,
help="The number of candidates retrieved by the candiadate generator and considered by the reranker",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether to use CUDA when available"
)
parser.add_argument(
"--lowercase_flag",
action="store_true",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument(
"--context_key",
default="tagged_context",
type=str,
help="The field that contains the mention context.",
)
parser.add_argument(
"--dataparallel_bert",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
# Output
parser.add_argument(
"--output_folder_path",
"--o",
dest="output_folder_path",
default=None,
type=str,
help="A path to the folder where the mentions and sentences are to be dumped. If it is not given, the results would not be saved.",
)
args = parser.parse_args()
args.rows = args.top_k
parameters = args.__dict__
main(parameters)
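# Example invocation (a sketch; it assumes a running solr index populated by
# blink/candidate_retrieval/data_ingestion.py, and the input/model paths are
# illustrative placeholders):
#   python blink/main_solr.py --i <input.txt> --m <reranker_model.bin> \
#       --consider_additional_datafetcher --o output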
|
BLINK-main
|
blink/main_solr.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from multiprocessing.pool import ThreadPool
from candidate_generators import (
Simple_Candidate_Generator,
Pregenerated_Candidates_Data_Fetcher,
)
import multiprocessing
import utils
import time
import argparse
import pickle
import os
from evaluator import Evaluator
from tqdm import tqdm
import pysolr
def run_thread(arguments):
mentions = arguments["data"]
candidate_generator = arguments["candidate_generator"]
args = arguments["args"]
if args.keep_pregenerated_candidates:
data_fetcher = arguments["pregenereted_cands_data_fetcher"]
if arguments["id"] == 0:
print("Query args: ", candidate_generator.query_arguments)
print_query_flag = True
for mention in tqdm(mentions):
mention["generated_candidates"] = candidate_generator.get_candidates(
mention, print_query_flag=print_query_flag
)
print_query_flag = False
if args.keep_pregenerated_candidates:
wikidata_ids = mention["candidates_wikidata_ids"]
mention["candidates_data"] = data_fetcher.get_candidates_data(
wikidata_ids
)
else:
for mention in mentions:
mention["generated_candidates"] = candidate_generator.get_candidates(
mention
)
if args.keep_pregenerated_candidates:
wikidata_ids = mention["candidates_wikidata_ids"]
mention["candidates_data"] = data_fetcher.get_candidates_data(
wikidata_ids
)
return arguments["id"], mentions
def split(a, n):
k, m = divmod(len(a), n)
return (a[i * k + min(i, m) : (i + 1) * k + min(i + 1, m)] for i in range(n))
def main(args):
wall_start = time.time()
parameters = get_parameters(args)
print("Candidate generator parameters:", parameters)
datasets = utils.get_datasets(
args.include_aida_train, args.keep_pregenerated_candidates
)
if args.single_dataset:
datasets = [datasets[0]]
mentions = utils.get_list_of_mentions(datasets)
    # NUM_THREADS = multiprocessing.cpu_count()
NUM_THREADS = args.num_threads
pool = ThreadPool(NUM_THREADS)
# Split the data into approximately equal parts and give one block to each thread
data_per_thread = split(mentions, NUM_THREADS)
if args.keep_pregenerated_candidates:
arguments = [
{
"id": idx,
"data": data_bloc,
"args": args,
"candidate_generator": Simple_Candidate_Generator(parameters),
"pregenereted_cands_data_fetcher": Pregenerated_Candidates_Data_Fetcher(
parameters
),
}
for idx, data_bloc in enumerate(data_per_thread)
]
else:
arguments = [
{
"id": idx,
"data": data_bloc,
"args": args,
"candidate_generator": Simple_Candidate_Generator(parameters),
}
for idx, data_bloc in enumerate(data_per_thread)
]
results = pool.map(run_thread, arguments)
# Merge the results
processed_mentions = []
for _id, mentions in results:
processed_mentions = processed_mentions + mentions
has_gold = 0
pool.terminate()
pool.join()
execution_time = (time.time() - wall_start) / 60
print("The execution took:", execution_time, " minutes")
# Evaluate the generation
evaluator = Evaluator(processed_mentions)
evaluator.candidate_generation(
save_gold_pos=True, save_pregenerated_gold_pos=args.keep_pregenerated_candidates
)
# Dump the data if the dump_mentions flag was set
if args.dump_mentions:
print("Dumping processed mentions")
# Create the directory for the mention dumps if it does not exist
dump_folder = args.dump_mentions_folder
os.makedirs(dump_folder, exist_ok=True)
dump_object = {}
dump_object["mentions"] = processed_mentions
dump_object["total_per_dataset"] = evaluator.total_per_dataset
dump_object["has_gold_per_dataset"] = evaluator.has_gold_per_dataset
dump_object["parameters"] = parameters
dump_object["args"] = args
dump_object["execution_time"] = execution_time
pickle.dump(
dump_object,
open(os.path.join(dump_folder, args.dump_file_id), "wb"),
protocol=4,
)
# evaluator.candidate_generation(max_rank=100)
return evaluator.recall
def get_parameters(args):
parameters = {
"collection_name": args.collection_name,
"rows": args.rows,
"solr_address": args.solr_address,
}
parameters["query_data"] = {}
parameters["query_data"]["string"] = args.query
parameters["query_data"]["keys"] = [k.strip() for k in args.keys.split(",")]
parameters["boosting"] = args.boosting
return parameters
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Debugging setting
parser.add_argument("--single_dataset", dest="single_dataset", action="store_true")
parser.set_defaults(single_dataset=False)
# Query parameters
parser.add_argument(
"--query",
type=str,
default='title:( {} ) OR aliases:" {} " OR sent_desc_1:( {} )^0.5',
help="The query following the argument template of q.format",
)
parser.add_argument(
"--keys",
type=str,
default="mention,mention,sent_context_curr",
help="The comma separated list of keys to be feeded to str.format with the query as the formating string. Example fields `mention`, `query_context`, `query_truncated_10_context` or `query_truncated_25_context`",
)
parser.add_argument("--rows", type=int, default=80)
parser.add_argument("--collection_name", type=str, default="wikipedia")
parser.add_argument("--solr_address", type=str, default="http://localhost:8983")
parser.add_argument(
"--boosting", type=str, default="log(sum(num_incoming_links,1))"
)
# Multithreading
parser.add_argument("--num_threads", type=int, required=True)
# Candidates dumping
parser.add_argument("--dump_mentions", dest="dump_mentions", action="store_true")
parser.set_defaults(dump_mentions=False)
parser.add_argument(
"--dump_mentions_folder", type=str, default="data/mention_dumps"
)
parser.add_argument("--dump_file_id", type=str)
# Include training dataset
parser.add_argument(
"--include_aida_train", dest="include_aida_train", action="store_true"
)
parser.set_defaults(include_aida_train=False)
# Keep pregenerated candidates
parser.add_argument(
"--keep_pregenerated_candidates",
action="store_true",
help="Whether to keep the candidates given with the dataset.",
)
args = parser.parse_args()
print(args)
main(args)
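# Example invocation (a sketch; it assumes a populated solr collection named
# "wikipedia" and the datasets expected by utils.get_datasets):
#   python blink/candidate_retrieval/perform_and_evaluate_candidate_retrieval_multithreaded.py \
#       --num_threads 4 --rows 80 --dump_mentions --dump_file_id solr_top80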
|
BLINK-main
|
blink/candidate_retrieval/perform_and_evaluate_candidate_retrieval_multithreaded.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import pysolr
import pickle
import emoji
import time
import os
parser = argparse.ArgumentParser()
parser.add_argument(
"--processed_data_file_path",
type=str,
help="The full path to the data file",
required=True,
)
parser.add_argument(
"--collection_name",
type=str,
help="The solr collection name, in which the ingestion should be performed",
required=True,
)
parser.add_argument(
"--add_sentence_data", dest="add_sentence_data", action="store_true"
)
parser.set_defaults(add_sentence_data=False)
parser.add_argument(
"--remove_disambiguation_pages",
dest="remove_disambiguation_pages",
action="store_true",
)
parser.set_defaults(remove_disambiguation_pages=False)
parser.add_argument("--min_tokens", type=int, default=0)
args = parser.parse_args()
processed_data_path = args.processed_data_file_path
collection_name = args.collection_name
# processed_data_path = "/scratch/martinjosifoski/data/en-wiki-filtered-wikidata"
def remove_all_docs():
solr.delete(q="*:*")
def load_data():
return pickle.load(open(processed_data_path, "rb"))
def get_data_for_key(data, title):
obj = {}
obj["id"] = data[title]["wikipedia_id"]
obj["title"] = title
if ("wikidata_info" in data[title]) and (
data[title]["wikidata_info"]["wikidata_id"] is not None
):
obj["wikidata_id"] = data[title]["wikidata_info"]["wikidata_id"]
else:
obj["wikidata_id"] = data[title]["wikidata_id_from_index"]
description = data[title]["intro_concatenated"]
obj["desc"] = description
if "wikidata_info" in data[title]:
if "description" in data[title]["wikidata_info"]:
wikidata_description = data[title]["wikidata_info"]["description"]
else:
wikidata_description = ""
if ("aliases" in data[title]["wikidata_info"]) and (
data[title]["wikidata_info"]["aliases"]
) is not None:
aliases = " ".join(
[
'"{}"'.format(alias)
for alias in data[title]["wikidata_info"]["aliases"]
if alias not in emoji.UNICODE_EMOJI
]
)
else:
aliases = ""
else:
aliases = ""
wikidata_description = ""
obj["aliases"] = aliases
obj["wikidata_desc"] = wikidata_description
obj["num_tokens"] = data[title]["num_tokens"]
obj["num_incoming_links"] = data[title].get("num_incoming_links", 0)
if args.add_sentence_data:
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
obj[key] = data[title].get(key, "")
return obj
print("Loading data")
title2data = load_data()
for key in title2data:
title2data[key]["intro_concatenated"] = " ".join(
[line for line in title2data[key]["intro_lines"] if line != ""]
)
# Filter documents with fewer than `args.min_tokens` tokens
if args.min_tokens != 0:
    print("Removing documents with fewer than {} tokens".format(args.min_tokens))
print("Number of docs BEFORE removal:", len(title2data))
title2data = {
key: value
for key, value in title2data.items()
if value["num_tokens"] >= args.min_tokens
}
print("Number of docs AFTER removal:", len(title2data))
print("")
# Remove disambiguation pages
if args.remove_disambiguation_pages:
print("Remove disambiguation pages")
print("Number of docs BEFORE removal:", len(title2data))
titles_to_delete = []
for title in title2data:
parsed_obj = title2data[title]
if ("disambiguation" in title) or ("Disambiguation" in title):
titles_to_delete.append(title)
else:
if (parsed_obj.get("wikidata_info", None) is not None) and (
parsed_obj["wikidata_info"].get("description", None) is not None
):
wikidata_info = parsed_obj["wikidata_info"]
if ("disambiguation page" in wikidata_info["description"]) or (
"Disambiguation page" in wikidata_info["description"]
):
titles_to_delete.append(title)
for title in titles_to_delete:
del title2data[title]
print("Number of docs AFTER removal:", len(title2data))
print("Number of removed docs:", len(titles_to_delete))
print("")
ingestion_data = [get_data_for_key(title2data, key) for key in title2data]
print("Starting ingestion")
wall_start = time.time()
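# Ingest the documents in batches of `step`: the loop below sends every full
# batch, and the final solr.add afterwards picks up the remaining partial batch.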
l = 0
r = step = 10000
solr = pysolr.Solr(
"http://localhost:8983/solr/{}".format(collection_name),
always_commit=True,
timeout=100,
)
c = 0
for r in range(r, len(ingestion_data), step):
c += 1
if (c % 10) == 0:
print("Processed", c, "batches")
temp_data = ingestion_data[l:r]
solr.add(temp_data, commit=True)
l = r
solr.add(ingestion_data[l : len(ingestion_data)], commit=True)
solr.commit()
print("The processing took:", (time.time() - wall_start) / 60, " minutes")
|
BLINK-main
|
blink/candidate_retrieval/data_ingestion.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pickle
import os
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_folder",
type=str,
help="The full path to the output folder",
required=True,
)
args = parser.parse_args()
output_folder = args.output_folder
output_file_path = os.path.join(output_folder, "en-wiki-filtered-wikidata")
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
# Add wikidata_id from the download index to wikipedia articles whenever we have it
wikipediaid2wikidataid_file_path = os.path.join(
output_folder, "wikipediaid2wikidataid.p"
)
wikipedia_data_filtered_file_path = os.path.join(output_folder, "en-wiki-filtered")
wikipediaid2wikidataid = pickle.load(open(wikipediaid2wikidataid_file_path, "rb"))
wikipedia_data_filtered = pickle.load(open(wikipedia_data_filtered_file_path, "rb"))
for key in wikipedia_data_filtered.keys():
wikipedia_id, wikipedia_title = key
wikipedia_id = int(wikipedia_id)
if wikipedia_id in wikipediaid2wikidataid:
wikidata_id = wikipediaid2wikidataid[wikipedia_id]
wikipedia_data_filtered[key]["wikidata_id_from_index"] = wikidata_id
else:
wikipedia_data_filtered[key]["wikidata_id_from_index"] = None
# Read the processed wikidata object and generate amenable mappings
wikidataid_title2parsed_obj_file_path = os.path.join(
output_folder, "wikidataid_title2parsed_obj.p"
)
wikidataid_title2parsed_obj = pickle.load(
open(wikidataid_title2parsed_obj_file_path, "rb")
)
title2parsed_obj = {}
wikidataid2parsed_obj = {}
for key in wikidataid_title2parsed_obj.keys():
wikidata_id, wikipedia_title = key
wikidataid_title2parsed_obj[key]["wikidata_id"] = wikidata_id
wikidataid_title2parsed_obj[key]["wikipedia_title"] = wikipedia_title
title2parsed_obj[wikipedia_title] = wikidataid_title2parsed_obj[key]
wikidataid2parsed_obj[wikidata_id] = wikidataid_title2parsed_obj[key]
matched_by_title = 0
not_matched_by_title_list = []
matched_by_id = 0
not_matched_by_anything = []
# link wikipedia with wikidata
for key in wikipedia_data_filtered.keys():
wikipedia_id, wikipedia_title = key
wikipedia_id = int(wikipedia_id)
wikidata_id_from_index = wikipedia_data_filtered[key]["wikidata_id_from_index"]
## 1) TITLE 2) ID
## works better, linking is more accurate
if wikipedia_title in title2parsed_obj:
matched_by_title += 1
wikipedia_data_filtered[key]["wikidata_info"] = title2parsed_obj[
wikipedia_title
]
else:
not_matched_by_title_list.append(
(wikipedia_id, wikipedia_title, wikidata_id_from_index)
)
if (wikidata_id_from_index is not None) and (
wikidata_id_from_index in wikidataid2parsed_obj
):
matched_by_id += 1
wikipedia_data_filtered[key]["wikidata_info"] = wikidataid2parsed_obj[
wikidata_id_from_index
]
else:
not_matched_by_anything.append(
(wikipedia_id, wikipedia_title, wikidata_id_from_index)
)
## 1) ID 2) TITLE
# if (wikidata_id_from_index is not None) and (wikidata_id_from_index in wikidataid2parsed_obj):
# matched_by_id += 1
# wikipedia_data_filtered[key]['wikidata_info'] = wikidataid2parsed_obj[wikidata_id_from_index]
# else:
# not_matched_by_title_list.append((wikipedia_id, wikipedia_title, wikidata_id_from_index))
# if wikipedia_title in title2parsed_obj:
# matched_by_title += 1
# wikipedia_data_filtered[key]['wikidata_info'] = title2parsed_obj[wikipedia_title]
# else:
# not_matched_by_anything.append((wikipedia_id, wikipedia_title, wikidata_id_from_index))
print("Matched by title:", matched_by_title)
print("Matched by id:", matched_by_id)
print("Not found:", len(not_matched_by_anything))
print("Dumping", output_file_path)
pickle.dump(wikipedia_data_filtered, open(output_file_path, "wb"), protocol=4)
|
BLINK-main
|
blink/candidate_retrieval/link_wikipedia_and_wikidata.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import xml.etree.ElementTree as ET
import io
import re
import argparse
import os
import pickle
import sys
import urllib.parse
import regex
parser = argparse.ArgumentParser()
parser.add_argument(
"--input", type=str, help="The full path to the file to process", required=True
)
parser.add_argument(
"--output", type=str, help="The full path to the output file", required=True
)
args = parser.parse_args()
input_file_path = args.input
output_file_path = args.output
if not os.path.isfile(input_file_path):
print("Input file `{}` doesn't exist!".format(output_file_path))
sys.exit()
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
xml_end_tag = "</doc>"
entities_with_duplicate_titles = set()
title2id = {}
id_title2parsed_obj = {}
num_lines = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
num_lines += 1
c = 0
pattern = re.compile(r"(<a href=([^>]+)>((?:.(?!</a>))*.)</a>)")
docs_failed_xml = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
c += 1
if c % 1000000 == 0:
print("Processed: {:.2f}%".format(c * 100 / num_lines))
if line.startswith("<doc id="):
doc_xml = ET.fromstring("{}{}".format(line, xml_end_tag))
doc_attr = doc_xml.attrib
lines = []
lines.append(line.strip())
if line.startswith("</doc>"):
temp_obj = {}
temp_obj["url"] = doc_attr["url"]
temp_obj["lines"] = lines
text = " ".join([l for l in lines if l != ""])
try:
doc_xml = ET.fromstring(text)
links_xml = doc_xml.getchildren()
links = []
for link_xml in links_xml:
link = {}
link["xml_attributes"] = link_xml.attrib
link["text"] = link_xml.text.strip()
link["href_unquoted"] = urllib.parse.unquote(
link_xml.attrib["href"]
)
link_xml.tail = ""
link["raw"] = ET.tostring(
link_xml, encoding="unicode", method="xml"
)
links.append(link)
temp_obj["links_xml"] = links
except Exception as e:
temp_obj["links_xml"] = None
docs_failed_xml += 1
text = " ".join([l for l in lines[1:-1] if l != ""])
links = []
for match in pattern.findall(text):
raw, href, text = match
link = {}
link["raw"] = raw
link["href_unquoted"] = urllib.parse.unquote(href.strip('"'))
link["text"] = text
links.append(link)
temp_obj["links_regex"] = links
id_, title = doc_attr["id"], doc_attr["title"]
key = (id_, title)
id_title2parsed_obj[key] = temp_obj
# # check for duplicate titles
# if title in title2id:
# entities_with_duplicate_titles.add(id_)
# entities_with_duplicate_titles.add(title2id[title])
# print("DUPLICATE TITLE:", id_, title2id[title])
# else:
# title2id[title] = id_
print("Processed: {:.2f}%".format(c * 100 / num_lines))
print("Dumping", output_file_path)
pickle.dump(id_title2parsed_obj, open(output_file_path, "wb"), protocol=4)
# print('Portion of documents with improper xml: {:.2f}%'.format(docs_failed_xml*100/len(id_title2parsed_obj)))
|
BLINK-main
|
blink/candidate_retrieval/process_wiki_extractor_output_links.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import xml.etree.ElementTree as ET
import io
import re
import argparse
import os
import pickle
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--input", type=str, help="The full path to the file to process", required=True
)
parser.add_argument(
"--output", type=str, help="The full path to the output file", required=True
)
args = parser.parse_args()
input_file_path = args.input
output_file_path = args.output
if not os.path.isfile(input_file_path):
print("Input file `{}` doesn't exist!".format(output_file_path))
sys.exit()
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
xml_end_tag = "</doc>"
entities_with_duplicate_titles = set()
title2id = {}
id_title2parsed_obj = {}
num_lines = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
num_lines += 1
c = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
c += 1
if c % 1000000 == 0:
print("Processed: {:.2f}%".format(c * 100 / num_lines))
if line.startswith("<doc id="):
doc_xml = ET.fromstring("{}{}".format(line, xml_end_tag))
doc_attr = doc_xml.attrib
first_paragraph_flag = True
lines = []
continue
if not first_paragraph_flag:
continue
if line.startswith("Section::::") or line.startswith("</doc>"):
temp_obj = {}
temp_obj["url"] = doc_attr["url"]
temp_obj["intro_lines"] = lines
id_, title = doc_attr["id"], doc_attr["title"]
key = (id_, title)
id_title2parsed_obj[key] = temp_obj
# # check for duplicate titles
# if title in title2id:
# entities_with_duplicate_titles.add(id_)
# entities_with_duplicate_titles.add(title2id[title])
# print("DUPLICATE TITLE:", id_, title2id[title])
# else:
# title2id[title] = id_
first_paragraph_flag = False
continue
lines.append(line.strip())
print("Dumping", output_file_path)
pickle.dump(id_title2parsed_obj, open(output_file_path, "wb"), protocol=4)
|
BLINK-main
|
blink/candidate_retrieval/process_wiki_extractor_output.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import re
import pickle
import os
import time
import numpy as np
"""
This script is adapted from https://github.com/lephong/mulrel-nel
"""
def read_csv_file(path, added_params):
data = {}
info = True
with open(path, "r", encoding="utf8") as f:
for line in f:
comps = line.strip().split("\t")
doc_name = comps[0] + " " + comps[1]
mention = comps[2]
lctx = comps[3]
rctx = comps[4]
if comps[6] != "EMPTYCAND":
cands = [c.split(",") for c in comps[6:-2]]
cands = [
(",".join(c[2:]).replace('"', "%22").replace(" ", "_"), float(c[1]))
for c in cands
]
else:
cands = []
gold = comps[-1].split(",")
if gold[0] == "-1":
gold = (
",".join(gold[2:]).replace('"', "%22").replace(" ", "_"),
1e-5,
-1,
)
else:
gold = (
",".join(gold[3:]).replace('"', "%22").replace(" ", "_"),
1e-5,
-1,
)
if added_params["generate_cands"]:
if info:
print("Generating candidates")
info = False
cands = added_params["cand_generator"].process(mention)
if doc_name not in data:
data[doc_name] = []
data[doc_name].append(
{
"mention": mention,
"context": (lctx, rctx),
"candidates": cands,
"gold": gold,
}
)
return data
### Adds original textual data to pregenerated data
def read_conll_file(data, path):
conll = {}
with open(path, "r", encoding="utf8") as f:
cur_sent = None
cur_doc = None
for line in f:
line = line.strip()
if line.startswith("-DOCSTART-"):
docname = line.split()[1][1:]
conll[docname] = {"sentences": [], "mentions": []}
cur_doc = conll[docname]
cur_sent = []
else:
if line == "":
cur_doc["sentences"].append(cur_sent)
cur_sent = []
else:
comps = line.split("\t")
tok = comps[0]
cur_sent.append(tok)
if len(comps) >= 6:
bi = comps[1]
wikilink = comps[4]
if bi == "I":
cur_doc["mentions"][-1]["end"] += 1
else:
new_ment = {
"sent_id": len(cur_doc["sentences"]),
"start": len(cur_sent) - 1,
"end": len(cur_sent),
"wikilink": wikilink,
}
cur_doc["mentions"].append(new_ment)
# merge with data
    rmpunc = re.compile(r"[\W]+")
for doc_name, content in data.items():
conll_doc = conll[doc_name.split()[0]]
content[0]["conll_doc"] = conll_doc
cur_conll_m_id = 0
for m in content:
mention = m["mention"]
gold = m["gold"]
while True:
cur_conll_m = conll_doc["mentions"][cur_conll_m_id]
cur_conll_mention = " ".join(
conll_doc["sentences"][cur_conll_m["sent_id"]][
cur_conll_m["start"] : cur_conll_m["end"]
]
)
if rmpunc.sub("", cur_conll_mention.lower()) == rmpunc.sub(
"", mention.lower()
):
m["conll_m"] = cur_conll_m
cur_conll_m_id += 1
break
else:
cur_conll_m_id += 1
return data
##### Check whether an entity is a person and whether the doc contains other references with a more descriptive name for the person
##### (e.g. John vs John Snow vs John Snow Stark). Then process the candidate lists for all of the mentions that fit this description.
def load_person_names(path):
data = []
with open(path, "r", encoding="utf8") as f:
for line in f:
data.append(line.strip().replace(" ", "_"))
return set(data)
def find_coref(ment, mentlist, person_names):
cur_m = ment["mention"].lower()
coref = []
for m in mentlist:
if len(m["candidates"]) == 0 or m["candidates"][0][0] not in person_names:
continue
mention = m["mention"].lower()
start_pos = mention.find(cur_m)
if start_pos == -1 or mention == cur_m:
continue
end_pos = start_pos + len(cur_m) - 1
if (start_pos == 0 or mention[start_pos - 1] == " ") and (
end_pos == len(mention) - 1 or mention[end_pos + 1] == " "
):
coref.append(m)
return coref
def with_coref(dataset, person_names):
for data_name, content in dataset.items():
for cur_m in content:
coref = find_coref(cur_m, content, person_names)
if coref is not None and len(coref) > 0:
cur_cands = {}
for m in coref:
for c, p in m["candidates"]:
cur_cands[c] = cur_cands.get(c, 0) + p
for c in cur_cands.keys():
cur_cands[c] /= len(coref)
cur_m["candidates"] = sorted(
list(cur_cands.items()), key=lambda x: x[1]
)[::-1]
######
def eval(testset, system_pred, nel=False):
gold = []
pred = []
for doc_name, content in testset.items():
gold += [c["gold"][0] for c in content] # the gold named entity
pred += [
c["pred"][0] for c in system_pred[doc_name]
] # the predicted named entity
true_pos = 0
for g, p in zip(gold, pred):
if g == p and p != "NIL":
true_pos += 1
if nel:
NIL_preds = len([p for p in pred if p == "NIL"])
total_discovered_mentions = 0
for doc_name, content in testset.items():
total_discovered_mentions += np.sum(
len(ment) for ment in content[0]["ments_per_sent_flair"]
)
precision = true_pos / (total_discovered_mentions - NIL_preds)
else:
precision = true_pos / len([p for p in pred if p != "NIL"])
recall = true_pos / len(gold)
f1 = 2 * precision * recall / (precision + recall)
return precision, recall, f1
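# Worked example: with gold = ["A", "B", "C"] and pred = ["A", "NIL", "C"],
# true_pos = 2, precision = 2/2 = 1.0, recall = 2/3, and f1 = 0.8.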
def get_candidate_generator(added_params):
if added_params["candidate_generator_type"] == "p_e_m":
if "p_e_m_data_path" in added_params:
return FetchCandidateEntities(added_params["p_e_m_data_path"])
else:
return FetchCandidateEntities()
else:
pass
class CoNLLDataset:
"""
reading dataset from CoNLL dataset, extracted by https://github.com/dalab/deep-ed/
"""
def __init__(self, path, person_path, conll_path, added_params):
if added_params["generate_ments_and_cands"]:
added_params["generate_cands"] = False
if added_params["generate_cands"] or added_params["generate_ments_and_cands"]:
added_params["cand_generator"] = get_candidate_generator(added_params)
print(added_params)
print("load csv")
self.train = read_csv_file(path + "/aida_train.csv", added_params)
self.testA = read_csv_file(path + "/aida_testA.csv", added_params)
self.testB = read_csv_file(path + "/aida_testB.csv", added_params)
self.ace2004 = read_csv_file(path + "/wned-ace2004.csv", added_params)
self.aquaint = read_csv_file(path + "/wned-aquaint.csv", added_params)
self.clueweb = read_csv_file(path + "/wned-clueweb.csv", added_params)
self.msnbc = read_csv_file(path + "/wned-msnbc.csv", added_params)
self.wikipedia = read_csv_file(path + "/wned-wikipedia.csv", added_params)
self.wikipedia.pop("Jiří_Třanovský Jiří_Třanovský", None)
print("process coref")
person_names = load_person_names(person_path)
with_coref(self.train, person_names)
with_coref(self.testA, person_names)
with_coref(self.testB, person_names)
with_coref(self.ace2004, person_names)
with_coref(self.aquaint, person_names)
with_coref(self.clueweb, person_names)
with_coref(self.msnbc, person_names)
with_coref(self.wikipedia, person_names)
print("load conll")
read_conll_file(self.train, conll_path + "/AIDA/aida_train.txt")
read_conll_file(self.testA, conll_path + "/AIDA/testa_testb_aggregate_original")
read_conll_file(self.testB, conll_path + "/AIDA/testa_testb_aggregate_original")
read_conll_file(
self.ace2004, conll_path + "/wned-datasets/ace2004/ace2004.conll"
)
read_conll_file(
self.aquaint, conll_path + "/wned-datasets/aquaint/aquaint.conll"
)
read_conll_file(self.msnbc, conll_path + "/wned-datasets/msnbc/msnbc.conll")
read_conll_file(
self.clueweb, conll_path + "/wned-datasets/clueweb/clueweb.conll"
)
read_conll_file(
self.wikipedia, conll_path + "/wned-datasets/wikipedia/wikipedia.conll"
)
if added_params["generate_cands"]:
print(
"Number of candidates not present in p_e_m originally, but present when lowercased",
len(added_params["cand_generator"].lower_org),
)
print(
"Number of candidates not present in p_e_m originally, but present in p_e_m_lower when lowercased ",
len(added_params["cand_generator"].lower_lower),
)
class FetchCandidateEntities(object):
"""takes as input a string or a list of words and checks if it is inside p_e_m
if yes it returns the candidate entities otherwise it returns None.
it also checks if string.lower() inside p_e_m and if string.lower() inside p_e_m_low"""
def __init__(self, p_e_m_data_path="data/basic_data/p_e_m_data/"):
print("Reading p_e_m dictionaries")
# return
wall_start = time.time()
self.lower_org = []
self.lower_lower = []
self.p_e_m = pickle.load(
open(os.path.join(p_e_m_data_path, "p_e_m_dict.pickle"), "rb")
)
self.p_e_m_lower = pickle.load(
open(os.path.join(p_e_m_data_path, "p_e_m_lower_dict.pickle"), "rb")
)
self.mention_total_freq = pickle.load(
open(os.path.join(p_e_m_data_path, "mention_total_freq.pickle"), "rb")
)
print("The reading took:", (time.time() - wall_start) / 60, " minutes")
def process(self, span):
"""span can be either a string or a list of words"""
title = span.title()
# 'obama 44th president of united states'.title() # 'Obama 44Th President Of United States'
title_freq = (
self.mention_total_freq[title] if title in self.mention_total_freq else 0
)
span_freq = (
self.mention_total_freq[span] if span in self.mention_total_freq else 0
)
if title_freq == 0 and span_freq == 0:
if span.lower() in self.p_e_m:
self.lower_org.append(span)
return self.p_e_m[span.lower()]
elif span.lower() in self.p_e_m_lower:
self.lower_lower.append(span)
return self.p_e_m_lower[span.lower()]
else:
return []
else:
if span_freq > title_freq:
return self.p_e_m[span]
else:
return self.p_e_m[title]
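# Minimal usage sketch (not part of the original file): it assumes the p_e_m
# pickles exist at the default path; process() returns (entity, prior) pairs.
if __name__ == "__main__":
    fetcher = FetchCandidateEntities()
    print(fetcher.process("Obama")[:5])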
|
BLINK-main
|
blink/candidate_retrieval/dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sqlite3
import pickle
import os
import io
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--output", type=str, help="The full path to the data folder", required=True
)
args = parser.parse_args()
data_folder = args.output
output_file_name = "title2enriched_parsed_obj.p"
output_file_path = os.path.join(data_folder, output_file_name)
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
linktitle2wikidataid_file_name = "linktitle2wikidataid.p"
linktitle2wikidataid_path = os.path.join(data_folder, linktitle2wikidataid_file_name)
linktitle2wikidataid = pickle.load(open(linktitle2wikidataid_path, "rb"))
# Read links data
links_file_name = "en-wikilinks-processed"
links_file_path = os.path.join(data_folder, links_file_name)
links_data = pickle.load(open(links_file_path, "rb"))
print("Links data is loaded")
# Read full text data
full_num_tokens_file_name = "en-wiki-full-text"
full_num_tokens_file_path = os.path.join(data_folder, full_num_tokens_file_name)
full_num_tokens_data = pickle.load(open(full_num_tokens_file_path, "rb"))
print("Full text and number of tokens data is loaded")
# Read linked (wikipedia with wikidata) data
filtered_and_wikidata_file_name = "en-wiki-filtered-wikidata"
filtered_and_wikidata_file_path = os.path.join(
data_folder, filtered_and_wikidata_file_name
)
filtered_and_wikidata_data = pickle.load(open(filtered_and_wikidata_file_path, "rb"))
print("Introduction text, linked with wikidata data is loaded")
# Transform the linked data into a title2parsed_obj dictionary
# Add the number of tokens information
title2parsed_obj = {}
for key in filtered_and_wikidata_data.keys():
wikipedia_id, wikipedia_title = key
filtered_and_wikidata_data[key]["wikipedia_id"] = wikipedia_id
filtered_and_wikidata_data[key]["wikipedia_title"] = wikipedia_title
filtered_and_wikidata_data[key]["num_tokens"] = full_num_tokens_data[key][
"num_tokens"
]
title2parsed_obj[wikipedia_title] = filtered_and_wikidata_data[key]
total = {"xml": 0, "regex": 0}
found = {"xml": 0, "regex": 0}
not_found = {"xml": [], "regex": []}
# Counting using the title
for key in links_data.keys():
wikipedia_id, wikipedia_title = key
if links_data[key]["links_xml"] != None:
links = links_data[key]["links_xml"]
total["xml"] = total["xml"] + len(links)
for link in links:
title = link["href_unquoted"]
if title in title2parsed_obj:
title2parsed_obj[title]["num_incoming_links"] = (
title2parsed_obj[title].get("num_incoming_links", 0) + 1
)
found["xml"] = found["xml"] + 1
else:
not_found["xml"].append(link)
else:
links = links_data[key]["links_regex"]
total["regex"] = total["regex"] + len(links)
for link in links:
title = link["href_unquoted"]
if title in title2parsed_obj:
title2parsed_obj[title]["num_incoming_links"] = (
title2parsed_obj[title].get("num_incoming_links", 0) + 1
)
found["regex"] = found["regex"] + 1
else:
not_found["regex"].append(link)
print(
"Matched {:2f}% using only the title".format(
(found["xml"] + found["regex"]) * 100 / (total["xml"] + total["regex"])
)
)
# Counting using the index
wikidataid2count = {}
for link in not_found["xml"] + not_found["regex"]:
title = link["href_unquoted"]
title = title.replace(" ", "_")
if title in linktitle2wikidataid:
wikidata_id = linktitle2wikidataid[title]
wikidataid2count[wikidata_id] = wikidataid2count.get(wikidata_id, 0) + 1
found["xml"] = found["xml"] + 1
elif title.capitalize() in linktitle2wikidataid:
wikidata_id = linktitle2wikidataid[title.capitalize()]
wikidataid2count[wikidata_id] = wikidataid2count.get(wikidata_id, 0) + 1
found["xml"] = found["xml"] + 1
print(
"Matched {:2f}% by additionally using the title to wikidataid index".format(
(found["xml"] + found["regex"]) * 100 / (total["xml"] + total["regex"])
)
)
# Adding the counts from the index to the original dictionary
updated = 0
wikidata_info = 0
wikidata_id_from_index = 0
for key in title2parsed_obj:
parsed_obj = title2parsed_obj[key]
wikidata_id = None
if parsed_obj.get("wikidata_info", None) is not None:
        wikidata_info += 1
if parsed_obj["wikidata_info"].get("wikidata_id", None) is not None:
wikidata_id = parsed_obj["wikidata_info"]["wikidata_id"]
else:
if parsed_obj.get("wikidata_id_from_index", None) is not None:
wikidata_id_from_index += 1
wikidata_id = parsed_obj["wikidata_id_from_index"]
if (wikidata_id is not None) and (wikidata_id in wikidataid2count):
parsed_obj["num_incoming_links"] = (
parsed_obj.get("num_incoming_links", 0) + wikidataid2count[wikidata_id]
)
updated += 1
print("Dumping", output_file_path)
pickle.dump(title2parsed_obj, open(output_file_path, "wb"), protocol=4)
# Include unprocessed data and dump it together with the processed data
# (convenient if we want to extend the data that we use)
for wikipedia_title in title2parsed_obj.keys():
wikipedia_id = title2parsed_obj[wikipedia_title]["wikipedia_id"]
key = wikipedia_id, wikipedia_title
title2parsed_obj[wikipedia_title]["links_data"] = {
"links_xml": links_data[key]["links_xml"],
"links_regex": links_data[key]["links_regex"],
}
title2parsed_obj[wikipedia_title]["lines_full_text"] = full_num_tokens_data[key][
"lines"
]
output_file_name = "title2parsed_obj_full_data.p"
output_file_path = os.path.join(data_folder, output_file_name)
print("Dumping", output_file_path)
pickle.dump(title2parsed_obj, open(output_file_path, "wb"), protocol=4)
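# --- Illustrative sketch (added; not part of the original BLINK file) ---
# Shape of one title2parsed_obj entry in the second dump; the keys come from
# the code above, but all values here are invented for illustration:
# title2parsed_obj["Anarchism"] == {
#     "wikipedia_id": "12",
#     "wikipedia_title": "Anarchism",
#     "num_tokens": 9000,
#     "num_incoming_links": 2500,
#     "wikidata_info": {"wikidata_id": "Q6199", ...},
#     "links_data": {"links_xml": [...], "links_regex": [...]},
#     "lines_full_text": [...],
# }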
|
BLINK-main
|
blink/candidate_retrieval/enrich_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sys
import pickle
import subprocess
import blink.candidate_retrieval.dataset as D
import re
import os
ESCAPE_CHARS_RE = re.compile(r'(?<!\\)(?P<char>[&|+\-!(){}[\]\/^"~*?:])')
def solr_escape(string):
if (string == "OR") or (string == "AND"):
return string.lower()
interior = r"\s+(OR|AND)\s+"
start = r"^(OR|AND) "
end = r" (OR|AND)$"
string = re.sub(interior, lambda x: x.group(0).lower(), string)
string = re.sub(start, lambda x: x.group(0).lower(), string)
string = re.sub(end, lambda x: x.group(0).lower(), string)
return ESCAPE_CHARS_RE.sub(r"\\\g<char>", string)
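# --- Illustrative sketch (added; not part of the original BLINK file) ---
# solr_escape backslash-escapes Solr query metacharacters and lowercases the
# bare boolean operators OR/AND. Hypothetical inputs:
# solr_escape("C++ (programming)")  -> "C\+\+ \(programming\)"
# solr_escape("black OR white")     -> "black or white"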
linktitle2id = None
def get_wikidata_id_from_link_name(link):
global linktitle2id
if linktitle2id is None:
path_to_file = "data/KB_data/linktitle2wikidataid.p"
if os.path.isfile(path_to_file):
linktitle2id = pickle.load(open(path_to_file, "rb"))
else:
subprocess.call(
"./blink/candidate_retrieval/scripts/generate_wiki2wikidata_mapping.sh"
)
linktitle2id = pickle.load(open(path_to_file, "rb"))
return linktitle2id.get(link, None)
def get_datasets(get_test_dataset=False, get_pregenerated_candidates_wikidata_id=False):
train_and_benchmarking_data_dir = "data/train_and_benchmark_data"
datadir = os.path.join(
train_and_benchmarking_data_dir, "generated/test_train_data/"
)
conll_path = os.path.join(
train_and_benchmarking_data_dir, "basic_data/test_datasets/"
)
person_path = os.path.join(
train_and_benchmarking_data_dir, "basic_data/p_e_m_data/persons.txt"
)
p_e_m_path = os.path.join(train_and_benchmarking_data_dir, "basic_data/p_e_m_data/")
added_params = {
"generate_cands": False,
"generate_ments_and_cands": False,
"candidate_generator_type": "p_e_m",
"p_e_m_data_path": p_e_m_path,
}
conll = D.CoNLLDataset(datadir, person_path, conll_path, added_params)
dev_datasets = [
("aida-A", conll.testA),
("aida-B", conll.testB),
("msnbc", conll.msnbc),
("aquaint", conll.aquaint),
("ace2004", conll.ace2004),
("clueweb", conll.clueweb),
("wikipedia", conll.wikipedia),
]
if get_test_dataset:
dev_datasets.append(("aida-train", conll.train))
not_found = []
total = 0
for ds_name, dataset in dev_datasets:
print("Processing dataset:", ds_name)
for doc_name, content in dataset.items():
for m in content:
total += 1
link = m["gold"][0]
wikidata_id = get_wikidata_id_from_link_name(link)
if wikidata_id is None:
not_found.append(m)
m["gold_wikidata_id"] = wikidata_id
                if get_pregenerated_candidates_wikidata_id:
cands = []
for candidate in m["candidates"]:
link, prob = candidate
wikidata_id = get_wikidata_id_from_link_name(link)
cands.append((wikidata_id, link, prob))
m["candidates_wikidata_ids"] = cands
print("Number of entities:", total)
print(
"Wikidata ID not found for:",
len(not_found),
"({:.3f} %)".format(len(not_found) * 1.0 / total),
)
return dev_datasets
def get_sent_context(mention, key, solr_escaped=True):
if not solr_escaped:
mention_data_key = "sent_context_orig"
else:
mention_data_key = "sent_context"
if key.endswith("next"):
if key.endswith("prev_next"):
res = "{} {} {}".format(
""
if mention[mention_data_key][0] is None
else mention[mention_data_key][0],
mention[mention_data_key][1],
""
if mention[mention_data_key][2] is None
else mention[mention_data_key][2],
)
else:
res = "{} {}".format(
mention[mention_data_key][1],
""
if mention[mention_data_key][2] is None
else mention[mention_data_key][2],
)
elif key.endswith("prev"):
res = "{} {}".format(
""
if mention[mention_data_key][0] is None
else mention[mention_data_key][0],
mention[mention_data_key][1],
)
else:
res = mention[mention_data_key][1]
return res.strip()
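# --- Illustrative sketch (added; not part of the original BLINK file) ---
# get_sent_context stitches together (prev, current, next) sentences depending
# on the key suffix. A hypothetical mention:
# m = {"sent_context": ("Prev .", "Curr .", None)}
# get_sent_context(m, "sent_context_prev")      -> "Prev . Curr ."
# get_sent_context(m, "sent_context_prev_next") -> "Prev . Curr ."
# get_sent_context(m, "sent_context_curr")      -> "Curr ."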
def get_list_of_mentions(dev_datasets):
mentions = []
total_invalid = 0
total_valid = 0
for ds_name, dataset in dev_datasets:
invalid = 0
valid = 0
print("Processing dataset:", ds_name)
for doc_name, content in dataset.items():
sentences = content[0]["conll_doc"]["sentences"]
for m in content:
gold_wikidata_id = m["gold_wikidata_id"]
left_context, right_context = m["context"]
m["mention_orig"] = m["mention"]
m["mention"] = solr_escape(m["mention"])
if left_context != "EMPTYCTXT":
left_context_orig = left_context
left_context = solr_escape(left_context)
else:
left_context = ""
if right_context != "EMPTYCTXT":
right_context_orig = right_context
right_context = solr_escape(right_context)
else:
right_context = ""
m["left_context_orig"] = left_context_orig
m["right_context_orig"] = right_context_orig
m["query_context"] = "{} {} {}".format(
left_context, m["mention"], right_context
).strip()
m["query_context_orig"] = "{} {} {}".format(
left_context_orig, m["mention_orig"], right_context_orig
).strip()
truncated_left_context = " ".join(left_context.split(" ")[-25:])
truncated_right_context = " ".join(right_context.split(" ")[:25])
m["query_truncated_25_context"] = "{} {} {}".format(
truncated_left_context, m["mention"], truncated_right_context
).strip()
truncated_left_context = " ".join(left_context.split(" ")[-10:])
truncated_right_context = " ".join(right_context.split(" ")[:10])
m["query_truncated_10_context"] = "{} {} {}".format(
truncated_left_context, m["mention"], truncated_right_context
).strip()
m["dataset_name"] = ds_name
m["doc_name"] = doc_name
sent_id, start, end = (
m["conll_m"]["sent_id"],
m["conll_m"]["start"],
m["conll_m"]["end"],
)
prev_sent_id = sent_id - 1
next_sent_id = sent_id + 1
sent_orig = " ".join(sentences[sent_id]).strip()
m["left_query_sent_context_orig"] = " ".join(sentences[sent_id][:start])
m["right_query_sent_context_orig"] = " ".join(sentences[sent_id][end:])
sent = solr_escape(sent_orig)
# try:
# context_parts_lower = '{} {} {}'.format(m['left_query_sent_context_orig'], m['mention_orig'], m['right_query_sent_context_orig']).strip().lower()
# context_orig_lower = sent_orig.lower()
# assert(context_parts_lower == context_orig_lower)
# except:
# print(context_parts_lower)
# print(context_orig_lower)
# input("")
if prev_sent_id > 0:
prev_sent_orig = " ".join(sentences[prev_sent_id])
prev_sent = solr_escape(prev_sent_orig)
else:
prev_sent_orig = None
prev_sent = None
if next_sent_id < len(sentences):
next_sent_orig = " ".join(sentences[next_sent_id])
next_sent = solr_escape(next_sent_orig)
else:
next_sent_orig = None
next_sent = None
m["sent_context"] = (prev_sent, sent, next_sent)
m["sent_context_orig"] = (prev_sent_orig, sent_orig, next_sent_orig)
# m['sent_context_prev'] = get_sent_context(m, 'sent_context_prev')
# m['sent_context_next'] = get_sent_context(m, 'sent_context_next')
# m['sent_context_prev_next'] = get_sent_context(m, 'sent_context_prev_next')
# m['sent_context_curr'] = get_sent_context(m, 'sent_context_curr')
if gold_wikidata_id is None:
invalid += 1
continue
mentions.append(m)
valid += 1
print("Invalid: ", invalid)
print("Valid: ", valid)
total_invalid += invalid
total_valid += valid
return mentions
def write_candidate_generation_results_for_a_run_to_file(run, results_dump_file_path):
txt_file_path = "{}.txt".format(results_dump_file_path)
with open(txt_file_path, "a+") as file:
id_ = "Q: `{}` === K: `{}` === ID: `{}`".format(
run[0]["query"], run[0]["keys"], run[0]["dump_file_id"]
)
res = " --- ".join(
["{} - {:.2f}".format(key, run[1][key]) for key in sorted(run[1].keys())]
)
file.write("{} === {}\n".format(res, id_))
def write_candidate_generation_execution_time_to_file(
results_dump_file_path, execution_time
):
txt_file_path = "{}.txt".format(results_dump_file_path)
with open(txt_file_path, "a+") as file:
file.write("The execution took: {} minutes".format(execution_time))
def write_candidate_generation_results_to_file(
runs, results_dump_file_path, execution_time=None
):
runs.sort(key=lambda run: -run[1]["overall"])
for run in runs:
write_candidate_generation_results_for_a_run_to_file(
run, results_dump_file_path
)
if execution_time is not None:
write_candidate_generation_execution_time_to_file(
results_dump_file_path, execution_time
)
|
BLINK-main
|
blink/candidate_retrieval/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import pysolr
import sys
import utils
def mention_data_summary(mention):
return (mention["mention"], mention["query_truncated_25_context"])
class Simple_Candidate_Generator:
def __init__(self, params):
self.collection_name = params["collection_name"]
self.solr_address = params["solr_address"]
self.solr = pysolr.Solr(
"{}/solr/{}".format(self.solr_address, self.collection_name),
always_commit=True,
timeout=100,
)
self.rows = params["rows"]
self.query_data = params["query_data"]
self.c = 0
self.query_arguments = {
"fl": "* score",
"rows": self.rows,
"defType": "edismax",
}
if params["boosting"] is not None:
self.query_arguments["bf"] = params["boosting"]
def _filter_result(self, cand):
wikidata_id = cand.get("wikidata_id", None)
res = {
"wikidata_id": wikidata_id,
"wikipedia_id": cand["id"],
"wikipedia_title": cand["title"],
}
res["aliases"] = cand.get("aliases", None)
sents = []
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
sents.append(cand.get(key, ""))
res["sentences"] = sents
res["num_incoming_links"] = cand.get("num_incoming_links", 0)
res["score"] = cand["score"]
return res
def get_candidates(
self,
mention_data,
verbose=False,
print_number_of_docs_retrieved=False,
print_query_flag=False,
):
solr = self.solr
query_data = self.query_data
# Build query
keys = query_data["keys"]
query = query_data["string"]
query = query.format(
*[
mention_data[key]
if key in mention_data
else utils.get_sent_context(mention_data, key)
for key in keys
]
)
if print_query_flag:
print("Query: {}".format(query))
try:
results = solr.search(query, **self.query_arguments)
except Exception as e:
exc_type, exc_obj, exc_tb = sys.exc_info()
print("\nException:", exc_type, "- line", exc_tb.tb_lineno)
print(repr(e))
c = self.c
c += 1
if c < 10:
print(
"Exception with: \ncollection_name: {} \nquery: {} \nmention_data: {} \ndataset_name: {}\nquery_args: {}\n".format(
self.collection_name,
query,
mention_data_summary(mention_data),
mention_data["dataset_name"],
str(self.query_arguments),
)
)
return []
if print_number_of_docs_retrieved:
print("Retrieved {0} result(s).".format(len(results)))
        # Return the full retrieved objects (debugging purposes)
if verbose:
return results
# Filter the data in the retrieved objects, while ignoring the ones without a wikidata_id (only a very small fraction in the dataset; they are noise)
filtered_results = [
self._filter_result(cand) for cand in results.docs if "wikidata_id" in cand
]
return filtered_results
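# --- Illustrative sketch (added; not part of the original BLINK file) ---
# A hypothetical params dict for Simple_Candidate_Generator. The query template
# is filled positionally with the mention fields named in "keys"; the Solr
# field names below are assumptions about the collection schema, although
# title, aliases, sent_desc_k, and num_incoming_links all appear in
# _filter_result above:
# params = {
#     "collection_name": "wikipedia",
#     "solr_address": "http://localhost:8983",
#     "rows": 100,
#     "boosting": "log(sum(num_incoming_links,1))",
#     "query_data": {
#         "keys": ["mention", "query_truncated_25_context"],
#         "string": "title:({0}) OR aliases:({0}) OR sent_desc_1:({1})",
#     },
# }
# candidates = Simple_Candidate_Generator(params).get_candidates(mention_data)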
class Pregenerated_Candidates_Data_Fetcher:
def __init__(self, parameters):
solr_address = "http://localhost:8983/solr/{}".format(
parameters["collection_name"]
)
query_arguments = {"fl": "* score", "rows": 1, "defType": "edismax"}
query_arguments["bf"] = "log(sum(num_incoming_links,1))"
self.solr = pysolr.Solr(solr_address, always_commit=True, timeout=100)
self.query_arguments = query_arguments
def get_candidates_data(self, candidates_wikidata_ids):
candidates_rich = []
for candidate in candidates_wikidata_ids:
candidate_data = self.get_candidate_data_for_wikidata_id(candidate[0])
            if candidate_data is not None:
candidate_data["p_e_m_score"] = candidate[2]
candidates_rich.append(candidate_data)
return candidates_rich
@staticmethod
def filter_result(cand, detailed=True):
wikidata_id = cand.get("wikidata_id", None)
res = {
"wikidata_id": wikidata_id,
"wikipedia_id": cand["id"],
"wikipedia_title": cand["title"],
}
if detailed:
res["aliases"] = cand.get("aliases", None)
sents = []
for k in range(0, 10):
key = "sent_desc_{}".format(k + 1)
sents.append(cand.get(key, ""))
res["sentences"] = sents
return res
def get_candidate_data_for_wikidata_id(self, wikidata_id):
results = self.solr.search(
"wikidata_id:{}".format(wikidata_id), **self.query_arguments
)
if len(results) == 0:
return None
filtered_results = [
Pregenerated_Candidates_Data_Fetcher.filter_result(cand)
for cand in results.docs
if "wikidata_id" in cand
]
return filtered_results[0]
|
BLINK-main
|
blink/candidate_retrieval/candidate_generators.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import xml.etree.ElementTree as ET
import io
import re
import argparse
import os
import pickle
import sys
parser = argparse.ArgumentParser()
parser.add_argument(
"--input", type=str, help="The full path to the file to process", required=True
)
parser.add_argument(
"--output", type=str, help="The full path to the output file", required=True
)
args = parser.parse_args()
input_file_path = args.input
output_file_path = args.output
if not os.path.isfile(input_file_path):
    print("Input file `{}` doesn't exist!".format(input_file_path))
    sys.exit()
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
xml_end_tag = "</doc>"
entities_with_duplicate_titles = set()
title2id = {}
id_title2parsed_obj = {}
num_lines = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
num_lines += 1
c = 0
with io.open(input_file_path, mode="rt", encoding="utf-8", errors="ignore") as f:
for line in f:
c += 1
if c % 1000000 == 0:
print("Processed: {:.2f}%".format(c * 100 / num_lines))
if line.startswith("<doc id="):
doc_xml = ET.fromstring("{}{}".format(line, xml_end_tag))
doc_attr = doc_xml.attrib
lines = []
continue
if line.startswith("</doc>"):
temp_obj = {}
temp_obj["url"] = doc_attr["url"]
temp_obj["lines"] = lines
text = " ".join([l for l in lines if l != ""])
temp_obj["num_tokens"] = len(text.split(" "))
id_, title = doc_attr["id"], doc_attr["title"]
key = (id_, title)
id_title2parsed_obj[key] = temp_obj
# check for duplicate titles
# if title in title2id:
# entities_with_duplicate_titles.add(id_)
# entities_with_duplicate_titles.add(title2id[title])
# print("DUPLICATE TITLE:", id_, title2id[title])
# else:
# title2id[title] = id_
continue
# if it is not a document start or end tag, add it to lines
lines.append(line.strip())
print("Processed: {:.2f}%".format(c * 100 / num_lines))
print("Dumping", output_file_path)
pickle.dump(id_title2parsed_obj, open(output_file_path, "wb"), protocol=4)
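# --- Illustrative sketch (added; not part of the original BLINK file) ---
# Each '<doc ...>' header line is closed with a synthetic '</doc>' so that
# ElementTree can read its attributes. A hypothetical WikiExtractor header:
# line = '<doc id="12" url="https://en.wikipedia.org/wiki?curid=12" title="Anarchism">'
# ET.fromstring("{}{}".format(line, xml_end_tag)).attrib
# -> {'id': '12', 'url': 'https://en.wikipedia.org/wiki?curid=12', 'title': 'Anarchism'}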
|
BLINK-main
|
blink/candidate_retrieval/process_wiki_extractor_output_full.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import bz2
import sys
import pickle
import os
import json
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--input",
type=str,
help="The full path to the wikidata dump for processing",
required=True,
)
parser.add_argument(
"--output", type=str, help="The full path to the output folder", required=True
)
args = parser.parse_args()
input_file_path = args.input
output_file_path = args.output
if not os.path.isfile(input_file_path):
    print("Input file `{}` doesn't exist!".format(input_file_path))
    sys.exit()
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
id_title2parsed_obj = {}
num_lines = 0
with bz2.open(input_file_path, "rt") as f:
for line in f:
num_lines += 1
c = 0
with bz2.open(input_file_path, "rt") as f:
for line in f:
c += 1
if c % 1000000 == 0:
print("Processed: {:.2f}%".format(c * 100 / num_lines))
try:
json_obj = json.loads(line.strip().strip(","))
if ("sitelinks" not in json_obj) or ("enwiki" not in json_obj["sitelinks"]):
continue
id_, title = json_obj["id"], json_obj["sitelinks"]["enwiki"]["title"]
key = id_, title
parsed_obj = {}
if "en" in json_obj["aliases"]:
parsed_obj["aliases"] = [
alias["value"] for alias in json_obj["aliases"]["en"]
]
else:
parsed_obj["aliases"] = None
if "en" in json_obj["labels"]:
parsed_obj["wikidata_label"] = json_obj["labels"]["en"]["value"]
else:
parsed_obj["wikidata_label"] = None
if "en" in json_obj["descriptions"]:
parsed_obj["description"] = json_obj["descriptions"]["en"]["value"]
else:
parsed_obj["description"] = None
if "enwikiquote" in json_obj["sitelinks"]:
parsed_obj["enwikiquote_title"] = json_obj["sitelinks"]["enwikiquote"][
"title"
]
id_title2parsed_obj[key] = parsed_obj
except Exception as e:
line = line.strip().strip(",")
if line == "[" or line == "]":
continue
exc_type, exc_obj, exc_tb = sys.exc_info()
print("Exception:", exc_type, "- line", exc_tb.tb_lineno)
if len(line) < 30:
print("Failed line:", line)
print("Processed: {:.2f}%".format(c * 100 / num_lines))
print("Dumping", output_file_path)
pickle.dump(id_title2parsed_obj, open(output_file_path, "wb"), protocol=4)
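# --- Illustrative sketch (added; not part of the original BLINK file) ---
# A hypothetical wikidata dump line that the loop above keeps (ids and values
# invented for illustration):
# {"id": "Q76", "sitelinks": {"enwiki": {"title": "Barack Obama"}},
#  "aliases": {"en": [{"value": "Obama"}]},
#  "labels": {"en": {"value": "Barack Obama"}},
#  "descriptions": {"en": {"value": "44th president of the United States"}}}
# It is stored under the key ("Q76", "Barack Obama") with aliases=["Obama"],
# wikidata_label="Barack Obama", and the description above.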
|
BLINK-main
|
blink/candidate_retrieval/process_wikidata.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import pickle
import nltk.data
import argparse
import sys
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument(
"--output", type=str, help="The full path to the data folder", required=True
)
args = parser.parse_args()
data_folder = args.output
output_file_name = "title2enriched_parsed_obj_plus.p"
output_file_path = os.path.join(data_folder, output_file_name)
if os.path.isfile(output_file_path):
print("Output file `{}` already exists!".format(output_file_path))
sys.exit()
print("Reading title2parsed_obj data")
title2enriched_parsed_obj_file_name = "title2enriched_parsed_obj.p"
title2enriched_parsed_obj_path = os.path.join(
data_folder, title2enriched_parsed_obj_file_name
)
title2parsed_obj = pickle.load(open(title2enriched_parsed_obj_path, "rb"))
print("Reading title2parsed_obj_full_data")
title2parsed_obj_full_data_file_name = "title2parsed_obj_full_data.p"
title2parsed_obj_full_data_full_path = os.path.join(
data_folder, title2parsed_obj_full_data_file_name
)
title2parsed_obj_full = pickle.load(open(title2parsed_obj_full_data_full_path, "rb"))
sent_detector = nltk.data.load("tokenizers/punkt/english.pickle")
for title in tqdm(title2parsed_obj_full.keys()):
lines = title2parsed_obj_full[title]["lines_full_text"][1:] # remove title
lines = [
line for line in lines if not line.startswith("Section::")
] # remove section titles
lines = [
line.strip() for line in lines if line != ""
] # remove blank lines and trailing spaces
text = " ".join(lines)
sentences = sent_detector.tokenize(text)
sentences = [sent.strip() for sent in sentences]
for k in range(0, min(10, len(sentences))):
key = "sent_desc_{}".format(k + 1)
value = sentences[k]
title2parsed_obj[title][key] = value
print("Dumping", output_file_path)
pickle.dump(title2parsed_obj, open(output_file_path, "wb"), protocol=4)
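# --- Illustrative sketch (added; not part of the original BLINK file) ---
# The punkt tokenizer used above splits the cleaned article text into
# sentences; the first (up to) 10 become sent_desc_1 .. sent_desc_10.
# sent_detector.tokenize("First sentence. Second sentence.")
# -> ['First sentence.', 'Second sentence.']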
|
BLINK-main
|
blink/candidate_retrieval/process_intro_sents.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import sqlite3
import pickle
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument(
"--input_file",
type=str,
help="The full path to the precomputed index",
required=True,
)
parser.add_argument(
"--output_folder",
type=str,
help="The full path to the output folder",
required=True,
)
args = parser.parse_args()
precomp_index_path = args.input_file
output_folder_path = args.output_folder
output_file = os.path.join(output_folder_path, "linktitle2wikidataid.p")
if not os.path.isfile(output_file):
conn = sqlite3.connect(precomp_index_path)
cursorObj = conn.cursor()
cursorObj.execute("SELECT wikipedia_title, wikidata_id FROM mapping")
data = cursorObj.fetchall()
linktitle2wikidataid = {item[0]: item[1] for item in data}
pickle.dump(linktitle2wikidataid, open(output_file, "wb"))
else:
print("Output file `{}` already exists!".format(output_file))
output_file = os.path.join(output_folder_path, "wikipediaid2wikidataid.p")
if not os.path.isfile(output_file):
conn = sqlite3.connect(precomp_index_path)
cursorObj = conn.cursor()
cursorObj.execute("SELECT wikipedia_id, wikidata_id FROM mapping")
data = cursorObj.fetchall()
wikipediaid2wikidataid = {item[0]: item[1] for item in data}
pickle.dump(wikipediaid2wikidataid, open(output_file, "wb"))
else:
print("Output file `{}` already exists!".format(output_file))
|
BLINK-main
|
blink/candidate_retrieval/generate_wiki2wikidata_mappings.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
class Evaluator:
def __init__(self, data):
self.data = data
def candidate_generation(
self, max_rank=None, save_gold_pos=False, save_pregenerated_gold_pos=False
):
has_gold_per_dataset = {}
total_per_dataset = {}
recall = {}
processed_mentions = self.data
if max_rank is None:
print("Max rank: None")
else:
print("Max rank", max_rank)
for mention in processed_mentions:
dataset_name = mention["dataset_name"]
gold_wikidata_id = mention["gold_wikidata_id"]
gold_pos = -1
for idx, cand in enumerate(mention["generated_candidates"]):
cand_wikidata_id = cand["wikidata_id"]
if gold_wikidata_id == cand_wikidata_id:
gold_pos = idx + 1 # Because idx starts at 0
break
if save_gold_pos:
mention["gold_pos"] = gold_pos
if gold_pos > 0 and ((max_rank is None) or gold_pos <= max_rank):
has_gold = has_gold_per_dataset.get(dataset_name, 0) + 1
has_gold_per_dataset[dataset_name] = has_gold
if save_pregenerated_gold_pos:
pre_gen_gold_pos = -1
for idx, cand in enumerate(mention["candidates_data"]):
cand_wikidata_id = cand["wikidata_id"]
if gold_wikidata_id == cand_wikidata_id:
pre_gen_gold_pos = idx + 1 # Because idx starts at 0
break
mention["pre_gen_candidates_gold_pos"] = pre_gen_gold_pos
total = total_per_dataset.get(dataset_name, 0) + 1
total_per_dataset[dataset_name] = total
total = 0
has_gold = 0
for dataset_name in total_per_dataset:
has_gold_ds = has_gold_per_dataset.get(dataset_name, 0)
total_ds = total_per_dataset[dataset_name]
has_gold += has_gold_ds
total += total_ds
recall[dataset_name] = has_gold_ds / total_ds
print("Dataset:", dataset_name)
print(
"Recall (w.r.t candidate generation): {:.3f}".format(
recall[dataset_name]
)
)
recall["overall"] = has_gold / total
print(
"Overal recall (w.r.t candidate generation): {:.3f}".format(
recall["overall"]
)
)
self.has_gold_per_dataset = has_gold_per_dataset
self.total_per_dataset = total_per_dataset
self.total = total
self.has_gold = has_gold
self.recall = recall
def candidate_generation_recall_at(self, ax=None, max_rank=None):
processed_mentions = self.data
total_num_of_docs = len(processed_mentions)
gold_positions = np.array(
[
mention["gold_pos"]
for mention in processed_mentions
if mention["gold_pos"] >= 0
]
)
        if ax is None:
fig = plt.figure(figsize=(7, 7))
ax = plt.subplot(111)
ax.set_ylabel(str("Recall"))
ax.set_xlabel(str("True entity rank"))
rank_count_pairs = sorted(Counter(gold_positions).items(), key=lambda x: x[0])
# rank_count_pairs = rank_count_pairs[:k]
counts = [i[1] for i in rank_count_pairs]
recall = np.cumsum(counts) / total_num_of_docs * 100
rankings = [i[0] for i in rank_count_pairs]
if max_rank is not None:
for idx, rank in enumerate(rankings):
if rank > max_rank:
rankings = rankings[:idx]
recall = recall[:idx]
break
ax.plot(rankings, recall)
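# --- Illustrative usage sketch (added; not part of the original BLINK file) ---
# Expected shape of the processed-mention records consumed by Evaluator; the
# ids and dataset name below are hypothetical:
# mention = {
#     "dataset_name": "aida-B",
#     "gold_wikidata_id": "Q76",
#     "generated_candidates": [{"wikidata_id": "Q42"}, {"wikidata_id": "Q76"}],
# }
# evaluator = Evaluator([mention])
# evaluator.candidate_generation(save_gold_pos=True)  # gold_pos becomes 2, recall 1.0
# evaluator.candidate_generation_recall_at(max_rank=100)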
|
BLINK-main
|
blink/candidate_retrieval/evaluator.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import pickle
import json
import emoji
import sys
import os
import io
import blink.candidate_retrieval.utils as utils
from tqdm import tqdm
parser = argparse.ArgumentParser()
parser.add_argument(
"--processed_mention_data_file_path",
type=str,
help="The full path to the mention data file",
default="data/mention_dumps/train_and_eval_data",
)
parser.add_argument(
"--dump_folder_path",
type=str,
help="The path to the dump folder",
default="data/train_and_benchmark_processed_json",
)
# Keep pregenerated candidates
parser.add_argument(
"--keep_pregenerated_candidates",
action="store_true",
help="Whether to keep the candidates given with the dataset.",
)
args = parser.parse_args()
print(args)
dump_folder = args.dump_folder_path
path_to_processed_mention_data = args.processed_mention_data_file_path
os.makedirs(dump_folder, exist_ok=True)
print("Reading data")
run_dump = pickle.load(open(path_to_processed_mention_data, "rb"))
mentions = run_dump["mentions"]
dataset2processed_mentions = {}
for m in tqdm(mentions):
mention_obj = {}
mention_obj["candidates"] = m["generated_candidates"]
# Gold data
mention_obj["gold_pos"] = m["gold_pos"]
mention_obj["gold"] = m["gold"]
# Mention data
mention_obj["text"] = m["mention_orig"]
# mention_obj['query_context_50'] = m['query_context_orig']
# mention_obj['query_context_sent_prev_curr_next'] = utils.get_sent_context(m, 'prev_next', solr_escaped=False)
# mention_obj['tagged_context_50'] = (m['left_context_orig'], m['right_context_orig'])
prev_sent = m["sent_context_orig"][0] if m["sent_context_orig"][0] != None else ""
next_sent = m["sent_context_orig"][2] if m["sent_context_orig"][2] != None else ""
mention_obj["tagged_query_context_sent_prev_curr_next"] = (
"{} {}".format(prev_sent, m["left_query_sent_context_orig"]).strip(),
"{} {}".format(m["right_query_sent_context_orig"], next_sent).strip(),
)
mention_obj["tagged_query_context_sent_curr"] = (
m["left_query_sent_context_orig"].strip(),
m["right_query_sent_context_orig"].strip(),
)
# Keep the candidates given with the dataset (used for the purposes of comparison with baseline)
if args.keep_pregenerated_candidates:
mention_obj["pregenerated_candidates"] = m["candidates_data"]
mention_obj["pregenerated_gold_pos"] = m["pre_gen_candidates_gold_pos"]
# Add data to output dics
dataset_name = m["dataset_name"]
processed_mentions = dataset2processed_mentions.get(dataset_name, [])
processed_mentions.append(mention_obj)
dataset2processed_mentions[dataset_name] = processed_mentions
for dataset_name in dataset2processed_mentions:
print("Dumping dataset:", dataset_name)
processed_mentions = dataset2processed_mentions[dataset_name]
file_name = "{}.jsonl".format(dataset_name)
txt_file_path = os.path.join(dump_folder, file_name)
# with open(txt_file_path, "w+") as file:
with io.open(txt_file_path, mode="w", encoding="utf-8") as file:
for idx, mention in enumerate(processed_mentions):
json_string = json.dumps(mention)
file.write(json_string)
if idx != (len(processed_mentions) - 1):
file.write("\n")
|
BLINK-main
|
blink/candidate_retrieval/json_data_generation.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from collections import OrderedDict
from tqdm import tqdm
from pytorch_transformers.modeling_utils import CONFIG_NAME, WEIGHTS_NAME
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.modeling_roberta import (
RobertaConfig,
RobertaModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.tokenization_roberta import RobertaTokenizer
from blink.common.ranker_base import BertEncoder, get_model_obj
from blink.common.optimizer import get_bert_optimizer
from blink.common.params import ENT_START_TAG, ENT_END_TAG, ENT_TITLE_TAG
def load_crossencoder(params):
# Init model
crossencoder = CrossEncoderRanker(params)
return crossencoder
class CrossEncoderModule(torch.nn.Module):
def __init__(self, params, tokenizer):
super(CrossEncoderModule, self).__init__()
model_path = params["bert_model"]
if params.get("roberta"):
encoder_model = RobertaModel.from_pretrained(model_path)
else:
encoder_model = BertModel.from_pretrained(model_path)
encoder_model.resize_token_embeddings(len(tokenizer))
self.encoder = BertEncoder(
encoder_model,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.config = self.encoder.bert_model.config
def forward(
self, token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
):
embedding_ctxt = self.encoder(token_idx_ctxt, segment_idx_ctxt, mask_ctxt)
return embedding_ctxt.squeeze(-1)
class CrossEncoderRanker(torch.nn.Module):
def __init__(self, params, shared=None):
super(CrossEncoderRanker, self).__init__()
self.params = params
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
if params.get("roberta"):
self.tokenizer = RobertaTokenizer.from_pretrained(params["bert_model"],)
else:
self.tokenizer = BertTokenizer.from_pretrained(
params["bert_model"], do_lower_case=params["lowercase"]
)
special_tokens_dict = {
"additional_special_tokens": [
ENT_START_TAG,
ENT_END_TAG,
ENT_TITLE_TAG,
],
}
self.tokenizer.add_special_tokens(special_tokens_dict)
self.NULL_IDX = self.tokenizer.pad_token_id
self.START_TOKEN = self.tokenizer.cls_token
self.END_TOKEN = self.tokenizer.sep_token
# init model
self.build_model()
if params["path_to_model"] is not None:
self.load_model(params["path_to_model"])
self.model = self.model.to(self.device)
self.data_parallel = params.get("data_parallel")
if self.data_parallel:
self.model = torch.nn.DataParallel(self.model)
def load_model(self, fname, cpu=False):
if cpu:
            state_dict = torch.load(fname, map_location="cpu")
else:
state_dict = torch.load(fname)
self.model.load_state_dict(state_dict)
def save(self, output_dir):
self.save_model(output_dir)
self.tokenizer.save_vocabulary(output_dir)
def build_model(self):
self.model = CrossEncoderModule(self.params, self.tokenizer)
def save_model(self, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = get_model_obj(self.model)
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def get_optimizer(self, optim_states=None, saved_optim_type=None):
return get_bert_optimizer(
[self.model],
self.params["type_optimization"],
self.params["learning_rate"],
fp16=self.params.get("fp16"),
)
def score_candidate(self, text_vecs, context_len):
# Encode contexts first
num_cand = text_vecs.size(1)
text_vecs = text_vecs.view(-1, text_vecs.size(-1))
token_idx_ctxt, segment_idx_ctxt, mask_ctxt = to_bert_input(
text_vecs, self.NULL_IDX, context_len,
)
embedding_ctxt = self.model(token_idx_ctxt, segment_idx_ctxt, mask_ctxt,)
return embedding_ctxt.view(-1, num_cand)
def forward(self, input_idx, label_input, context_len):
scores = self.score_candidate(input_idx, context_len)
loss = F.cross_entropy(scores, label_input, reduction="mean")
return loss, scores
def to_bert_input(token_idx, null_idx, segment_pos):
""" token_idx is a 2D tensor int.
return token_idx, segment_idx and mask
"""
segment_idx = token_idx * 0
if segment_pos > 0:
segment_idx[:, segment_pos:] = token_idx[:, segment_pos:] > 0
mask = token_idx != null_idx
# nullify elements in case self.NULL_IDX was not 0
# token_idx = token_idx * mask.long()
return token_idx, segment_idx, mask
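# --- Illustrative usage sketch (added; not part of the original BLINK file) ---
# to_bert_input splits a packed [context ; candidate] sequence into BERT inputs:
# segment ids flip to 1 for non-padding tokens after `segment_pos`, and the
# attention mask zeroes out padding. The toy token ids below are hypothetical.
if __name__ == "__main__":
    toy = torch.LongTensor([[101, 7592, 102, 2023, 102, 0, 0]])
    ids, seg, mask = to_bert_input(toy, null_idx=0, segment_pos=3)
    print(seg.tolist())   # [[0, 0, 0, 1, 1, 0, 0]]
    print(mask.tolist())  # [[True, True, True, True, True, False, False]]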
|
BLINK-main
|
blink/crossencoder/crossencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import sys
import numpy as np
from tqdm import tqdm
import blink.biencoder.data_process as data
from blink.common.params import ENT_START_TAG, ENT_END_TAG
def prepare_crossencoder_mentions(
tokenizer,
samples,
max_context_length=32,
mention_key="mention",
context_key="context",
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
):
context_input_list = [] # samples X 128
for sample in tqdm(samples):
context_tokens = data.get_context_representation(
sample,
tokenizer,
max_context_length,
mention_key,
context_key,
ent_start_token,
ent_end_token,
)
tokens_ids = context_tokens["ids"]
context_input_list.append(tokens_ids)
context_input_list = np.asarray(context_input_list)
return context_input_list
def prepare_crossencoder_candidates(
tokenizer, labels, nns, id2title, id2text, max_cand_length=128, topk=100
):
START_TOKEN = tokenizer.cls_token
END_TOKEN = tokenizer.sep_token
candidate_input_list = [] # samples X topk=10 X 128
label_input_list = [] # samples
idx = 0
for label, nn in zip(labels, nns):
candidates = []
label_id = -1
for jdx, candidate_id in enumerate(nn[:topk]):
if label == candidate_id:
label_id = jdx
rep = data.get_candidate_representation(
id2text[candidate_id],
tokenizer,
max_cand_length,
id2title[candidate_id],
)
tokens_ids = rep["ids"]
assert len(tokens_ids) == max_cand_length
candidates.append(tokens_ids)
label_input_list.append(label_id)
candidate_input_list.append(candidates)
idx += 1
sys.stdout.write("{}/{} \r".format(idx, len(labels)))
sys.stdout.flush()
label_input_list = np.asarray(label_input_list)
candidate_input_list = np.asarray(candidate_input_list)
return label_input_list, candidate_input_list
def filter_crossencoder_tensor_input(
context_input_list, label_input_list, candidate_input_list
):
# remove the - 1 : examples for which gold is not among the candidates
context_input_list_filtered = [
x
for x, y, z in zip(context_input_list, candidate_input_list, label_input_list)
if z != -1
]
label_input_list_filtered = [
z
for x, y, z in zip(context_input_list, candidate_input_list, label_input_list)
if z != -1
]
candidate_input_list_filtered = [
y
for x, y, z in zip(context_input_list, candidate_input_list, label_input_list)
if z != -1
]
return (
context_input_list_filtered,
label_input_list_filtered,
candidate_input_list_filtered,
)
def prepare_crossencoder_data(
tokenizer, samples, labels, nns, id2title, id2text, keep_all=False
):
# encode mentions
context_input_list = prepare_crossencoder_mentions(tokenizer, samples)
# encode candidates (output of biencoder)
label_input_list, candidate_input_list = prepare_crossencoder_candidates(
tokenizer, labels, nns, id2title, id2text
)
if not keep_all:
# remove examples where the gold entity is not among the candidates
(
context_input_list,
label_input_list,
candidate_input_list,
) = filter_crossencoder_tensor_input(
context_input_list, label_input_list, candidate_input_list
)
else:
label_input_list = [0] * len(label_input_list)
context_input = torch.LongTensor(context_input_list)
label_input = torch.LongTensor(label_input_list)
candidate_input = torch.LongTensor(candidate_input_list)
return (
context_input,
candidate_input,
label_input,
)
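# --- Illustrative sketch (added; not part of the original BLINK file) ---
# filter_crossencoder_tensor_input drops examples whose gold label is -1,
# i.e. the gold entity was not among the biencoder's top-k. Hypothetical lists:
# ctx, lab, cand = [[1], [2], [3]], [0, -1, 0], [[[10]], [[20]], [[30]]]
# filter_crossencoder_tensor_input(ctx, lab, cand)
# -> ([[1], [3]], [0, 0], [[[10]], [[30]]])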
|
BLINK-main
|
blink/crossencoder/data_process.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
import pickle
import torch
import json
import sys
import io
import random
import time
import numpy as np
from multiprocessing.pool import ThreadPool
from tqdm import tqdm, trange
from collections import OrderedDict
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.optimization import WarmupLinearSchedule
from pytorch_transformers.tokenization_bert import BertTokenizer
import blink.candidate_retrieval.utils
from blink.crossencoder.crossencoder import CrossEncoderRanker, load_crossencoder
import logging
import blink.candidate_ranking.utils as utils
import blink.biencoder.data_process as data
from blink.biencoder.zeshel_utils import DOC_PATH, WORLDS, world_to_id
from blink.common.optimizer import get_bert_optimizer
from blink.common.params import BlinkParser
logger = None
def modify(context_input, candidate_input, max_seq_length):
new_input = []
context_input = context_input.tolist()
candidate_input = candidate_input.tolist()
for i in range(len(context_input)):
cur_input = context_input[i]
cur_candidate = candidate_input[i]
mod_input = []
for j in range(len(cur_candidate)):
# remove [CLS] token from candidate
sample = cur_input + cur_candidate[j][1:]
sample = sample[:max_seq_length]
mod_input.append(sample)
new_input.append(mod_input)
return torch.LongTensor(new_input)
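# --- Illustrative sketch (added; not part of the original BLINK file) ---
# modify() concatenates each context with each of its candidates (dropping the
# candidate's leading [CLS]) and truncates to max_seq_length. Hypothetical ids:
# ctx  = torch.LongTensor([[101, 5, 6]])           # 1 mention, length 3
# cand = torch.LongTensor([[[101, 7, 8, 102],      # 2 candidates, length 4
#                           [101, 9, 10, 102]]])
# modify(ctx, cand, max_seq_length=6).shape == torch.Size([1, 2, 6])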
def evaluate(reranker, eval_dataloader, device, logger, context_length, zeshel=False, silent=True):
reranker.model.eval()
if silent:
iter_ = eval_dataloader
else:
iter_ = tqdm(eval_dataloader, desc="Evaluation")
results = {}
eval_accuracy = 0.0
nb_eval_examples = 0
nb_eval_steps = 0
acc = {}
tot = {}
world_size = len(WORLDS)
for i in range(world_size):
acc[i] = 0.0
tot[i] = 0.0
all_logits = []
cnt = 0
for step, batch in enumerate(iter_):
if zeshel:
src = batch[2]
cnt += 1
batch = tuple(t.to(device) for t in batch)
context_input = batch[0]
label_input = batch[1]
with torch.no_grad():
eval_loss, logits = reranker(context_input, label_input, context_length)
logits = logits.detach().cpu().numpy()
label_ids = label_input.cpu().numpy()
tmp_eval_accuracy, eval_result = utils.accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
all_logits.extend(logits)
nb_eval_examples += context_input.size(0)
if zeshel:
for i in range(context_input.size(0)):
src_w = src[i].item()
acc[src_w] += eval_result[i]
tot[src_w] += 1
nb_eval_steps += 1
normalized_eval_accuracy = -1
if nb_eval_examples > 0:
normalized_eval_accuracy = eval_accuracy / nb_eval_examples
if zeshel:
macro = 0.0
num = 0.0
for i in range(len(WORLDS)):
if acc[i] > 0:
acc[i] /= tot[i]
macro += acc[i]
num += 1
if num > 0:
logger.info("Macro accuracy: %.5f" % (macro / num))
logger.info("Micro accuracy: %.5f" % normalized_eval_accuracy)
else:
if logger:
logger.info("Eval accuracy: %.5f" % normalized_eval_accuracy)
results["normalized_accuracy"] = normalized_eval_accuracy
results["logits"] = all_logits
return results
def get_optimizer(model, params):
return get_bert_optimizer(
[model],
params["type_optimization"],
params["learning_rate"],
fp16=params.get("fp16"),
)
def get_scheduler(params, optimizer, len_train_data, logger):
batch_size = params["train_batch_size"]
grad_acc = params["gradient_accumulation_steps"]
epochs = params["num_train_epochs"]
num_train_steps = int(len_train_data / batch_size / grad_acc) * epochs
num_warmup_steps = int(num_train_steps * params["warmup_proportion"])
scheduler = WarmupLinearSchedule(
optimizer, warmup_steps=num_warmup_steps, t_total=num_train_steps,
)
logger.info(" Num optimization steps = %d" % num_train_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return scheduler
def main(params):
model_output_path = params["output_path"]
if not os.path.exists(model_output_path):
os.makedirs(model_output_path)
logger = utils.get_logger(params["output_path"])
# Init model
reranker = CrossEncoderRanker(params)
tokenizer = reranker.tokenizer
model = reranker.model
# utils.save_model(model, tokenizer, model_output_path)
device = reranker.device
n_gpu = reranker.n_gpu
if params["gradient_accumulation_steps"] < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
params["gradient_accumulation_steps"]
)
)
    # An effective batch size of `x`, when accumulating gradients across `y`
    # batches, is achieved with a per-step batch size of `z = x / y`.
# args.gradient_accumulation_steps = args.gradient_accumulation_steps // n_gpu
params["train_batch_size"] = (
params["train_batch_size"] // params["gradient_accumulation_steps"]
)
train_batch_size = params["train_batch_size"]
eval_batch_size = params["eval_batch_size"]
grad_acc_steps = params["gradient_accumulation_steps"]
# Fix the random seeds
seed = params["seed"]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if reranker.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
max_seq_length = params["max_seq_length"]
context_length = params["max_context_length"]
fname = os.path.join(params["data_path"], "train.t7")
train_data = torch.load(fname)
context_input = train_data["context_vecs"]
candidate_input = train_data["candidate_vecs"]
label_input = train_data["labels"]
if params["debug"]:
max_n = 200
context_input = context_input[:max_n]
candidate_input = candidate_input[:max_n]
label_input = label_input[:max_n]
context_input = modify(context_input, candidate_input, max_seq_length)
if params["zeshel"]:
src_input = train_data['worlds'][:len(context_input)]
train_tensor_data = TensorDataset(context_input, label_input, src_input)
else:
train_tensor_data = TensorDataset(context_input, label_input)
train_sampler = RandomSampler(train_tensor_data)
train_dataloader = DataLoader(
train_tensor_data,
sampler=train_sampler,
batch_size=params["train_batch_size"]
)
fname = os.path.join(params["data_path"], "valid.t7")
valid_data = torch.load(fname)
context_input = valid_data["context_vecs"]
candidate_input = valid_data["candidate_vecs"]
label_input = valid_data["labels"]
if params["debug"]:
max_n = 200
context_input = context_input[:max_n]
candidate_input = candidate_input[:max_n]
label_input = label_input[:max_n]
context_input = modify(context_input, candidate_input, max_seq_length)
if params["zeshel"]:
src_input = valid_data["worlds"][:len(context_input)]
valid_tensor_data = TensorDataset(context_input, label_input, src_input)
else:
valid_tensor_data = TensorDataset(context_input, label_input)
valid_sampler = SequentialSampler(valid_tensor_data)
valid_dataloader = DataLoader(
valid_tensor_data,
sampler=valid_sampler,
batch_size=params["eval_batch_size"]
)
# evaluate before training
results = evaluate(
reranker,
valid_dataloader,
device=device,
logger=logger,
context_length=context_length,
zeshel=params["zeshel"],
silent=params["silent"],
)
number_of_samples_per_dataset = {}
time_start = time.time()
utils.write_to_file(
os.path.join(model_output_path, "training_params.txt"), str(params)
)
logger.info("Starting training")
logger.info(
"device: {} n_gpu: {}, distributed training: {}".format(device, n_gpu, False)
)
optimizer = get_optimizer(model, params)
scheduler = get_scheduler(params, optimizer, len(train_tensor_data), logger)
model.train()
best_epoch_idx = -1
best_score = -1
num_train_epochs = params["num_train_epochs"]
for epoch_idx in trange(int(num_train_epochs), desc="Epoch"):
tr_loss = 0
results = None
if params["silent"]:
iter_ = train_dataloader
else:
iter_ = tqdm(train_dataloader, desc="Batch")
part = 0
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input = batch[0]
label_input = batch[1]
loss, _ = reranker(context_input, label_input, context_length)
# if n_gpu > 1:
# loss = loss.mean() # mean() to average on multi-gpu.
if grad_acc_steps > 1:
loss = loss / grad_acc_steps
tr_loss += loss.item()
if (step + 1) % (params["print_interval"] * grad_acc_steps) == 0:
logger.info(
"Step {} - epoch {} average loss: {}\n".format(
step,
epoch_idx,
tr_loss / (params["print_interval"] * grad_acc_steps),
)
)
tr_loss = 0
loss.backward()
if (step + 1) % grad_acc_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), params["max_grad_norm"]
)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if (step + 1) % (params["eval_interval"] * grad_acc_steps) == 0:
logger.info("Evaluation on the development dataset")
evaluate(
reranker,
valid_dataloader,
device=device,
logger=logger,
context_length=context_length,
zeshel=params["zeshel"],
silent=params["silent"],
)
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}_{}".format(epoch_idx, part)
)
part += 1
utils.save_model(model, tokenizer, epoch_output_folder_path)
model.train()
logger.info("\n")
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}".format(epoch_idx)
)
utils.save_model(model, tokenizer, epoch_output_folder_path)
# reranker.save(epoch_output_folder_path)
output_eval_file = os.path.join(epoch_output_folder_path, "eval_results.txt")
results = evaluate(
reranker,
valid_dataloader,
device=device,
logger=logger,
context_length=context_length,
zeshel=params["zeshel"],
silent=params["silent"],
)
ls = [best_score, results["normalized_accuracy"]]
li = [best_epoch_idx, epoch_idx]
best_score = ls[np.argmax(ls)]
best_epoch_idx = li[np.argmax(ls)]
logger.info("\n")
execution_time = (time.time() - time_start) / 60
utils.write_to_file(
os.path.join(model_output_path, "training_time.txt"),
"The training took {} minutes\n".format(execution_time),
)
logger.info("The training took {} minutes\n".format(execution_time))
# save the best model in the parent_dir
logger.info("Best performance in epoch: {}".format(best_epoch_idx))
params["path_to_model"] = os.path.join(
model_output_path, "epoch_{}".format(best_epoch_idx)
)
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_training_args()
parser.add_eval_args()
# args = argparse.Namespace(**params)
args = parser.parse_args()
print(args)
params = args.__dict__
main(params)
|
BLINK-main
|
blink/crossencoder/train_cross.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import logging
import os
import torch
from tqdm import tqdm
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from pytorch_transformers.tokenization_bert import BertTokenizer
from blink.biencoder.biencoder import BiEncoderRanker
import blink.biencoder.data_process as data
import blink.biencoder.nn_prediction as nnquery
import blink.candidate_ranking.utils as utils
from blink.biencoder.zeshel_utils import WORLDS, load_entity_dict_zeshel, Stats
from blink.common.params import BlinkParser
def load_entity_dict(logger, params, is_zeshel):
if is_zeshel:
return load_entity_dict_zeshel(logger, params)
path = params.get("entity_dict_path", None)
assert path is not None, "Error! entity_dict_path is empty."
entity_list = []
logger.info("Loading entity description from path: " + path)
with open(path, 'rt') as f:
for line in f:
sample = json.loads(line.rstrip())
title = sample['title']
text = sample.get("text", "").strip()
entity_list.append((title, text))
if params["debug"] and len(entity_list) > 200:
break
return entity_list
# zeshel version of get candidate_pool_tensor
def get_candidate_pool_tensor_zeshel(
entity_dict,
tokenizer,
max_seq_length,
logger,
):
candidate_pool = {}
for src in range(len(WORLDS)):
if entity_dict.get(src, None) is None:
continue
logger.info("Get candidate desc to id for pool %s" % WORLDS[src])
candidate_pool[src] = get_candidate_pool_tensor(
entity_dict[src],
tokenizer,
max_seq_length,
logger,
)
return candidate_pool
def get_candidate_pool_tensor_helper(
entity_desc_list,
tokenizer,
max_seq_length,
logger,
is_zeshel,
):
if is_zeshel:
return get_candidate_pool_tensor_zeshel(
entity_desc_list,
tokenizer,
max_seq_length,
logger,
)
else:
return get_candidate_pool_tensor(
entity_desc_list,
tokenizer,
max_seq_length,
logger,
)
def get_candidate_pool_tensor(
entity_desc_list,
tokenizer,
max_seq_length,
logger,
):
# TODO: add multiple thread process
logger.info("Convert candidate text to id")
cand_pool = []
for entity_desc in tqdm(entity_desc_list):
        if isinstance(entity_desc, tuple):
title, entity_text = entity_desc
else:
title = None
entity_text = entity_desc
rep = data.get_candidate_representation(
entity_text,
tokenizer,
max_seq_length,
title,
)
cand_pool.append(rep["ids"])
cand_pool = torch.LongTensor(cand_pool)
return cand_pool
def encode_candidate(
reranker,
candidate_pool,
encode_batch_size,
silent,
logger,
is_zeshel,
):
if is_zeshel:
src = 0
cand_encode_dict = {}
for src, cand_pool in candidate_pool.items():
logger.info("Encoding candidate pool %s" % WORLDS[src])
cand_pool_encode = encode_candidate(
reranker,
cand_pool,
encode_batch_size,
silent,
logger,
is_zeshel=False,
)
cand_encode_dict[src] = cand_pool_encode
return cand_encode_dict
reranker.model.eval()
device = reranker.device
sampler = SequentialSampler(candidate_pool)
data_loader = DataLoader(
candidate_pool, sampler=sampler, batch_size=encode_batch_size
)
if silent:
iter_ = data_loader
else:
iter_ = tqdm(data_loader)
cand_encode_list = None
for step, batch in enumerate(iter_):
cands = batch
cands = cands.to(device)
cand_encode = reranker.encode_candidate(cands)
if cand_encode_list is None:
cand_encode_list = cand_encode
else:
cand_encode_list = torch.cat((cand_encode_list, cand_encode))
return cand_encode_list
def load_or_generate_candidate_pool(
tokenizer,
params,
logger,
cand_pool_path,
):
candidate_pool = None
is_zeshel = params.get("zeshel", None)
if cand_pool_path is not None:
# try to load candidate pool from file
try:
logger.info("Loading pre-generated candidate pool from: ")
logger.info(cand_pool_path)
candidate_pool = torch.load(cand_pool_path)
        except Exception:
logger.info("Loading failed. Generating candidate pool")
if candidate_pool is None:
# compute candidate pool from entity list
entity_desc_list = load_entity_dict(logger, params, is_zeshel)
candidate_pool = get_candidate_pool_tensor_helper(
entity_desc_list,
tokenizer,
params["max_cand_length"],
logger,
is_zeshel,
)
if cand_pool_path is not None:
logger.info("Saving candidate pool.")
torch.save(candidate_pool, cand_pool_path)
return candidate_pool
def main(params):
output_path = params["output_path"]
if not os.path.exists(output_path):
os.makedirs(output_path)
logger = utils.get_logger(params["output_path"])
# Init model
reranker = BiEncoderRanker(params)
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
cand_encode_path = params.get("cand_encode_path", None)
# candidate encoding is not pre-computed.
# load/generate candidate pool to compute candidate encoding.
cand_pool_path = params.get("cand_pool_path", None)
candidate_pool = load_or_generate_candidate_pool(
tokenizer,
params,
logger,
cand_pool_path,
)
candidate_encoding = None
if cand_encode_path is not None:
# try to load candidate encoding from path
# if success, avoid computing candidate encoding
try:
logger.info("Loading pre-generated candidate encode path.")
candidate_encoding = torch.load(cand_encode_path)
        except Exception:
logger.info("Loading failed. Generating candidate encoding.")
if candidate_encoding is None:
candidate_encoding = encode_candidate(
reranker,
candidate_pool,
params["encode_batch_size"],
silent=params["silent"],
logger=logger,
is_zeshel=params.get("zeshel", None)
)
if cand_encode_path is not None:
# Save candidate encoding to avoid re-compute
logger.info("Saving candidate encoding to file " + cand_encode_path)
torch.save(candidate_encoding, cand_encode_path)
test_samples = utils.read_dataset(params["mode"], params["data_path"])
logger.info("Read %d test samples." % len(test_samples))
test_data, test_tensor_data = data.process_mention_data(
test_samples,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
context_key=params['context_key'],
silent=params["silent"],
logger=logger,
debug=params["debug"],
)
test_sampler = SequentialSampler(test_tensor_data)
test_dataloader = DataLoader(
test_tensor_data,
sampler=test_sampler,
batch_size=params["eval_batch_size"]
)
save_results = params.get("save_topk_result")
new_data = nnquery.get_topk_predictions(
reranker,
test_dataloader,
candidate_pool,
candidate_encoding,
params["silent"],
logger,
params["top_k"],
params.get("zeshel", None),
save_results,
)
if save_results:
save_data_dir = os.path.join(
params['output_path'],
"top%d_candidates" % params['top_k'],
)
if not os.path.exists(save_data_dir):
os.makedirs(save_data_dir)
save_data_path = os.path.join(save_data_dir, "%s.t7" % params['mode'])
torch.save(new_data, save_data_path)
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_eval_args()
args = parser.parse_args()
print(args)
params = args.__dict__
mode_list = params["mode"].split(',')
for mode in mode_list:
        new_params = params.copy()  # copy so each mode runs with its own params dict
new_params["mode"] = mode
main(new_params)
|
BLINK-main
|
blink/biencoder/eval_biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Utility code for zeshel dataset
import json
import torch
DOC_PATH = "/private/home/ledell/zeshel/data/documents/"
WORLDS = [
'american_football',
'doctor_who',
'fallout',
'final_fantasy',
'military',
'pro_wrestling',
'starwars',
'world_of_warcraft',
'coronation_street',
'muppets',
'ice_hockey',
'elder_scrolls',
'forgotten_realms',
'lego',
'star_trek',
'yugioh'
]
world_to_id = {src : k for k, src in enumerate(WORLDS)}
def load_entity_dict_zeshel(logger, params):
entity_dict = {}
# different worlds in train/valid/test
if params["mode"] == "train":
start_idx = 0
end_idx = 8
elif params["mode"] == "valid":
start_idx = 8
end_idx = 12
else:
start_idx = 12
end_idx = 16
# load data
    for src in WORLDS[start_idx:end_idx]:
fname = DOC_PATH + src + ".json"
cur_dict = {}
doc_list = []
src_id = world_to_id[src]
with open(fname, 'rt') as f:
for line in f:
line = line.rstrip()
item = json.loads(line)
text = item["text"]
doc_list.append(text[:256])
if params["debug"]:
if len(doc_list) > 200:
break
logger.info("Load for world %s." % src)
entity_dict[src_id] = doc_list
return entity_dict
class Stats():
def __init__(self, top_k=1000):
self.cnt = 0
self.hits = []
self.top_k = top_k
self.rank = [1, 4, 8, 16, 32, 64, 100, 128, 256, 512]
self.LEN = len(self.rank)
for i in range(self.LEN):
self.hits.append(0)
def add(self, idx):
self.cnt += 1
if idx == -1:
return
for i in range(self.LEN):
if idx < self.rank[i]:
self.hits[i] += 1
def extend(self, stats):
self.cnt += stats.cnt
for i in range(self.LEN):
self.hits[i] += stats.hits[i]
def output(self):
output_json = "Total: %d examples." % self.cnt
for i in range(self.LEN):
if self.top_k < self.rank[i]:
break
output_json += " r@%d: %.4f" % (self.rank[i], self.hits[i] / float(self.cnt))
return output_json
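# Illustrative usage of Stats (a sketch, not part of the original module):
#     stats = Stats(top_k=100)
#     for rank in [0, 3, 57, -1]:  # -1 means the gold entity was not retrieved
#         stats.add(rank)
#     print(stats.output())  # "Total: 4 examples. r@1: 0.2500 r@4: 0.5000 ..."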
|
BLINK-main
|
blink/biencoder/zeshel_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import logging
import torch
from tqdm import tqdm
import blink.candidate_ranking.utils as utils
from blink.biencoder.zeshel_utils import WORLDS, Stats
def get_topk_predictions(
reranker,
train_dataloader,
candidate_pool,
cand_encode_list,
silent,
logger,
top_k=10,
is_zeshel=False,
save_predictions=False,
):
reranker.model.eval()
device = reranker.device
logger.info("Getting top %d predictions." % top_k)
if silent:
iter_ = train_dataloader
else:
iter_ = tqdm(train_dataloader)
nn_context = []
nn_candidates = []
nn_labels = []
nn_worlds = []
stats = {}
if is_zeshel:
world_size = len(WORLDS)
else:
# only one domain
world_size = 1
candidate_pool = [candidate_pool]
cand_encode_list = [cand_encode_list]
logger.info("World size : %d" % world_size)
for i in range(world_size):
stats[i] = Stats(top_k)
oid = 0
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input, _, srcs, label_ids = batch
src = srcs[0].item()
scores = reranker.score_candidate(
context_input,
None,
cand_encs=cand_encode_list[src].to(device)
)
        values, indices = scores.topk(top_k)
old_src = src
for i in range(context_input.size(0)):
oid += 1
            inds = indices[i]
if srcs[i] != old_src:
src = srcs[i].item()
# not the same domain, need to re-do
new_scores = reranker.score_candidate(
context_input[[i]],
None,
cand_encs=cand_encode_list[src].to(device)
)
_, inds = new_scores.topk(top_k)
inds = inds[0]
pointer = -1
for j in range(top_k):
if inds[j].item() == label_ids[i].item():
pointer = j
break
stats[src].add(pointer)
if pointer == -1:
continue
if not save_predictions:
continue
# add examples in new_data
cur_candidates = candidate_pool[src][inds]
nn_context.append(context_input[i].cpu().tolist())
nn_candidates.append(cur_candidates.cpu().tolist())
nn_labels.append(pointer)
nn_worlds.append(src)
res = Stats(top_k)
for src in range(world_size):
if stats[src].cnt == 0:
continue
if is_zeshel:
logger.info("In world " + WORLDS[src])
output = stats[src].output()
logger.info(output)
res.extend(stats[src])
logger.info(res.output())
nn_context = torch.LongTensor(nn_context)
nn_candidates = torch.LongTensor(nn_candidates)
nn_labels = torch.LongTensor(nn_labels)
nn_data = {
'context_vecs': nn_context,
'candidate_vecs': nn_candidates,
'labels': nn_labels,
}
if is_zeshel:
nn_data["worlds"] = torch.LongTensor(nn_worlds)
return nn_data
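# Shapes of nn_data (a sketch, assuming M mentions whose gold entity appears
# in the top-k): context_vecs [M, max_context_length], candidate_vecs
# [M, top_k, max_cand_length], labels [M] giving the gold position within
# each top-k list.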
|
BLINK-main
|
blink/biencoder/nn_prediction.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
|
BLINK-main
|
blink/biencoder/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import logging
import torch
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, TensorDataset
from pytorch_transformers.tokenization_bert import BertTokenizer
from blink.biencoder.zeshel_utils import world_to_id
from blink.common.params import ENT_START_TAG, ENT_END_TAG, ENT_TITLE_TAG
def select_field(data, key1, key2=None):
if key2 is None:
return [example[key1] for example in data]
else:
return [example[key1][key2] for example in data]
def get_context_representation(
sample,
tokenizer,
max_seq_length,
mention_key="mention",
context_key="context",
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
):
mention_tokens = []
if sample[mention_key] and len(sample[mention_key]) > 0:
mention_tokens = tokenizer.tokenize(sample[mention_key])
mention_tokens = [ent_start_token] + mention_tokens + [ent_end_token]
context_left = sample[context_key + "_left"]
context_right = sample[context_key + "_right"]
context_left = tokenizer.tokenize(context_left)
context_right = tokenizer.tokenize(context_right)
left_quota = (max_seq_length - len(mention_tokens)) // 2 - 1
right_quota = max_seq_length - len(mention_tokens) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota:
if right_add > right_quota:
right_quota += left_quota - left_add
else:
if right_add <= right_quota:
left_quota += right_quota - right_add
context_tokens = (
context_left[-left_quota:] + mention_tokens + context_right[:right_quota]
)
context_tokens = ["[CLS]"] + context_tokens + ["[SEP]"]
input_ids = tokenizer.convert_tokens_to_ids(context_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": context_tokens,
"ids": input_ids,
}
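# Worked example of the truncation quotas above (illustrative numbers):
#     max_seq_length=32, len(mention_tokens)=4
#     -> left_quota = (32 - 4) // 2 - 1 = 13, right_quota = 32 - 4 - 13 - 2 = 13
#     with only 5 left-context tokens, the 8 unused left slots are handed to
#     the right context, so the sequence still fills all 32 positions once
#     [CLS] and [SEP] are added.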
def get_candidate_representation(
candidate_desc,
tokenizer,
max_seq_length,
candidate_title=None,
title_tag=ENT_TITLE_TAG,
):
cls_token = tokenizer.cls_token
sep_token = tokenizer.sep_token
cand_tokens = tokenizer.tokenize(candidate_desc)
if candidate_title is not None:
title_tokens = tokenizer.tokenize(candidate_title)
cand_tokens = title_tokens + [title_tag] + cand_tokens
cand_tokens = cand_tokens[: max_seq_length - 2]
cand_tokens = [cls_token] + cand_tokens + [sep_token]
input_ids = tokenizer.convert_tokens_to_ids(cand_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": cand_tokens,
"ids": input_ids,
}
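# Illustrative output (hypothetical inputs candidate_title="Paris",
# candidate_desc="Capital of France"):
#     tokens ~ [CLS] paris [ENT_TITLE] capital of france [SEP]
#     "ids" is the corresponding id list, zero-padded to max_seq_length.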
def process_mention_data(
samples,
tokenizer,
max_context_length,
max_cand_length,
silent,
mention_key="mention",
context_key="context",
label_key="label",
title_key='label_title',
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
title_token=ENT_TITLE_TAG,
debug=False,
logger=None,
):
processed_samples = []
if debug:
samples = samples[:200]
if silent:
iter_ = samples
else:
iter_ = tqdm(samples)
use_world = True
for idx, sample in enumerate(iter_):
context_tokens = get_context_representation(
sample,
tokenizer,
max_context_length,
mention_key,
context_key,
ent_start_token,
ent_end_token,
)
label = sample[label_key]
title = sample.get(title_key, None)
label_tokens = get_candidate_representation(
label, tokenizer, max_cand_length, title,
)
label_idx = int(sample["label_id"])
record = {
"context": context_tokens,
"label": label_tokens,
"label_idx": [label_idx],
}
if "world" in sample:
src = sample["world"]
src = world_to_id[src]
record["src"] = [src]
use_world = True
else:
use_world = False
processed_samples.append(record)
if debug and logger:
logger.info("====Processed samples: ====")
for sample in processed_samples[:5]:
logger.info("Context tokens : " + " ".join(sample["context"]["tokens"]))
logger.info(
"Context ids : " + " ".join([str(v) for v in sample["context"]["ids"]])
)
logger.info("Label tokens : " + " ".join(sample["label"]["tokens"]))
logger.info(
"Label ids : " + " ".join([str(v) for v in sample["label"]["ids"]])
)
logger.info("Src : %d" % sample["src"][0])
logger.info("Label_id : %d" % sample["label_idx"][0])
context_vecs = torch.tensor(
select_field(processed_samples, "context", "ids"), dtype=torch.long,
)
cand_vecs = torch.tensor(
select_field(processed_samples, "label", "ids"), dtype=torch.long,
)
if use_world:
src_vecs = torch.tensor(
select_field(processed_samples, "src"), dtype=torch.long,
)
label_idx = torch.tensor(
select_field(processed_samples, "label_idx"), dtype=torch.long,
)
data = {
"context_vecs": context_vecs,
"cand_vecs": cand_vecs,
"label_idx": label_idx,
}
if use_world:
data["src"] = src_vecs
tensor_data = TensorDataset(context_vecs, cand_vecs, src_vecs, label_idx)
else:
tensor_data = TensorDataset(context_vecs, cand_vecs, label_idx)
return data, tensor_data
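# Sketch of the returned structures (shapes assume N processed samples):
#     data["context_vecs"]: LongTensor [N, max_context_length]
#     data["cand_vecs"]:    LongTensor [N, max_cand_length]
#     data["label_idx"]:    LongTensor [N, 1], plus data["src"] for zeshel data
# tensor_data zips the same tensors for consumption by a DataLoader.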
|
BLINK-main
|
blink/biencoder/data_process.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
import pickle
import torch
import json
import sys
import io
import random
import time
import numpy as np
from multiprocessing.pool import ThreadPool
from tqdm import tqdm, trange
from collections import OrderedDict
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.optimization import WarmupLinearSchedule
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.modeling_utils import WEIGHTS_NAME
from blink.biencoder.biencoder import BiEncoderRanker, load_biencoder
import logging
import blink.candidate_ranking.utils as utils
import blink.biencoder.data_process as data
from blink.biencoder.zeshel_utils import DOC_PATH, WORLDS, world_to_id
from blink.common.optimizer import get_bert_optimizer
from blink.common.params import BlinkParser
logger = None
# The evaluate function during training uses in-batch negatives:
# for a batch of size B, the labels from the batch are used as label candidates
# B is controlled by the parameter eval_batch_size
def evaluate(
reranker, eval_dataloader, params, device, logger,
):
reranker.model.eval()
if params["silent"]:
iter_ = eval_dataloader
else:
iter_ = tqdm(eval_dataloader, desc="Evaluation")
results = {}
eval_accuracy = 0.0
nb_eval_examples = 0
nb_eval_steps = 0
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input, candidate_input, _, _ = batch
with torch.no_grad():
eval_loss, logits = reranker(context_input, candidate_input)
logits = logits.detach().cpu().numpy()
        # Using in-batch negatives, the label ids lie on the diagonal.
        # Use logits.shape[0] rather than eval_batch_size so the final,
        # possibly smaller batch is scored correctly.
        label_ids = np.arange(logits.shape[0])
tmp_eval_accuracy, _ = utils.accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += context_input.size(0)
nb_eval_steps += 1
normalized_eval_accuracy = eval_accuracy / nb_eval_examples
logger.info("Eval accuracy: %.5f" % normalized_eval_accuracy)
results["normalized_accuracy"] = normalized_eval_accuracy
return results
def get_optimizer(model, params):
return get_bert_optimizer(
[model],
params["type_optimization"],
params["learning_rate"],
fp16=params.get("fp16"),
)
def get_scheduler(params, optimizer, len_train_data, logger):
batch_size = params["train_batch_size"]
grad_acc = params["gradient_accumulation_steps"]
epochs = params["num_train_epochs"]
num_train_steps = int(len_train_data / batch_size / grad_acc) * epochs
num_warmup_steps = int(num_train_steps * params["warmup_proportion"])
scheduler = WarmupLinearSchedule(
optimizer, warmup_steps=num_warmup_steps, t_total=num_train_steps,
)
logger.info(" Num optimization steps = %d" % num_train_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return scheduler
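# Worked example of the schedule arithmetic (illustrative numbers):
#     len_train_data=10000, train_batch_size=32, grad_acc=2, epochs=3
#     -> num_train_steps = int(10000 / 32 / 2) * 3 = 468
#     -> num_warmup_steps = int(468 * 0.1) = 46 at warmup_proportion=0.1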
def main(params):
model_output_path = params["output_path"]
if not os.path.exists(model_output_path):
os.makedirs(model_output_path)
logger = utils.get_logger(params["output_path"])
# Init model
reranker = BiEncoderRanker(params)
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
n_gpu = reranker.n_gpu
if params["gradient_accumulation_steps"] < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
params["gradient_accumulation_steps"]
)
)
    # An effective batch size of `x`, when accumulating gradients across `y`
    # batches, is achieved with a per-step batch size of `z = x / y`.
# args.gradient_accumulation_steps = args.gradient_accumulation_steps // n_gpu
params["train_batch_size"] = (
params["train_batch_size"] // params["gradient_accumulation_steps"]
)
train_batch_size = params["train_batch_size"]
eval_batch_size = params["eval_batch_size"]
grad_acc_steps = params["gradient_accumulation_steps"]
# Fix the random seeds
seed = params["seed"]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if reranker.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
# Load train data
train_samples = utils.read_dataset("train", params["data_path"])
logger.info("Read %d train samples." % len(train_samples))
train_data, train_tensor_data = data.process_mention_data(
train_samples,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
context_key=params["context_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
)
if params["shuffle"]:
train_sampler = RandomSampler(train_tensor_data)
else:
train_sampler = SequentialSampler(train_tensor_data)
train_dataloader = DataLoader(
train_tensor_data, sampler=train_sampler, batch_size=train_batch_size
)
# Load eval data
# TODO: reduce duplicated code here
valid_samples = utils.read_dataset("valid", params["data_path"])
logger.info("Read %d valid samples." % len(valid_samples))
valid_data, valid_tensor_data = data.process_mention_data(
valid_samples,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
context_key=params["context_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
)
valid_sampler = SequentialSampler(valid_tensor_data)
valid_dataloader = DataLoader(
valid_tensor_data, sampler=valid_sampler, batch_size=eval_batch_size
)
# evaluate before training
results = evaluate(
reranker, valid_dataloader, params, device=device, logger=logger,
)
number_of_samples_per_dataset = {}
time_start = time.time()
utils.write_to_file(
os.path.join(model_output_path, "training_params.txt"), str(params)
)
logger.info("Starting training")
logger.info(
"device: {} n_gpu: {}, distributed training: {}".format(device, n_gpu, False)
)
optimizer = get_optimizer(model, params)
scheduler = get_scheduler(params, optimizer, len(train_tensor_data), logger)
model.train()
best_epoch_idx = -1
best_score = -1
num_train_epochs = params["num_train_epochs"]
for epoch_idx in trange(int(num_train_epochs), desc="Epoch"):
tr_loss = 0
results = None
if params["silent"]:
iter_ = train_dataloader
else:
iter_ = tqdm(train_dataloader, desc="Batch")
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input, candidate_input, _, _ = batch
loss, _ = reranker(context_input, candidate_input)
# if n_gpu > 1:
# loss = loss.mean() # mean() to average on multi-gpu.
if grad_acc_steps > 1:
loss = loss / grad_acc_steps
tr_loss += loss.item()
if (step + 1) % (params["print_interval"] * grad_acc_steps) == 0:
logger.info(
"Step {} - epoch {} average loss: {}\n".format(
step,
epoch_idx,
tr_loss / (params["print_interval"] * grad_acc_steps),
)
)
tr_loss = 0
loss.backward()
if (step + 1) % grad_acc_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), params["max_grad_norm"]
)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if (step + 1) % (params["eval_interval"] * grad_acc_steps) == 0:
logger.info("Evaluation on the development dataset")
evaluate(
reranker, valid_dataloader, params, device=device, logger=logger,
)
model.train()
logger.info("\n")
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}".format(epoch_idx)
)
utils.save_model(model, tokenizer, epoch_output_folder_path)
output_eval_file = os.path.join(epoch_output_folder_path, "eval_results.txt")
results = evaluate(
reranker, valid_dataloader, params, device=device, logger=logger,
)
ls = [best_score, results["normalized_accuracy"]]
li = [best_epoch_idx, epoch_idx]
best_score = ls[np.argmax(ls)]
best_epoch_idx = li[np.argmax(ls)]
logger.info("\n")
execution_time = (time.time() - time_start) / 60
utils.write_to_file(
os.path.join(model_output_path, "training_time.txt"),
"The training took {} minutes\n".format(execution_time),
)
logger.info("The training took {} minutes\n".format(execution_time))
# save the best model in the parent_dir
logger.info("Best performance in epoch: {}".format(best_epoch_idx))
params["path_to_model"] = os.path.join(
model_output_path,
"epoch_{}".format(best_epoch_idx),
WEIGHTS_NAME,
)
reranker = load_biencoder(params)
utils.save_model(reranker.model, tokenizer, model_output_path)
if params["evaluate"]:
params["path_to_model"] = model_output_path
evaluate(params, logger=logger)
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_training_args()
parser.add_eval_args()
# args = argparse.Namespace(**params)
args = parser.parse_args()
print(args)
params = args.__dict__
main(params)
|
BLINK-main
|
blink/biencoder/train_biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from pytorch_transformers.modeling_utils import WEIGHTS_NAME, CONFIG_NAME
from blink.common.ranker_base import BertEncoder, get_model_obj
from blink.common.optimizer import get_bert_optimizer
def load_biencoder(params):
# Init model
biencoder = BiEncoderRanker(params)
return biencoder
class BiEncoderModule(torch.nn.Module):
def __init__(self, params):
super(BiEncoderModule, self).__init__()
ctxt_bert = BertModel.from_pretrained(params["bert_model"])
cand_bert = BertModel.from_pretrained(params['bert_model'])
self.context_encoder = BertEncoder(
ctxt_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.cand_encoder = BertEncoder(
cand_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.config = ctxt_bert.config
def forward(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
token_idx_cands,
segment_idx_cands,
mask_cands,
):
embedding_ctxt = None
if token_idx_ctxt is not None:
embedding_ctxt = self.context_encoder(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt
)
embedding_cands = None
if token_idx_cands is not None:
embedding_cands = self.cand_encoder(
token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_ctxt, embedding_cands
class BiEncoderRanker(torch.nn.Module):
def __init__(self, params, shared=None):
super(BiEncoderRanker, self).__init__()
self.params = params
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
# init tokenizer
self.NULL_IDX = 0
self.START_TOKEN = "[CLS]"
self.END_TOKEN = "[SEP]"
self.tokenizer = BertTokenizer.from_pretrained(
params["bert_model"], do_lower_case=params["lowercase"]
)
# init model
self.build_model()
model_path = params.get("path_to_model", None)
if model_path is not None:
self.load_model(model_path)
self.model = self.model.to(self.device)
self.data_parallel = params.get("data_parallel")
if self.data_parallel:
self.model = torch.nn.DataParallel(self.model)
def load_model(self, fname, cpu=False):
        if cpu:
            state_dict = torch.load(fname, map_location="cpu")
else:
state_dict = torch.load(fname)
self.model.load_state_dict(state_dict)
def build_model(self):
self.model = BiEncoderModule(self.params)
def save_model(self, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = get_model_obj(self.model)
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def get_optimizer(self, optim_states=None, saved_optim_type=None):
return get_bert_optimizer(
[self.model],
self.params["type_optimization"],
self.params["learning_rate"],
fp16=self.params.get("fp16"),
)
def encode_context(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
embedding_context, _ = self.model(
token_idx_cands, segment_idx_cands, mask_cands, None, None, None
)
return embedding_context.cpu().detach()
def encode_candidate(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None, token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_cands.cpu().detach()
# TODO: why do we need cpu here?
# return embedding_cands
# Score candidates given context input and label input
    # If cand_encs is provided (pre-computed), cand_vecs is ignored
def score_candidate(
self,
text_vecs,
cand_vecs,
random_negs=True,
cand_encs=None, # pre-computed candidate encoding.
):
# Encode contexts first
token_idx_ctxt, segment_idx_ctxt, mask_ctxt = to_bert_input(
text_vecs, self.NULL_IDX
)
embedding_ctxt, _ = self.model(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt, None, None, None
)
# Candidate encoding is given, do not need to re-compute
# Directly return the score of context encoding and candidate encoding
if cand_encs is not None:
return embedding_ctxt.mm(cand_encs.t())
# Train time. We compare with all elements of the batch
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cand_vecs, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None, token_idx_cands, segment_idx_cands, mask_cands
)
if random_negs:
# train on random negatives
return embedding_ctxt.mm(embedding_cands.t())
else:
# train on hard negatives
embedding_ctxt = embedding_ctxt.unsqueeze(1) # batchsize x 1 x embed_size
            embedding_cands = embedding_cands.unsqueeze(2)  # batchsize x embed_size x 1
scores = torch.bmm(embedding_ctxt, embedding_cands) # batchsize x 1 x 1
scores = torch.squeeze(scores)
return scores
# label_input -- negatives provided
# If label_input is None, train on in-batch negatives
def forward(self, context_input, cand_input, label_input=None):
flag = label_input is None
scores = self.score_candidate(context_input, cand_input, flag)
bs = scores.size(0)
if label_input is None:
            target = torch.arange(bs, dtype=torch.long)
target = target.to(self.device)
loss = F.cross_entropy(scores, target, reduction="mean")
else:
loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
# TODO: add parameters?
loss = loss_fct(scores, label_input)
return loss, scores
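# In-batch negatives, illustrated: for a batch of size 3, score_candidate
# returns a 3x3 matrix whose entry (i, j) scores context i against candidate j.
# The gold candidate sits on the diagonal, so the target is simply [0, 1, 2].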
def to_bert_input(token_idx, null_idx):
""" token_idx is a 2D tensor int.
return token_idx, segment_idx and mask
"""
segment_idx = token_idx * 0
mask = token_idx != null_idx
# nullify elements in case self.NULL_IDX was not 0
token_idx = token_idx * mask.long()
return token_idx, segment_idx, mask
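# Illustrative input/output for to_bert_input (null_idx=0, hypothetical ids):
#     token_idx   = [[101, 2023, 102, 0, 0]]
#     segment_idx = [[0, 0, 0, 0, 0]]
#     mask        = [[True, True, True, False, False]]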
|
BLINK-main
|
blink/biencoder/biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import os
import numpy as np
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from tqdm import tqdm
from pytorch_transformers.optimization import AdamW, WarmupLinearSchedule
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
class BertForReranking(BertPreTrainedModel):
r"""
Inputs:
**input_ids**: ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Indices of input sequence tokens in the vocabulary.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
To match pre-training, BERT input sequence should be formatted with [CLS] and [SEP] tokens as follows:
(a) For sequence pairs:
``tokens: [CLS] is this jack ##son ##ville ? [SEP] no it is not . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0 0 1 1 1 1 1 1``
(b) For single sequences:
``tokens: [CLS] the dog is hairy . [SEP]``
``token_type_ids: 0 0 0 0 0 0 0``
Indices can be obtained using :class:`pytorch_transformers.BertTokenizer`.
See :func:`pytorch_transformers.PreTrainedTokenizer.encode` and
:func:`pytorch_transformers.PreTrainedTokenizer.convert_tokens_to_ids` for details.
**token_type_ids**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Segment token indices to indicate first and second portions of the inputs.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Indices are selected in ``[0, 1]``: ``0`` corresponds to a `sentence A` token, ``1``
corresponds to a `sentence B` token
(see `BERT: Pre-training of Deep Bidirectional Transformers for Language Understanding`_ for more details).
**attention_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(batch_size, num_choices, sequence_length)``:
Mask to avoid performing attention on padding token indices.
The second dimension of the input (`num_choices`) indicates the number of choices to score.
Mask values selected in ``[0, 1]``:
``1`` for tokens that are NOT MASKED, ``0`` for MASKED tokens.
**head_mask**: (`optional`) ``torch.FloatTensor`` of shape ``(num_heads,)`` or ``(num_layers, num_heads)``:
Mask to nullify selected heads of the self-attention modules.
Mask values selected in ``[0, 1]``:
``1`` indicates the head is **not masked**, ``0`` indicates the head is **masked**.
**labels**: (`optional`) ``torch.LongTensor`` of shape ``(batch_size,)``:
Labels for computing the multiple choice classification loss.
Indices should be in ``[0, ..., num_choices]`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above)
Outputs: `Tuple` comprising various elements depending on the configuration (config) and inputs:
**loss**: (`optional`, returned when ``labels`` is provided) ``torch.FloatTensor`` of shape ``(1,)``:
Classification loss.
**classification_scores**: ``torch.FloatTensor`` of shape ``(batch_size, num_choices)`` where `num_choices` is the size of the second dimension
of the input tensors. (see `input_ids` above).
Classification scores (before SoftMax).
**hidden_states**: (`optional`, returned when ``config.output_hidden_states=True``)
list of ``torch.FloatTensor`` (one for the output of each layer + the output of the embeddings)
of shape ``(batch_size, sequence_length, hidden_size)``:
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
**attentions**: (`optional`, returned when ``config.output_attentions=True``)
list of ``torch.FloatTensor`` (one for each layer) of shape ``(batch_size, num_heads, sequence_length, sequence_length)``:
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention heads.
Examples::
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertForMultipleChoice.from_pretrained('bert-base-uncased')
choices = ["Hello, my dog is cute", "Hello, my cat is amazing"]
input_ids = torch.tensor([tokenizer.encode(s) for s in choices]).unsqueeze(0) # Batch size 1, 2 choices
labels = torch.tensor(1).unsqueeze(0) # Batch size 1
outputs = model(input_ids, labels=labels)
loss, classification_scores = outputs[:2]
"""
def __init__(self, config):
super(BertForReranking, self).__init__(config)
self.bert = BertModel(config)
self.dropout = nn.Dropout(config.hidden_dropout_prob)
self.classifier = nn.Linear(config.hidden_size, 1)
self.init_weights()
def forward(
self,
input_ids,
token_type_ids=None,
attention_mask=None,
labels=None,
position_ids=None,
head_mask=None,
entity_mask=None,
):
num_choices = input_ids.shape[1]
# from batch_size x cands x tokens -> (batch_size x cands) x tokens
flat_input_ids = input_ids.view(-1, input_ids.size(-1))
flat_token_type_ids = (
token_type_ids.view(-1, token_type_ids.size(-1))
if token_type_ids is not None
else None
)
flat_attention_mask = (
attention_mask.view(-1, attention_mask.size(-1))
if attention_mask is not None
else None
)
flat_position_ids = (
position_ids.view(-1, position_ids.size(-1))
if position_ids is not None
else None
)
outputs = self.bert(
flat_input_ids,
position_ids=flat_position_ids,
token_type_ids=flat_token_type_ids,
attention_mask=flat_attention_mask,
head_mask=head_mask,
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output)
logits = self.classifier(pooled_output)
reshaped_logits = logits.view(-1, num_choices)
entity_mask = (1.0 - entity_mask) * -1000.0
reshaped_logits = reshaped_logits + entity_mask
outputs = (reshaped_logits,)
if labels is not None:
loss_fct = CrossEntropyLoss()
loss = loss_fct(reshaped_logits, labels)
outputs = (loss,) + outputs
return outputs
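# Note on the entity mask above, with illustrative numbers: entity_mask=[1, 1, 0]
# (third candidate is padding) becomes [0, 0, -1000], which is added to the
# logits so that padded candidates receive negligible probability under the
# softmax and contribute essentially nothing to the cross-entropy loss.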
class BertReranker:
def __init__(self, parameters):
if "path_to_model" not in parameters:
parameters["path_to_model"] = parameters["bert_model"]
self.parameters = parameters
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not parameters["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
# Load the fine-tuned model and the tokenizer used by it
self.model = BertReranker.get_model(parameters)
self.model.to(self.device)
self.tokenizer = BertReranker.get_tokenizer(parameters)
print("The reranking model is loaded")
def rerank(self, mentions, sentences):
model = self.model
tokenizer = self.tokenizer
p = self.parameters
device = self.device
data, tensor_data = BertReranker._process_mentions_for_model(
p["context_key"],
mentions,
tokenizer,
p["max_seq_length"],
p["top_k"],
p["silent"],
sentences=sentences,
)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=p["evaluation_batch_size"]
)
softmax = torch.nn.Softmax(dim=1)
for input_ids, input_mask, segment_ids, mention_ids, entity_mask in tqdm(
dataloader, desc="Inferring"
):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
mention_ids = mention_ids.numpy()
entity_mask = entity_mask.to(device)
with torch.no_grad():
logits = self.model(
input_ids, segment_ids, input_mask, entity_mask=entity_mask
)[0]
probs = softmax(logits)
logits = logits.detach().cpu().numpy()
probs = probs.detach().cpu().numpy()
predictions = np.argmax(logits, axis=1)
for idx, mention_idx in enumerate(mention_ids):
pred = predictions[idx].item()
mentions[mention_idx]["predicted_candidate_idx"] = pred
mentions[mention_idx]["prob_assigned_to_candidate"] = probs[idx][
pred
].item()
return mentions
def get_scheduler_and_optimizer(self, parameters, train_tensor_data, logger):
model = self.model
num_train_optimization_steps = (
int(
len(train_tensor_data)
/ parameters["train_batch_size"]
/ parameters["gradient_accumulation_steps"]
)
* parameters["num_train_epochs"]
)
num_warmup_steps = int(
num_train_optimization_steps * parameters["warmup_proportion"]
)
param_optimizer = list(model.named_parameters())
param_optimizer = [n for n in param_optimizer]
no_decay = ["bias", "LayerNorm.bias", "LayerNorm.weight"]
optimizer_grouped_parameters = [
{
"params": [
p for n, p in param_optimizer if not any(nd in n for nd in no_decay)
],
"weight_decay": 0.01,
},
{
"params": [
p for n, p in param_optimizer if any(nd in n for nd in no_decay)
],
"weight_decay": 0.0,
},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=parameters["learning_rate"],
correct_bias=False,
)
scheduler = WarmupLinearSchedule(
optimizer,
warmup_steps=num_warmup_steps,
t_total=num_train_optimization_steps,
)
logger.info(" Num optimization steps = %d", num_train_optimization_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return optimizer, scheduler
@staticmethod
def get_model(parameters):
model = BertForReranking.from_pretrained(
parameters["path_to_model"],
num_labels=parameters["top_k"],
cache_dir=os.path.join(str(PYTORCH_PRETRAINED_BERT_CACHE), "local"),
)
if parameters["dataparallel_bert"]:
model.bert = torch.nn.DataParallel(model.bert)
print("Data parallel Bert")
return model
@staticmethod
def get_tokenizer(parameters):
tokenizer = BertTokenizer.from_pretrained(
parameters["path_to_model"], do_lower_case=parameters["lowercase_flag"]
)
return tokenizer
@staticmethod
def _get_candidate_representation(
context_tokens, candidate_desc, tokenizer, max_seq_length, max_sub_seq_length
):
"""Tokenizes and truncates description; combines it with the tokenized context and generates one input sample for bert"""
candidate_desc_tokens = tokenizer.tokenize(candidate_desc)
candidate_desc_tokens = candidate_desc_tokens[:max_sub_seq_length]
tokens = (
["[CLS]"] + context_tokens + ["[SEP]"] + candidate_desc_tokens + ["[SEP]"]
)
segment_ids = [0] * (len(context_tokens) + 2) + [1] * (
len(candidate_desc_tokens) + 1
)
input_ids = tokenizer.convert_tokens_to_ids(tokens)
input_mask = [1] * len(input_ids)
# Zero-pad up to the sequence length
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
input_mask += padding
segment_ids += padding
assert len(input_ids) == max_seq_length
assert len(input_mask) == max_seq_length
assert len(segment_ids) == max_seq_length
return {
"tokens": tokens,
"input_ids": input_ids,
"input_mask": input_mask,
"segment_ids": segment_ids,
}
@staticmethod
def _get_mention_context_end2end(mention, sentences):
"""Given a mention and a list of sentences that follow the blink conventions, it returns a left and right context for the mention"""
sent_idx = mention["sent_idx"]
        # Neighbouring sentences are intentionally disabled here; only the
        # mention's own sentence is used as context.
        prev_sent = ""
        next_sent = ""
sent = sentences[sent_idx]
curr_sent_prev = sent[: mention["start_pos"]].strip()
curr_sent_next = sent[mention["end_pos"] :].strip()
left_context = "{} {}".format(prev_sent, curr_sent_prev).strip()
right_context = "{} {}".format(curr_sent_next, next_sent).strip()
return (left_context, right_context)
@staticmethod
def _select_field(samples, field):
"""Helper function that returns a list of lists, each of which contains the information for all candidates for each sample"""
return [
[cand[field] for cand in sample["candidate_features"]] for sample in samples
]
@staticmethod
def _get_context_token_representation(
context_key,
sample,
tokenizer,
max_sub_seq_length,
start_token,
end_token,
mention_text_key="text",
tagged=True,
):
"""Tags the mention, trims the context and concatenates everything to form the context representation"""
mention_tokens = (
[start_token] + tokenizer.tokenize(sample[mention_text_key]) + [end_token]
)
max_sub_seq_length = (max_sub_seq_length - len(mention_tokens)) // 2
context_left, context_right = sample[context_key]
context_left = tokenizer.tokenize(context_left)
context_right = tokenizer.tokenize(context_right)
if len(context_left) > max_sub_seq_length:
context_left = context_left[-max_sub_seq_length:]
if len(context_right) > max_sub_seq_length:
context_right = context_right[:max_sub_seq_length]
context_tokens = context_left + mention_tokens + context_right
return context_tokens
@staticmethod
def _process_mentions_for_model(
context_key,
mentions,
tokenizer,
max_seq_length,
top_k,
silent,
start_token="[unused0]",
end_token="[unused1]",
debug=False,
tagged=True,
sentences=None,
candidates_key="candidates",
gold_key="gold_pos",
logger=None,
):
processed_mentions = []
if debug:
mentions = mentions[:200]
max_sub_seq_length = (max_seq_length - 3) // 2
if silent:
iter_ = mentions
else:
iter_ = tqdm(mentions)
for idx, mention in enumerate(iter_):
            # if sentences is not None, we are processing end2end data for inference
if sentences is not None:
mention[context_key] = BertReranker._get_mention_context_end2end(
mention, sentences
)
context_tokens = BertReranker._get_context_token_representation(
context_key,
mention,
tokenizer,
max_sub_seq_length,
start_token,
end_token,
)
candidates = mention[candidates_key]
candidate_features = []
for candidate in candidates[:top_k]:
candidate_desc = " ".join(candidate["sentences"])
candidate_obj = BertReranker._get_candidate_representation(
context_tokens,
candidate_desc,
tokenizer,
max_seq_length,
max_sub_seq_length,
)
candidate_features.append(candidate_obj)
entity_mask = [1] * len(candidate_features) + [0] * (
top_k - len(candidate_features)
)
if len(candidates) < top_k:
candidate_desc = ""
padding_candidate_obj = BertReranker._get_candidate_representation(
context_tokens,
candidate_desc,
tokenizer,
max_seq_length,
max_sub_seq_length,
)
for _ in range(top_k - len(candidates)):
candidate_features.append(padding_candidate_obj)
assert len(candidate_features) == top_k
assert len(entity_mask) == top_k
if sentences is not None:
processed_mentions.append(
{
"candidate_features": candidate_features,
"mention_idx": idx,
"entity_mask": entity_mask,
}
)
else:
label = mention[gold_key] - 1
processed_mentions.append(
{
"candidate_features": candidate_features,
"label": label,
"entity_mask": entity_mask,
}
)
all_input_ids = torch.tensor(
BertReranker._select_field(processed_mentions, "input_ids"),
dtype=torch.long,
)
all_input_mask = torch.tensor(
BertReranker._select_field(processed_mentions, "input_mask"),
dtype=torch.long,
)
all_segment_ids = torch.tensor(
BertReranker._select_field(processed_mentions, "segment_ids"),
dtype=torch.long,
)
all_entity_masks = torch.tensor(
[s["entity_mask"] for s in processed_mentions], dtype=torch.float
)
data = {
"all_input_ids": all_input_ids,
"all_input_mask": all_input_mask,
"all_segment_ids": all_segment_ids,
"all_entity_masks": all_entity_masks,
}
if sentences is not None:
all_mention_indices = torch.tensor(
[s["mention_idx"] for s in processed_mentions], dtype=torch.long
)
data["all_mention_indices"] = all_mention_indices
tensor_data = TensorDataset(
all_input_ids,
all_input_mask,
all_segment_ids,
all_mention_indices,
all_entity_masks,
)
else:
all_label = torch.tensor(
[s["label"] for s in processed_mentions], dtype=torch.long
)
data["all_label"] = all_label
tensor_data = TensorDataset(
all_input_ids,
all_input_mask,
all_segment_ids,
all_label,
all_entity_masks,
)
        if logger is not None:
logger.info("all_input_ids shape: {}".format(all_input_ids.shape))
logger.info("all_input_mask shape: {}".format(all_input_mask.shape))
logger.info("all_segment_ids shape: {}".format(all_segment_ids.shape))
logger.info("all_entity_masks shape: {}".format(all_entity_masks.shape))
if sentences is not None:
logger.info(
"all_mention_indices shape: {}".format(all_mention_indices.shape)
)
else:
logger.info("all_label shape: {}".format(all_label.shape))
return data, tensor_data
|
BLINK-main
|
blink/candidate_ranking/bert_reranking.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import io
import sys
import json
import torch
import logging
import numpy as np
from collections import OrderedDict
from pytorch_transformers.modeling_utils import CONFIG_NAME, WEIGHTS_NAME
from tqdm import tqdm
from blink.candidate_ranking.bert_reranking import BertReranker
from blink.biencoder.biencoder import BiEncoderRanker
def read_dataset(dataset_name, preprocessed_json_data_parent_folder, debug=False):
file_name = "{}.jsonl".format(dataset_name)
txt_file_path = os.path.join(preprocessed_json_data_parent_folder, file_name)
samples = []
with io.open(txt_file_path, mode="r", encoding="utf-8") as file:
for line in file:
samples.append(json.loads(line.strip()))
if debug and len(samples) > 200:
break
return samples
def filter_samples(samples, top_k, gold_key="gold_pos"):
    if top_k is None:
return samples
filtered_samples = [
sample
for sample in samples
if sample[gold_key] > 0 and sample[gold_key] <= top_k
]
return filtered_samples
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length."""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
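# Illustrative: _truncate_seq_pair(a, b, 6) with len(a)=5, len(b)=4 pops from
# the longer list each round, ending with len(a)=3 and len(b)=3.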
def eval_precision_bm45_dataloader(dataloader, ks=[1, 5, 10], number_of_samples=None):
label_ids = torch.cat([label_ids for _, _, _, label_ids, _ in dataloader])
label_ids = label_ids + 1
p = {}
for k in ks:
p[k] = 0
for label in label_ids:
if label > 0:
for k in ks:
if label <= k:
p[k] += 1
for k in ks:
if number_of_samples is None:
p[k] /= len(label_ids)
else:
p[k] /= number_of_samples
return p
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels), outputs == labels
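# Illustrative: accuracy(np.array([[0.1, 0.9], [0.8, 0.2]]), np.array([1, 0]))
# returns (2, array([True, True])): the count of correct argmax predictions
# and the per-example correctness mask.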
def remove_module_from_state_dict(state_dict):
new_state_dict = OrderedDict()
for key, value in state_dict.items():
name = "".join(key.split(".module"))
new_state_dict[name] = value
return new_state_dict
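# Illustrative: a DataParallel checkpoint key "bert.module.encoder.layer.0.x"
# is renamed to "bert.encoder.layer.0.x" so it loads into a plain module.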
def save_model(model, tokenizer, output_dir):
"""Saves the model and the tokenizer used in the output directory."""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, "module") else model
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_dir)
def get_logger(output_dir=None):
    if output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[
logging.FileHandler(
"{}/log.txt".format(output_dir), mode="a", delay=False
),
logging.StreamHandler(sys.stdout),
],
)
else:
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger('Blink')
    logger.setLevel(logging.DEBUG)
return logger
def write_to_file(path, string, mode="w"):
with open(path, mode) as writer:
writer.write(string)
def get_reranker(parameters):
return BertReranker(parameters)
def get_biencoder(parameters):
return BiEncoderRanker(parameters)
|
BLINK-main
|
blink/candidate_ranking/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
import pickle
import torch
import json
import sys
import io
import random
import time
import numpy as np
import pprint
import shutil
from multiprocessing.pool import ThreadPool
from tqdm import tqdm, trange
from collections import OrderedDict
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from torch.utils.data.distributed import DistributedSampler
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.tokenization_bert import BertTokenizer
import blink.candidate_retrieval.utils
from blink.candidate_ranking.bert_reranking import BertForReranking
import logging
import utils
from evaluate import evaluate_model_on_dataset, evaluate
logger = None
def main(parameters):
# Read model
reranker = utils.get_reranker(parameters)
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
n_gpu = reranker.n_gpu
if parameters["gradient_accumulation_steps"] < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
parameters["gradient_accumulation_steps"]
)
)
    # An effective batch size of `x`, when accumulating gradients across `y`
    # batches, is achieved with a per-step batch size of `z = x / y`.
# args.gradient_accumulation_steps = args.gradient_accumulation_steps // n_gpu
parameters["train_batch_size"] = (
parameters["train_batch_size"] // parameters["gradient_accumulation_steps"]
)
train_batch_size = parameters["train_batch_size"]
evaluation_batch_size = parameters["evaluation_batch_size"]
gradient_accumulation_steps = parameters["gradient_accumulation_steps"]
# Fix the random seeds
seed = parameters["seed"]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
logger = None
number_of_samples_per_dataset = {}
if reranker.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
time_start = time.time()
model_output_path = parameters["model_output_path"]
    # Make sure everything is in order with the output directory
if os.path.exists(model_output_path) and os.listdir(model_output_path):
print(
"Output directory ({}) already exists and is not empty.".format(
model_output_path
)
)
answer = input("Would you like to empty the existing directory? [Y/N]\n")
if answer.strip() == "Y":
print("Deleteing {}...".format(model_output_path))
shutil.rmtree(model_output_path)
else:
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(
model_output_path
)
)
if not os.path.exists(model_output_path):
os.makedirs(model_output_path)
utils.write_to_file(
os.path.join(model_output_path, "training_parameters.txt"), str(parameters)
)
logger = utils.get_logger(model_output_path)
logger.info("Starting training")
logger.info(
"device: {} n_gpu: {}, distributed training: {}".format(device, n_gpu, False)
)
### Load training data
train_dataset_name = "aida-train"
train_samples = utils.read_dataset(
train_dataset_name, parameters["path_to_preprocessed_json_data"]
)
train_samples_filtered = utils.filter_samples(train_samples, parameters["top_k"])
logger.info(
"Retained {} out of {} samples".format(
len(train_samples_filtered), len(train_samples)
)
)
number_of_samples_per_dataset[train_dataset_name] = len(train_samples)
train_data, train_tensor_data = reranker._process_mentions_for_model(
parameters["context_key"],
train_samples_filtered,
tokenizer,
parameters["max_seq_length"],
silent=parameters["silent"],
logger=logger,
top_k=parameters["top_k"],
debug=parameters["debug"],
)
train_sampler = RandomSampler(train_tensor_data)
train_dataloader = DataLoader(
train_tensor_data, sampler=train_sampler, batch_size=train_batch_size
)
###
### Loading dev data
dev_dataset_name = "aida-A"
dev_samples = utils.read_dataset(
dev_dataset_name, parameters["path_to_preprocessed_json_data"]
)
dev_samples_filtered = utils.filter_samples(dev_samples, parameters["top_k"])
logger.info(
"Retained {} out of {} samples".format(
len(dev_samples_filtered), len(dev_samples)
)
)
number_of_samples_per_dataset[dev_dataset_name] = len(dev_samples)
dev_data, dev_tensor_data = reranker._process_mentions_for_model(
parameters["context_key"],
        dev_samples_filtered,
tokenizer,
parameters["max_seq_length"],
silent=parameters["silent"],
logger=logger,
top_k=parameters["top_k"],
debug=parameters["debug"],
)
dev_sampler = SequentialSampler(dev_tensor_data)
dev_dataloader = DataLoader(
dev_tensor_data, sampler=dev_sampler, batch_size=evaluation_batch_size
)
###
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_samples_filtered))
logger.info(" Batch size = %d", train_batch_size)
logger.info(" Gradient accumulation steps = %d", gradient_accumulation_steps)
optimizer, scheduler = reranker.get_scheduler_and_optimizer(
parameters, train_tensor_data, logger
)
best_epoch_idx = -1
best_score = -1
num_train_epochs = parameters["num_train_epochs"]
model.train()
for epoch_idx in trange(int(num_train_epochs), desc="Epoch"):
tr_loss = 0
results = None
for step, batch in enumerate(tqdm(train_dataloader, desc="Batch")):
batch = tuple(t.to(device) for t in batch)
input_ids, input_mask, segment_ids, label_ids, entity_mask = batch
loss, _ = model(
input_ids, segment_ids, input_mask, label_ids, entity_mask=entity_mask
)
# if n_gpu > 1:
# loss = loss.mean() # mean() to average on multi-gpu.
if gradient_accumulation_steps > 1:
loss = loss / gradient_accumulation_steps
tr_loss += loss.item()
if (step + 1) % (
parameters["print_tr_loss_opt_steps_interval"]
* parameters["gradient_accumulation_steps"]
) == 0:
logger.info(
"Step {} - epoch {} average loss: {}\n".format(
step,
epoch_idx,
tr_loss
/ (
parameters["print_tr_loss_opt_steps_interval"]
* gradient_accumulation_steps
),
)
)
tr_loss = 0
loss.backward()
if (step + 1) % gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), parameters["max_grad_norm"]
)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if (step + 1) % (
parameters["dev_evaluation_interval"]
* gradient_accumulation_steps
* train_batch_size
) == 0:
logger.info("Evaluation on the development dataset")
evaluate_model_on_dataset(
model,
dev_dataloader,
dev_dataset_name,
device=device,
logger=logger,
number_of_samples=number_of_samples_per_dataset[dev_dataset_name],
)
model.train()
logger.info("\n")
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}".format(epoch_idx)
)
utils.save_model(model, tokenizer, epoch_output_folder_path)
output_eval_file = os.path.join(epoch_output_folder_path, "eval_results.txt")
results = evaluate_model_on_dataset(
model,
dev_dataloader,
dev_dataset_name,
device=device,
logger=logger,
path_to_file_to_write_results=output_eval_file,
number_of_samples=number_of_samples_per_dataset[dev_dataset_name],
)
ls = [best_score, results["normalized_accuracy"]]
li = [best_epoch_idx, epoch_idx]
best_score = ls[np.argmax(ls)]
best_epoch_idx = li[np.argmax(ls)]
logger.info("\n")
execution_time = (time.time() - time_start) / 60
utils.write_to_file(
os.path.join(model_output_path, "training_time.txt"),
"The training took {} minutes\n".format(execution_time),
)
logger.info("The training took {} minutes\n".format(execution_time))
# save the best model in the parent_dir
logger.info("Best performance in epoch: {}".format(best_epoch_idx))
parameters["path_to_model"] = os.path.join(
model_output_path, "epoch_{}".format(best_epoch_idx)
)
reranker = utils.get_reranker(parameters)
utils.save_model(reranker.model, tokenizer, model_output_path)
if parameters["evaluate"]:
parameters["path_to_model"] = model_output_path
evaluate(parameters, logger=logger)
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--path_to_preprocessed_json_data",
default="data/train_and_benchmark_processed_json",
type=str,
help="The path to the train and benchmarking data.",
)
parser.add_argument(
"--bert_model",
default="bert-large-cased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--model_output_path",
default=None,
type=str,
required=True,
help="The output directory where the trained model is to be dumped.",
)
parser.add_argument(
"--context_key", default="tagged_query_context_sent_prev_curr_next", type=str
)
parser.add_argument(
"--lowercase_flag",
action="store_true",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument("--top_k", default=80, type=int)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--evaluate", action="store_true", help="Whether to run evaluation."
)
parser.add_argument(
"--full_evaluation",
action="store_true",
help="Whether to run the evaluation on all datasets.",
)
parser.add_argument(
"--evaluate_with_pregenerated_candidates",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--output_eval_file",
default=None,
type=str,
help="The txt file where the the evaluation results will be written.",
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
parser.add_argument(
"--train_batch_size", default=8, type=int, help="Total batch size for training."
)
parser.add_argument(
"--evaluation_batch_size",
default=4,
type=int,
help="Total batch size for evaluation.",
)
parser.add_argument(
"--dataparallel_bert",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument("--max_grad_norm", default=1.0, type=float)
parser.add_argument(
"--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=3,
type=int,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--print_tr_loss_opt_steps_interval",
type=int,
default=20,
help="Interval of loss printing",
)
parser.add_argument(
"--dev_evaluation_interval",
type=int,
default=160,
help="Interval for evaluation during training",
)
parser.add_argument(
"--save_interval", type=int, default=1, help="Interval for model saving"
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether not to use CUDA when available"
)
parser.add_argument(
"--seed", type=int, default=12345, help="random seed for initialization"
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=8,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
# args = argparse.Namespace(**params)
args = parser.parse_args()
print(args)
parameters = args.__dict__
main(parameters)
|
BLINK-main
|
blink/candidate_ranking/train.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import time
import utils
import torch
import argparse
import os
from bert_reranking import BertReranker
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm
def evaluate_model_on_dataset(
model,
dataloader,
dataset_name,
device,
logger,
number_of_samples,
eval_bm45_acc=False,
path_to_file_to_write_results=None,
):
model.eval()
eval_accuracy = 0
nb_eval_steps, nb_eval_examples = 0, 0
for input_ids, input_mask, segment_ids, label_ids, entity_mask in tqdm(
dataloader, desc="Evaluating"
):
input_ids = input_ids.to(device)
input_mask = input_mask.to(device)
segment_ids = segment_ids.to(device)
label_ids = label_ids.to(device)
entity_mask = entity_mask.to(device)
with torch.no_grad():
tmp_eval_loss, logits = model(
input_ids, segment_ids, input_mask, label_ids, entity_mask=entity_mask
)
logits = logits.detach().cpu().numpy()
label_ids = label_ids.to("cpu").numpy()
tmp_eval_accuracy = utils.accuracy(logits, label_ids)
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
logger.info("\n")
normalized_eval_accuracy = eval_accuracy / nb_eval_examples
result = {"normalized_accuracy": normalized_eval_accuracy}
result["unnormalized_accuracy"] = eval_accuracy / number_of_samples
result["candidate_generation_recall"] = nb_eval_examples / number_of_samples
if eval_bm45_acc:
result["normalized_bm45_recall_@"] = utils.eval_precision_bm45_dataloader(
dataloader, [1, 5, 10, 20, 40, 60, 80, 100]
)
result["unnormalized_bm45_recall_@"] = utils.eval_precision_bm45_dataloader(
dataloader, [1, 5, 10, 20, 40, 60, 80, 100], number_of_samples
)
if path_to_file_to_write_results is None:
logger.info(
"***** Eval results - {} ({} / {} samples) *****\n".format(
dataset_name, nb_eval_examples, number_of_samples
)
)
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
else:
with open(path_to_file_to_write_results, "a+") as writer:
logger.info(
"***** Eval results - {} ({} / {} samples) *****\n".format(
dataset_name, nb_eval_examples, number_of_samples
)
)
writer.write(
"***** Eval results - {} ({} / {} samples) *****\n".format(
dataset_name, nb_eval_examples, number_of_samples
)
)
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
writer.write("\n")
logger.info("\n")
return result
def evaluate(parameters, logger=None):
reranker = utils.get_reranker(parameters)
if parameters["full_evaluation"]:
eval_datasets = [
"aida-A",
"aida-B",
"msnbc",
"aquaint",
"ace2004",
"clueweb",
"wikipedia",
]
else:
eval_datasets = ["aida-B"]
candidates_key = (
"pregenerated_candidates"
if parameters["evaluate_with_pregenerated_candidates"]
else "candidates"
)
gold_key = (
"pregenerated_gold_pos"
if parameters["evaluate_with_pregenerated_candidates"]
else "gold_pos"
)
number_of_samples_per_dataset = {}
total_time = 0
for eval_dataset_name in eval_datasets:
time_start = time.time()
logger.info("\nEvaluating on the {} dataset".format(eval_dataset_name))
eval_samples = utils.read_dataset(
eval_dataset_name, parameters["path_to_preprocessed_json_data"]
)
eval_samples_filtered = utils.filter_samples(
eval_samples, parameters["top_k"], gold_key
)
logger.info(
"Retained {} out of {} samples".format(
len(eval_samples_filtered), len(eval_samples)
)
)
number_of_samples_per_dataset[eval_dataset_name] = len(eval_samples)
# if args.num_preprocessing_threads == -1:
# eval_data, eval_tensor_data = process_samples_for_model(args.context_key, eval_samples_filtered, tokenizer, args.max_seq_length, logger = logger, top_k = args.top_k, example = False, debug = args.debug, tagged = args.tag_mention, candidates_key = candidates_key, gold_key = gold_key)
# else:
# eval_data, eval_tensor_data = preprocessing_multithreaded(eval_samples_filtered, logger, args, output_dir=True)
eval_data, eval_tensor_data = reranker._process_mentions_for_model(
parameters["context_key"],
eval_samples_filtered,
reranker.tokenizer,
parameters["max_seq_length"],
parameters["top_k"],
parameters["silent"],
candidates_key=candidates_key,
gold_key=gold_key,
debug=parameters["debug"],
)
eval_sampler = SequentialSampler(eval_tensor_data)
eval_dataloader = DataLoader(
eval_tensor_data,
sampler=eval_sampler,
batch_size=parameters["evaluation_batch_size"],
)
if parameters["output_eval_file"] is None:
output_eval_file = os.path.join(
parameters["path_to_model"], "eval_results.txt"
)
else:
output_eval_file = parameters["output_eval_file"]
result = evaluate_model_on_dataset(
reranker.model,
eval_dataloader,
eval_dataset_name,
eval_bm45_acc=True,
device=reranker.device,
logger=logger,
path_to_file_to_write_results=output_eval_file,
number_of_samples=number_of_samples_per_dataset[eval_dataset_name],
)
execution_time = (time.time() - time_start) / 60
total_time += execution_time
if logger is not None:
logger.info(
"The execution for dataset {} took {} minutes".format(
eval_dataset_name, execution_time
)
)
else:
print(
"The execution for dataset {} took {} minutes".format(
eval_dataset_name, execution_time
)
)
if logger is not None:
logger.info("The evaluation took: {} minutes".format(total_time))
else:
print("The evaluation took:", total_time, " minutes")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--path_to_preprocessed_json_data",
default="data/train_and_benchmark_processed_json",
type=str,
help="The path to the train and benchmarking data.",
)
parser.add_argument(
"--path_to_model",
default=None,
type=str,
required=True,
help="The full path to the model to be evaluated.",
)
parser.add_argument("--top_k", default=80, type=int)
parser.add_argument(
"--max_seq_length",
default=512,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--context_key", default="tagged_query_context_sent_prev_curr_next", type=str
)
parser.add_argument(
"--lowercase_flag",
action="store_true",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument(
"--full_evaluation",
action="store_true",
help="Whether to run the evaluation on all datasets.",
)
parser.add_argument(
"--evaluate_with_pregenerated_candidates",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--evaluation_batch_size",
default=8,
type=int,
help="Total batch size for evaluation.",
)
parser.add_argument(
"--dataparallel_bert",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument(
"--no_cuda", action="store_true", help="Whether not to use CUDA when available"
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
parser.add_argument(
"--output_eval_file",
default=None,
type=str,
help="The txt file where the the evaluation results will be written.",
)
args = parser.parse_args()
print(args)
parameters = args.__dict__
evaluate(parameters, logger=utils.get_logger())
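# Example invocation (illustrative; the model path is a placeholder, not a file
# shipped with the repository):
#   python blink/candidate_ranking/evaluate.py \
#       --path_to_model models/reranker.bin \
#       --full_evaluation --evaluate_with_pregenerated_candidates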
|
BLINK-main
|
blink/candidate_ranking/evaluate.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Provide an argument parser and default command line options for using BLINK.
import argparse
import importlib
import os
import sys
import datetime
ENT_START_TAG = "[unused0]"
ENT_END_TAG = "[unused1]"
ENT_TITLE_TAG = "[unused2]"
class BlinkParser(argparse.ArgumentParser):
"""
Provide an opt-producer and CLI argument parser.
More options can be added by passing this object and calling
``add_arg()`` or ``add_argument()`` on it.
:param add_blink_args:
(default True) initializes the default arguments for BLINK package.
:param add_model_args:
(default False) initializes the default arguments for loading models,
including initializing arguments from the model.
"""
def __init__(
self, add_blink_args=True, add_model_args=False,
description='BLINK parser',
):
super().__init__(
description=description,
allow_abbrev=False,
conflict_handler='resolve',
formatter_class=argparse.HelpFormatter,
add_help=add_blink_args,
)
self.blink_home = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
os.environ['BLINK_HOME'] = self.blink_home
self.add_arg = self.add_argument
self.overridable = {}
if add_blink_args:
self.add_blink_args()
if add_model_args:
self.add_model_args()
def add_blink_args(self, args=None):
"""
Add common BLINK args across all scripts.
"""
parser = self.add_argument_group("Common Arguments")
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--data_parallel",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument(
"--no_cuda", action="store_true",
help="Whether not to use CUDA when available",
)
parser.add_argument("--top_k", default=10, type=int)
parser.add_argument(
"--seed", type=int, default=52313, help="random seed for initialization"
)
parser.add_argument(
"--zeshel",
default=True,
type=bool,
help="Whether the dataset is from zeroshot.",
)
def add_model_args(self, args=None):
"""
Add model args.
"""
parser = self.add_argument_group("Model Arguments")
parser.add_argument(
"--max_seq_length",
default=256,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_context_length",
default=128,
type=int,
help="The maximum total context input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_cand_length",
default=128,
type=int,
help="The maximum total label input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--path_to_model",
default=None,
type=str,
required=False,
help="The full path to the model to load.",
)
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--pull_from_layer", type=int, default=-1, help="Layers to pull from BERT",
)
parser.add_argument(
"--lowercase",
action="store_false",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument("--context_key", default="context", type=str)
parser.add_argument(
"--out_dim", type=int, default=1, help="Output dimention of bi-encoders.",
)
parser.add_argument(
"--add_linear",
action="store_true",
help="Whether to add an additonal linear projection on top of BERT.",
)
parser.add_argument(
"--data_path",
default="data/zeshel",
type=str,
help="The path to the train data.",
)
parser.add_argument(
"--output_path",
default=None,
type=str,
required=True,
help="The output directory where generated output file (model, etc.) is to be dumped.",
)
def add_training_args(self, args=None):
"""
Add model training args.
"""
parser = self.add_argument_group("Model Training Arguments")
parser.add_argument(
"--evaluate", action="store_true", help="Whether to run evaluation."
)
parser.add_argument(
"--output_eval_file",
default=None,
type=str,
help="The txt file where the the evaluation results will be written.",
)
parser.add_argument(
"--train_batch_size", default=8, type=int,
help="Total batch size for training."
)
parser.add_argument("--max_grad_norm", default=1.0, type=float)
parser.add_argument(
"--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=1,
type=int,
help="Number of training epochs.",
)
parser.add_argument(
"--print_interval", type=int, default=10,
help="Interval of loss printing",
)
parser.add_argument(
"--eval_interval",
type=int,
default=100,
help="Interval for evaluation during training",
)
parser.add_argument(
"--save_interval", type=int, default=1,
help="Interval for model saving"
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
parser.add_argument(
"--type_optimization",
type=str,
default="all_encoder_layers",
help="Which type of layers to optimize in BERT",
)
parser.add_argument(
"--shuffle", type=bool, default=False,
help="Whether to shuffle train data",
)
def add_eval_args(self, args=None):
"""
Add model evaluation args.
"""
parser = self.add_argument_group("Model Evaluation Arguments")
parser.add_argument(
"--eval_batch_size", default=8, type=int,
help="Total batch size for evaluation.",
)
parser.add_argument(
"--mode",
default="valid",
type=str,
help="Train / validation / test",
)
parser.add_argument(
"--save_topk_result",
action="store_true",
help="Whether to save prediction results.",
)
parser.add_argument(
"--encode_batch_size",
default=8,
type=int,
help="Batch size for encoding."
)
parser.add_argument(
"--cand_pool_path",
default=None,
type=str,
help="Path for cached candidate pool (id tokenization of candidates)",
)
parser.add_argument(
"--cand_encode_path",
default=None,
type=str,
help="Path for cached candidate encoding",
)
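# Minimal usage sketch for BlinkParser (illustrative; the argv values are
# placeholders, and --output_path is required once add_model_args is enabled):
#   parser = BlinkParser(add_model_args=True)
#   parser.add_training_args()
#   parser.add_eval_args()
#   args = parser.parse_args(["--output_path", "out"])
#   # args now carries the common, model, training and eval defaults above.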
|
BLINK-main
|
blink/common/params.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from torch import nn
def get_model_obj(model):
model = model.module if hasattr(model, "module") else model
return model
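# Illustrative use of get_model_obj: retrieve the underlying module whether or
# not the model has been wrapped in torch.nn.DataParallel, e.g. before saving
# (the file name below is a placeholder):
#   torch.save(get_model_obj(model).state_dict(), "model.bin")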
class BertEncoder(nn.Module):
def __init__(
self, bert_model, output_dim, layer_pulled=-1, add_linear=None):
super(BertEncoder, self).__init__()
self.layer_pulled = layer_pulled
bert_output_dim = bert_model.embeddings.word_embeddings.weight.size(1)
self.bert_model = bert_model
if add_linear:
self.additional_linear = nn.Linear(bert_output_dim, output_dim)
self.dropout = nn.Dropout(0.1)
else:
self.additional_linear = None
def forward(self, token_ids, segment_ids, attention_mask):
output_bert, output_pooler = self.bert_model(
token_ids, segment_ids, attention_mask
)
# get embedding of [CLS] token
if self.additional_linear is not None:
embeddings = output_pooler
else:
embeddings = output_bert[:, 0, :]
# in case of dimensionality reduction
if self.additional_linear is not None:
result = self.additional_linear(self.dropout(embeddings))
else:
result = embeddings
return result
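# Minimal usage sketch for BertEncoder (illustrative; assumes the
# pytorch_transformers weights for "bert-base-uncased" are available locally or
# downloadable):
#   from pytorch_transformers.modeling_bert import BertModel
#   import torch
#   bert = BertModel.from_pretrained("bert-base-uncased")
#   encoder = BertEncoder(bert, output_dim=100, add_linear=True)
#   token_ids = torch.zeros(2, 16, dtype=torch.long)   # (batch, seq_len)
#   segment_ids = torch.zeros_like(token_ids)
#   mask = torch.ones_like(token_ids)
#   out = encoder(token_ids, segment_ids, mask)        # shape (2, 100)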
|
BLINK-main
|
blink/common/ranker_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import os
import numpy as np
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from torch import nn
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.optimization import AdamW
patterns_optimizer = {
'additional_layers': ['additional'],
'top_layer': ['additional', 'bert_model.encoder.layer.11.'],
'top4_layers': [
'additional',
'bert_model.encoder.layer.11.',
'encoder.layer.10.',
'encoder.layer.9.',
'encoder.layer.8',
],
'all_encoder_layers': ['additional', 'bert_model.encoder.layer'],
'all': ['additional', 'bert_model.encoder.layer', 'bert_model.embeddings'],
}
def get_bert_optimizer(models, type_optimization, learning_rate, fp16=False):
""" Optimizes the network with AdamWithDecay
"""
if type_optimization not in patterns_optimizer:
print(
'Error. Type optimizer must be one of %s' % (str(patterns_optimizer.keys()))
)
parameters_with_decay = []
parameters_with_decay_names = []
parameters_without_decay = []
parameters_without_decay_names = []
no_decay = ['bias', 'gamma', 'beta']
patterns = patterns_optimizer[type_optimization]
for model in models:
for n, p in model.named_parameters():
if any(t in n for t in patterns):
if any(t in n for t in no_decay):
parameters_without_decay.append(p)
parameters_without_decay_names.append(n)
else:
parameters_with_decay.append(p)
parameters_with_decay_names.append(n)
print('The following parameters will be optimized WITH decay:')
print(ellipse(parameters_with_decay_names, 5, ' , '))
print('The following parameters will be optimized WITHOUT decay:')
print(ellipse(parameters_without_decay_names, 5, ' , '))
optimizer_grouped_parameters = [
{'params': parameters_with_decay, 'weight_decay': 0.01},
{'params': parameters_without_decay, 'weight_decay': 0.0},
]
optimizer = AdamW(
optimizer_grouped_parameters,
lr=learning_rate,
correct_bias=False
)
if fp16:
optimizer = fp16_optimizer_wrapper(optimizer)
return optimizer
def ellipse(lst, max_display=5, sep='|'):
"""
Like join, but possibly inserts an ellipsis.
:param lst: The list to join on
:param int max_display: the number of items to display for ellipsing.
If -1, shows all items
:param string sep: the delimiter to join on
"""
# copy the list (or force it to a list if it's a set)
choices = list(lst)
# insert the ellipsis if necessary
if max_display > 0 and len(choices) > max_display:
ellipsis = '...and {} more'.format(len(choices) - max_display)
choices = choices[:max_display] + [ellipsis]
return sep.join(str(c) for c in choices)
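# Example behaviour of ellipse() (illustrative):
#   ellipse(["a", "b", "c", "d"], max_display=2, sep=" , ")
#   -> "a , b , ...and 2 more"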
|
BLINK-main
|
blink/common/optimizer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
FAISS-based index components. Original from
https://github.com/facebookresearch/DPR/blob/master/dpr/indexer/faiss_indexers.py
"""
import os
import logging
import pickle
import faiss
import numpy as np
logger = logging.getLogger()
class DenseIndexer(object):
def __init__(self, buffer_size: int = 50000):
self.buffer_size = buffer_size
self.index_id_to_db_id = []
self.index = None
def index_data(self, data: np.array):
raise NotImplementedError
def search_knn(self, query_vectors: np.array, top_docs: int):
raise NotImplementedError
def serialize(self, index_file: str):
logger.info("Serializing index to %s", index_file)
faiss.write_index(self.index, index_file)
def deserialize_from(self, index_file: str):
logger.info("Loading index from %s", index_file)
self.index = faiss.read_index(index_file)
logger.info(
"Loaded index of type %s and size %d", type(self.index), self.index.ntotal
)
# DenseFlatIndexer does exact search
class DenseFlatIndexer(DenseIndexer):
def __init__(self, vector_sz: int = 1, buffer_size: int = 50000):
super(DenseFlatIndexer, self).__init__(buffer_size=buffer_size)
self.index = faiss.IndexFlatIP(vector_sz)
def index_data(self, data: np.array):
n = len(data)
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
cnt = 0
for i in range(0, n, self.buffer_size):
vectors = [np.reshape(t, (1, -1)) for t in data[i : i + self.buffer_size]]
vectors = np.concatenate(vectors, axis=0)
self.index.add(vectors)
cnt += self.buffer_size
logger.info("Total data indexed %d", n)
def search_knn(self, query_vectors, top_k):
scores, indexes = self.index.search(query_vectors, top_k)
return scores, indexes
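# Minimal usage sketch for DenseFlatIndexer (illustrative; vector size and
# counts are arbitrary):
#   index = DenseFlatIndexer(vector_sz=768)
#   index.index_data(np.random.rand(1000, 768).astype("float32"))
#   scores, ids = index.search_knn(np.random.rand(4, 768).astype("float32"), top_k=5)
#   # scores and ids both have shape (4, 5); ids index into the input order.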
# DenseHNSWFlatIndexer does approximate search
class DenseHNSWFlatIndexer(DenseIndexer):
"""
Efficient index for retrieval. Note: default settings are for high accuracy but also high RAM usage
"""
def __init__(
self,
vector_sz: int,
buffer_size: int = 50000,
store_n: int = 128,
ef_search: int = 256,
ef_construction: int = 200,
):
super(DenseHNSWFlatIndexer, self).__init__(buffer_size=buffer_size)
# IndexHNSWFlat supports L2 similarity only
# so we have to apply DOT -> L2 similarity space conversion with the help of an extra dimension
index = faiss.IndexHNSWFlat(vector_sz + 1, store_n)
index.hnsw.efSearch = ef_search
index.hnsw.efConstruction = ef_construction
self.index = index
self.phi = 0
def index_data(self, data: np.array):
n = len(data)
# max norm is required before putting all vectors in the index to convert inner product similarity to L2
if self.phi > 0:
raise RuntimeError(
"DPR HNSWF index needs to index all data at once,"
"results will be unpredictable otherwise."
)
phi = 0
for i, item in enumerate(data):
doc_vector = item
norms = (doc_vector ** 2).sum()
phi = max(phi, norms)
logger.info("HNSWF DotProduct -> L2 space phi={}".format(phi))
self.phi = phi  # remember phi so a second call to index_data triggers the guard above
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
cnt = 0
for i in range(0, n, self.buffer_size):
vectors = [np.reshape(t, (1, -1)) for t in data[i : i + self.buffer_size]]
norms = [(doc_vector ** 2).sum() for doc_vector in vectors]
aux_dims = [np.sqrt(phi - norm) for norm in norms]
hnsw_vectors = [
np.hstack((doc_vector, aux_dims[i].reshape(-1, 1)))
for i, doc_vector in enumerate(vectors)
]
hnsw_vectors = np.concatenate(hnsw_vectors, axis=0)
self.index.add(hnsw_vectors)
cnt += self.buffer_size
logger.info("Indexed data %d" % cnt)
logger.info("Total data indexed %d" % n)
def search_knn(self, query_vectors, top_k):
aux_dim = np.zeros(len(query_vectors), dtype="float32")
query_hnsw_vectors = np.hstack((query_vectors, aux_dim.reshape(-1, 1)))
logger.info("query_hnsw_vectors %s", query_hnsw_vectors.shape)
scores, indexes = self.index.search(query_hnsw_vectors, top_k)
return scores, indexes
def deserialize_from(self, file: str):
super(DenseHNSWFlatIndexer, self).deserialize_from(file)
# to trigger warning on subsequent indexing
self.phi = 1
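# Why DenseHNSWFlatIndexer's extra dimension works (sketch of the standard
# DOT -> L2 reduction): each document vector y is stored as
# y' = [y, sqrt(phi - ||y||^2)] and each query q is searched as q' = [q, 0], so
#   ||q' - y'||^2 = ||q||^2 + ||y||^2 + (phi - ||y||^2) - 2 q.y
#                 = ||q||^2 + phi - 2 q.y
# Both ||q||^2 and phi are constant for a given query, so minimizing L2
# distance in the augmented space ranks candidates by inner product.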
|
BLINK-main
|
blink/indexer/faiss_indexer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import json
import sys
from elq.index.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer, DenseIVFFlatIndexer
import logging
import torch
import numpy as np
from colorama import init
from termcolor import colored
import torch.nn.functional as F
import blink.ner as NER
from torch.utils.data import DataLoader, SequentialSampler, TensorDataset
from elq.biencoder.biencoder import BiEncoderRanker, load_biencoder, to_bert_input
from elq.biencoder.data_process import (
process_mention_data,
get_context_representation_single_mention,
get_candidate_representation,
)
import elq.candidate_ranking.utils as utils
import math
from elq.vcg_utils.measures import entity_linking_tp_with_overlap
from elq.biencoder.utils import batch_reshape_mask_left
import os
import sys
from tqdm import tqdm
import pdb
import time
HIGHLIGHTS = [
"on_red",
"on_green",
"on_yellow",
"on_blue",
"on_magenta",
"on_cyan",
]
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
def _print_colorful_text(input_tokens, tokenizer, pred_triples):
"""
pred_triples:
Assumes no overlapping triples
"""
sort_idxs = sorted(range(len(pred_triples)), key=lambda idx: pred_triples[idx][1])
init() # colorful output
msg = ""
if pred_triples and (len(pred_triples) > 0):
msg += tokenizer.decode(input_tokens[0 : int(pred_triples[sort_idxs[0]][1])])
for i, idx in enumerate(sort_idxs):
triple = pred_triples[idx]
msg += " " + colored(
tokenizer.decode(input_tokens[int(triple[1]) : int(triple[2])]),
"grey",
HIGHLIGHTS[idx % len(HIGHLIGHTS)],
)
if i < len(sort_idxs) - 1:
msg += " " + tokenizer.decode(input_tokens[
int(triple[2]) : int(pred_triples[sort_idxs[i + 1]][1])
])
else:
msg += " " + tokenizer.decode(input_tokens[int(triple[2]) : ])
else:
msg = tokenizer.decode(input_tokens)
print("\n" + str(msg) + "\n")
def _print_colorful_prediction(all_entity_preds, pred_triples, id2text, id2wikidata):
sort_idxs = sorted(range(len(pred_triples)), key=lambda idx: pred_triples[idx][1])
for idx in sort_idxs:
print(colored(all_entity_preds[0]['pred_tuples_string'][idx][1], "grey", HIGHLIGHTS[idx % len(HIGHLIGHTS)]))
if pred_triples[idx][0] in id2wikidata:
print(" Wikidata ID: {}".format(id2wikidata[pred_triples[idx][0]]))
print(" Title: {}".format(all_entity_preds[0]['pred_tuples_string'][idx][0]))
print(" Score: {}".format(str(all_entity_preds[0]['scores'][idx])))
print(" Triple: {}".format(str(pred_triples[idx])))
print(" Text: {}".format(id2text[pred_triples[idx][0]]))
def _load_candidates(
entity_catalogue, entity_encoding,
faiss_index="none", index_path=None,
logger=None,
):
if faiss_index == "none":
candidate_encoding = torch.load(entity_encoding)
indexer = None
else:
candidate_encoding = None
assert index_path is not None, "Error! Empty indexer path."
if faiss_index == "flat":
indexer = DenseFlatIndexer(1)
elif faiss_index == "hnsw":
indexer = DenseHNSWFlatIndexer(1)
elif faiss_index == "ivfflat":
indexer = DenseIVFFlatIndexer(1)
else:
raise ValueError("Error! Unsupported indexer type! Choose from flat,hnsw,ivfflat.")
indexer.deserialize_from(index_path)
candidate_encoding = torch.load(entity_encoding)
if not os.path.exists("models/id2title.json"):
id2title = {}
id2text = {}
id2wikidata = {}
local_idx = 0
with open(entity_catalogue, "r") as fin:
lines = fin.readlines()
for line in lines:
entity = json.loads(line)
id2title[str(local_idx)] = entity["title"]
id2text[str(local_idx)] = entity["text"]
if "kb_idx" in entity:
id2wikidata[str(local_idx)] = entity["kb_idx"]
local_idx += 1
json.dump(id2title, open("models/id2title.json", "w"))
json.dump(id2text, open("models/id2text.json", "w"))
json.dump(id2wikidata, open("models/id2wikidata.json", "w"))
else:
if logger: logger.info("Loading id2title")
id2title = json.load(open("models/id2title.json"))
if logger: logger.info("Loading id2text")
id2text = json.load(open("models/id2text.json"))
if logger: logger.info("Loading id2wikidata")
id2wikidata = json.load(open("models/id2wikidata.json"))
return (
candidate_encoding, indexer,
id2title, id2text, id2wikidata,
)
def _get_test_samples(
test_filename, test_entities_path, logger,
):
"""
Parses jsonl format with one example per line
Each line of the following form
IF HAVE LABELS
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
"mentions": [[19, 23], [7, 15]],
"tokenized_text_ids": [2040, 2003, 3099, 1997, 4058, 2249, 1029],
"tokenized_mention_idxs": [[4, 5], [2, 3]],
"label_id": [10902, 28422],
"wikidata_id": ["Q1397", "Q132050"],
"entity": ["Ohio", "Governor"],
"label": [list of wikipedia descriptions]
}
IF NO LABELS (JUST PREDICTION)
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
}
"""
if logger: logger.info("Loading test samples")
test_samples = []
unknown_entity_samples = []
num_unknown_entity_samples = 0
num_no_gold_entity = 0
ner_errors = 0
with open(test_filename, "r") as fin:
lines = fin.readlines()
sample_idx = 0
do_setup_samples = True
for i, line in enumerate(lines):
record = json.loads(line)
test_samples.append(record)
return test_samples, num_unknown_entity_samples
def _process_biencoder_dataloader(samples, tokenizer, biencoder_params, logger):
"""
Samples: list of examples, each of the form--
IF HAVE LABELS
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
"mentions": [[19, 23], [7, 15]],
"tokenized_text_ids": [2040, 2003, 3099, 1997, 4058, 2249, 1029],
"tokenized_mention_idxs": [[4, 5], [2, 3]],
"label_id": [10902, 28422],
"wikidata_id": ["Q1397", "Q132050"],
"entity": ["Ohio", "Governor"],
"label": [list of wikipedia descriptions]
}
IF NO LABELS (JUST PREDICTION)
{
"id": "WebQTest-12",
"text": "who is governor of ohio 2011?",
}
"""
if 'label_id' in samples[0]:
# have labels
tokens_data, tensor_data_tuple, _ = process_mention_data(
samples=samples,
tokenizer=tokenizer,
max_context_length=biencoder_params["max_context_length"],
max_cand_length=biencoder_params["max_cand_length"],
silent=False,
logger=logger,
debug=biencoder_params["debug"],
add_mention_bounds=(not biencoder_params.get("no_mention_bounds", False)),
params=biencoder_params,
)
else:
samples_text_tuple = []
max_seq_len = 0
for sample in samples:
# truncate the end if the sequence is too long...
encoded_sample = [101] + tokenizer.encode(sample['text'])[:biencoder_params["max_context_length"]-2] + [102]
max_seq_len = max(len(encoded_sample), max_seq_len)
samples_text_tuple.append(encoded_sample + [0 for _ in range(biencoder_params["max_context_length"] - len(encoded_sample))])
# print(samples_text_tuple)
tensor_data_tuple = [torch.tensor(samples_text_tuple)]
tensor_data = TensorDataset(*tensor_data_tuple)
sampler = SequentialSampler(tensor_data)
dataloader = DataLoader(
tensor_data, sampler=sampler, batch_size=biencoder_params["eval_batch_size"]
)
return dataloader
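# Shape note (illustrative): with biencoder_params["max_context_length"] == 32
# and eval_batch_size == 8, each batch yielded by the returned dataloader is a
# tuple whose first tensor has shape (8, 32) -- [CLS]/[SEP]-wrapped token ids,
# zero-padded on the right.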
def _run_biencoder(
args, biencoder, dataloader, candidate_encoding, samples,
num_cand_mentions=50, num_cand_entities=10,
device="cpu", sample_to_all_context_inputs=None,
threshold=0.0, indexer=None,
):
"""
Returns: tuple
labels (List[int]) [(max_num_mentions_gold) x exs]: gold labels -- returns None if no labels
nns (List[Array[int]]) [(# of pred mentions, cands_per_mention) x exs]: predicted entity IDs in each example
dists (List[Array[float]]) [(# of pred mentions, cands_per_mention) x exs]: scores of each entity in nns
pred_mention_bounds (List[Array[int]]) [(# of pred mentions, 2) x exs]: predicted mention boundaries in each examples
mention_scores (List[Array[float]]) [(# of pred mentions,) x exs]: mention score logit
cand_scores (List[Array[float]]) [(# of pred mentions, cands_per_mention) x exs]: candidate score logit
"""
biencoder.model.eval()
biencoder_model = biencoder.model
if hasattr(biencoder.model, "module"):
biencoder_model = biencoder.model.module
context_inputs = []
nns = []
dists = []
mention_dists = []
pred_mention_bounds = []
mention_scores = []
cand_scores = []
sample_idx = 0
ctxt_idx = 0
label_ids = None
for step, batch in enumerate(tqdm(dataloader)):
context_input = batch[0].to(device)
mask_ctxt = context_input != biencoder.NULL_IDX
with torch.no_grad():
context_outs = biencoder.encode_context(
context_input, num_cand_mentions=num_cand_mentions, topK_threshold=threshold,
)
embedding_ctxt = context_outs['mention_reps']
left_align_mask = context_outs['mention_masks']
chosen_mention_logits = context_outs['mention_logits']
chosen_mention_bounds = context_outs['mention_bounds']
'''
GET TOP CANDIDATES PER MENTION
'''
# (all_pred_mentions_batch, embed_dim)
embedding_ctxt = embedding_ctxt[left_align_mask]
if indexer is None:
try:
cand_logits, _, _ = biencoder.score_candidate(
context_input, None,
text_encs=embedding_ctxt,
cand_encs=candidate_encoding.to(device),
)
# DIM (all_pred_mentions_batch, num_cand_entities); (all_pred_mentions_batch, num_cand_entities)
top_cand_logits_shape, top_cand_indices_shape = cand_logits.topk(num_cand_entities, dim=-1, sorted=True)
except:
# for memory savings, go through one chunk of candidates at a time
SPLIT_SIZE=1000000
done=False
while not done:
top_cand_logits_list = []
top_cand_indices_list = []
max_chunk = int(len(candidate_encoding) / SPLIT_SIZE)
for chunk_idx in range(max_chunk):
try:
# DIM (num_total_mentions, num_cand_entities); (num_total_mention, num_cand_entities)
top_cand_logits, top_cand_indices = embedding_ctxt.mm(candidate_encoding[chunk_idx*SPLIT_SIZE:(chunk_idx+1)*SPLIT_SIZE].to(device).t().contiguous()).topk(10, dim=-1, sorted=True)
top_cand_logits_list.append(top_cand_logits)
top_cand_indices_list.append(top_cand_indices + chunk_idx*SPLIT_SIZE)
if len((top_cand_indices_list[chunk_idx] < 0).nonzero()) > 0:
import pdb
pdb.set_trace()
except:
SPLIT_SIZE = int(SPLIT_SIZE/2)
break
if len(top_cand_indices_list) == max_chunk:
# DIM (num_total_mentions, num_cand_entities); (num_total_mentions, num_cand_entities) -->
# top_top_cand_indices_shape indexes into top_cand_indices
top_cand_logits_shape, top_top_cand_indices_shape = torch.cat(
top_cand_logits_list, dim=-1).topk(num_cand_entities, dim=-1, sorted=True)
# make indices index into candidate_encoding
# DIM (num_total_mentions, max_chunk*num_cand_entities)
all_top_cand_indices = torch.cat(top_cand_indices_list, dim=-1)
# DIM (num_total_mentions, num_cand_entities)
top_cand_indices_shape = all_top_cand_indices.gather(-1, top_top_cand_indices_shape)
done = True
else:
# DIM (all_pred_mentions_batch, num_cand_entities); (all_pred_mentions_batch, num_cand_entities)
top_cand_logits_shape, top_cand_indices_shape = indexer.search_knn(embedding_ctxt.cpu().numpy(), num_cand_entities)
top_cand_logits_shape = torch.tensor(top_cand_logits_shape).to(embedding_ctxt.device)
top_cand_indices_shape = torch.tensor(top_cand_indices_shape).to(embedding_ctxt.device)
# DIM (bs, max_num_pred_mentions, num_cand_entities)
top_cand_logits = torch.zeros(chosen_mention_logits.size(0), chosen_mention_logits.size(1), top_cand_logits_shape.size(-1)).to(
top_cand_logits_shape.device, top_cand_logits_shape.dtype)
top_cand_logits[left_align_mask] = top_cand_logits_shape
top_cand_indices = torch.zeros(chosen_mention_logits.size(0), chosen_mention_logits.size(1), top_cand_indices_shape.size(-1)).to(
top_cand_indices_shape.device, top_cand_indices_shape.dtype)
top_cand_indices[left_align_mask] = top_cand_indices_shape
'''
COMPUTE FINAL SCORES FOR EACH CAND-MENTION PAIR + PRUNE USING IT
'''
# Has NAN for impossible mentions...
# log p(entity && mb) = log [p(entity|mention bounds) * p(mention bounds)] = log p(e|mb) + log p(mb)
# DIM (bs, max_num_pred_mentions, num_cand_entities)
scores = torch.log_softmax(top_cand_logits, -1) + torch.sigmoid(chosen_mention_logits.unsqueeze(-1)).log()
'''
DON'T NEED TO RESORT BY NEW SCORE -- DISTANCE PRESERVING (largest entity score still be largest entity score)
'''
for idx in range(len(batch[0])):
# [(seqlen) x exs] <= (bsz, seqlen)
context_inputs.append(context_input[idx][mask_ctxt[idx]].data.cpu().numpy())
# [(max_num_mentions, cands_per_mention) x exs] <= (bsz, max_num_mentions=num_cand_mentions, cands_per_mention)
nns.append(top_cand_indices[idx][left_align_mask[idx]].data.cpu().numpy())
# [(max_num_mentions, cands_per_mention) x exs] <= (bsz, max_num_mentions=num_cand_mentions, cands_per_mention)
dists.append(scores[idx][left_align_mask[idx]].data.cpu().numpy())
# [(max_num_mentions, 2) x exs] <= (bsz, max_num_mentions=num_cand_mentions, 2)
pred_mention_bounds.append(chosen_mention_bounds[idx][left_align_mask[idx]].data.cpu().numpy())
# [(max_num_mentions,) x exs] <= (bsz, max_num_mentions=num_cand_mentions)
mention_scores.append(chosen_mention_logits[idx][left_align_mask[idx]].data.cpu().numpy())
# [(max_num_mentions, cands_per_mention) x exs] <= (bsz, max_num_mentions=num_cand_mentions, cands_per_mention)
cand_scores.append(top_cand_logits[idx][left_align_mask[idx]].data.cpu().numpy())
return nns, dists, pred_mention_bounds, mention_scores, cand_scores
def get_predictions(
args, dataloader, biencoder_params, samples, nns, dists, mention_scores, cand_scores,
pred_mention_bounds, id2title, threshold=-2.9, mention_threshold=-0.6931,
):
"""
Arguments:
args, dataloader, biencoder_params, samples, nns, dists, pred_mention_bounds
Returns:
all_entity_preds,
num_correct_weak, num_correct_strong, num_predicted, num_gold,
num_correct_weak_from_input_window, num_correct_strong_from_input_window, num_gold_from_input_window
"""
# save biencoder predictions and print precision/recalls
num_correct_weak = 0
num_correct_strong = 0
num_predicted = 0
num_gold = 0
num_correct_weak_from_input_window = 0
num_correct_strong_from_input_window = 0
num_gold_from_input_window = 0
all_entity_preds = []
f = errors_f = None
if getattr(args, 'save_preds_dir', None) is not None:
save_biencoder_file = os.path.join(args.save_preds_dir, 'biencoder_outs.jsonl')
f = open(save_biencoder_file, 'w')
errors_f = open(os.path.join(args.save_preds_dir, 'biencoder_errors.jsonl'), 'w')
# nns (List[Array[int]]) [(num_pred_mentions, cands_per_mention) x exs])
# dists (List[Array[float]]) [(num_pred_mentions, cands_per_mention) x exs])
# pred_mention_bounds (List[Array[int]]) [(num_pred_mentions, 2) x exs]
# cand_scores (List[Array[float]]) [(num_pred_mentions, cands_per_mention) x exs])
# mention_scores (List[Array[float]]) [(num_pred_mentions,) x exs])
for batch_num, batch_data in enumerate(dataloader):
batch_context = batch_data[0]
if len(batch_data) > 1:
_, batch_cands, batch_label_ids, batch_mention_idxs, batch_mention_idx_masks = batch_data
for b in range(len(batch_context)):
i = batch_num * biencoder_params['eval_batch_size'] + b
sample = samples[i]
input_context = batch_context[b][batch_context[b] != 0].tolist() # filter out padding
# (num_pred_mentions, cands_per_mention)
scores = dists[i] if args.threshold_type == "joint" else cand_scores[i]
cands_mask = (scores[:,0] == scores[:,0])  # NaN != NaN, so this drops impossible-mention rows
pred_entity_list = nns[i][cands_mask]
if len(pred_entity_list) > 0:
e_id = pred_entity_list[0]
distances = scores[cands_mask]
# (num_pred_mentions, 2)
entity_mention_bounds_idx = pred_mention_bounds[i][cands_mask]
utterance = sample['text']
if args.threshold_type == "joint":
# THRESHOLDING
assert utterance is not None
top_mentions_mask = (distances[:,0] > threshold)
elif args.threshold_type == "top_entity_by_mention":
top_mentions_mask = (mention_scores[i] > mention_threshold)
elif args.threshold_type == "thresholded_entity_by_mention":
top_mentions_mask = (distances[:,0] > threshold) & (mention_scores[i] > mention_threshold)
_, sort_idxs = torch.tensor(distances[:,0][top_mentions_mask]).sort(descending=True)
# cands already sorted by score
all_pred_entities = pred_entity_list[:,0][top_mentions_mask]
e_mention_bounds = entity_mention_bounds_idx[top_mentions_mask]
chosen_distances = distances[:,0][top_mentions_mask]
if len(all_pred_entities) >= 2:
all_pred_entities = all_pred_entities[sort_idxs]
e_mention_bounds = e_mention_bounds[sort_idxs]
chosen_distances = chosen_distances[sort_idxs]
# prune mention overlaps
e_mention_bounds_pruned = []
all_pred_entities_pruned = []
chosen_distances_pruned = []
mention_masked_utterance = np.zeros(len(input_context))
# ensure well-formed-ness, prune overlaps
# greedily pick highest scoring, then prune all overlapping
for idx, mb in enumerate(e_mention_bounds):
mb[1] += 1 # prediction was inclusive, now make exclusive
# check if in existing mentions
if args.threshold_type != "top_entity_by_mention" and mention_masked_utterance[mb[0]:mb[1]].sum() >= 1:
continue
e_mention_bounds_pruned.append(mb)
all_pred_entities_pruned.append(all_pred_entities[idx])
chosen_distances_pruned.append(float(chosen_distances[idx]))
mention_masked_utterance[mb[0]:mb[1]] = 1
input_context = input_context[1:-1] # remove BOS and sep
pred_triples = [(
str(all_pred_entities_pruned[j]),
int(e_mention_bounds_pruned[j][0]) - 1, # -1 for BOS
int(e_mention_bounds_pruned[j][1]) - 1,
) for j in range(len(all_pred_entities_pruned))]
entity_results = {
"id": sample["id"],
"text": sample["text"],
"scores": chosen_distances_pruned,
}
if 'label_id' in sample:
# Get LABELS
input_mention_idxs = batch_mention_idxs[b][batch_mention_idx_masks[b]].tolist()
input_label_ids = batch_label_ids[b][batch_label_ids[b] != -1].tolist()
assert len(input_label_ids) == len(input_mention_idxs)
gold_mention_bounds = [
sample['text'][ment[0]-10:ment[0]] + "[" + sample['text'][ment[0]:ment[1]] + "]" + sample['text'][ment[1]:ment[1]+10]
for ment in sample['mentions']
]
# GET ALIGNED MENTION_IDXS (input is slightly different to model) between ours and gold labels -- also have to account for BOS
gold_input = sample['tokenized_text_ids']
# return first instance of my_input in gold_input
for my_input_start in range(len(gold_input)):
if (
gold_input[my_input_start] == input_context[0] and
gold_input[my_input_start:my_input_start+len(input_context)] == input_context
):
break
# add alignment factor (my_input_start) to predicted mention triples
pred_triples = [(
triple[0],
triple[1] + my_input_start, triple[2] + my_input_start,
) for triple in pred_triples]
gold_triples = [(
str(sample['label_id'][j]),
sample['tokenized_mention_idxs'][j][0], sample['tokenized_mention_idxs'][j][1],
) for j in range(len(sample['label_id']))]
num_overlap_weak, num_overlap_strong = entity_linking_tp_with_overlap(gold_triples, pred_triples)
num_correct_weak += num_overlap_weak
num_correct_strong += num_overlap_strong
num_predicted += len(all_pred_entities_pruned)
num_gold += len(sample["label_id"])
# compute number correct given the input window
pred_input_window_triples = [(
str(all_pred_entities_pruned[j]),
int(e_mention_bounds_pruned[j][0]), int(e_mention_bounds_pruned[j][1]),
) for j in range(len(all_pred_entities_pruned))]
gold_input_window_triples = [(
str(input_label_ids[j]),
input_mention_idxs[j][0], input_mention_idxs[j][1] + 1,
) for j in range(len(input_label_ids))]
num_overlap_weak_window, num_overlap_strong_window = entity_linking_tp_with_overlap(gold_input_window_triples, pred_input_window_triples)
num_correct_weak_from_input_window += num_overlap_weak_window
num_correct_strong_from_input_window += num_overlap_strong_window
num_gold_from_input_window += len(input_mention_idxs)
entity_results.update({
"pred_tuples_string": [
[id2title[triple[0]], tokenizer.decode(sample['tokenized_text_ids'][triple[1]:triple[2]])]
for triple in pred_triples
],
"gold_tuples_string": [
[id2title[triple[0]], tokenizer.decode(sample['tokenized_text_ids'][triple[1]:triple[2]])]
for triple in gold_triples
],
"pred_triples": pred_triples,
"gold_triples": gold_triples,
"tokens": input_context,
})
if errors_f is not None and (num_overlap_weak != len(gold_triples) or num_overlap_weak != len(pred_triples)):
errors_f.write(json.dumps(entity_results) + "\n")
else:
entity_results.update({
"pred_tuples_string": [
[id2title[triple[0]], tokenizer.decode(input_context[triple[1]:triple[2]])]
for triple in pred_triples
],
"pred_triples": pred_triples,
"tokens": input_context,
})
all_entity_preds.append(entity_results)
if f is not None:
f.write(
json.dumps(entity_results) + "\n"
)
if f is not None:
f.close()
errors_f.close()
return (
all_entity_preds, num_correct_weak, num_correct_strong, num_predicted, num_gold,
num_correct_weak_from_input_window, num_correct_strong_from_input_window, num_gold_from_input_window
)
def _save_biencoder_outs(save_preds_dir, nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime):
np.save(os.path.join(save_preds_dir, "biencoder_nns.npy"), nns)
np.save(os.path.join(save_preds_dir, "biencoder_dists.npy"), dists)
np.save(os.path.join(save_preds_dir, "biencoder_mention_bounds.npy"), pred_mention_bounds)
np.save(os.path.join(save_preds_dir, "biencoder_cand_scores.npy"), cand_scores)
np.save(os.path.join(save_preds_dir, "biencoder_mention_scores.npy"), mention_scores)
with open(os.path.join(save_preds_dir, "runtime.txt"), "w") as wf:
wf.write(str(runtime))
def _load_biencoder_outs(save_preds_dir):
nns = np.load(os.path.join(save_preds_dir, "biencoder_nns.npy"), allow_pickle=True)
dists = np.load(os.path.join(save_preds_dir, "biencoder_dists.npy"), allow_pickle=True)
pred_mention_bounds = np.load(os.path.join(save_preds_dir, "biencoder_mention_bounds.npy"), allow_pickle=True)
cand_scores = np.load(os.path.join(save_preds_dir, "biencoder_cand_scores.npy"), allow_pickle=True)
mention_scores = np.load(os.path.join(save_preds_dir, "biencoder_mention_scores.npy"), allow_pickle=True)
runtime = float(open(os.path.join(save_preds_dir, "runtime.txt")).read())
return nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime
def display_metrics(
num_correct, num_predicted, num_gold, prefix="",
):
p = 0 if num_predicted == 0 else float(num_correct) / float(num_predicted)
r = 0 if num_gold == 0 else float(num_correct) / float(num_gold)
if p + r > 0:
f1 = 2 * p * r / (p + r)
else:
f1 = 0
print("{0}precision = {1} / {2} = {3}".format(prefix, num_correct, num_predicted, p))
print("{0}recall = {1} / {2} = {3}".format(prefix, num_correct, num_gold, r))
print("{0}f1 = {1}".format(prefix, f1))
def load_models(args, logger):
# load biencoder model
if logger: logger.info("Loading biencoder model")
try:
with open(args.biencoder_config) as json_file:
biencoder_params = json.load(json_file)
except json.decoder.JSONDecodeError:
with open(args.biencoder_config) as json_file:
for line in json_file:
line = line.replace("'", "\"")
line = line.replace("True", "true")
line = line.replace("False", "false")
line = line.replace("None", "null")
biencoder_params = json.loads(line)
break
biencoder_params["path_to_model"] = args.biencoder_model
biencoder_params["cand_token_ids_path"] = args.cand_token_ids_path
biencoder_params["eval_batch_size"] = getattr(args, 'eval_batch_size', 8)
biencoder_params["no_cuda"] = (not getattr(args, 'use_cuda', False) or not torch.cuda.is_available())
if biencoder_params["no_cuda"]:
biencoder_params["data_parallel"] = False
biencoder_params["load_cand_enc_only"] = False
if getattr(args, 'max_context_length', None) is not None:
biencoder_params["max_context_length"] = args.max_context_length
biencoder = load_biencoder(biencoder_params)
if biencoder_params["no_cuda"] and type(biencoder.model).__name__ == 'DataParallel':
biencoder.model = biencoder.model.module
elif not biencoder_params["no_cuda"] and type(biencoder.model).__name__ != 'DataParallel':
biencoder.model = torch.nn.DataParallel(biencoder.model)
# load candidate entities
if logger: logger.info("Loading candidate entities")
(
candidate_encoding,
indexer,
id2title,
id2text,
id2wikidata,
) = _load_candidates(
args.entity_catalogue, args.entity_encoding,
args.faiss_index, args.index_path, logger=logger,
)
return (
biencoder,
biencoder_params,
candidate_encoding,
indexer,
id2title,
id2text,
id2wikidata,
)
def run(
args,
logger,
biencoder,
biencoder_params,
candidate_encoding,
indexer,
id2title,
id2text,
id2wikidata,
test_data=None,
):
if not test_data and not getattr(args, 'test_mentions', None) and not getattr(args, 'interactive', None):
msg = (
"ERROR: either you start BLINK with the "
"interactive option (-i) or you pass in input test mentions (--test_mentions)"
"and test entities (--test_entities) or manually pass in test data"
)
raise ValueError(msg)
if getattr(args, 'save_preds_dir', None) is not None and not os.path.exists(args.save_preds_dir):
os.makedirs(args.save_preds_dir)
print("Saving preds in {}".format(args.save_preds_dir))
stopping_condition = False
threshold = float(args.threshold)
if args.threshold_type == "top_entity_by_mention":
assert args.mention_threshold is not None
mention_threshold = float(args.mention_threshold)
else:
mention_threshold = threshold
if args.interactive:
while not stopping_condition:
if logger: logger.info("interactive mode")
# Interactive
text = input("insert text: ")
# Prepare data
samples = [{"id": "-1", "text": text}]
dataloader = _process_biencoder_dataloader(
samples, biencoder.tokenizer, biencoder_params, logger,
)
# Run inference
nns, dists, pred_mention_bounds, mention_scores, cand_scores = _run_biencoder(
args, biencoder, dataloader, candidate_encoding, samples=samples,
num_cand_mentions=args.num_cand_mentions, num_cand_entities=args.num_cand_entities,
device="cpu" if biencoder_params["no_cuda"] else "cuda",
threshold=mention_threshold, indexer=indexer,
)
action = "c"
while action == "c":
all_entity_preds = get_predictions(
args, dataloader, biencoder_params,
samples, nns, dists, mention_scores, cand_scores,
pred_mention_bounds, id2title, threshold=threshold,
mention_threshold=mention_threshold,
)[0]
pred_triples = all_entity_preds[0]['pred_triples']
_print_colorful_text(all_entity_preds[0]['tokens'], tokenizer, pred_triples)
_print_colorful_prediction(all_entity_preds, pred_triples, id2text, id2wikidata)
action = input("Next question [n] / change threshold [c]: ")
while action != "n" and action != "c":
action = input("Next question [n] / change threshold [c]: ")
if action == "c":
print("Current threshold {}".format(threshold))
while True:
threshold = input("New threshold (increase for less cands, decrease for more cands): ")
try:
threshold = float(threshold)
break
except:
print("Error! Expected float, got {}. Try again.".format(threshold))
else:
if not test_data:
samples, num_unk = _get_test_samples(
args.test_mentions, args.test_entities, logger,
)
else:
samples = test_data
if logger: logger.info("Preparing data for biencoder")
dataloader = _process_biencoder_dataloader(
samples, biencoder.tokenizer, biencoder_params, None,
)
stopping_condition = True
# prepare the data for biencoder
# run biencoder if predictions not saved
if not getattr(args, 'save_preds_dir', None) or not os.path.exists(
os.path.join(args.save_preds_dir, 'biencoder_mention_bounds.npy')):
# run biencoder
if logger: logger.info("Running biencoder...")
start_time = time.time()
nns, dists, pred_mention_bounds, mention_scores, cand_scores = _run_biencoder(
args, biencoder, dataloader, candidate_encoding, samples=samples,
num_cand_mentions=args.num_cand_mentions, num_cand_entities=args.num_cand_entities,
device="cpu" if biencoder_params["no_cuda"] else "cuda",
threshold=mention_threshold, indexer=indexer,
)
end_time = time.time()
if logger: logger.info("Finished running biencoder")
runtime = end_time - start_time
if getattr(args, 'save_preds_dir', None):
_save_biencoder_outs(
args.save_preds_dir, nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime,
)
else:
nns, dists, pred_mention_bounds, cand_scores, mention_scores, runtime = _load_biencoder_outs(args.save_preds_dir)
assert len(samples) == len(nns) == len(dists) == len(pred_mention_bounds) == len(cand_scores) == len(mention_scores)
(
all_entity_preds, num_correct_weak, num_correct_strong, num_predicted, num_gold,
num_correct_weak_from_input_window, num_correct_strong_from_input_window, num_gold_from_input_window,
) = get_predictions(
args, dataloader, biencoder_params,
samples, nns, dists, mention_scores, cand_scores,
pred_mention_bounds, id2title, threshold=threshold,
mention_threshold=mention_threshold,
)
print("*--------*")
if num_gold > 0:
print("WEAK MATCHING")
display_metrics(num_correct_weak, num_predicted, num_gold)
print("Just entities within input window...")
display_metrics(num_correct_weak_from_input_window, num_predicted, num_gold_from_input_window)
print("*--------*")
print("STRONG MATCHING")
display_metrics(num_correct_strong, num_predicted, num_gold)
print("Just entities within input window...")
display_metrics(num_correct_strong_from_input_window, num_predicted, num_gold_from_input_window)
print("*--------*")
print("biencoder runtime = {}".format(runtime))
print("*--------*")
return all_entity_preds
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--debug_biencoder", "-db", action="store_true", default=False, help="Debug biencoder"
)
# evaluation mode
parser.add_argument(
"--get_predictions", "-p", action="store_true", default=False, help="Getting predictions mode. Does not filter at crossencoder step."
)
parser.add_argument(
"--interactive", "-i", action="store_true", help="Interactive mode."
)
# test_data
parser.add_argument(
"--test_mentions", dest="test_mentions", type=str, help="Test Dataset."
)
parser.add_argument(
"--test_entities", dest="test_entities", type=str, help="Test Entities.",
default="models/entity.jsonl", # ALL WIKIPEDIA!
)
parser.add_argument(
"--save_preds_dir", type=str, help="Directory to save model predictions to."
)
parser.add_argument(
"--mention_threshold", type=str, default=None,
dest="mention_threshold",
help="Used if threshold type is `top_entity_by_mention`. "
"Threshold for mention score, for which mentions will be pruned if they fall under that threshold. "
"Set to '-inf' to get all mentions."
)
parser.add_argument(
"--threshold", type=str, default="-4.5",
dest="threshold",
help="Threshold for final joint score, for which examples will be pruned if they fall under that threshold. "
"Set to `-inf` to get all entities."
)
parser.add_argument(
"--num_cand_mentions", type=int, default=50, help="Number of mention candidates to consider per example (at most)"
)
parser.add_argument(
"--num_cand_entities", type=int, default=10, help="Number of entity candidates to consider per mention (at most)"
)
parser.add_argument(
"--threshold_type", type=str, default="joint",
choices=["joint", "top_entity_by_mention"],
help="How to threshold the final candidates. "
"`top_entity_by_mention`: get top candidate (with entity score) for each predicted mention bound. "
"`joint`: by thresholding joint score."
)
# biencoder
parser.add_argument(
"--biencoder_model",
dest="biencoder_model",
type=str,
default="models/elq_wiki_large.bin",
help="Path to the biencoder model.",
)
parser.add_argument(
"--biencoder_config",
dest="biencoder_config",
type=str,
default="models/elq_large_params.txt",
help="Path to the biencoder configuration.",
)
parser.add_argument(
"--cand_token_ids_path",
dest="cand_token_ids_path",
type=str,
default="models/entity_token_ids_128.t7", # ALL WIKIPEDIA!
help="Path to tokenized entity catalogue",
)
parser.add_argument(
"--entity_catalogue",
dest="entity_catalogue",
type=str,
default="models/entity.jsonl", # ALL WIKIPEDIA!
help="Path to the entity catalogue.",
)
parser.add_argument(
"--entity_encoding",
dest="entity_encoding",
type=str,
default="models/all_entities_large.t7", # ALL WIKIPEDIA!
help="Path to the entity catalogue.",
)
parser.add_argument(
"--eval_batch_size",
dest="eval_batch_size",
type=int,
default=8,
help="Crossencoder's batch size for evaluation",
)
parser.add_argument(
"--faiss_index",
dest="faiss_index",
type=str,
default="hnsw",
choices=["hnsw", "flat", "ivfflat", "none"],
help="whether to use faiss index",
)
parser.add_argument(
"--index_path",
dest="index_path",
type=str,
default="models/faiss_hnsw_index.pkl",
help="path to load indexer",
)
parser.add_argument(
"--max_context_length",
dest="max_context_length",
type=int,
help="Maximum length of context. (Don't set to inherit from training config)",
)
# output folder
parser.add_argument(
"--output_path",
dest="output_path",
type=str,
default="output",
help="Path to the output.",
)
parser.add_argument(
"--use_cuda", dest="use_cuda", action="store_true", default=False, help="run on gpu"
)
parser.add_argument(
"--no_logger", dest="no_logger", action="store_true", default=False, help="don't log progress"
)
args = parser.parse_args()
logger = None
if not args.no_logger:
logger = utils.get_logger(args.output_path)
logger.setLevel(10)
models = load_models(args, logger)
run(args, logger, *models)
|
BLINK-main
|
elq/main_dense.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import logging
import numpy
import os
import time
import torch
from elq.index.faiss_indexer import DenseFlatIndexer, DenseIVFFlatIndexer, DenseHNSWFlatIndexer
import elq.candidate_ranking.utils as utils
logger = utils.get_logger()
def main(params):
output_path = params["output_path"]
logger.info("Loading candidate encoding from path: %s" % params["candidate_encoding"])
candidate_encoding = torch.load(params["candidate_encoding"])
vector_size = candidate_encoding.size(1)
index_buffer = params["index_buffer"]
if params["faiss_index"] == "hnsw":
logger.info("Using HNSW index in FAISS")
index = DenseHNSWFlatIndexer(vector_size, index_buffer)
elif params["faiss_index"] == "ivfflat":
logger.info("Using IVF Flat index in FAISS")
index = DenseIVFFlatIndexer(vector_size, 75, 100)
else:
logger.info("Using Flat index in FAISS")
index = DenseFlatIndexer(vector_size, index_buffer)
logger.info("Building index.")
index.index_data(candidate_encoding.numpy())
logger.info("Done indexing data.")
if params.get("save_index", None):
index.serialize(output_path)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--output_path",
required=True,
type=str,
help="output file path",
)
parser.add_argument(
"--candidate_encoding",
default="models/all_entities_large.t7",
type=str,
help="file path for candidte encoding.",
)
parser.add_argument(
"--faiss_index", type=str, choices=["hnsw", "flat", "ivfflat"],
help='Which faiss index to use',
)
parser.add_argument(
"--save_index", action='store_true',
help='If enabled, save index',
)
parser.add_argument(
'--index_buffer', type=int, default=50000,
help="Temporal memory data buffer size (in samples) for indexer",
)
params = parser.parse_args()
params = params.__dict__
main(params)
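# Example invocation (illustrative; the paths reuse defaults seen elsewhere in
# this repository):
#   python elq/build_faiss_index.py \
#       --candidate_encoding models/all_entities_large.t7 \
#       --output_path models/faiss_hnsw_index.pkl \
#       --faiss_index hnsw --save_index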
|
BLINK-main
|
elq/build_faiss_index.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Code partially adopted from https://github.com/allenai/allennlp
#
from typing import Any, Dict, List, Optional, Sequence, Tuple, TypeVar, Union
import torch
def get_device_of(tensor: torch.Tensor) -> int:
"""
Returns the device of the tensor.
"""
if not tensor.is_cuda:
return -1
else:
return tensor.get_device()
def get_range_vector(size: int, device: int) -> torch.Tensor:
"""
    Returns a range vector with the desired size, starting at 0. The CUDA implementation
    avoids copying data from CPU to GPU.
"""
if device > -1:
return torch.cuda.LongTensor(size, device=device).fill_(1).cumsum(0) - 1
else:
return torch.arange(0, size, dtype=torch.long)
def flatten_and_batch_shift_indices(indices: torch.Tensor, sequence_length: int) -> torch.Tensor:
"""
This is a subroutine for [`batched_index_select`](./util.md#batched_index_select).
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into dimension 2 of a
target tensor, which has size `(batch_size, sequence_length, embedding_size)`. This
function returns a vector that correctly indexes into the flattened target. The sequence
length of the target must be provided to compute the appropriate offsets.
```python
indices = torch.ones([2,3], dtype=torch.long)
# Sequence length of the target tensor.
sequence_length = 10
shifted_indices = flatten_and_batch_shift_indices(indices, sequence_length)
# Indices into the second element in the batch are correctly shifted
# to take into account that the target tensor will be flattened before
# the indices are applied.
assert shifted_indices == [1, 1, 1, 11, 11, 11]
```
# Parameters
indices : `torch.LongTensor`, required.
sequence_length : `int`, required.
The length of the sequence the indices index into.
This must be the second dimension of the tensor.
# Returns
offset_indices : `torch.LongTensor`
"""
# Shape: (batch_size)
if torch.max(indices) >= sequence_length or torch.min(indices) < 0:
raise IndexError(
f"All elements in indices should be in range (0, {sequence_length - 1})"
)
offsets = get_range_vector(indices.size(0), get_device_of(indices)) * sequence_length
for _ in range(len(indices.size()) - 1):
offsets = offsets.unsqueeze(1)
# Shape: (batch_size, d_1, ..., d_n)
offset_indices = indices + offsets
# Shape: (batch_size * d_1 * ... * d_n)
offset_indices = offset_indices.view(-1)
return offset_indices
def batched_index_select(
target: torch.Tensor,
indices: torch.LongTensor,
flattened_indices: Optional[torch.LongTensor] = None,
) -> torch.Tensor:
"""
The given `indices` of size `(batch_size, d_1, ..., d_n)` indexes into the sequence
dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,
embedding_size)`.
This function returns selected values in the target with respect to the provided indices, which
have size `(batch_size, d_1, ..., d_n, embedding_size)`. This can use the optionally
precomputed `flattened_indices` with size `(batch_size * d_1 * ... * d_n)` if given.
An example use case of this function is looking up the start and end indices of spans in a
sequence tensor. This is used in the
[CoreferenceResolver](../models/coreference_resolution/coref.md). Model to select
contextual word representations corresponding to the start and end indices of mentions. The key
reason this can't be done with basic torch functions is that we want to be able to use look-up
tensors with an arbitrary number of dimensions (for example, in the coref model, we don't know
a-priori how many spans we are looking up).
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
indices : `torch.LongTensor`
A tensor of shape (batch_size, ...), where each element is an index into the
`sequence_length` dimension of the `target` tensor.
flattened_indices : Optional[torch.Tensor], optional (default = None)
An optional tensor representing the result of calling `flatten_and_batch_shift_indices`
on `indices`. This is helpful in the case that the indices can be flattened once and
cached for many batch lookups.
# Returns
selected_targets : `torch.Tensor`
A tensor with shape [indices.size(), target.size(-1)] representing the embedded indices
extracted from the batch flattened target tensor.
"""
if flattened_indices is None:
# Shape: (batch_size * d_1 * ... * d_n)
        try:
            flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
        except IndexError:
            # debugging aid: dump the offending inputs before retrying
            # (the retry re-raises the same IndexError)
            print("indices: {}".format(indices))
            print("target: {}".format(target))
            flattened_indices = flatten_and_batch_shift_indices(indices, target.size(1))
# Shape: (batch_size * sequence_length, embedding_size)
flattened_target = target.view(-1, target.size(-1))
# Shape: (batch_size * d_1 * ... * d_n, embedding_size)
flattened_selected = flattened_target.index_select(0, flattened_indices)
selected_shape = list(indices.size()) + [target.size(-1)]
# Shape: (batch_size, d_1, ..., d_n, embedding_size)
selected_targets = flattened_selected.view(*selected_shape)
return selected_targets
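# --- Illustrative usage sketch (not part of the original file) ---
# A minimal, self-contained demo of batched_index_select on a small
# (batch_size=2, sequence_length=5, embedding_size=3) target tensor.
def _demo_batched_index_select():
    target = torch.arange(2 * 5 * 3, dtype=torch.float).view(2, 5, 3)
    indices = torch.tensor([[0, 4], [2, 2]])  # (batch_size, 2)
    selected = batched_index_select(target, indices)
    # selected: (batch_size, 2, embedding_size); each row picks the given
    # sequence positions out of its own batch element
    assert selected.shape == (2, 2, 3)
    assert torch.equal(selected[0, 1], target[0, 4])
    assert torch.equal(selected[1, 0], target[1, 2])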
def batched_span_select(target: torch.Tensor, spans: torch.LongTensor) -> Tuple[torch.Tensor, torch.BoolTensor]:
"""
The given `spans` of size `(batch_size, num_spans, 2)` indexes into the sequence
dimension (dimension 2) of the target, which has size `(batch_size, sequence_length,
embedding_size)`.
This function returns segmented spans in the target with respect to the provided span indices.
It does not guarantee element order within each span.
# Parameters
target : `torch.Tensor`, required.
A 3 dimensional tensor of shape (batch_size, sequence_length, embedding_size).
This is the tensor to be indexed.
    spans : `torch.LongTensor`
        A 3 dimensional tensor of shape (batch_size, num_spans, 2) representing start and end
        indices (both inclusive) into the `sequence_length` dimension of the `target` tensor.
# Returns
span_embeddings : `torch.Tensor`
        A tensor with shape (batch_size, num_spans, max_batch_span_width, embedding_size)
representing the embedded spans extracted from the batch flattened target tensor.
span_mask: `torch.BoolTensor`
A tensor with shape (batch_size, num_spans, max_batch_span_width) representing the mask on
the returned span embeddings.
"""
# both of shape (batch_size, num_spans, 1)
span_starts, span_ends = spans.split(1, dim=-1)
# shape (batch_size, num_spans, 1)
# These span widths are off by 1, because the span ends are `inclusive`.
span_widths = span_ends - span_starts
# We need to know the maximum span width so we can
# generate indices to extract the spans from the sequence tensor.
# These indices will then get masked below, such that if the length
# of a given span is smaller than the max, the rest of the values
# are masked.
max_batch_span_width = span_widths.max().item() + 1
# Shape: (1, 1, max_batch_span_width)
max_span_range_indices = get_range_vector(max_batch_span_width, get_device_of(target)).view(
1, 1, -1
)
# Shape: (batch_size, num_spans, max_batch_span_width)
# This is a broadcasted comparison - for each span we are considering,
# we are creating a range vector of size max_span_width, but masking values
# which are greater than the actual length of the span.
#
# We're using <= here (and for the mask below) because the span ends are
# inclusive, so we want to include indices which are equal to span_widths rather
# than using it as a non-inclusive upper bound.
span_mask = max_span_range_indices <= span_widths
raw_span_indices = span_ends - max_span_range_indices
# We also don't want to include span indices which are less than zero,
# which happens because some spans near the beginning of the sequence
# have an end index < max_batch_span_width, so we add this to the mask here.
span_mask = span_mask & (raw_span_indices >= 0)
span_indices = torch.nn.functional.relu(raw_span_indices.float()).long()
# Shape: (batch_size, num_spans, max_batch_span_width, embedding_dim)
span_embeddings = batched_index_select(target, span_indices)
return span_embeddings, span_mask
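# --- Illustrative usage sketch (not part of the original file) ---
# batched_span_select pads every span to the widest span in the batch and
# returns a mask over the padded positions (span bounds are inclusive).
def _demo_batched_span_select():
    target = torch.arange(1 * 6 * 2, dtype=torch.float).view(1, 6, 2)
    spans = torch.tensor([[[1, 3], [4, 4]]])  # (1, num_spans=2, 2)
    span_embeddings, span_mask = batched_span_select(target, spans)
    # max_batch_span_width = 3, so shapes are (1, 2, 3, 2) and (1, 2, 3)
    assert span_embeddings.shape == (1, 2, 3, 2)
    assert span_mask.tolist() == [[[True, True, True], [True, False, False]]]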
|
BLINK-main
|
elq/biencoder/allennlp_span_utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import json
import logging
import torch
from tqdm import tqdm, trange
from torch.utils.data import DataLoader, TensorDataset
from pytorch_transformers.tokenization_bert import BertTokenizer
from blink.biencoder.zeshel_utils import world_to_id
from elq.common.params import ENT_START_TAG, ENT_END_TAG, ENT_TITLE_TAG
def select_field_with_padding(data, key1, key2=None, pad_idx=-1):
max_len = 0
selected_list = []
padding_mask = []
for example in data:
if key2 is None:
selected_list.append(example[key1])
max_len = max(max_len, len(example[key1]))
else:
selected_list.append(example[key1][key2])
max_len = max(max_len, len(example[key1][key2]))
for i, entry in enumerate(selected_list):
# pad to max len
pad_list = [1 for _ in range(len(entry))] + [0 for _ in range(max_len - len(entry))]
selected_list[i] += [pad_idx for _ in range(max_len - len(entry))]
assert len(pad_list) == max_len
assert len(selected_list[i]) == max_len
padding_mask.append(pad_list)
return selected_list, padding_mask
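# --- Illustrative usage sketch (not part of the original file) ---
# select_field_with_padding pads ragged per-example lists to the batch
# maximum and returns a parallel 0/1 padding mask.
def _demo_select_field_with_padding():
    data = [{"ids": [1, 2]}, {"ids": [3]}]
    padded, mask = select_field_with_padding(data, "ids", pad_idx=0)
    assert padded == [[1, 2], [3, 0]]
    assert mask == [[1, 1], [1, 0]]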
def select_field(data, key1, key2=None):
if key2 is None:
return [example[key1] for example in data]
else:
return [example[key1][key2] for example in data]
def get_context_representation_single_mention(
sample,
tokenizer,
max_seq_length,
mention_key="mention",
context_key="context",
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
add_mention_bounds=True,
):
mention_tokens = []
if sample[mention_key] and len(sample[mention_key]) > 0:
mention_tokens = tokenizer.tokenize(sample[mention_key])
to_subtract = 4 if add_mention_bounds else 2
    if len(mention_tokens) > max_seq_length - to_subtract:
        # reserve 2 tokens for [CLS]/[SEP], plus 2 more for ent_start/ent_end
        # when mention bounds are added
        mention_tokens = mention_tokens[:max_seq_length - to_subtract]
if add_mention_bounds:
mention_tokens = [ent_start_token] + mention_tokens + [ent_end_token]
context_left = sample[context_key + "_left"]
context_right = sample[context_key + "_right"]
context_left = tokenizer.tokenize(context_left)
context_right = tokenizer.tokenize(context_right)
left_quota = (max_seq_length - len(mention_tokens)) // 2 - 1
right_quota = max_seq_length - len(mention_tokens) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota:
if right_add > right_quota:
right_quota += left_quota - left_add
else:
if right_add <= right_quota:
left_quota += right_quota - right_add
if left_quota <= 0:
context_left = []
if right_quota <= 0:
context_right = []
context_tokens = (
context_left[-left_quota:] + mention_tokens + context_right[:right_quota]
)
context_tokens = ["[CLS]"] + context_tokens + ["[SEP]"]
mention_idxs = [
len(context_left[-left_quota:]) + 1,
len(context_left[-left_quota:]) + len(mention_tokens) + 1,
]
input_ids = tokenizer.convert_tokens_to_ids(context_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": context_tokens,
"ids": input_ids,
"mention_idxs": mention_idxs,
}
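# --- Illustrative arithmetic sketch (not part of the original file) ---
# Quota redistribution in get_context_representation_single_mention:
# with max_seq_length=16 and 4 mention tokens,
#   left_quota  = (16 - 4) // 2 - 1 = 5
#   right_quota = 16 - 4 - 5 - 2    = 5
# Given 2 left-context and 10 right-context tokens, the unused left quota
# (5 - 2 = 3) is transferred to the right, so right_quota becomes 8 and the
# final sequence is 2 + 4 + 8 tokens plus [CLS]/[SEP] = 16.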
def get_context_representation_multiple_mentions_left_right(
sample,
tokenizer,
max_seq_length,
mention_key="mention",
context_key="context",
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
):
all_mentions = sample[mention_key]
all_context_lefts = sample[context_key + "_left"]
all_context_rights = sample[context_key + "_right"]
if len(all_mentions[0]) == 0 and len(all_context_lefts[0]) == 0 and len(all_context_rights[0]) == 0: # passed in empty string
context_tokens = ["[CLS]", "[SEP]"]
input_ids = tokenizer.convert_tokens_to_ids(context_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": context_tokens,
"ids": input_ids,
"mention_idxs": [],
}
mention_tokens = []
for mention in all_mentions:
if mention and len(mention) > 0:
mention_token = tokenizer.tokenize(mention)
if len(mention_token) > max_seq_length - 2:
# -2 for [CLS] and [SEP]
mention_token = mention_token[:max_seq_length - 2]
mention_tokens.append(mention_token)
mention_idxs = []
assert len(all_context_lefts) == len(all_context_rights)
assert len(all_context_rights) == len(all_mentions)
context_tokens = None
for c in range(len(all_context_lefts)):
context_left = all_context_lefts[c]
context_right = all_context_rights[c]
context_left = tokenizer.tokenize(context_left)
context_right = tokenizer.tokenize(context_right)
left_quota = (max_seq_length - len(mention_tokens[c])) // 2 - 1
right_quota = max_seq_length - len(mention_tokens[c]) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota:
if right_add > right_quota:
right_quota += left_quota - left_add
else:
if right_add <= right_quota:
left_quota += right_quota - right_add
if left_quota <= 0:
context_left = []
if right_quota <= 0:
context_right = []
context_tokens_itr = (
context_left[-left_quota:] + mention_tokens[c] + context_right[:right_quota]
)
context_tokens_itr = ["[CLS]"] + context_tokens_itr + ["[SEP]"]
if context_tokens is None:
context_tokens = context_tokens_itr
else:
            try:
                # every mention in an example must share the same context window
                assert context_tokens == context_tokens_itr
            except AssertionError:
                import pdb
                pdb.set_trace()
mention_idxs.append([
len(context_left[-left_quota:]) + 1,
len(context_left[-left_quota:]) + len(mention_tokens[c]) + 1,
])
input_ids = tokenizer.convert_tokens_to_ids(context_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": context_tokens,
"ids": input_ids,
"mention_idxs": mention_idxs,
}
def sort_mentions(
lst, sort_map=None,
):
"""
sort_map: {orig_idx: idx in new "sorted" array}
"""
new_lst = [0 for _ in range(len(lst))]
for i in range(len(lst)):
new_lst[sort_map[i]] = lst[i]
return new_lst
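# --- Illustrative usage sketch (not part of the original file) ---
# sort_map sends original index i to position sort_map[i] in the output.
def _demo_sort_mentions():
    assert sort_mentions(["a", "b", "c"], {0: 2, 1: 0, 2: 1}) == ["b", "c", "a"]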
def do_sort(
sample, orig_idx_to_sort_idx,
):
sample['mentions'] = sort_mentions(sample['mentions'], orig_idx_to_sort_idx)
sample['label_id'] = sort_mentions(sample['label_id'], orig_idx_to_sort_idx)
sample['wikidata_id'] = sort_mentions(sample['wikidata_id'], orig_idx_to_sort_idx)
sample['entity'] = sort_mentions(sample['entity'], orig_idx_to_sort_idx)
sample['label'] = sort_mentions(sample['label'], orig_idx_to_sort_idx)
def get_context_representation_multiple_mentions_idxs(
sample, tokenizer, max_seq_length,
mention_key, context_key, ent_start_token, ent_end_token,
):
    '''
    Also cuts out mentions that fall beyond the context window
    ASSUMES MENTION_IDXS ARE SORTED!!!!
    Returns:
        List of mention bounds that are [inclusive, exclusive) (both bounds are made inclusive later)
    NOTE: the 2nd index of a mention bound may fall outside the max_seq_length range (handled later)
    '''
mention_idxs = sample["tokenized_mention_idxs"]
input_ids = sample["tokenized_text_ids"]
# sort mentions / entities / everything associated
# [[orig_index, [start, end]], ....] --> sort by start, then end
sort_tuples = [[i[0], i[1]] for i in sorted(enumerate(mention_idxs), key=lambda x:(x[1][0], x[1][1]))]
if [tup[1] for tup in sort_tuples] != mention_idxs:
orig_idx_to_sort_idx = {itm[0]: i for i, itm in enumerate(sort_tuples)}
assert [tup[1] for tup in sort_tuples] == sort_mentions(mention_idxs, orig_idx_to_sort_idx)
mention_idxs = [tup[1] for tup in sort_tuples]
sample['tokenized_mention_idxs'] = mention_idxs
do_sort(sample, orig_idx_to_sort_idx)
# TODO SORT EVERYTHING
# fit leftmost mention, then all of the others that can reasonably fit...
all_mention_spans_range = [mention_idxs[0][0], mention_idxs[-1][1]]
while all_mention_spans_range[1] - all_mention_spans_range[0] + 2 > max_seq_length:
if len(mention_idxs) == 1:
# don't cut further
assert mention_idxs[0][1] - mention_idxs[0][0] + 2 > max_seq_length
# truncate mention
mention_idxs[0][1] = max_seq_length + mention_idxs[0][0] - 2
else:
# cut last mention
mention_idxs = mention_idxs[:len(mention_idxs) - 1]
all_mention_spans_range = [mention_idxs[0][0], mention_idxs[-1][1]]
context_left = input_ids[:all_mention_spans_range[0]]
all_mention_tokens = input_ids[all_mention_spans_range[0]:all_mention_spans_range[1]]
context_right = input_ids[all_mention_spans_range[1]:]
left_quota = (max_seq_length - len(all_mention_tokens)) // 2 - 1
right_quota = max_seq_length - len(all_mention_tokens) - left_quota - 2
left_add = len(context_left)
right_add = len(context_right)
if left_add <= left_quota: # tokens left to add <= quota ON THE LEFT
if right_add > right_quota: # add remaining quota to right quota
right_quota += left_quota - left_add
else:
if right_add <= right_quota: # tokens left to add <= quota ON THE RIGHT
left_quota += right_quota - right_add # add remaining quota to left quota
if left_quota <= 0:
left_quota = -len(context_left) # cut entire list (context_left = [])
if right_quota <= 0:
right_quota = 0 # cut entire list (context_right = [])
input_ids_window = context_left[-left_quota:] + all_mention_tokens + context_right[:right_quota]
# shift mention_idxs
if len(input_ids) <= max_seq_length - 2:
        try:
            # no truncation should have occurred for short-enough inputs
            assert input_ids == input_ids_window
        except AssertionError:
            import pdb
            pdb.set_trace()
else:
assert input_ids != input_ids_window
cut_from_left = len(context_left) - len(context_left[-left_quota:])
if cut_from_left > 0:
# must shift mention_idxs
for c in range(len(mention_idxs)):
mention_idxs[c] = [
mention_idxs[c][0] - cut_from_left, mention_idxs[c][1] - cut_from_left,
]
    input_ids_window = [101] + input_ids_window + [102]  # 101/102: BERT [CLS]/[SEP] ids
tokens = tokenizer.convert_ids_to_tokens(input_ids_window)
# +1 for CLS token
mention_idxs = [[mention[0]+1, mention[1]+1] for mention in mention_idxs]
# input_ids = tokenizer.convert_tokens_to_ids(input_ids_window)
padding = [0] * (max_seq_length - len(input_ids_window))
input_ids_window += padding
assert len(input_ids_window) == max_seq_length
return {
"tokens": tokens,
"ids": input_ids_window,
"mention_idxs": mention_idxs,
# "pruned_ents": [1 for i in range(len(all_mentions)) if i < len(mention_idxs) else 0], # pruned last N entities, TODO change if changed
}
def get_candidate_representation(
candidate_desc,
tokenizer,
max_seq_length,
candidate_title=None,
title_tag=ENT_TITLE_TAG,
):
cls_token = tokenizer.cls_token
sep_token = tokenizer.sep_token
cand_tokens = tokenizer.tokenize(candidate_desc)
if candidate_title is not None:
title_tokens = tokenizer.tokenize(candidate_title)
cand_tokens = title_tokens + [title_tag] + cand_tokens
cand_tokens = cand_tokens[: max_seq_length - 2]
cand_tokens = [cls_token] + cand_tokens + [sep_token]
input_ids = tokenizer.convert_tokens_to_ids(cand_tokens)
padding = [0] * (max_seq_length - len(input_ids))
input_ids += padding
assert len(input_ids) == max_seq_length
return {
"tokens": cand_tokens,
"ids": [input_ids],
}
def process_mention_data(
samples,
tokenizer,
max_context_length,
max_cand_length,
silent,
mention_key="mention",
context_key="context",
label_key="label",
title_key='label_title',
ent_start_token=ENT_START_TAG,
ent_end_token=ENT_END_TAG,
title_token=ENT_TITLE_TAG,
debug=False,
logger=None,
add_mention_bounds=True,
saved_context_dir=None,
candidate_token_ids=None,
params=None,
):
'''
Returns /inclusive/ bounds
'''
extra_ret_values = {}
if saved_context_dir is not None and os.path.exists(os.path.join(saved_context_dir, "tensor_tuple.pt")):
data = torch.load(os.path.join(saved_context_dir, "data.pt"))
tensor_data_tuple = torch.load(os.path.join(saved_context_dir, "tensor_tuple.pt"))
return data, tensor_data_tuple, extra_ret_values
if candidate_token_ids is None and not debug:
candidate_token_ids = torch.load(params["cand_token_ids_path"])
if logger: logger.info("Loaded saved entities info")
extra_ret_values["candidate_token_ids"] = candidate_token_ids
processed_samples = []
if debug:
samples = samples[:200]
if silent:
iter_ = samples
else:
iter_ = tqdm(samples)
use_world = True
ent_start_id = tokenizer.convert_tokens_to_ids(ent_start_token)
ent_end_id = tokenizer.convert_tokens_to_ids(ent_end_token)
cls_token_id = tokenizer.convert_tokens_to_ids("[CLS]")
sep_token_id = tokenizer.convert_tokens_to_ids("[SEP]")
for idx, sample in enumerate(iter_):
assert not add_mention_bounds, "Adding mention bounds, but we have multiple entities per example"
if context_key + "_left" in sample:
context_tokens = get_context_representation_multiple_mentions_left_right(
sample, tokenizer, max_context_length,
mention_key, context_key, ent_start_token, ent_end_token,
)
else:
context_tokens = get_context_representation_multiple_mentions_idxs(
sample, tokenizer, max_context_length,
mention_key, context_key, ent_start_token, ent_end_token,
)
for i in range(len(context_tokens["mention_idxs"])):
context_tokens["mention_idxs"][i][1] -= 1 # make bounds inclusive
label = sample[label_key]
title = sample.get(title_key)
label_ids = sample.get("label_id")
if label is None:
label = [None]
label_ids = [label_ids]
# remove those that got pruned off
if len(label) > len(context_tokens['mention_idxs']):
label = label[:len(context_tokens['mention_idxs'])]
label_ids = sample["label_id"][:len(context_tokens['mention_idxs'])]
if candidate_token_ids is not None:
token_ids = [[candidate_token_ids[label_id].tolist()] for label_id in label_ids]
label_tokens = {
"tokens": "",
"ids": token_ids,
}
elif not params["freeze_cand_enc"]:
label_tokens = [get_candidate_representation(
l, tokenizer, max_cand_length, title[i],
) for i, l in enumerate(label)]
label_tokens = {
k: [label_tokens[l][k] for l in range(len(label_tokens))]
for k in label_tokens[0]}
else:
label_tokens = None
if isinstance(sample["label_id"], list):
# multiple candidates
if len(sample["label_id"]) > len(context_tokens['mention_idxs']):
sample["label_id"] = sample["label_id"][:len(context_tokens['mention_idxs'])]
label_idx = [int(id) for id in sample["label_id"]]
else:
assert isinstance(sample["label_id"], int) or isinstance(sample["label_id"], str)
label_idx = int(sample["label_id"])
record = {
"context": context_tokens,
}
if not params["freeze_cand_enc"]:
record["label"] = label_tokens
record["label_idx"] = label_idx
if "world" in sample:
src = sample["world"]
src = world_to_id[src]
record["src"] = [src]
use_world = True
else:
use_world = False
processed_samples.append(record)
if debug and logger:
logger.info("====Processed samples: ====")
for sample in processed_samples[:5]:
logger.info("Context tokens : " + " ".join(sample["context"]["tokens"]))
logger.info(
"Context ids : " + " ".join([str(v) for v in sample["context"]["ids"]])
)
if not params["freeze_cand_encs"]:
logger.info("Label tokens : " + " ".join(sample["label"]["tokens"]))
logger.info(
"Label ids : " + " ".join([str(v) for v in sample["label"]["ids"]])
)
logger.info("Label_id : %d" % sample["label_idx"])
if use_world:
logger.info("Src : %d" % sample["src"][0])
context_vecs = torch.tensor(
select_field(processed_samples, "context", "ids"), dtype=torch.long,
)
if logger:
logger.info("Created context IDs vector")
if isinstance(processed_samples[0]["context"]["mention_idxs"][0], int):
mention_idx_vecs = torch.tensor(
select_field(processed_samples, "context", "mention_idxs"), dtype=torch.long,
).unsqueeze(1)
mention_idx_mask = torch.ones(mention_idx_vecs.size(0), dtype=torch.bool).unsqueeze(-1)
if logger:
logger.info("Created mention positions vector")
if not params["freeze_cand_enc"]:
cand_vecs = torch.tensor(
select_field(processed_samples, "label", "ids"), dtype=torch.long,
)
if logger:
logger.info("Created candidate IDs vector")
label_idx = torch.tensor(
select_field(processed_samples, "label_idx"), dtype=torch.long,
).unsqueeze(-1)
if logger:
logger.info("Created label IDXs vector")
else:
mention_idx_vecs, mention_idx_mask = select_field_with_padding(
processed_samples, "context", "mention_idxs", pad_idx=[0,1], #ensure is a well-formed span
)
# (bs, max_num_spans, 2)
mention_idx_vecs = torch.tensor(mention_idx_vecs, dtype=torch.long)
# (bs, max_num_spans)
mention_idx_mask = torch.tensor(mention_idx_mask, dtype=torch.bool)
if not params["freeze_cand_enc"]:
cand_vecs, cand_mask = select_field_with_padding(
processed_samples, "label", "ids", pad_idx=[[0 for _ in range(max_cand_length)]],
)
# (bs, max_num_spans, 1, max_cand_length)
cand_vecs = torch.tensor(cand_vecs, dtype=torch.long)
cand_mask = torch.tensor(cand_mask, dtype=torch.bool)
assert (cand_mask == mention_idx_mask).all() or cand_mask.all()
if logger:
logger.info("Created candidate IDs vector")
else:
cand_vecs = torch.Tensor(context_vecs.size())
label_idx_vecs, label_idx_mask = select_field_with_padding(processed_samples, "label_idx", pad_idx=-1)
# (bs, max_num_spans)
label_idx = torch.tensor(label_idx_vecs, dtype=torch.long)
label_idx_mask = torch.tensor(label_idx_mask, dtype=torch.bool)
assert (label_idx_mask == mention_idx_mask).all() or label_idx_mask.all()
if logger:
logger.info("Created label IDXs vector")
# mention_idx_vecs: (bs, max_num_spans, 2), mention_idx_mask: (bs, max_num_spans)
assert len(mention_idx_vecs.size()) == 3
# prune mention_idx_vecs to max_context_length
mention_idx_vecs[mention_idx_vecs >= max_context_length] = (max_context_length - 1)
if use_world:
src_vecs = torch.tensor(
select_field(processed_samples, "src"), dtype=torch.long,
)
if logger:
logger.info("Created source vector")
data = {
"context_vecs": context_vecs,
"mention_idx_vecs": mention_idx_vecs,
"cand_vecs": cand_vecs,
"label_idx": label_idx,
}
if use_world:
data["src"] = src_vecs
tensor_data_tuple = (context_vecs, cand_vecs, src_vecs, label_idx, mention_idx_vecs, mention_idx_mask)
else:
tensor_data_tuple = (context_vecs, cand_vecs, label_idx, mention_idx_vecs, mention_idx_mask)
# save data
if saved_context_dir is not None and not os.path.exists(os.path.join(saved_context_dir, "tensor_tuple.pt")):
os.makedirs(saved_context_dir, exist_ok=True)
torch.save(data, os.path.join(saved_context_dir, "data.pt"))
torch.save(tensor_data_tuple, os.path.join(saved_context_dir, "tensor_tuple.pt"))
return data, tensor_data_tuple, extra_ret_values
|
BLINK-main
|
elq/biencoder/data_process.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import numpy as np
def batch_reshape_mask_left(
input_t, selected, pad_idx=0, left_align_mask=None
):
"""
Left-aligns all ``selected" values in input_t, which is a batch of examples.
- input_t: >=2D tensor (N, M, *)
- selected: 2D torch.Bool tensor, 2 dims same size as first 2 dims of `input_t` (N, M)
- pad_idx represents the padding to be used in the output
- left_align_mask: if already precomputed, pass the alignment mask in
(mask on the output, corresponding to `selected` on the input)
Example:
input_t = [[1,2,3,4],[5,6,7,8]]
selected = [[0,1,0,1],[1,1,0,1]]
output = [[2,4,0],[5,6,8]]
"""
batch_num_selected = selected.sum(1)
max_num_selected = batch_num_selected.max()
# (bsz, 2)
repeat_freqs = torch.stack([batch_num_selected, max_num_selected - batch_num_selected], dim=-1)
# (bsz x 2,)
repeat_freqs = repeat_freqs.view(-1)
if left_align_mask is None:
# (bsz, 2)
left_align_mask = torch.zeros(input_t.size(0), 2).to(input_t.device).bool()
left_align_mask[:,0] = 1
# (bsz x 2,): [1,0,1,0,...]
left_align_mask = left_align_mask.view(-1)
        # (bsz x max_num_selected,): [1 x repeat_freqs[0], 0 x (M - repeat_freqs[0]), 1 x repeat_freqs[1], 0 x (M - repeat_freqs[1]), ...]
left_align_mask = left_align_mask.repeat_interleave(repeat_freqs)
# (bsz, max_num_selected)
left_align_mask = left_align_mask.view(-1, max_num_selected)
# reshape to (bsz, max_num_selected, *)
input_reshape = torch.Tensor(left_align_mask.size() + input_t.size()[2:]).to(input_t.device, input_t.dtype).fill_(pad_idx)
input_reshape[left_align_mask] = input_t[selected]
# (bsz, max_num_selected, *); (bsz, max_num_selected)
return input_reshape, left_align_mask
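# --- Illustrative usage sketch (not part of the original file) ---
# Reproduces the example from the docstring above.
def _demo_batch_reshape_mask_left():
    input_t = torch.tensor([[1, 2, 3, 4], [5, 6, 7, 8]])
    selected = torch.tensor([[0, 1, 0, 1], [1, 1, 0, 1]]).bool()
    output, left_align_mask = batch_reshape_mask_left(input_t, selected)
    assert output.tolist() == [[2, 4, 0], [5, 6, 8]]
    assert left_align_mask.tolist() == [[True, True, False], [True, True, True]]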
|
BLINK-main
|
elq/biencoder/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import argparse
import faiss
import pickle
import torch
import json
import sys
import io
import random
import time
import traceback
import numpy as np
from scipy.special import softmax, expit
import torch.nn.functional as F
from multiprocessing.pool import ThreadPool
from tqdm import tqdm, trange
from collections import OrderedDict
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from pytorch_transformers.file_utils import PYTORCH_PRETRAINED_BERT_CACHE
from pytorch_transformers.optimization import WarmupLinearSchedule
from pytorch_transformers.tokenization_bert import BertTokenizer
from elq.biencoder.biencoder import BiEncoderRanker
from elq.vcg_utils.measures import entity_linking_tp_with_overlap
import logging
import elq.candidate_ranking.utils as utils
from elq.biencoder.data_process import process_mention_data
from blink.biencoder.zeshel_utils import DOC_PATH, WORLDS, world_to_id
from blink.common.optimizer import get_bert_optimizer
from elq.common.params import ElqParser
from elq.index.faiss_indexer import DenseFlatIndexer, DenseHNSWFlatIndexer, DenseIVFFlatIndexer
logger = None
np.random.seed(1234) # reproducible for FAISS indexer
def evaluate(
reranker, eval_dataloader, params, device, logger,
cand_encs=None, faiss_index=None,
get_losses=False,
):
reranker.model.eval()
if params["silent"]:
iter_ = eval_dataloader
else:
iter_ = tqdm(eval_dataloader, desc="Evaluation")
results = {}
eval_num_correct = 0.0
eval_num_p = 0.0
eval_num_g = 0.0
nb_eval_examples = 0
nb_eval_steps = 0
overall_loss = 0.0
if cand_encs is not None and not params["freeze_cand_enc"]:
torch.cuda.empty_cache()
cand_encs = cand_encs.to(device)
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input = batch[0]
candidate_input = batch[1]
# (bs, num_actual_spans)
label_ids = batch[2].cpu().numpy() if params["freeze_cand_enc"] else None
if params["debug"] and label_ids is not None:
label_ids[label_ids > 199] = 199
mention_idx = batch[-2].cpu().numpy()
mention_idx_mask = batch[-1].cpu().numpy()
with torch.no_grad():
# evaluate with joint mention detection
if params["freeze_cand_enc"]:
context_outs = reranker.encode_context(
context_input,
num_cand_mentions=50,
topK_threshold=-3.5,
)
embedding_context = context_outs['mention_reps'].cpu().numpy()
pred_mention_mask = context_outs['mention_masks'].cpu().numpy()
chosen_mention_bounds = context_outs['mention_bounds'].cpu().numpy()
embedding_ctxt = embedding_context[pred_mention_mask]
# do faiss search for closest entity
# DIM (all_pred_mentions_batch, num_cand_entities); (all_pred_mentions_batch, num_cand_entities)
top_cand_logits_shape, top_cand_indices_shape = faiss_index.search_knn(embedding_ctxt, 10)
                # use builtin float/int (np.float / np.int were removed in NumPy >= 1.24)
                top_cand_logits = np.zeros((pred_mention_mask.shape[0], pred_mention_mask.shape[1], 10), dtype=float)
                top_cand_indices = np.zeros_like(pred_mention_mask, dtype=int)
top_cand_logits[pred_mention_mask] = top_cand_logits_shape
top_cand_indices[pred_mention_mask] = top_cand_indices_shape[:,0]
scores = (np.log(softmax(top_cand_logits, -1)) + torch.sigmoid(context_outs['mention_logits'].unsqueeze(-1)).log().cpu().numpy())[:,:,0]
tmp_num_correct = 0.0
tmp_num_p = 0.0
tmp_num_g = 0.0
for i, ex in enumerate(top_cand_indices):
gold_mb = mention_idx[i][mention_idx_mask[i]]
gold_label_ids = label_ids[i][mention_idx_mask[i]]
overall_score_mask = scores[i][pred_mention_mask[i]] > -2.5
pred_mb = chosen_mention_bounds[i][pred_mention_mask[i]][overall_score_mask]
pred_label_ids = ex[pred_mention_mask[i]][overall_score_mask]
gold_triples = [(str(gold_label_ids[j]), gold_mb[j][0], gold_mb[j][1]) for j in range(len(gold_mb))]
pred_triples = [(str(pred_label_ids[j]), pred_mb[j][0], pred_mb[j][1]) for j in range(len(pred_mb))]
num_overlap_weak, _ = entity_linking_tp_with_overlap(gold_triples, pred_triples)
tmp_num_correct += num_overlap_weak
tmp_num_p += float(len(pred_triples))
tmp_num_g += float(len(gold_triples))
text_encs = embedding_context
else:
loss, logits, mention_logits, mention_bounds = reranker(
context_input, candidate_input,
cand_encs=cand_encs,
gold_mention_bounds=batch[-2],
gold_mention_bounds_mask=batch[-1],
return_loss=True,
)
logits = logits.cpu().numpy()
# Using in-batch negatives, the label ids are diagonal
label_ids = torch.LongTensor(torch.arange(logits.shape[0]))
label_ids = label_ids.cpu().numpy()
tmp_num_correct = utils.accuracy(logits, label_ids)
tmp_num_p = len(batch[-2][batch[-1]])
tmp_num_g = len(batch[-2][batch[-1]])
overall_loss += loss
eval_num_correct += tmp_num_correct
eval_num_p += tmp_num_p
eval_num_g += tmp_num_g
nb_eval_steps += 1
if cand_encs is not None:
cand_encs = cand_encs.to("cpu")
torch.cuda.empty_cache()
if nb_eval_steps > 0 and overall_loss > 0:
normalized_overall_loss = overall_loss / nb_eval_steps
logger.info("Overall loss: %.5f" % normalized_overall_loss)
if eval_num_p > 0:
normalized_eval_p = eval_num_correct / eval_num_p
else:
normalized_eval_p = 0.0
if eval_num_g > 0:
normalized_eval_r = eval_num_correct / eval_num_g
else:
normalized_eval_r = 0.0
logger.info("Precision: %.5f" % normalized_eval_p)
logger.info("Recall: %.5f" % normalized_eval_r)
if normalized_eval_p + normalized_eval_r == 0:
f1 = 0
else:
f1 = 2 * normalized_eval_p * normalized_eval_r / (normalized_eval_p + normalized_eval_r)
logger.info("F1: %.5f" % f1)
results["normalized_f1"] = f1
return results
def get_optimizer(model, params):
return get_bert_optimizer(
[model],
params["type_optimization"],
params["learning_rate"],
fp16=params.get("fp16"),
)
def get_scheduler(params, optimizer, len_train_data, logger):
batch_size = params["train_batch_size"]
grad_acc = params["gradient_accumulation_steps"]
epochs = params["num_train_epochs"]
num_train_steps = int(len_train_data / batch_size / grad_acc) * epochs
num_warmup_steps = int(num_train_steps * params["warmup_proportion"])
scheduler = WarmupLinearSchedule(
optimizer, warmup_steps=num_warmup_steps, t_total=num_train_steps,
)
logger.info(" Num optimization steps = %d" % num_train_steps)
logger.info(" Num warmup steps = %d", num_warmup_steps)
return scheduler
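# --- Illustrative arithmetic sketch (not part of the original file) ---
# e.g. 10,000 training samples, batch size 32, gradient accumulation 2,
# 5 epochs, warmup_proportion 0.1:
#   num_train_steps  = int(10000 / 32 / 2) * 5 = 156 * 5 = 780
#   num_warmup_steps = int(780 * 0.1)          = 78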
def main(params):
model_output_path = params["output_path"]
if not os.path.exists(model_output_path):
os.makedirs(model_output_path)
logger = utils.get_logger(params["output_path"])
# Init model
reranker = BiEncoderRanker(params)
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
n_gpu = reranker.n_gpu
if params["gradient_accumulation_steps"] < 1:
raise ValueError(
"Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
params["gradient_accumulation_steps"]
)
)
    # To reach an effective batch size of `x` while accumulating gradients across `y` batches, each forward pass uses a batch size of `z = x / y` (e.g. x=64, y=4 -> z=16)
params["train_batch_size"] = (
params["train_batch_size"] // params["gradient_accumulation_steps"]
)
train_batch_size = params["train_batch_size"]
eval_batch_size = params["eval_batch_size"]
grad_acc_steps = params["gradient_accumulation_steps"]
# Fix the random seeds
seed = params["seed"]
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if reranker.n_gpu > 0:
torch.cuda.manual_seed_all(seed)
# Load train data
train_samples = utils.read_dataset("train", params["data_path"])
logger.info("Read %d train samples." % len(train_samples))
logger.info("Finished reading all train samples")
# Load eval data
try:
valid_samples = utils.read_dataset("valid", params["data_path"])
except FileNotFoundError:
valid_samples = utils.read_dataset("dev", params["data_path"])
    # MUST BE DIVISIBLE BY n_gpus
if len(valid_samples) > 1024:
valid_subset = 1024
else:
valid_subset = len(valid_samples) - len(valid_samples) % torch.cuda.device_count()
logger.info("Read %d valid samples, choosing %d subset" % (len(valid_samples), valid_subset))
valid_data, valid_tensor_data, extra_ret_values = process_mention_data(
samples=valid_samples[:valid_subset], # use subset of valid data
tokenizer=tokenizer,
max_context_length=params["max_context_length"],
max_cand_length=params["max_cand_length"],
context_key=params["context_key"],
title_key=params["title_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
add_mention_bounds=(not args.no_mention_bounds),
candidate_token_ids=None,
params=params,
)
candidate_token_ids = extra_ret_values["candidate_token_ids"]
valid_tensor_data = TensorDataset(*valid_tensor_data)
valid_sampler = SequentialSampler(valid_tensor_data)
valid_dataloader = DataLoader(
valid_tensor_data, sampler=valid_sampler, batch_size=eval_batch_size
)
# load candidate encodings
cand_encs = None
cand_encs_index = None
if params["freeze_cand_enc"]:
cand_encs = torch.load(params['cand_enc_path'])
logger.info("Loaded saved entity encodings")
if params["debug"]:
cand_encs = cand_encs[:200]
# build FAISS index
cand_encs_index = DenseHNSWFlatIndexer(1)
cand_encs_index.deserialize_from(params['index_path'])
logger.info("Loaded FAISS index on entity encodings")
num_neighbors = 10
# evaluate before training
results = evaluate(
reranker, valid_dataloader, params,
cand_encs=cand_encs, device=device,
logger=logger, faiss_index=cand_encs_index,
)
number_of_samples_per_dataset = {}
time_start = time.time()
utils.write_to_file(
os.path.join(model_output_path, "training_params.txt"), str(params)
)
logger.info("Starting training")
logger.info(
"device: {} n_gpu: {}, distributed training: {}".format(device, n_gpu, False)
)
num_train_epochs = params["num_train_epochs"]
if params["dont_distribute_train_samples"]:
num_samples_per_batch = len(train_samples)
train_data, train_tensor_data_tuple, extra_ret_values = process_mention_data(
samples=train_samples,
tokenizer=tokenizer,
max_context_length=params["max_context_length"],
max_cand_length=params["max_cand_length"],
context_key=params["context_key"],
title_key=params["title_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
add_mention_bounds=(not args.no_mention_bounds),
candidate_token_ids=candidate_token_ids,
params=params,
)
logger.info("Finished preparing training data")
else:
num_samples_per_batch = len(train_samples) // num_train_epochs
trainer_path = params.get("path_to_trainer_state", None)
optimizer = get_optimizer(model, params)
scheduler = get_scheduler(
params, optimizer, num_samples_per_batch,
logger
)
if trainer_path is not None and os.path.exists(trainer_path):
training_state = torch.load(trainer_path)
optimizer.load_state_dict(training_state["optimizer"])
scheduler.load_state_dict(training_state["scheduler"])
logger.info("Loaded saved training state")
model.train()
best_epoch_idx = -1
best_score = -1
logger.info("Num samples per batch : %d" % num_samples_per_batch)
for epoch_idx in trange(params["last_epoch"] + 1, int(num_train_epochs), desc="Epoch"):
tr_loss = 0
results = None
if not params["dont_distribute_train_samples"]:
start_idx = epoch_idx * num_samples_per_batch
end_idx = (epoch_idx + 1) * num_samples_per_batch
train_data, train_tensor_data_tuple, extra_ret_values = process_mention_data(
samples=train_samples[start_idx:end_idx],
tokenizer=tokenizer,
max_context_length=params["max_context_length"],
max_cand_length=params["max_cand_length"],
context_key=params["context_key"],
title_key=params["title_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
add_mention_bounds=(not args.no_mention_bounds),
candidate_token_ids=candidate_token_ids,
params=params,
)
logger.info("Finished preparing training data for epoch {}: {} samples".format(epoch_idx, len(train_tensor_data_tuple[0])))
batch_train_tensor_data = TensorDataset(
*list(train_tensor_data_tuple)
)
if params["shuffle"]:
train_sampler = RandomSampler(batch_train_tensor_data)
else:
train_sampler = SequentialSampler(batch_train_tensor_data)
train_dataloader = DataLoader(
batch_train_tensor_data, sampler=train_sampler, batch_size=train_batch_size
)
if params["silent"]:
iter_ = train_dataloader
else:
iter_ = tqdm(train_dataloader, desc="Batch")
for step, batch in enumerate(iter_):
batch = tuple(t.to(device) for t in batch)
context_input = batch[0]
candidate_input = batch[1]
label_ids = batch[2] if params["freeze_cand_enc"] else None
mention_idxs = batch[-2]
mention_idx_mask = batch[-1]
if params["debug"] and label_ids is not None:
label_ids[label_ids > 199] = 199
cand_encs_input = None
label_input = None
mention_reps_input = None
mention_logits = None
mention_bounds = None
hard_negs_mask = None
if params["adversarial_training"]:
assert cand_encs is not None and label_ids is not None # due to params["freeze_cand_enc"] being set
'''
GET CLOSEST N CANDIDATES (AND APPROPRIATE LABELS)
'''
# (bs, num_spans, embed_size)
pos_cand_encs_input = cand_encs[label_ids.to("cpu")]
pos_cand_encs_input[~mention_idx_mask] = 0
context_outs = reranker.encode_context(
context_input, gold_mention_bounds=mention_idxs,
gold_mention_bounds_mask=mention_idx_mask,
get_mention_scores=True,
)
mention_logits = context_outs['all_mention_logits']
mention_bounds = context_outs['all_mention_bounds']
mention_reps = context_outs['mention_reps']
# mention_reps: (bs, max_num_spans, embed_size) -> masked_mention_reps: (all_pred_mentions_batch, embed_size)
masked_mention_reps = mention_reps[context_outs['mention_masks']]
# neg_cand_encs_input_idxs: (all_pred_mentions_batch, num_negatives)
_, neg_cand_encs_input_idxs = cand_encs_index.search_knn(masked_mention_reps.detach().cpu().numpy(), num_neighbors)
neg_cand_encs_input_idxs = torch.from_numpy(neg_cand_encs_input_idxs)
# set "correct" closest entities to -1
# masked_label_ids: (all_pred_mentions_batch)
masked_label_ids = label_ids[mention_idx_mask]
# neg_cand_encs_input_idxs: (max_spans_in_batch, num_negatives)
neg_cand_encs_input_idxs[neg_cand_encs_input_idxs - masked_label_ids.to("cpu").unsqueeze(-1) == 0] = -1
# reshape back tensor (extract num_spans dimension)
# (bs, num_spans, num_negatives)
neg_cand_encs_input_idxs_reconstruct = torch.zeros(label_ids.size(0), label_ids.size(1), neg_cand_encs_input_idxs.size(-1), dtype=neg_cand_encs_input_idxs.dtype)
neg_cand_encs_input_idxs_reconstruct[mention_idx_mask] = neg_cand_encs_input_idxs
neg_cand_encs_input_idxs = neg_cand_encs_input_idxs_reconstruct
# create neg_example_idx (corresponding example (in batch) for each negative)
# neg_example_idx: (bs * num_negatives)
neg_example_idx = torch.arange(neg_cand_encs_input_idxs.size(0)).unsqueeze(-1)
neg_example_idx = neg_example_idx.expand(neg_cand_encs_input_idxs.size(0), neg_cand_encs_input_idxs.size(2))
neg_example_idx = neg_example_idx.flatten()
# flatten and filter -1 (i.e. any correct/positive entities)
# neg_cand_encs_input_idxs: (bs * num_negatives, num_spans)
neg_cand_encs_input_idxs = neg_cand_encs_input_idxs.permute(0,2,1)
neg_cand_encs_input_idxs = neg_cand_encs_input_idxs.reshape(-1, neg_cand_encs_input_idxs.size(-1))
# mask invalid negatives (actually the positive example)
# (bs * num_negatives)
mask = ~((neg_cand_encs_input_idxs == -1).sum(1).bool()) # rows without any -1 entry
# deletes corresponding negative for *all* spans in that example (deletes at most 3 of 10 negatives / example)
# neg_cand_encs_input_idxs: (bs * num_negatives - invalid_negs, num_spans)
neg_cand_encs_input_idxs = neg_cand_encs_input_idxs[mask]
# neg_cand_encs_input_idxs: (bs * num_negatives - invalid_negs)
neg_example_idx = neg_example_idx[mask]
# (bs * num_negatives - invalid_negs, num_spans, embed_size)
neg_cand_encs_input = cand_encs[neg_cand_encs_input_idxs]
# (bs * num_negatives - invalid_negs, num_spans, embed_size)
neg_mention_idx_mask = mention_idx_mask[neg_example_idx]
neg_cand_encs_input[~neg_mention_idx_mask] = 0
# create input tensors (concat [pos examples, neg examples])
# (bs + bs * num_negatives, num_spans, embed_size)
mention_reps_input = torch.cat([
mention_reps, mention_reps[neg_example_idx.to(device)],
])
assert mention_reps.size(0) == pos_cand_encs_input.size(0)
# (bs + bs * num_negatives, num_spans)
label_input = torch.cat([
torch.ones(pos_cand_encs_input.size(0), pos_cand_encs_input.size(1), dtype=label_ids.dtype),
torch.zeros(neg_cand_encs_input.size(0), neg_cand_encs_input.size(1), dtype=label_ids.dtype),
]).to(device)
# (bs + bs * num_negatives, num_spans, embed_size)
cand_encs_input = torch.cat([
pos_cand_encs_input, neg_cand_encs_input,
]).to(device)
hard_negs_mask = torch.cat([mention_idx_mask, neg_mention_idx_mask])
loss, _, _, _ = reranker(
context_input, candidate_input,
cand_encs=cand_encs_input, text_encs=mention_reps_input,
mention_logits=mention_logits, mention_bounds=mention_bounds,
label_input=label_input, gold_mention_bounds=mention_idxs,
gold_mention_bounds_mask=mention_idx_mask,
hard_negs_mask=hard_negs_mask,
return_loss=True,
)
if grad_acc_steps > 1:
loss = loss / grad_acc_steps
tr_loss += loss.item()
if (step + 1) % (params["print_interval"] * grad_acc_steps) == 0:
logger.info(
"Step {} - epoch {} average loss: {}\n".format(
step,
epoch_idx,
tr_loss / (params["print_interval"] * grad_acc_steps),
)
)
tr_loss = 0
loss.backward()
if (step + 1) % grad_acc_steps == 0:
torch.nn.utils.clip_grad_norm_(
model.parameters(), params["max_grad_norm"]
)
optimizer.step()
scheduler.step()
optimizer.zero_grad()
if (step + 1) % (params["eval_interval"] * grad_acc_steps) == 0:
logger.info("Evaluation on the development dataset")
loss = None # for GPU mem management
mention_reps = None
mention_reps_input = None
label_input = None
cand_encs_input = None
evaluate(
reranker, valid_dataloader, params,
cand_encs=cand_encs, device=device,
logger=logger, faiss_index=cand_encs_index,
get_losses=params["get_losses"],
)
model.train()
logger.info("\n")
logger.info("***** Saving fine - tuned model *****")
epoch_output_folder_path = os.path.join(
model_output_path, "epoch_{}".format(epoch_idx)
)
utils.save_model(model, tokenizer, epoch_output_folder_path)
torch.save({
"optimizer": optimizer.state_dict(),
"scheduler": scheduler.state_dict(),
}, os.path.join(epoch_output_folder_path, "training_state.th"))
output_eval_file = os.path.join(epoch_output_folder_path, "eval_results.txt")
logger.info("Valid data evaluation")
results = evaluate(
reranker, valid_dataloader, params,
cand_encs=cand_encs, device=device,
logger=logger, faiss_index=cand_encs_index,
get_losses=params["get_losses"],
)
logger.info("Train data evaluation")
results = evaluate(
reranker, train_dataloader, params,
cand_encs=cand_encs, device=device,
logger=logger, faiss_index=cand_encs_index,
get_losses=params["get_losses"],
)
ls = [best_score, results["normalized_f1"]]
li = [best_epoch_idx, epoch_idx]
best_score = ls[np.argmax(ls)]
best_epoch_idx = li[np.argmax(ls)]
logger.info("\n")
execution_time = (time.time() - time_start) / 60
utils.write_to_file(
os.path.join(model_output_path, "training_time.txt"),
"The training took {} minutes\n".format(execution_time),
)
logger.info("The training took {} minutes\n".format(execution_time))
# save the best model in the parent_dir
logger.info("Best performance in epoch: {}".format(best_epoch_idx))
params["path_to_model"] = os.path.join(
model_output_path, "epoch_{}".format(best_epoch_idx)
)
utils.save_model(reranker.model, tokenizer, model_output_path)
if params["evaluate"]:
params["path_to_model"] = model_output_path
        evaluate(
            reranker, valid_dataloader, params, device=device,
            logger=logger, cand_encs=cand_encs, faiss_index=cand_encs_index,
        )
if __name__ == "__main__":
parser = ElqParser(add_model_args=True)
parser.add_training_args()
args = parser.parse_args()
print(args)
params = args.__dict__
main(params)
|
BLINK-main
|
elq/biencoder/train_biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from tqdm import tqdm
from collections import OrderedDict
from pytorch_transformers.modeling_bert import (
BertPreTrainedModel,
BertConfig,
BertModel,
)
from pytorch_transformers.tokenization_bert import BertTokenizer
from elq.common.ranker_base import BertEncoder, get_model_obj
from blink.common.optimizer import get_bert_optimizer
from elq.biencoder.allennlp_span_utils import batched_span_select, batched_index_select
from elq.biencoder.utils import batch_reshape_mask_left
def load_biencoder(params):
# Init model
biencoder = BiEncoderRanker(params)
return biencoder
def get_submodel_from_state_dict(state_dict, prefix):
# get only submodel specified with prefix 'prefix' from the state_dict
new_state_dict = OrderedDict()
for key, value in state_dict.items():
if key.startswith(prefix):
key = key[len(prefix)+1:] # +1 for '.'
new_state_dict[key] = value
return new_state_dict
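# --- Illustrative usage sketch (not part of the original file) ---
# Strips the given prefix (plus the following '.') so the keys line up with
# the submodel's own state_dict.
def _demo_get_submodel_from_state_dict():
    state_dict = OrderedDict([("encoder.layer.weight", 1), ("head.weight", 2)])
    sub = get_submodel_from_state_dict(state_dict, "encoder")
    assert list(sub.keys()) == ["layer.weight"]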
class MentionScoresHead(nn.Module):
def __init__(
self, bert_output_dim, scoring_method="qa_linear", max_mention_length=10,
):
super(MentionScoresHead, self).__init__()
self.scoring_method = scoring_method
self.max_mention_length = max_mention_length
if self.scoring_method == "qa_linear":
self.bound_classifier = nn.Linear(bert_output_dim, 3)
elif self.scoring_method == "qa_mlp" or self.scoring_method == "qa": # for back-compatibility
self.bound_classifier = nn.Sequential(
nn.Linear(bert_output_dim, bert_output_dim),
nn.ReLU(),
nn.Dropout(0.1),
nn.Linear(bert_output_dim, 3),
)
else:
raise NotImplementedError()
def forward(self, bert_output, mask_ctxt):
'''
Retuns scores for *inclusive* mention boundaries
'''
# (bs, seqlen, 3)
logits = self.bound_classifier(bert_output)
if self.scoring_method[:2] == "qa":
# (bs, seqlen, 1); (bs, seqlen, 1); (bs, seqlen, 1)
start_logprobs, end_logprobs, mention_logprobs = logits.split(1, dim=-1)
# (bs, seqlen)
start_logprobs = start_logprobs.squeeze(-1)
end_logprobs = end_logprobs.squeeze(-1)
mention_logprobs = mention_logprobs.squeeze(-1)
# impossible to choose masked tokens as starts/ends of spans
start_logprobs[~mask_ctxt] = -float("Inf")
end_logprobs[~mask_ctxt] = -float("Inf")
mention_logprobs[~mask_ctxt] = -float("Inf")
# take sum of log softmaxes:
# log p(mention) = log p(start_pos && end_pos) = log p(start_pos) + log p(end_pos)
# DIM: (bs, starts, ends)
mention_scores = start_logprobs.unsqueeze(2) + end_logprobs.unsqueeze(1)
# (bs, starts, ends)
mention_cum_scores = torch.zeros(mention_scores.size(), dtype=mention_scores.dtype).to(mention_scores.device)
# add ends
mention_logprobs_end_cumsum = torch.zeros(mask_ctxt.size(0), dtype=mention_scores.dtype).to(mention_scores.device)
for i in range(mask_ctxt.size(1)):
mention_logprobs_end_cumsum += mention_logprobs[:,i]
mention_cum_scores[:,:,i] += mention_logprobs_end_cumsum.unsqueeze(-1)
# subtract starts
mention_logprobs_start_cumsum = torch.zeros(mask_ctxt.size(0), dtype=mention_scores.dtype).to(mention_scores.device)
for i in range(mask_ctxt.size(1)-1):
mention_logprobs_start_cumsum += mention_logprobs[:,i]
mention_cum_scores[:,(i+1),:] -= mention_logprobs_start_cumsum.unsqueeze(-1)
# DIM: (bs, starts, ends)
mention_scores += mention_cum_scores
# DIM: (starts, ends, 2) -- tuples of [start_idx, end_idx]
mention_bounds = torch.stack([
torch.arange(mention_scores.size(1)).unsqueeze(-1).expand(mention_scores.size(1), mention_scores.size(2)), # start idxs
torch.arange(mention_scores.size(1)).unsqueeze(0).expand(mention_scores.size(1), mention_scores.size(2)), # end idxs
], dim=-1).to(mask_ctxt.device)
# DIM: (starts, ends)
mention_sizes = mention_bounds[:,:,1] - mention_bounds[:,:,0] + 1 # (+1 as ends are inclusive)
# Remove invalids (startpos > endpos, endpos > seqlen) and renormalize
# DIM: (bs, starts, ends)
valid_mask = (mention_sizes.unsqueeze(0) > 0) & mask_ctxt.unsqueeze(1)
# DIM: (bs, starts, ends)
mention_scores[~valid_mask] = -float("inf") # invalids have logprob=-inf (p=0)
# DIM: (bs, starts * ends)
mention_scores = mention_scores.view(mention_scores.size(0), -1)
# DIM: (bs, starts * ends, 2)
mention_bounds = mention_bounds.view(-1, 2)
mention_bounds = mention_bounds.unsqueeze(0).expand(mention_scores.size(0), mention_scores.size(1), 2)
if self.max_mention_length is not None:
mention_scores, mention_bounds = self.filter_by_mention_size(
mention_scores, mention_bounds, self.max_mention_length,
)
return mention_scores, mention_bounds
def filter_by_mention_size(self, mention_scores, mention_bounds, max_mention_length):
'''
Filter all mentions > maximum mention length
mention_scores: torch.FloatTensor (bsz, num_mentions)
mention_bounds: torch.LongTensor (bsz, num_mentions, 2)
'''
# (bsz, num_mentions)
mention_bounds_mask = (mention_bounds[:,:,1] - mention_bounds[:,:,0] <= max_mention_length)
# (bsz, num_filtered_mentions)
mention_scores = mention_scores[mention_bounds_mask]
mention_scores = mention_scores.view(mention_bounds_mask.size(0),-1)
# (bsz, num_filtered_mentions, 2)
mention_bounds = mention_bounds[mention_bounds_mask]
mention_bounds = mention_bounds.view(mention_bounds_mask.size(0),-1,2)
return mention_scores, mention_bounds
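# --- Illustrative note (not part of the original file) ---
# The cumulative sums in MentionScoresHead.forward implement, for an
# inclusive span [s, e]:
#   score(s, e) = start_logprob[s] + end_logprob[e] + sum_{i=s..e} mention_logprob[i]
# Adding the cumulative mention logprobs up to e and subtracting those up to
# s - 1 leaves exactly the in-span terms.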
class GetContextEmbedsHead(nn.Module):
def __init__(self, mention_aggregation_type, ctxt_output_dim, cand_output_dim, dropout=0.1):
"""
mention_aggregation_type
`all_avg`: average across tokens in mention
`fl_avg`: to average across first/last tokens in mention
`{all/fl}_linear`: for linear layer over mention
`{all/fl}_mlp` to MLP over mention
"""
super(GetContextEmbedsHead, self).__init__()
# for aggregating mention outputs of context encoder
self.mention_aggregation_type = mention_aggregation_type.split('_')
self.tokens_to_aggregate = self.mention_aggregation_type[0]
self.aggregate_method = "_".join(self.mention_aggregation_type[1:])
self.dropout = nn.Dropout(dropout)
        # compare against the original string; self.mention_aggregation_type
        # was split into a list above
        if mention_aggregation_type == 'all_avg' or mention_aggregation_type == 'none':
assert ctxt_output_dim == cand_output_dim
if self.aggregate_method == 'linear':
self.mention_agg_linear = nn.Linear(ctxt_output_dim * 2, cand_output_dim)
elif self.aggregate_method == 'avg_linear':
self.mention_agg_linear = nn.Linear(ctxt_output_dim, cand_output_dim)
elif self.aggregate_method == 'mlp':
self.mention_agg_mlp = nn.Sequential(
                nn.Linear(ctxt_output_dim, ctxt_output_dim),
                nn.ReLU(),
                nn.Dropout(0.1),
                nn.Linear(ctxt_output_dim, cand_output_dim),
)
else:
self.mention_agg_mlp = None
def forward(self, bert_output, mention_bounds):
'''
bert_output
(bs, seqlen, embed_dim)
mention_bounds: both bounds are inclusive [start, end]
(bs, num_spans, 2)
'''
        # nothing to aggregate if there are no candidate spans
        if mention_bounds.size(0) == 0:
            return mention_bounds
if self.tokens_to_aggregate == 'all':
(
embedding_ctxt, # (batch_size, num_spans, max_batch_span_width, embedding_size)
mask, # (batch_size, num_spans, max_batch_span_width)
) = batched_span_select(
bert_output, # (batch_size, sequence_length, embedding_size)
mention_bounds, # (batch_size, num_spans, 2)
)
embedding_ctxt[~mask] = 0 # 0 out masked elements
# embedding_ctxt = (batch_size, num_spans, max_batch_span_width, embedding_size)
if self.aggregate_method.startswith('avg'):
embedding_ctxt = embedding_ctxt.sum(2) / mask.sum(2).float().unsqueeze(-1)
# embedding_ctxt = (batch_size, num_spans, embedding_size)
if self.aggregate_method == 'avg_linear':
embedding_ctxt = self.mention_agg_linear(embedding_ctxt)
# embedding_ctxt = (batch_size, num_spans, output_dim)
elif self.tokens_to_aggregate == 'fl':
start_embeddings = batched_index_select(bert_output, mention_bounds[:,:,0])
end_embeddings = batched_index_select(bert_output, mention_bounds[:,:,1])
embedding_ctxt = torch.cat([start_embeddings.unsqueeze(2), end_embeddings.unsqueeze(2)], dim=2)
# embedding_ctxt = (batch_size, num_spans, 2, embedding_size)
if self.aggregate_method == 'avg':
embedding_ctxt = embedding_ctxt.mean(2)
# embedding_ctxt = (batch_size, num_spans, embedding_size)
elif self.aggregate_method == 'linear':
embedding_ctxt = embedding_ctxt.view(embedding_ctxt.size(0), embedding_ctxt.size(1), -1)
# embedding_ctxt = (batch_size, num_spans, 2 * embedding_size)
embedding_ctxt = self.mention_agg_linear(embedding_ctxt)
# embedding_ctxt = (batch_size, num_spans, output_dim)
else:
raise NotImplementedError()
return embedding_ctxt
class BiEncoderModule(torch.nn.Module):
def __init__(self, params):
super(BiEncoderModule, self).__init__()
ctxt_bert = BertModel.from_pretrained(params["bert_model"], output_hidden_states=True)
if params["load_cand_enc_only"]:
bert_model = "bert-large-uncased"
else:
bert_model = params['bert_model']
cand_bert = BertModel.from_pretrained(
bert_model,
output_hidden_states=True,
)
self.context_encoder = BertEncoder(
ctxt_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
self.cand_encoder = BertEncoder(
cand_bert,
params["out_dim"],
layer_pulled=params["pull_from_layer"],
add_linear=params["add_linear"],
)
if params.get("freeze_cand_enc", False):
for param in self.cand_encoder.parameters():
param.requires_grad = False
self.config = ctxt_bert.config
ctxt_bert_output_dim = ctxt_bert.embeddings.word_embeddings.weight.size(1)
self.mention_aggregation_type = params.get('mention_aggregation_type', None)
self.classification_heads = nn.ModuleDict({})
self.linear_compression = None
if self.mention_aggregation_type is not None:
classification_heads_dict = {'get_context_embeds': GetContextEmbedsHead(
self.mention_aggregation_type,
ctxt_bert_output_dim,
cand_bert.embeddings.word_embeddings.weight.size(1),
)}
classification_heads_dict['mention_scores'] = MentionScoresHead(
ctxt_bert_output_dim,
params["mention_scoring_method"],
params.get("max_mention_length", 10),
)
self.classification_heads = nn.ModuleDict(classification_heads_dict)
elif ctxt_bert_output_dim != cand_bert.embeddings.word_embeddings.weight.size(1):
# mapping to make the output dimensions match for dot-product similarity
self.linear_compression = nn.Linear(ctxt_bert_output_dim, cand_bert.embeddings.word_embeddings.weight.size(1))
def get_raw_ctxt_encoding(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
):
"""
Gets raw, shared context embeddings from BERT,
to be used by both mention detector and entity linker
Returns:
torch.FloatTensor (bsz, seqlen, embed_dim)
"""
raw_ctxt_encoding, _, _ = self.context_encoder.bert_model(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
)
return raw_ctxt_encoding
def get_ctxt_mention_scores(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
raw_ctxt_encoding = None,
):
"""
Gets mention scores using raw context encodings
Inputs:
raw_ctxt_encoding: torch.FloatTensor (bsz, seqlen, embed_dim)
Returns:
torch.FloatTensor (bsz, num_total_mentions): mention scores/logits
torch.IntTensor (bsz, num_total_mentions): mention boundaries
"""
# (bsz, seqlen, embed_dim)
if raw_ctxt_encoding is None:
raw_ctxt_encoding = self.get_raw_ctxt_encoding(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
)
# (num_total_mentions,); (num_total_mentions,)
return self.classification_heads['mention_scores'](
raw_ctxt_encoding, mask_ctxt,
)
def prune_ctxt_mentions(
self,
mention_logits,
mention_bounds,
num_cand_mentions,
threshold,
):
'''
Prunes mentions based on mention scores/logits (by either
`threshold` or `num_cand_mentions`, whichever yields less candidates)
Inputs:
mention_logits: torch.FloatTensor (bsz, num_total_mentions)
mention_bounds: torch.IntTensor (bsz, num_total_mentions)
num_cand_mentions: int
threshold: float
Returns:
torch.FloatTensor(bsz, max_num_pred_mentions): top mention scores/logits
torch.IntTensor(bsz, max_num_pred_mentions, 2): top mention boundaries
torch.BoolTensor(bsz, max_num_pred_mentions): mask on top mentions
torch.BoolTensor(bsz, total_possible_mentions): mask for reshaping from total possible mentions -> max # pred mentions
'''
# (bsz, num_cand_mentions); (bsz, num_cand_mentions)
top_mention_logits, mention_pos = mention_logits.topk(num_cand_mentions, sorted=True)
# (bsz, num_cand_mentions, 2)
# [:,:,0]: index of batch
# [:,:,1]: index into top mention in mention_bounds
mention_pos = torch.stack([torch.arange(mention_pos.size(0)).to(mention_pos.device).unsqueeze(-1).expand_as(mention_pos), mention_pos], dim=-1)
# (bsz, num_cand_mentions)
top_mention_pos_mask = torch.sigmoid(top_mention_logits).log() > threshold
# (total_possible_mentions, 2)
# tuples of [index of batch, index into mention_bounds] of what mentions to include
mention_pos = mention_pos[top_mention_pos_mask | (
# 2nd part of OR: if nothing is > threshold, use topK that are > -inf
((top_mention_pos_mask.sum(1) == 0).unsqueeze(-1)) & (top_mention_logits > -float("inf"))
)]
mention_pos = mention_pos.view(-1, 2)
# (bsz, total_possible_mentions)
# mask of possible logits
mention_pos_mask = torch.zeros(mention_logits.size(), dtype=torch.bool).to(mention_pos.device)
mention_pos_mask[mention_pos[:,0], mention_pos[:,1]] = 1
# (bsz, max_num_pred_mentions, 2)
chosen_mention_bounds, chosen_mention_mask = batch_reshape_mask_left(mention_bounds, mention_pos_mask, pad_idx=0)
# (bsz, max_num_pred_mentions)
chosen_mention_logits, _ = batch_reshape_mask_left(mention_logits, mention_pos_mask, pad_idx=-float("inf"), left_align_mask=chosen_mention_mask)
return chosen_mention_logits, chosen_mention_bounds, chosen_mention_mask, mention_pos_mask
def get_ctxt_embeds(
self,
raw_ctxt_encoding,
mention_bounds,
):
"""
Get candidate scores + embeddings associated with passed-in mention_bounds
Input
raw_ctxt_encoding: torch.FloatTensor (bsz, seqlen, embed_dim)
shared embeddings straight from BERT
mention_bounds: torch.IntTensor (bsz, max_num_pred_mentions, 2)
top mention boundaries
Returns
torch.FloatTensor (bsz, max_num_pred_mentions, embed_dim)
"""
# (bs, max_num_pred_mentions, embed_dim)
embedding_ctxt = self.classification_heads['get_context_embeds'](raw_ctxt_encoding, mention_bounds)
if self.linear_compression is not None:
embedding_ctxt = self.linear_compression(embedding_ctxt)
return embedding_ctxt
def forward_ctxt(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
gold_mention_bounds=None,
gold_mention_bounds_mask=None,
num_cand_mentions=50,
topK_threshold=-4.5,
get_mention_scores=True,
):
"""
If gold_mention_bounds is set, returns mention embeddings of passed-in mention bounds
Otherwise, uses top-scoring mentions
"""
if self.mention_aggregation_type is None:
'''
OLD system: don't do mention aggregation (use tokens around mention)
'''
embedding_ctxt = self.context_encoder(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
)
            # linear mapping so the context embedding dimension matches the candidate encoder's
if self.linear_compression is not None:
embedding_ctxt = self.linear_compression(embedding_ctxt)
return embedding_ctxt, None, None, None
else:
'''
NEW system: aggregate mention tokens
'''
# (bs, seqlen, embed_size)
raw_ctxt_encoding = self.get_raw_ctxt_encoding(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
)
top_mention_bounds = None
top_mention_logits = None
extra_rets = {}
if get_mention_scores:
mention_logits, mention_bounds = self.get_ctxt_mention_scores(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt, raw_ctxt_encoding,
)
extra_rets['all_mention_logits'] = mention_logits
extra_rets['all_mention_bounds'] = mention_bounds
if gold_mention_bounds is None:
(
top_mention_logits, top_mention_bounds, top_mention_mask, all_mention_mask,
) = self.prune_ctxt_mentions(
mention_logits, mention_bounds, num_cand_mentions, topK_threshold,
)
extra_rets['mention_logits'] = top_mention_logits.view(-1)
extra_rets['all_mention_mask'] = all_mention_mask
if top_mention_bounds is None:
# use gold mention
top_mention_bounds = gold_mention_bounds
top_mention_mask = gold_mention_bounds_mask
assert top_mention_bounds is not None
assert top_mention_mask is not None
# (bs, num_pred_mentions OR num_gold_mentions, embed_size)
embedding_ctxt = self.get_ctxt_embeds(
raw_ctxt_encoding, top_mention_bounds,
)
# for merging dataparallel, only 1st dimension can differ...
return {
"mention_reps": embedding_ctxt.view(-1, embedding_ctxt.size(-1)),
"mention_bounds": top_mention_bounds.view(-1, top_mention_bounds.size(-1)),
"mention_masks": top_mention_mask.view(-1),
"mention_dims": torch.tensor(top_mention_mask.size()).unsqueeze(0).to(embedding_ctxt.device),
**extra_rets
}
def forward_candidate(
self,
token_idx_cands,
segment_idx_cands,
mask_cands,
):
try:
return self.cand_encoder(
token_idx_cands, segment_idx_cands, mask_cands
)
        except RuntimeError:
            # Debugging fallback: log the offending input shapes and return a
            # random tensor so the (data-parallel) forward pass can continue.
            print(token_idx_cands.size())
            print(segment_idx_cands.size())
            print(mask_cands.size())
            return torch.rand(token_idx_cands.size()).to(token_idx_cands.device)
def forward(
self,
token_idx_ctxt,
segment_idx_ctxt,
mask_ctxt,
token_idx_cands,
segment_idx_cands,
mask_cands,
gold_mention_bounds=None,
gold_mention_bounds_mask=None,
num_cand_mentions=50,
topK_threshold=-4.5,
get_mention_scores=True,
):
"""
If gold_mention_bounds is set, returns mention embeddings of passed-in mention bounds
Otherwise, uses top-scoring mentions
"""
embedding_ctxt = embedding_cands = top_mention_mask = \
top_mention_logits = top_mention_bounds = all_mention_mask = \
all_mention_logits = all_mention_bounds = max_num_pred_mentions = None
context_outs = None
cand_outs = None
if token_idx_ctxt is not None:
context_outs = self.forward_ctxt(
token_idx_ctxt, segment_idx_ctxt, mask_ctxt,
gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
num_cand_mentions=num_cand_mentions, topK_threshold=topK_threshold,
get_mention_scores=get_mention_scores,
)
if token_idx_cands is not None:
cand_outs = self.forward_candidate(
token_idx_cands, segment_idx_cands, mask_cands
)
return context_outs, cand_outs
def upgrade_state_dict_named(self, state_dict):
prefix = ''
current_head_names = [] if not hasattr(self, 'classification_heads') else \
self.classification_heads.keys()
# Handle new classification heads present in the state dict.
keys_to_delete = []
for k in state_dict.keys():
if not k.startswith(prefix + 'classification_heads.'):
continue
head_name = k[len(prefix + 'classification_heads.'):].split('.')[0]
if head_name not in current_head_names:
print(
'WARNING: deleting classification head ({}) from checkpoint '
'not present in current model: {}'.format(head_name, k)
)
keys_to_delete.append(k)
for k in keys_to_delete:
del state_dict[k]
# Copy any newly-added classification heads into the state dict
# with their current weights.
if hasattr(self, 'classification_heads'):
cur_state = self.classification_heads.state_dict()
for k, v in cur_state.items():
if prefix + 'classification_heads.' + k not in state_dict:
print('Overwriting', prefix + 'classification_heads.' + k)
state_dict[prefix + 'classification_heads.' + k] = v
class BiEncoderRanker(torch.nn.Module):
def __init__(self, params, shared=None):
super(BiEncoderRanker, self).__init__()
self.params = params
self.device = torch.device(
"cuda" if torch.cuda.is_available() and not params["no_cuda"] else "cpu"
)
self.n_gpu = torch.cuda.device_count()
# init tokenizer
self.NULL_IDX = 0
self.START_TOKEN = "[CLS]"
self.END_TOKEN = "[SEP]"
self.tokenizer = BertTokenizer.from_pretrained(
params["bert_model"], do_lower_case=params["lowercase"]
)
# init model
self.build_model()
model_path = params.get("path_to_model", None)
if model_path is not None:
self.load_model(
model_path,
cand_enc_only=params.get("load_cand_enc_only", False),
)
self.model = self.model.to(self.device)
self.data_parallel = params.get("data_parallel")
if self.data_parallel:
self.model = torch.nn.DataParallel(self.model)
def load_model(self, fname, cpu=False, cand_enc_only=False):
if cpu or not torch.cuda.is_available():
state_dict = torch.load(fname, map_location=torch.device("cpu"))
else:
state_dict = torch.load(fname)
if cand_enc_only:
cand_state_dict = get_submodel_from_state_dict(state_dict, 'cand_encoder')
self.model.cand_encoder.load_state_dict(cand_state_dict)
else:
self.model.upgrade_state_dict_named(state_dict)
self.model.load_state_dict(state_dict)
def build_model(self):
self.model = BiEncoderModule(self.params)
def save_model(self, output_dir):
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = get_model_obj(self.model)
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
def get_optimizer(self, optim_states=None, saved_optim_type=None):
return get_bert_optimizer(
[self.model],
self.params["type_optimization"],
self.params["learning_rate"],
fp16=self.params.get("fp16"),
)
def encode_context(
self, cands, gold_mention_bounds=None, gold_mention_bounds_mask=None,
num_cand_mentions=50, topK_threshold=-4.5,
get_mention_scores=True,
):
"""
if gold_mention_bounds specified, selects according to gold_mention_bounds,
otherwise selects according to top-scoring mentions
Returns: Dictionary
mention_reps: torch.FloatTensor (bsz, max_num_pred_mentions, embed_dim): mention embeddings
mention_masks: torch.BoolTensor (bsz, max_num_pred_mentions): mention padding mask
mention_bounds: torch.LongTensor (bsz, max_num_pred_mentions, 2)
(
mention_logits: torch.FloatTensor (bsz, max_num_pred_mentions): mention scores/logits
all_mention_mask: torch.BoolTensor ((bsz, all_cand_mentions)
all_mention_logits: torch.FloatTensor (bsz, all_cand_mentions): all mention scores/logits
all_mention_bounds: torch.LongTensor (bsz, all_cand_mentions, 2): all mention bounds
)
"""
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
context_outs, _ = self.model(
token_idx_cands, segment_idx_cands, mask_cands,
None, None, None,
gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
num_cand_mentions=num_cand_mentions,
topK_threshold=topK_threshold,
get_mention_scores=get_mention_scores
)
if context_outs['mention_dims'].size(0) <= 1:
for key in context_outs:
if 'all' in key or key == 'mention_dims':
continue
context_outs[key] = context_outs[key].view([context_outs['mention_dims'][0,0], -1] + list(context_outs[key].size()[1:]))
return context_outs
'''
Reshape to (bs, num_mentions, *), iterating across GPUs
'''
def init_tensor(shape, dtype, init_value):
return init_value * torch.ones(
shape
).to(dtype=dtype, device=context_outs['mention_dims'].device)
bs = cands.size(0)
n_pred_mentions = context_outs['mention_dims'][:,1].max()
context_outs_reshape = {}
for key in context_outs:
if 'all' in key or key == 'mention_dims':
context_outs_reshape[key] = context_outs[key]
continue
# (bsz, max_num_pred_mentions, *)
context_outs_reshape[key] = init_tensor(
[bs, n_pred_mentions] + list(context_outs[key].size()[1:]),
context_outs[key].dtype,
-float("inf") if 'logit' in key else 0,
)
for idx in range(len(context_outs['mention_dims'])):
# reshape
gpu_bs = context_outs['mention_dims'][idx, 0]
b_width = context_outs['mention_dims'][idx, 1]
start_idx = (context_outs['mention_dims'][:idx, 0] * context_outs['mention_dims'][:idx, 1]).sum()
end_idx = start_idx + b_width * gpu_bs
s_reshape = context_outs['mention_dims'][:idx, 0].sum()
e_reshape = s_reshape + gpu_bs
for key in context_outs_reshape:
if 'all' in key or key == 'mention_dims':
continue
if len(context_outs[key].size()) == 1:
target_tensor = context_outs[key][start_idx:end_idx].view(gpu_bs, b_width)
else:
target_tensor = context_outs[key][start_idx:end_idx].view(gpu_bs, b_width, -1)
context_outs_reshape[key][s_reshape:e_reshape, :b_width] = target_tensor
return context_outs_reshape
def encode_candidate(self, cands):
token_idx_cands, segment_idx_cands, mask_cands = to_bert_input(
cands, self.NULL_IDX
)
_, embedding_cands = self.model(
None, None, None,
token_idx_cands, segment_idx_cands, mask_cands
)
return embedding_cands
# Score candidates given context input and label input
# If text_encs/cand_encs is provided (pre-computed), text_vecs/cand_vecs is ignored
def score_candidate(
self,
text_vecs,
cand_vecs,
text_encs=None, # pre-computed mention encoding
cand_encs=None, # pre-computed candidate encoding.
gold_mention_bounds=None,
gold_mention_bounds_mask=None,
num_cand_mentions=50,
mention_threshold=-4.5,
get_mention_scores=True,
hard_negs=False, # (if training) passed in a subset of hard negatives
hard_negs_mask=None, # (if hard negs training) mask for gold candidate mentions on all inputs (pos + negs)
):
"""
text_vecs (bs, max_ctxt_size):
cand_vecs (bs, max_num_gold_mentions, 1, max_cand_size):
text_encs (batch_num_mentions, embed_size): Pre-encoded mention vectors, masked before input
cand_encs (num_ents_to_match [batch_num_total_ents/all_ents], embed_size): Pre-encoded candidate vectors, masked before input
"""
'''
Compute context representations and/or get mention scores
'''
if text_encs is None or get_mention_scores:
# embedding_ctxt: (bs, num_gold_mentions/num_pred_mentions, embed_size)
context_outs = self.encode_context(
text_vecs, gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
num_cand_mentions=num_cand_mentions,
topK_threshold=mention_threshold,
get_mention_scores=get_mention_scores,
)
mention_logits = None
mention_bounds = None
if get_mention_scores:
mention_logits = context_outs['all_mention_logits']
mention_bounds = context_outs['all_mention_bounds']
if text_encs is None:
if gold_mention_bounds is None:
# (all_batch_pred_mentions, embed_size)
embedding_ctxt = context_outs['mention_reps'][context_outs['mention_masks']]
else:
# (all_batch_pred_mentions, embed_size)
embedding_ctxt = context_outs['mention_reps'][gold_mention_bounds_mask]
else:
# Context encoding is given, do not need to re-compute
embedding_ctxt = text_encs
'''
Compute candidate representations
'''
if cand_encs is None:
# Train time: Compute candidates in batch and compare in-batch negatives
# cand_vecs: (bs, num_gold_mentions, 1, cand_width) -> (batch_num_gold_mentions, cand_width)
cand_vecs = cand_vecs[gold_mention_bounds_mask].squeeze(1)
# (batch_num_gold_mentions, embed_dim)
embedding_cands = self.encode_candidate(cand_vecs)
else:
# (batch_num_gold_mentions, embed_dim)
embedding_cands = cand_encs
'''
Do inner-product search, or obtain scores on hard-negative entities
'''
if hard_negs:
assert hard_negs_mask is not None
# (num_mention_in_batch, embed_dim)
embedding_ctxt = embedding_ctxt[hard_negs_mask]
embedding_cands = embedding_cands[hard_negs_mask]
embedding_ctxt = embedding_ctxt.unsqueeze(1) # num_mention_in_batch x 1 x embed_size
embedding_cands = embedding_cands.unsqueeze(2) # num_mention_in_batch x embed_size x 1
scores = torch.bmm(embedding_ctxt, embedding_cands) # num_mention_in_batch x 1 x 1
scores = torch.squeeze(scores)
# (num_mention_in_batch,)
return scores, mention_logits, mention_bounds
else:
# matmul across all cand_encs (in-batch, if cand_encs is None, or across all cand_encs)
# (all_batch_pred_mentions, num_cands)
# similarity score between ctxt i and cand j
all_scores = embedding_ctxt.mm(embedding_cands.t())
return all_scores, mention_logits, mention_bounds
# label_input -- negatives provided
# If label_input is None, train on in-batch negatives
def forward(
self, context_input, cand_input,
text_encs=None, # pre-computed mention encoding.
cand_encs=None, # pre-computed candidate embeddings
mention_logits=None, # pre-computed mention logits
mention_bounds=None, # pre-computed mention bounds
label_input=None, # labels for passed-in (if hard negatives training)
gold_mention_bounds=None,
gold_mention_bounds_mask=None,
hard_negs_mask=None, # should be non-none if we are using negs
return_loss=True,
):
"""
text_encs/cand_encs/label_inputs masked before training
In-batch negs training: cand_encs None, label_inputs None, return_loss True
Hard negs training: cand_encs non-None, label_inputs non-None, return_loss True
cand_encs = all entities in batch + additional hard negatives
Inference: cand_encs non-None, label_inputs None, return_loss False
cand_encs = all entities in DB
cand_encs
non-None: set of candidate encodings to search in
None: compute in-batch candidate vectors (used as negatives if train mode)
label_inputs
non-None: labels to use for hard negatives training
None: random negatives training and/or inference
"""
hard_negs = label_input is not None
'''
GET CANDIDATE SCORES
'''
scores, out_mention_logits, out_mention_bounds = self.score_candidate(
context_input, cand_input,
hard_negs=hard_negs,
cand_encs=cand_encs,
text_encs=text_encs,
gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
hard_negs_mask=hard_negs_mask,
get_mention_scores=(return_loss and (mention_logits is None or mention_bounds is None)),
)
if mention_logits is None:
mention_logits = out_mention_logits
if mention_bounds is None:
mention_bounds = out_mention_bounds
if not return_loss:
return None, scores, mention_logits, mention_bounds
'''
COMPUTE MENTION LOSS (TRAINING MODE)
'''
span_loss = 0
if mention_logits is not None and mention_bounds is not None:
N = context_input.size(0) # batch size
M = gold_mention_bounds.size(1) # num_mentions per instance (just 1, so far)
# 1 value
span_loss = self.get_span_loss(
gold_mention_bounds=gold_mention_bounds,
gold_mention_bounds_mask=gold_mention_bounds_mask,
mention_logits=mention_logits, mention_bounds=mention_bounds,
)
'''
COMPUTE EL LOSS (TRAINING MODE)
'''
if hard_negs:
'''
Hard negatives (negatives passed in)
'''
loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
label_input = label_input[hard_negs_mask]
# scores: (num_mentions_in_batch,); label_input: (num_mentions_in_batch,)
loss = loss_fct(scores, label_input.float()) + span_loss
else:
'''
Random negatives (use in-batch negatives)
'''
# scores: (bs*num_mentions [filtered], bs*num_mentions [filtered])
            target = torch.arange(scores.size(1), dtype=torch.long)
target = target.to(self.device)
# log P(entity|mention) + log P(mention) = log [P(entity|mention)P(mention)]
loss = F.cross_entropy(scores, target, reduction="mean") + span_loss
return loss, scores, mention_logits, mention_bounds
def get_span_loss(
self, gold_mention_bounds, gold_mention_bounds_mask, mention_logits, mention_bounds,
):
"""
gold_mention_bounds (bs, num_mentions, 2)
gold_mention_bounds_mask (bs, num_mentions):
mention_logits (bs, all_mentions)
        mention_bounds (bs, all_mentions, 2)
"""
loss_fct = nn.BCEWithLogitsLoss(reduction="mean")
gold_mention_bounds[~gold_mention_bounds_mask] = -1 # ensure don't select masked to score
# triples of [ex in batch, mention_idx in gold_mention_bounds, idx in mention_bounds]
# use 1st, 2nd to index into gold_mention_bounds, 1st, 3rd to index into mention_bounds
gold_mention_pos_idx = ((
mention_bounds.unsqueeze(1) - gold_mention_bounds.unsqueeze(2) # (bs, num_mentions, start_pos * end_pos, 2)
).abs().sum(-1) == 0).nonzero()
        # gold_mention_pos_idx has 1 entry per gold mention that is not masked out
        # (num_gold_mentions [~gold_mention_bounds_mask])
gold_mention_pos = gold_mention_pos_idx[:,2]
# (bs, total_possible_spans)
gold_mention_binary = torch.zeros(mention_logits.size(), dtype=mention_logits.dtype).to(gold_mention_bounds.device)
gold_mention_binary[gold_mention_pos_idx[:,0], gold_mention_pos_idx[:,2]] = 1
# prune masked spans
mask = mention_logits != -float("inf")
masked_mention_logits = mention_logits[mask]
masked_gold_mention_binary = gold_mention_binary[mask]
# (bs, total_possible_spans)
span_loss = loss_fct(masked_mention_logits, masked_gold_mention_binary)
return span_loss
def to_bert_input(token_idx, null_idx):
"""
token_idx is a 2D tensor int.
"""
segment_idx = token_idx * 0
mask = token_idx != null_idx
# nullify elements in case self.NULL_IDX was not 0
token_idx = token_idx * mask.long()
return token_idx, segment_idx, mask
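if __name__ == "__main__":
    # Editor's illustration, not part of the original ELQ source: a minimal
    # sketch of two pieces defined above, assuming only this module's imports.
    # (1) to_bert_input: padding positions (NULL_IDX == 0) become False in the
    # attention mask, and segment ids are all zero.
    toy_tokens = torch.tensor([[101, 2054, 102, 0, 0],
                               [101, 2129, 2024, 2017, 102]])
    token_idx, segment_idx, mask = to_bert_input(toy_tokens, null_idx=0)
    print(mask)         # last two positions of row 0 are False
    print(segment_idx)  # all zeros: single-segment input
    # (2) the log-sigmoid threshold used in prune_ctxt_mentions: spans whose
    # log P(mention) falls below the default threshold (-4.5) are dropped,
    # unless nothing clears it (then the topK fallback fills in).
    toy_logits = torch.tensor([[2.0, -1.0, -6.0]])
    print(torch.sigmoid(toy_logits).log() > -4.5)  # tensor([[ True,  True, False]])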
|
BLINK-main
|
elq/biencoder/biencoder.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import io
import sys
import json
import torch
import logging
import numpy as np
from collections import OrderedDict
from pytorch_transformers.modeling_utils import CONFIG_NAME, WEIGHTS_NAME
from tqdm import tqdm
from elq.biencoder.biencoder import BiEncoderRanker
def read_dataset(dataset_name, preprocessed_json_data_parent_folder, debug=False):
file_name = "{}.jsonl".format(dataset_name)
txt_file_path = os.path.join(preprocessed_json_data_parent_folder, file_name)
samples = []
with io.open(txt_file_path, mode="r", encoding="utf-8") as file:
for line in file:
samples.append(json.loads(line.strip()))
if debug and len(samples) > 200:
break
return samples
def accuracy(out, labels):
outputs = np.argmax(out, axis=1)
return np.sum(outputs == labels)
def remove_module_from_state_dict(state_dict):
new_state_dict = OrderedDict()
for key, value in state_dict.items():
name = "".join(key.split(".module"))
new_state_dict[name] = value
return new_state_dict
def save_model(model, tokenizer, output_dir):
"""Saves the model and the tokenizer used in the output directory."""
if not os.path.exists(output_dir):
os.makedirs(output_dir)
model_to_save = model.module if hasattr(model, "module") else model
output_model_file = os.path.join(output_dir, WEIGHTS_NAME)
output_config_file = os.path.join(output_dir, CONFIG_NAME)
torch.save(model_to_save.state_dict(), output_model_file)
model_to_save.config.to_json_file(output_config_file)
tokenizer.save_vocabulary(output_dir)
def get_logger(output_dir=None):
    if output_dir is not None:
os.makedirs(output_dir, exist_ok=True)
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[
logging.FileHandler(
"{}/log.txt".format(output_dir), mode="a", delay=False
),
logging.StreamHandler(sys.stdout),
],
)
else:
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
handlers=[logging.StreamHandler(sys.stdout)],
)
logger = logging.getLogger('Blink')
    logger.setLevel(logging.DEBUG)
return logger
def write_to_file(path, string, mode="w"):
with open(path, mode) as writer:
writer.write(string)
def get_biencoder(parameters):
return BiEncoderRanker(parameters)
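if __name__ == "__main__":
    # Editor's illustration, not part of the original file:
    # torch.nn.DataParallel checkpoints prefix parameter names with ".module";
    # remove_module_from_state_dict strips that infix so the weights load into
    # an unwrapped model.
    wrapped = OrderedDict([
        ("model.module.cand_encoder.weight", 1),
        ("model.module.context_encoder.bias", 2),
    ])
    print(list(remove_module_from_state_dict(wrapped).keys()))
    # ['model.cand_encoder.weight', 'model.context_encoder.bias']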
|
BLINK-main
|
elq/candidate_ranking/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Provide an argument parser and default command line options for using ELQ.
import argparse
import importlib
import os
import sys
import datetime
ENT_START_TAG = "[unused0]"
ENT_END_TAG = "[unused1]"
ENT_TITLE_TAG = "[unused2]"
class ElqParser(argparse.ArgumentParser):
"""
    Provide an opt-producer and CLI argument parser.
    More options can be added by passing this object and calling
    ``add_arg()`` or ``add_argument()`` on it.
:param add_elq_args:
(default True) initializes the default arguments for ELQ package.
:param add_model_args:
(default False) initializes the default arguments for loading models,
including initializing arguments from the model.
"""
def __init__(
self, add_elq_args=True, add_model_args=False,
description='ELQ parser',
):
super().__init__(
description=description,
allow_abbrev=False,
conflict_handler='resolve',
formatter_class=argparse.HelpFormatter,
add_help=add_elq_args,
)
self.elq_home = os.path.dirname(
os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
)
os.environ['ELQ_HOME'] = self.elq_home
self.add_arg = self.add_argument
self.overridable = {}
if add_elq_args:
self.add_elq_args()
if add_model_args:
self.add_model_args()
def add_elq_args(self, args=None):
"""
Add common ELQ args across all scripts.
"""
parser = self.add_argument_group("Common Arguments")
parser.add_argument(
"--silent", action="store_true", help="Whether to print progress bars."
)
parser.add_argument(
"--debug",
action="store_true",
help="Whether to run in debug mode with only 200 samples.",
)
parser.add_argument(
"--data_parallel",
action="store_true",
help="Whether to distributed the candidate generation process.",
)
parser.add_argument(
"--no_cuda", action="store_true",
help="Whether not to use CUDA when available",
)
parser.add_argument("--top_k", default=10, type=int)
parser.add_argument(
"--seed", type=int, default=52313, help="random seed for initialization"
)
parser.add_argument(
"--zeshel",
default=True,
type=bool,
help="Whether the dataset is from zeroshot.",
)
def add_model_args(self, args=None):
"""
Add model args.
"""
parser = self.add_argument_group("Model Arguments")
parser.add_argument(
"--max_seq_length",
default=256,
type=int,
help="The maximum total input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_context_length",
default=128,
type=int,
help="The maximum total context input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--max_cand_length",
default=128,
type=int,
help="The maximum total label input sequence length after WordPiece tokenization. \n"
"Sequences longer than this will be truncated, and sequences shorter \n"
"than this will be padded.",
)
parser.add_argument(
"--path_to_model",
default=None,
type=str,
required=False,
help="The full path to the model to load.",
)
parser.add_argument(
"--bert_model",
default="bert-base-uncased",
type=str,
help="Bert pre-trained model selected in the list: bert-base-uncased, "
"bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.",
)
parser.add_argument(
"--pull_from_layer", type=int, default=-1, help="Layers to pull from BERT",
)
parser.add_argument(
"--lowercase",
action="store_false",
help="Whether to lower case the input text. True for uncased models, False for cased models.",
)
parser.add_argument("--context_key", default="context", type=str)
parser.add_argument("--title_key", default="entity", type=str)
parser.add_argument(
"--out_dim", type=int, default=1, help="Output dimention of bi-encoders.",
)
parser.add_argument(
"--add_linear",
action="store_true",
help="Whether to add an additonal linear projection on top of BERT.",
)
parser.add_argument(
"--data_path",
default="data/zeshel",
type=str,
help="The path to the train data.",
)
parser.add_argument(
"--output_path",
default=None,
type=str,
required=True,
help="The output directory where generated output file (model, etc.) is to be dumped.",
)
parser.add_argument(
"--mention_aggregation_type",
default=None,
type=str,
help="Type of mention aggregation (None to just use [CLS] token, "
"'all_avg' to average across tokens in mention, 'fl_avg' to average across first/last tokens in mention, "
"'{all/fl}_linear' for linear layer over mention, '{all/fl}_mlp' to MLP over mention)",
)
parser.add_argument(
"--no_mention_bounds",
dest="no_mention_bounds",
action="store_true",
default=False,
help="Don't add tokens around target mention. MUST BE FALSE IF 'mention_aggregation_type' is NONE",
)
parser.add_argument(
"--mention_scoring_method",
dest="mention_scoring_method",
default="qa_linear",
type=str,
help="Method for generating/scoring mentions boundaries (options: 'qa_mlp', 'qa_linear', 'BIO')",
)
parser.add_argument(
"--max_mention_length",
dest="max_mention_length",
default=10,
type=int,
help="Maximum length of span to consider as candidate mention",
)
def add_training_args(self, args=None):
"""
Add model training args.
"""
parser = self.add_argument_group("Model Training Arguments")
parser.add_argument(
"--evaluate", action="store_true", help="Whether to run evaluation."
)
parser.add_argument(
"--output_eval_file",
default=None,
type=str,
help="The txt file where the the evaluation results will be written.",
)
parser.add_argument(
"--train_batch_size", default=8, type=int,
help="Total batch size for training."
)
parser.add_argument(
"--eval_batch_size", default=8, type=int,
help="Total batch size for evaluation.",
)
parser.add_argument("--max_grad_norm", default=1.0, type=float)
parser.add_argument(
"--learning_rate",
default=3e-5,
type=float,
help="The initial learning rate for Adam.",
)
parser.add_argument(
"--num_train_epochs",
default=1,
type=int,
help="Number of training epochs.",
)
parser.add_argument(
"--print_interval", type=int, default=5,
help="Interval of loss printing",
)
parser.add_argument(
"--eval_interval",
type=int,
default=40,
help="Interval for evaluation during training",
)
parser.add_argument(
"--save_interval", type=int, default=1,
help="Interval for model saving"
)
parser.add_argument(
"--warmup_proportion",
default=0.1,
type=float,
help="Proportion of training to perform linear learning rate warmup for. "
"E.g., 0.1 = 10% of training.",
)
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumualte before performing a backward/update pass.",
)
parser.add_argument(
"--type_optimization",
type=str,
default="all_encoder_layers",
help="Which type of layers to optimize in BERT",
)
parser.add_argument(
"--shuffle", type=bool, default=False,
help="Whether to shuffle train data",
)
# TODO DELETE LATER!!!
parser.add_argument(
"--start_idx",
default=None,
type=int,
)
parser.add_argument(
"--end_idx",
default=None,
type=int,
)
parser.add_argument(
"--last_epoch",
default=0,
type=int,
help="Epoch to restore from when pretraining",
)
parser.add_argument(
"--path_to_trainer_state",
default=None,
type=str,
required=False,
help="The full path to the last checkpoint's training state to load.",
)
parser.add_argument(
'--dont_distribute_train_samples',
default=False,
action="store_true",
help="Don't distribute all training samples across the epochs (go through all samples every epoch)",
)
parser.add_argument(
"--freeze_cand_enc",
default=False,
action="store_true",
help="Freeze the candidate encoder",
)
parser.add_argument(
"--load_cand_enc_only",
default=False,
action="store_true",
help="Only load the candidate encoder from saved model path",
)
parser.add_argument(
"--cand_enc_path",
default="models/all_entities_large.t7",
type=str,
required=False,
help="Filepath to the saved entity encodings.",
)
parser.add_argument(
"--cand_token_ids_path",
default="models/entity_token_ids_128.t7",
type=str,
required=False,
help="Filepath to the saved tokenized entity descriptions.",
)
parser.add_argument(
"--index_path",
default="models/faiss_hnsw_index.pkl",
type=str,
required=False,
help="Filepath to the HNSW index for adversarial training.",
)
parser.add_argument(
"--adversarial_training",
default=False,
action="store_true",
help="Do adversarial training (only takes effect if `freeze_cand_enc` is set)",
)
parser.add_argument(
"--get_losses",
default=False,
action="store_true",
help="Get losses during evaluation",
)
def add_eval_args(self, args=None):
"""
Add model evaluation args.
"""
parser = self.add_argument_group("Model Evaluation Arguments")
parser.add_argument(
"--mode",
default="valid",
type=str,
help="Train / validation / test",
)
parser.add_argument(
"--save_topk_result",
action="store_true",
help="Whether to save prediction results.",
)
parser.add_argument(
"--encode_batch_size",
default=8,
type=int,
help="Batch size for encoding."
)
parser.add_argument(
"--cand_pool_path",
default=None,
type=str,
help="Path for candidate pool",
)
parser.add_argument(
"--cand_encode_path",
default=None,
type=str,
help="Path for candidate encoding",
)
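if __name__ == "__main__":
    # Editor's illustration, not part of the original file: build a parser with
    # model and training arguments, then parse a minimal command line. The
    # values below are placeholders, not recommended settings.
    parser = ElqParser(add_model_args=True)
    parser.add_training_args()
    args = parser.parse_args([
        "--output_path", "experiments/debug",
        "--bert_model", "bert-base-uncased",
        "--mention_aggregation_type", "all_avg",
    ])
    print(args.output_path, args.bert_model, args.max_mention_length)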
|
BLINK-main
|
elq/common/params.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from torch import nn
import torch
def get_model_obj(model):
model = model.module if hasattr(model, "module") else model
return model
class BertEncoder(nn.Module):
def __init__(
self, bert_model, output_dim, layer_pulled=-1, add_linear=None,
):
super(BertEncoder, self).__init__()
self.layer_pulled = layer_pulled
bert_output_dim = bert_model.embeddings.word_embeddings.weight.size(1)
self.bert_model = bert_model
self.dropout = nn.Dropout(0.1)
if add_linear:
self.additional_linear = nn.Linear(bert_output_dim, output_dim)
else:
self.additional_linear = None
def forward(self, token_ids, segment_ids, attention_mask, DEBUG=False):
if DEBUG:
import pdb
pdb.set_trace()
try:
output_bert, output_pooler, _ = self.bert_model(
token_ids, segment_ids, attention_mask
)
except RuntimeError as e:
print(token_ids.size())
print(segment_ids.size())
print(attention_mask.size())
print(e)
import pdb
pdb.set_trace()
output_bert, output_pooler, _ = self.bert_model(
token_ids, segment_ids, attention_mask
)
if self.additional_linear is not None:
# embeddings = (batch_size, embedding_size)
embeddings = output_pooler
else:
# embeddings = (batch_size, embedding_size)
embeddings = output_bert[:, 0, :]
# in case of dimensionality reduction
if self.additional_linear is not None:
result = self.additional_linear(self.dropout(embeddings))
else:
result = embeddings
return result
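if __name__ == "__main__":
    # Editor's illustration, not part of the original file: a toy stand-in for
    # BertModel, only to show how BertEncoder wires pooling and the optional
    # linear head. Real usage passes a pytorch_transformers BertModel instead.
    class _TinyBert(nn.Module):
        def __init__(self, vocab_size=32, hidden_dim=8):
            super(_TinyBert, self).__init__()
            self.embeddings = nn.Module()
            self.embeddings.word_embeddings = nn.Embedding(vocab_size, hidden_dim)
        def forward(self, token_ids, segment_ids, attention_mask):
            sequence_output = self.embeddings.word_embeddings(token_ids)
            pooled_output = sequence_output[:, 0, :]  # stand-in for BERT's pooler
            return sequence_output, pooled_output, None
    token_ids = torch.randint(0, 32, (2, 5))
    encoder = BertEncoder(_TinyBert(), output_dim=4, add_linear=True)
    out = encoder(token_ids, token_ids * 0, (token_ids != 0).long())
    print(out.shape)  # torch.Size([2, 4])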
|
BLINK-main
|
elq/common/ranker_base.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
"""
FAISS-based index components. Original from
https://github.com/facebookresearch/DPR/blob/master/dpr/indexer/faiss_indexers.py
"""
import os
import logging
import pickle
import faiss
import numpy as np
logger = logging.getLogger()
class DenseIndexer(object):
def __init__(self, buffer_size: int = 50000):
self.buffer_size = buffer_size
self.index_id_to_db_id = []
self.index = None
def index_data(self, data: np.array):
raise NotImplementedError
def search_knn(self, query_vectors: np.array, top_docs: int):
raise NotImplementedError
def serialize(self, index_file: str):
logger.info("Serializing index to %s", index_file)
faiss.write_index(self.index, index_file)
def deserialize_from(self, index_file: str):
logger.info("Loading index from %s", index_file)
self.index = faiss.read_index(index_file)
logger.info(
"Loaded index of type %s and size %d", type(self.index), self.index.ntotal
)
# DenseFlatIndexer does exact search
class DenseFlatIndexer(DenseIndexer):
def __init__(self, vector_sz: int = 1, buffer_size: int = 50000):
super(DenseFlatIndexer, self).__init__(buffer_size=buffer_size)
self.index = faiss.IndexFlatIP(vector_sz)
def index_data(self, data: np.array):
n = len(data)
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
cnt = 0
for i in range(0, n, self.buffer_size):
vectors = [np.reshape(t, (1, -1)) for t in data[i : i + self.buffer_size]]
vectors = np.concatenate(vectors, axis=0)
self.index.add(vectors)
cnt += self.buffer_size
logger.info("Total data indexed %d", n)
def search_knn(self, query_vectors, top_k):
scores, indexes = self.index.search(query_vectors, top_k)
return scores, indexes
# DenseIVFFlatIndexer does bucketed exact search
class DenseIVFFlatIndexer(DenseIndexer):
def __init__(self, vector_sz: int = 1, nprobe: int = 10, nlist: int = 100):
super(DenseIVFFlatIndexer, self).__init__()
self.nprobe = nprobe
self.nlist = nlist
quantizer = faiss.IndexFlatL2(vector_sz) # the other index
self.index = faiss.IndexIVFFlat(quantizer, vector_sz, self.nlist, faiss.METRIC_INNER_PRODUCT)
self.index.nprobe = nprobe
def index_data(self, data: np.array):
n = len(data)
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
self.index.train(data)
self.index.add(data)
logger.info("Total data indexed %d", n)
def search_knn(self, query_vectors, top_k):
scores, indexes = self.index.search(query_vectors, top_k)
return scores, indexes
# DenseHNSWFlatIndexer does approximate search
class DenseHNSWFlatIndexer(DenseIndexer):
"""
    Efficient index for retrieval. Note: default settings are for high accuracy but also high RAM usage
"""
def __init__(
self,
vector_sz: int,
buffer_size: int = 50000,
store_n: int = 128,
ef_search: int = 256,
ef_construction: int = 200,
):
super(DenseHNSWFlatIndexer, self).__init__(buffer_size=buffer_size)
index = faiss.IndexHNSWFlat(vector_sz, store_n, faiss.METRIC_INNER_PRODUCT)
index.hnsw.efSearch = ef_search
index.hnsw.efConstruction = ef_construction
self.index = index
def index_data(self, data: np.array):
n = len(data)
# indexing in batches is beneficial for many faiss index types
logger.info("Indexing data, this may take a while.")
self.index.add(data)
logger.info("Total data indexed %d" % n)
def search_knn(self, query_vectors, top_k):
scores, indexes = self.index.search(query_vectors, top_k)
return scores, indexes
def deserialize_from(self, file: str):
super(DenseHNSWFlatIndexer, self).deserialize_from(file)
# to trigger warning on subsequent indexing
self.phi = 1
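if __name__ == "__main__":
    # Editor's illustration, not part of the original file: exact inner-product
    # search over random unit vectors, assuming a working faiss installation.
    # With L2-normalized vectors, inner product equals cosine similarity, so
    # each query's top hit is itself.
    rng = np.random.RandomState(0)
    vectors = rng.rand(100, 16).astype(np.float32)
    vectors /= np.linalg.norm(vectors, axis=1, keepdims=True)
    indexer = DenseFlatIndexer(vector_sz=16)
    indexer.index_data(vectors)
    scores, indexes = indexer.search_knn(vectors[:2], top_k=3)
    print(indexes[:, 0])  # [0 1]: each query retrieves itself first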
|
BLINK-main
|
elq/index/faiss_indexer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
def entity_linking_tp_with_overlap(gold, predicted):
"""
Partially adopted from: https://github.com/UKPLab/starsem2018-entity-linking
Counts weak and strong matches
:param gold:
:param predicted:
:return:
>>> entity_linking_tp_with_overlap([('Q7366', 14, 18), ('Q780394', 19, 35)], [('Q7366', 14, 16), ('Q780394', 19, 35)])
2, 1
>>> entity_linking_tp_with_overlap([('Q7366', 14, 18), ('Q780394', 19, 35)], [('Q7366', 14, 16)])
1, 0
>>> entity_linking_tp_with_overlap([(None, 14, 18), ('Q780394', 19, 35)], [('Q7366', 14, 16)])
0, 0
>>> entity_linking_tp_with_overlap([(None, 14, 18), (None, )], [(None,)])
1, 0
>>> entity_linking_tp_with_overlap([('Q7366', ), ('Q780394', )], [('Q7366', 14, 16)])
1, 0
>>> entity_linking_tp_with_overlap([], [('Q7366', 14, 16)])
0, 0
"""
if not gold or not predicted:
return 0, 0
    # Add dummy spans: if no spans are given, everything overlaps by default
if any(len(e) != 3 for e in gold):
gold = [(e[0], 0, 1) for e in gold]
predicted = [(e[0], 0, 1) for e in predicted]
# Replace None KB ids with empty strings
gold = [("",) + e[1:] if e[0] is None else e for e in gold]
predicted = [("",) + e[1:] if e[0] is None else e for e in predicted]
gold = sorted(gold, key=lambda x: x[2])
predicted = sorted(predicted, key=lambda x: x[2])
# tracks weak matches
lcs_matrix_weak = np.zeros((len(gold), len(predicted)), dtype=np.int16)
# tracks strong matches
lcs_matrix_strong = np.zeros((len(gold), len(predicted)), dtype=np.int16)
for g_i in range(len(gold)):
for p_i in range(len(predicted)):
gm = gold[g_i]
pm = predicted[p_i]
# increment lcs_matrix_weak
if not (gm[1] >= pm[2] or pm[1] >= gm[2]) and (gm[0].lower() == pm[0].lower()):
if g_i == 0 or p_i == 0:
lcs_matrix_weak[g_i, p_i] = 1
else:
lcs_matrix_weak[g_i, p_i] = 1 + lcs_matrix_weak[g_i - 1, p_i - 1]
else:
if g_i == 0 and p_i == 0:
lcs_matrix_weak[g_i, p_i] = 0
elif g_i == 0 and p_i != 0:
lcs_matrix_weak[g_i, p_i] = max(0, lcs_matrix_weak[g_i, p_i - 1])
elif g_i != 0 and p_i == 0:
lcs_matrix_weak[g_i, p_i] = max(lcs_matrix_weak[g_i - 1, p_i], 0)
elif g_i != 0 and p_i != 0:
lcs_matrix_weak[g_i, p_i] = max(lcs_matrix_weak[g_i - 1, p_i], lcs_matrix_weak[g_i, p_i - 1])
# increment lcs_matrix_strong
if (gm[1] == pm[1] and pm[2] == gm[2]) and (gm[0].lower() == pm[0].lower()):
if g_i == 0 or p_i == 0:
lcs_matrix_strong[g_i, p_i] = 1
else:
lcs_matrix_strong[g_i, p_i] = 1 + lcs_matrix_strong[g_i - 1, p_i - 1]
else:
if g_i == 0 and p_i == 0:
lcs_matrix_strong[g_i, p_i] = 0
elif g_i == 0 and p_i != 0:
lcs_matrix_strong[g_i, p_i] = max(0, lcs_matrix_strong[g_i, p_i - 1])
elif g_i != 0 and p_i == 0:
lcs_matrix_strong[g_i, p_i] = max(lcs_matrix_strong[g_i - 1, p_i], 0)
elif g_i != 0 and p_i != 0:
lcs_matrix_strong[g_i, p_i] = max(lcs_matrix_strong[g_i - 1, p_i], lcs_matrix_strong[g_i, p_i - 1])
weak_match_count = lcs_matrix_weak[len(gold) - 1, len(predicted) - 1]
strong_match_count = lcs_matrix_strong[len(gold) - 1, len(predicted) - 1]
return weak_match_count, strong_match_count
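if __name__ == "__main__":
    # Editor's illustration, not part of the original file, mirroring the first
    # doctest above: the second prediction matches its gold span exactly
    # (strong match), while the first one only overlaps it (weak match).
    gold = [("Q7366", 14, 18), ("Q780394", 19, 35)]
    predicted = [("Q7366", 14, 16), ("Q780394", 19, 35)]
    print(entity_linking_tp_with_overlap(gold, predicted))  # (2, 1)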
|
BLINK-main
|
elq/vcg_utils/measures.py
|
import argparse
import json
import logging
import os
import random
import time
import torch
from datetime import timedelta
WORLDS = {
'american_football',
'doctor_who',
'fallout',
'final_fantasy',
'military',
'pro_wrestling',
'starwars',
'world_of_warcraft',
'coronation_street',
'muppets',
'ice_hockey',
'elder_scrolls',
'forgotten_realms',
'lego',
'star_trek',
'yugioh'
}
domain_set = {}
domain_set['val'] = set(['coronation_street', 'muppets', 'ice_hockey', 'elder_scrolls'])
domain_set['test'] = set(['forgotten_realms', 'lego', 'star_trek', 'yugioh'])
domain_set['train'] = set(['american_football', 'doctor_who', 'fallout', 'final_fantasy', 'military', 'pro_wrestling', 'starwars', 'world_of_warcraft'])
class LogFormatter():
def __init__(self):
self.start_time = time.time()
def format(self, record):
elapsed_seconds = round(record.created - self.start_time)
prefix = "%s - %s - %s" % (
record.levelname,
time.strftime("%x %X"),
timedelta(seconds=elapsed_seconds)
)
message = record.getMessage()
message = message.replace('\n', '\n' + ' ' * (len(prefix) + 3))
return "%s - %s" % (prefix, message) if message else ''
log_formatter = LogFormatter()
console_handler = logging.StreamHandler()
console_handler.setLevel(logging.INFO)
console_handler.setFormatter(log_formatter)
logger = logging.getLogger()
logger.handlers = []
logger.setLevel(logging.INFO)
logger.propagate = False
logger.addHandler(console_handler)
def load_entity_dict(params):
entity_dict = {}
entity_map = {}
for src in WORLDS:
fname = os.path.join(params.document_path, src + ".json")
assert os.path.isfile(fname), "File not found! %s" % fname
cur_dict = {}
doc_map = {}
doc_list = []
with open(fname, 'rt') as f:
for line in f:
line = line.rstrip()
item = json.loads(line)
doc_id = item["document_id"]
title = item["title"]
text = item["text"]
doc_map[doc_id] = len(doc_list)
doc_list.append(item)
logger.info("Load for world %s." % src)
entity_dict[src] = doc_list
entity_map[src] = doc_map
return entity_dict, entity_map
def convert_data(params, entity_dict, entity_map, mode):
if mode == "valid":
fname = os.path.join(params.mention_path, "val.json")
else:
fname = os.path.join(params.mention_path, mode + ".json")
fout = open(os.path.join(params.output_path, mode + ".jsonl"), 'wt')
cnt = 0
max_tok = 128
with open(fname, 'rt') as f:
for line in f:
cnt += 1
line = line.rstrip()
item = json.loads(line)
mention = item["text"].lower()
src = item["corpus"]
label_doc_id = item["label_document_id"]
orig_doc_id = item["context_document_id"]
start = item["start_index"]
end = item["end_index"]
# add context around the mention as well
orig_id = entity_map[src][orig_doc_id]
text = entity_dict[src][orig_id]["text"].lower()
tokens = text.split(" ")
assert mention == ' '.join(tokens[start:end + 1])
tokenized_query = mention
mention_context_left = tokens[max(0, start - max_tok):start]
mention_context_right = tokens[end + 1:min(len(tokens), end + max_tok + 1)]
# entity info
k = entity_map[src][label_doc_id]
ent_title = entity_dict[src][k]['title']
ent_text = entity_dict[src][k]["text"]
example = {}
example["context_left"] = ' '.join(mention_context_left)
example['context_right'] = ' '.join(mention_context_right)
example["mention"] = mention
example["label"] = ent_text
example["label_id"] = k
example['label_title'] = ent_title
example['world'] = src
fout.write(json.dumps(example))
fout.write('\n')
fout.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Zero-shot Entity Linking Dataset')
parser.add_argument(
'--document_path',
default='data/zeshel/documents',
type=str,
)
parser.add_argument(
'--mention_path',
default='data/zeshel/mentions',
type=str,
)
parser.add_argument(
'--output_path',
default='data/zeshel/blink_format',
type=str,
)
params = parser.parse_args()
os.makedirs(params.output_path, exist_ok=True)
entity_dict, entity_map = load_entity_dict(params)
convert_data(params, entity_dict, entity_map, 'train')
convert_data(params, entity_dict, entity_map, 'valid')
convert_data(params, entity_dict, entity_map, 'test')
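# Editor's note -- example invocation, using the argparse defaults above:
#   python examples/zeshel/create_BLINK_zeshel_data.py \
#       --document_path data/zeshel/documents \
#       --mention_path data/zeshel/mentions \
#       --output_path data/zeshel/blink_format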
|
BLINK-main
|
examples/zeshel/create_BLINK_zeshel_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from elq.biencoder.biencoder import load_biencoder
import elq.candidate_ranking.utils as utils
import json
import sys
import os
from tqdm import tqdm
import argparse
def encode_candidate(
reranker,
candidate_pool,
encode_batch_size,
silent,
logger,
):
reranker.model.eval()
device = reranker.device
#for cand_pool in candidate_pool:
#logger.info("Encoding candidate pool %s" % src)
sampler = SequentialSampler(candidate_pool)
data_loader = DataLoader(
candidate_pool, sampler=sampler, batch_size=encode_batch_size
)
if silent:
iter_ = data_loader
else:
iter_ = tqdm(data_loader)
cand_encode_list = None
for step, batch in enumerate(iter_):
cands = batch
cands = cands.to(device)
cand_encode = reranker.encode_candidate(cands)
if cand_encode_list is None:
cand_encode_list = cand_encode
else:
cand_encode_list = torch.cat((cand_encode_list, cand_encode))
return cand_encode_list
def load_candidate_pool(
tokenizer,
params,
logger,
cand_pool_path,
):
candidate_pool = None
# try to load candidate pool from file
try:
logger.info("Loading pre-generated candidate pool from: ")
logger.info(cand_pool_path)
candidate_pool = torch.load(cand_pool_path)
    except Exception:
        logger.info("Loading failed.")
assert candidate_pool is not None
return candidate_pool
parser = argparse.ArgumentParser()
parser.add_argument('--path_to_model_config', type=str, required=True, help='filepath to saved model config')
parser.add_argument('--path_to_model', type=str, required=True, help='filepath to saved model')
parser.add_argument('--entity_dict_path', type=str, required=True, help='filepath to entities to encode (.jsonl file)')
parser.add_argument('--saved_cand_ids', type=str, help='filepath to entities pre-parsed into IDs')
parser.add_argument('--encoding_save_file_dir', type=str, help='directory of file to save generated encodings', default=None)
parser.add_argument('--test', action='store_true', default=False, help='whether to just test encoding subsample of entities')
parser.add_argument('--compare_saved_embeds', type=str, help='compare against these saved embeddings')
parser.add_argument('--batch_size', type=int, default=512, help='batch size for encoding candidate vectors (default 512)')
parser.add_argument('--chunk_start', type=int, default=0, help='example idx to start encoding at (for parallelizing encoding process)')
parser.add_argument('--chunk_end', type=int, default=-1, help='example idx to stop encoding at (for parallelizing encoding process)')
args = parser.parse_args()
try:
with open(args.path_to_model_config) as json_file:
biencoder_params = json.load(json_file)
except json.decoder.JSONDecodeError:
with open(args.path_to_model_config) as json_file:
for line in json_file:
line = line.replace("'", "\"")
line = line.replace("True", "true")
line = line.replace("False", "false")
line = line.replace("None", "null")
biencoder_params = json.loads(line)
break
# model to use
biencoder_params["path_to_model"] = args.path_to_model
# entities to use
biencoder_params["entity_dict_path"] = args.entity_dict_path
biencoder_params["degug"] = False
biencoder_params["data_parallel"] = True
biencoder_params["no_cuda"] = False
biencoder_params["max_context_length"] = 32
biencoder_params["encode_batch_size"] = args.batch_size
saved_cand_ids = getattr(args, 'saved_cand_ids', None)
encoding_save_file_dir = args.encoding_save_file_dir
if encoding_save_file_dir is not None and not os.path.exists(encoding_save_file_dir):
os.makedirs(encoding_save_file_dir, exist_ok=True)
logger = utils.get_logger(biencoder_params.get("model_output_path", None))
biencoder = load_biencoder(biencoder_params)
baseline_candidate_encoding = None
if getattr(args, 'compare_saved_embeds', None) is not None:
baseline_candidate_encoding = torch.load(getattr(args, 'compare_saved_embeds'))
candidate_pool = load_candidate_pool(
biencoder.tokenizer,
biencoder_params,
logger,
getattr(args, 'saved_cand_ids', None),
)
if args.test:
candidate_pool = candidate_pool[:10]
# encode in chunks to parallelize
save_file = None
if getattr(args, 'encoding_save_file_dir', None) is not None:
save_file = os.path.join(
args.encoding_save_file_dir,
"{}_{}.t7".format(args.chunk_start, args.chunk_end),
)
print("Saving in: {}".format(save_file))
if save_file is not None:
    open(save_file, "w").close()  # mark as existing
candidate_encoding = encode_candidate(
biencoder,
candidate_pool[args.chunk_start:args.chunk_end],
biencoder_params["encode_batch_size"],
biencoder_params["silent"],
logger,
)
if save_file is not None:
torch.save(candidate_encoding, save_file)
print(candidate_encoding[0,:10])
if baseline_candidate_encoding is not None:
print(baseline_candidate_encoding[0,:10])
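# Editor's note -- example invocation (angle-bracketed paths are placeholders):
#   python scripts/generate_candidates.py \
#       --path_to_model_config <biencoder config .json> \
#       --path_to_model <biencoder weights> \
#       --entity_dict_path <entity descriptions .jsonl> \
#       --saved_cand_ids <pre-tokenized entity ids .t7> \
#       --encoding_save_file_dir <output dir> \
#       --chunk_start 0 --chunk_end 1000000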
|
BLINK-main
|
scripts/generate_candidates.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import errno
import json
import os
import sys
import urllib.request
import requests
from bs4 import BeautifulSoup
from tqdm import tqdm
BEGIN_ENT_TOKEN = "[START_ENT]"
END_ENT_TOKEN = "[END_ENT]"
url2id_cache = {}
def _read_url(url):
with urllib.request.urlopen(url) as response:
html = response.read()
soup = BeautifulSoup(html, features="html.parser")
title = soup.title.string.replace(" - Wikipedia", "").strip()
return title
def _get_pageid_from_api(title, client=None):
pageid = None
title_html = title.strip().replace(" ", "%20")
url = "https://en.wikipedia.org/w/api.php?action=query&titles={}&format=json".format(
title_html
)
try:
# Package the request, send the request and catch the response: r
r = requests.get(url)
# Decode the JSON data into a dictionary: json_data
json_data = r.json()
if len(json_data["query"]["pages"]) > 1:
print("WARNING: more than one result returned from wikipedia api")
for _, v in json_data["query"]["pages"].items():
pageid = v["pageid"]
except:
pass
return pageid
def extract_questions(filename):
# all the datapoints
global_questions = []
# left context so far in the document
left_context = []
# working datapoints for the document
document_questions = []
# is the entity open
open_entity = False
# question id in the document
question_i = 0
with open(filename) as fin:
lines = fin.readlines()
for line in tqdm(lines):
if "-DOCSTART-" in line:
# new document is starting
doc_id = line.split("(")[-1][:-2]
# END DOCUMENT
# check end of entity
if open_entity:
document_questions[-1]["input"].append(END_ENT_TOKEN)
open_entity = False
"""
#DEBUG
for q in document_questions:
pp.pprint(q)
input("...")
"""
# add sentence_questions to global_questions
global_questions.extend(document_questions)
# reset
left_context = []
document_questions = []
question_i = 0
else:
split = line.split("\t")
token = split[0].strip()
if len(split) >= 5:
B_I = split[1]
mention = split[2]
# YAGO2_entity = split[3]
Wikipedia_URL = split[4]
Wikipedia_ID = split[5]
                    # Freebase_id = split[6]
if B_I == "I":
pass
elif B_I == "B":
title = Wikipedia_URL.split("/")[-1].replace("_", " ")
if Wikipedia_ID == "000":
if Wikipedia_URL in url2id_cache:
pageid = url2id_cache[Wikipedia_URL]
else:
pageid = _get_pageid_from_api(title)
url2id_cache[Wikipedia_URL] = pageid
Wikipedia_ID = pageid
q = {
"id": "{}:{}".format(doc_id, question_i),
"input": left_context.copy() + [BEGIN_ENT_TOKEN],
"mention": mention,
"Wikipedia_title": title,
"Wikipedia_URL": Wikipedia_URL,
"Wikipedia_ID": Wikipedia_ID,
"left_context": left_context.copy(),
"right_context": [],
}
document_questions.append(q)
open_entity = True
question_i += 1
else:
print("Invalid B_I {}", format(B_I))
sys.exit(-1)
# print(token,B_I,mention,Wikipedia_URL,Wikipedia_ID)
else:
if open_entity:
document_questions[-1]["input"].append(END_ENT_TOKEN)
open_entity = False
left_context.append(token)
for q in document_questions:
q["input"].append(token)
for q in document_questions[:-1]:
q["right_context"].append(token)
if len(document_questions) > 0 and not open_entity:
document_questions[-1]["right_context"].append(token)
# FINAL SENTENCE
if open_entity:
document_questions[-1]["input"].append(END_ENT_TOKEN)
open_entity = False
# add sentence_questions to global_questions
global_questions.extend(document_questions)
return global_questions
# store on file
def store_questions(questions, OUT_FILENAME):
if not os.path.exists(os.path.dirname(OUT_FILENAME)):
try:
os.makedirs(os.path.dirname(OUT_FILENAME))
except OSError as exc: # Guard against race condition
if exc.errno != errno.EEXIST:
raise
with open(OUT_FILENAME, "w+") as fout:
for q in questions:
json.dump(q, fout)
fout.write("\n")
def convert_to_BLINK_format(questions):
data = []
for q in questions:
datapoint = {
"context_left": " ".join(q["left_context"]).strip(),
"mention": q["mention"],
"context_right": " ".join(q["right_context"]).strip(),
"query_id": q["id"],
"label_id": q["Wikipedia_ID"],
"Wikipedia_ID": q["Wikipedia_ID"],
"Wikipedia_URL": q["Wikipedia_URL"],
"Wikipedia_title": q["Wikipedia_title"],
}
data.append(datapoint)
return data
# AIDA-YAGO2
print("AIDA-YAGO2")
in_aida_filename = (
"data/train_and_benchmark_data/basic_data/test_datasets/AIDA/AIDA-YAGO2-dataset.tsv"
)
aida_questions = extract_questions(in_aida_filename)
train = []
testa = []
testb = []
for element in aida_questions:
if "testa" in element["id"]:
testa.append(element)
elif "testb" in element["id"]:
testb.append(element)
else:
train.append(element)
print("train: {}".format(len(train)))
print("testa: {}".format(len(testa)))
print("testb: {}".format(len(testb)))
train_blink = convert_to_BLINK_format(train)
testa_blink = convert_to_BLINK_format(testa)
testb_blink = convert_to_BLINK_format(testb)
out_train_aida_filename = "data/BLINK_benchmark/AIDA-YAGO2_train.jsonl"
store_questions(train_blink, out_train_aida_filename)
out_testa_aida_filename = "data/BLINK_benchmark/AIDA-YAGO2_testa.jsonl"
store_questions(testa_blink, out_testa_aida_filename)
out_testb_aida_filename = "data/BLINK_benchmark/AIDA-YAGO2_testb.jsonl"
store_questions(testb_blink, out_testb_aida_filename)
# ACE 2004
print("ACE 2004")
in_ace_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/ace2004/ace2004.conll"
ace_questions = convert_to_BLINK_format(extract_questions(in_ace_filename))
out_ace_filename = "data/BLINK_benchmark/ace2004_questions.jsonl"
store_questions(ace_questions, out_ace_filename)
print(len(ace_questions))
# aquaint
print("aquaint")
in_aquaint_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/aquaint/aquaint.conll"
aquaint_questions = convert_to_BLINK_format(extract_questions(in_aquaint_filename))
out_aquaint_filename = "data/BLINK_benchmark/aquaint_questions.jsonl"
store_questions(aquaint_questions, out_aquaint_filename)
print(len(aquaint_questions))
# clueweb - WNED-CWEB (CWEB)
print("clueweb - WNED-CWEB (CWEB)")
in_clueweb_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/clueweb/clueweb.conll"
clueweb_questions = convert_to_BLINK_format(extract_questions(in_clueweb_filename))
out_clueweb_filename = "data/BLINK_benchmark/clueweb_questions.jsonl"
store_questions(clueweb_questions, out_clueweb_filename)
print(len(clueweb_questions))
# msnbc
print("msnbc")
in_msnbc_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/msnbc/msnbc.conll"
msnbc_questions = convert_to_BLINK_format(extract_questions(in_msnbc_filename))
out_msnbc_filename = "data/BLINK_benchmark/msnbc_questions.jsonl"
store_questions(msnbc_questions, out_msnbc_filename)
print(len(msnbc_questions))
# wikipedia - WNED-WIKI (WIKI)
print("wikipedia - WNED-WIKI (WIKI)")
in_wnedwiki_filename = "data/train_and_benchmark_data/basic_data/test_datasets/wned-datasets/wikipedia/wikipedia.conll"
wnedwiki_questions = convert_to_BLINK_format(extract_questions(in_wnedwiki_filename))
out_wnedwiki_filename = "data/BLINK_benchmark/wnedwiki_questions.jsonl"
store_questions(wnedwiki_questions, out_wnedwiki_filename)
print(len(wnedwiki_questions))
|
BLINK-main
|
scripts/create_BLINK_benchmark_data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import json
import os
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--path_to_saved_chunks', type=str, required=True, help='filepath to directory containing saved chunks')
parser.add_argument('--chunk_size', type=int, default=1000000, help='size of each chunk')
args = parser.parse_args()
CHUNK_SIZES = args.chunk_size
all_chunks = []
# iterate over chunk start offsets; 5903526 is presumably the total number of
# encoded rows (BLINK's ~5.9M Wikipedia entities)
for fn in range(0, 5903526, CHUNK_SIZES):
f_chunk = os.path.join(
args.path_to_saved_chunks, '{}_{}.t7'.format(fn, fn+CHUNK_SIZES),
)
if not os.path.exists(f_chunk) or os.path.getsize(f_chunk) == 0:
continue
loaded_chunk = torch.load(f_chunk)
all_chunks.append(loaded_chunk[:CHUNK_SIZES])
all_chunks = torch.cat(all_chunks, dim=0)
torch.save(all_chunks, os.path.join(
args.path_to_saved_chunks, 'all.t7',
))
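# Optional sanity check (a sketch; assumes no chunk files were missing or
# truncated above, otherwise the merged row count will be smaller):
# assert all_chunks.size(0) == 5903526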
|
BLINK-main
|
scripts/merge_candidates.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import json
import os
import numpy as np
import torch
from elq.vcg_utils.measures import entity_linking_tp_with_overlap
from tqdm import tqdm
from transformers import BertTokenizer
tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
id2title = json.load(open("models/id2title.json"))
def load_dists(all_save_dir, data, split, model, joint_threshold):
save_dir = "{}/{}_{}_{}_joint{}_top50cands_final_joint".format(all_save_dir, data, split, model, joint_threshold)
if not os.path.exists(save_dir):
save_dir += "_0"
with open(os.path.join(save_dir, "biencoder_outs.jsonl")) as f:
examples = f.readlines()
examples = [json.loads(line) for line in examples]
biencoder_indices = np.load(os.path.join(save_dir, "biencoder_nns.npy"), allow_pickle=True) # corresponds to biencoder_dists
biencoder_dists = np.load(os.path.join(save_dir, "biencoder_dists.npy"), allow_pickle=True)
if os.path.exists(os.path.join(save_dir, "biencoder_cand_scores.npy")):
cand_dists = np.load(os.path.join(save_dir, "biencoder_cand_scores.npy"), allow_pickle=True)
else:
cand_dists = np.load(os.path.join(save_dir, "biencoder_cand_dists.npy"), allow_pickle=True)
pred_mention_bounds = np.load(os.path.join(save_dir, "biencoder_mention_bounds.npy"), allow_pickle=True)
if os.path.exists(os.path.join(save_dir, "biencoder_mention_scores.npy")):
mention_dists = np.load(os.path.join(save_dir, "biencoder_mention_scores.npy"), allow_pickle=True)
else:
mention_dists = [biencoder_dists[i] - torch.log_softmax(torch.tensor(cand_dists[i]), 1).numpy() for i in range(len(biencoder_dists))]
# inverse sigmoid
mention_dists = [np.log(md / (1 - md)) for md in mention_dists]
return examples, biencoder_indices, biencoder_dists, cand_dists, pred_mention_bounds, mention_dists
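# load_dists returns, per example: the raw jsonl examples, top candidate entity
# ids (biencoder_indices), combined scores (biencoder_dists), candidate-only
# scores (cand_dists), predicted mention spans (pred_mention_bounds), and
# mention-only scores as logits (mention_dists).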
def filter_repeats(pred_triples, pred_scores):
# sort pred_triples and pred_scores by pred_scores
score_sort_ids = sorted(enumerate(pred_scores), key=lambda x: x[1], reverse=True)
pred_triples = [pred_triples[si[0]] for si in score_sort_ids]
pred_scores = [si[1] for si in score_sort_ids]
all_pred_entities = {}
all_pred_entities_pruned = []
all_pred_scores_pruned = []
for idx, ent in enumerate(pred_triples):
if ent[0] in all_pred_entities:
continue
all_pred_entities_pruned.append(ent)
all_pred_scores_pruned.append(pred_scores[idx])
all_pred_entities[ent[0]] = 0
return all_pred_entities_pruned, all_pred_scores_pruned
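# Inline self-check (a tiny sketch with hypothetical values): filter_repeats
# keeps only the highest-scoring triple per entity id, sorted by score.
assert filter_repeats(
    [["Q1", 0, 2], ["Q1", 3, 4], ["Q2", 5, 6]], [0.2, 0.9, 0.5]
) == ([["Q1", 3, 4], ["Q2", 5, 6]], [0.9, 0.5])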
def filter_overlaps(tokens, pred_triples, pred_scores):
all_pred_entities_pruned = []
all_pred_scores_pruned = []
mention_masked_utterance = np.zeros(len(tokens))
# ensure well-formed-ness, prune overlaps
# greedily pick highest scoring, then prune all overlapping
for idx, mb in enumerate(pred_triples):
if sum(mention_masked_utterance[mb[1]:mb[2]]) > 0:
continue
all_pred_entities_pruned.append(mb)
all_pred_scores_pruned.append(pred_scores[idx])
mention_masked_utterance[mb[1]:mb[2]] = 1
return all_pred_entities_pruned, all_pred_scores_pruned
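# Inline self-check (hypothetical values): spans overlapping an already-kept,
# higher-ranked mention are pruned; input is assumed sorted by score.
assert filter_overlaps(
    list(range(6)), [["Q1", 0, 3], ["Q2", 2, 5]], [0.9, 0.8]
) == ([["Q1", 0, 3]], [0.9])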
def filter_repeat_overlaps(tokens, pred_triples, pred_scores):
all_pred_entities_pruned = []
all_pred_scores_pruned = []
mention_masked_utterance = {triple[0]: np.zeros(len(tokens)) for triple in pred_triples}
# ensure well-formed-ness, prune overlaps
# greedily pick highest scoring, then prune all overlapping
for idx, mb in enumerate(pred_triples):
if sum(mention_masked_utterance[mb[0]][mb[1]:mb[2]]) > 0:
continue
all_pred_entities_pruned.append(mb)
all_pred_scores_pruned.append(pred_scores[idx])
mention_masked_utterance[mb[0]][mb[1]:mb[2]] = 1
return all_pred_entities_pruned, all_pred_scores_pruned
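# Note: filter_repeat_overlaps tracks one mask per entity id, so the same span
# may be kept for different entities; it is defined but not used below.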
# threshold and sort by score
def get_threshold_mask_and_sort(mention_dists, cand_dists, biencoder_dists, valid_cands_mask, threshold, top_mention_sort=True):
"""
top_mention_sort:
True: sort top candidates per mention only
scores_mask and sorted_idxs has dim (#_valid_examples,)
False: sort ALL candidates (assumes multiple candidates per mention)
scores_mask and sorted_idxs has dim (#_valid_examples, #_cands)
"""
mention_scores = mention_dists[valid_cands_mask]
if len(mention_scores.shape) > 1:
mention_scores = mention_scores[:,0]
scores = torch.log_softmax(torch.tensor(cand_dists[valid_cands_mask]), 1) + torch.sigmoid(torch.tensor(mention_scores)).log().unsqueeze(-1)
if top_mention_sort:
scores_mask = (scores[:,0] > threshold)
# sort...
_, sorted_idxs = scores[:,0][scores_mask].sort(descending=True)
sorted_filtered_scores = scores[scores_mask][sorted_idxs]
else:
scores_mask = (scores > threshold)  # threshold over ALL candidates (noted as best for GraphQuestions)
sorted_filtered_scores, sorted_idxs = scores[scores_mask].sort(descending=True)
return scores_mask.numpy(), sorted_idxs.numpy(), sorted_filtered_scores.numpy()
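# The combined score above is log_softmax over the candidate scores plus
# log(sigmoid(mention score)), i.e. roughly log p(entity | mention) +
# log p(mention). A minimal usage sketch (shapes are assumptions):
# scores_mask, sorted_idxs, sorted_scores = get_threshold_mask_and_sort(
#     mention_dists[i], cand_dists[i], biencoder_dists[i], valid_mask, -5)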
all_save_dir = "saved_preds"
model_type = "finetuned_webqsp" # wiki
if model_type == "wiki":
model = '{0}_all_ents;all_mention_biencoder_all_avg_true_128_true_true_bert_large_qa_linear;15'.format(model_type)
elif model_type == "finetuned_webqsp":
model = '{0}_all_ents;all_mention_biencoder_all_avg_true_128_true_true_bert_large_qa_linear;18'.format(model_type)
get_topk_cands = True
topk = 100
if get_topk_cands:
threshold = -float("inf")
else:
threshold = -5
for data in ["nq", "WebQuestions", "triviaqa"]:
if data == "nq":
splits = ["train0", "train1", "train2", "dev", "test"]
else:
splits = ["train", "dev", "test"]
for split in splits:
(
examples, biencoder_indices, biencoder_dists,
cand_dists, pred_mention_bounds, mention_dists
) = load_dists(all_save_dir, data, split, model, "0.0" if model_type == "wiki" else "-inf")
new_examples = []
num_correct = 0
num_predicted = 0
num_gold = 0
for i, example in enumerate(tqdm(examples)):
# select valid candidates: drop -1 padding; the self-comparison filters NaN (NaN != NaN)
valid_cands_mask = (biencoder_dists[i][:,0] != -1) & (biencoder_dists[i][:,0] == biencoder_dists[i][:,0])
# get scores and masking/sorting by score
scores_mask, sorted_idxs, sorted_filtered_scores = get_threshold_mask_and_sort(
mention_dists[i], cand_dists[i], biencoder_dists[i], valid_cands_mask, threshold, top_mention_sort=(not get_topk_cands)
)
if get_topk_cands:
# (filtered_examples, #cands, 2)
ex_pred_mention_bounds = np.repeat(np.expand_dims(pred_mention_bounds[i], axis=1), biencoder_indices[i].shape[1], axis=1)
# (filtered_examples, #cands,)
ex_mention_dists = np.repeat(np.expand_dims(mention_dists[i], axis=1), biencoder_indices[i].shape[1], axis=1)
ex_biencoder_indices = biencoder_indices[i]
ex_cand_dists = cand_dists[i]
else:
ex_pred_mention_bounds = pred_mention_bounds[i]
ex_mention_dists = mention_dists[i]
ex_biencoder_indices = biencoder_indices[i] #[:,0]
ex_cand_dists = cand_dists[i] #[:,0]
# output threshold_entities_translate, pred_triples, pred_scores
threshold_entities = ex_biencoder_indices[valid_cands_mask][scores_mask][sorted_idxs] # (filtered_exs, #cands) / (filtered_cands,)
threshold_mention_bounds = ex_pred_mention_bounds[valid_cands_mask][scores_mask][sorted_idxs] # (filtered_exs, 2) / (filtered_cands, 2)
threshold_cand_scores = ex_cand_dists[valid_cands_mask][scores_mask][sorted_idxs] # (filtered_exs, #cands) / (filtered_cands,)
threshold_mention_scores = ex_mention_dists[valid_cands_mask][scores_mask][sorted_idxs] # (filtered_exs,) / (filtered_cands,)
threshold_scores = sorted_filtered_scores # (filtered_exs, #cands) / (filtered_cands,)
threshold_entities_translate = {}
pred_triples = []
pred_scores = []
example['tokens'] = [101] + example['tokens'] + [102]  # wrap with BERT [CLS] (101) and [SEP] (102) ids
for m in range(len(threshold_scores)):
mb = threshold_mention_bounds[m].tolist()
mention_text = tokenizer.decode(example['tokens'][mb[0]:mb[1]+1])
threshold_entities_translate[mention_text] = {
"mention_idx": m, "mention_score": float(threshold_mention_scores[m])
}
if len(threshold_entities[m].shape) > 0:
pred_triples.append([str(threshold_entities[m][0]), mb[0], mb[1]+1])
pred_scores.append(float(threshold_scores[m][0]))
threshold_entities_translate[mention_text]["candidate_entities"] = []
threshold_entities_translate[mention_text]["cand_scores"] = threshold_cand_scores[m].tolist()
for id in threshold_entities[m]:
threshold_entities_translate[mention_text]["candidate_entities"].append(id2title[str(id)])
else:
pred_triples.append([str(threshold_entities[m]), mb[0], mb[1]+1])
pred_scores.append(float(threshold_scores[m]))
threshold_entities_translate[mention_text]["candidate_entities"] = id2title[str(threshold_entities[m])]
threshold_entities_translate[mention_text]["cand_scores"] = float(threshold_cand_scores[m])
new_ex = {
"id": example["id"],
"text": example["text"],
"tokens": example["tokens"],
}
if "gold_triples" in example:
all_pred_entities_pruned = pred_triples
all_pred_scores_pruned = pred_scores
if get_topk_cands:
all_pred_entities_pruned, all_pred_scores_pruned = filter_repeats(pred_triples, pred_scores)
all_pred_entities_pruned = all_pred_entities_pruned[:topk]
all_pred_scores_pruned = all_pred_scores_pruned[:topk]
else:
all_pred_entities_pruned, all_pred_scores_pruned = filter_overlaps(example["tokens"], pred_triples, pred_scores)
else:
all_pred_entities_pruned = pred_triples
all_pred_scores_pruned = pred_scores
if get_topk_cands:
all_pred_entities_pruned, all_pred_scores_pruned = filter_repeats(pred_triples, pred_scores)
all_pred_entities_pruned = all_pred_entities_pruned[:topk]
all_pred_scores_pruned = all_pred_scores_pruned[:topk]
else:
all_pred_entities_pruned, all_pred_scores_pruned = filter_overlaps(example["tokens"], pred_triples, pred_scores)
new_ex['pred_mentions'] = threshold_entities_translate
new_ex['pred_triples'] = [[triple[0], triple[1]-1, triple[2]-1] for triple in all_pred_entities_pruned]  # shift bounds back to undo the [CLS] offset
new_ex['pred_triples_score'] = all_pred_scores_pruned
new_ex['pred_triples_string'] = [
[id2title[triple[0]], tokenizer.decode(example['tokens'][triple[1]:triple[2]])]
for triple in all_pred_entities_pruned
]
# get scores
if "gold_triples" in example:
gold_triples = example["gold_triples"]
new_ex["gold_triples"] = gold_triples
num_overlap_weak, num_overlap_strong = entity_linking_tp_with_overlap(gold_triples, new_ex['pred_triples'])
num_correct += num_overlap_weak
num_predicted += len(all_pred_entities_pruned)
num_gold += len(gold_triples)
new_examples.append(new_ex)
# compute metrics
if num_predicted > 0 and num_gold > 0:
p = num_correct / num_predicted
r = num_correct / num_gold
f1 = 2*p*r / (p+r) if (p+r) > 0 else 0.0
print(f1)
f1s.append(f1)
if get_topk_cands:
print("Saving {} {} {}".format(data, split, str(topk)))
save_file = "{}_{}_top{}.jsonl".format(split, model_type, str(topk))
else:
print("Saving {} {} {}".format(data, split, str(threshold)))
save_file = "{}_{}_{}.jsonl".format(split, model_type, str(threshold))
# save
with open(os.path.join("/checkpoint/belindali/entity_link/data/{}/saved_preds".format(data), save_file), 'w') as wf:
for new_ex in new_examples:
wf.write(json.dumps(new_ex) + "\n")
|
BLINK-main
|
scripts/tune_hyperparams_new.py
|