# Code-dataset dump: each Python source file below is followed by its repo_name and file_path.
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from logging import getLogger
from random import randrange
import os
import numpy as np
from sklearn.feature_extraction import image
import torch
import torch.nn as nn
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torch.utils.data.sampler import Sampler
from .YFCC100M import YFCC100M_dataset
logger = getLogger()
def load_data(args):
"""
Load dataset.
"""
if 'yfcc100m' in args.data_path:
return YFCC100M_dataset(args.data_path, size=args.size_dataset)
return datasets.ImageFolder(args.data_path)
def get_data_transformations(rotation=0):
"""
Return data transformations for clustering and for training
"""
tr_normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225],
)
final_process = [transforms.ToTensor(), tr_normalize]
# for clustering stage
tr_central_crop = transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(224),
lambda x: np.asarray(x),
Rotate(0)
] + final_process)
# for training stage
tr_dataug = transforms.Compose([
transforms.RandomResizedCrop(224),
transforms.RandomHorizontalFlip(),
lambda x: np.asarray(x),
Rotate(rotation)
] + final_process)
return tr_central_crop, tr_dataug
class Rotate(object):
def __init__(self, rot):
self.rot = rot
def __call__(self, img):
return rotate_img(img, self.rot)
def rotate_img(img, rot):
if rot == 0: # 0 degrees rotation
return img
elif rot == 90: # 90 degrees rotation
return np.flipud(np.transpose(img, (1, 0, 2))).copy()
    elif rot == 180: # 180 degrees rotation
return np.fliplr(np.flipud(img)).copy()
elif rot == 270: # 270 degrees rotation / or -90
return np.transpose(np.flipud(img), (1, 0, 2)).copy()
    else:
        raise ValueError('rotation must be one of 0, 90, 180, 270 (got {})'.format(rot))
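# --- Illustrative check (not part of the original file) ---
# For an H x W x C array, the transpose/flip compositions above match numpy's
# rot90 applied to the spatial axes (axes 0 and 1), e.g.:
#   a = np.arange(24).reshape(2, 4, 3)
#   np.array_equal(rotate_img(a, 90), np.rot90(a, 1))   # True
#   np.array_equal(rotate_img(a, 180), np.rot90(a, 2))  # True
#   np.array_equal(rotate_img(a, 270), np.rot90(a, 3))  # True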
class KFoldSampler(Sampler):
def __init__(self, im_per_target, shuffle):
self.im_per_target = im_per_target
N = 0
for tar in im_per_target:
N = N + len(im_per_target[tar])
self.N = N
self.shuffle = shuffle
def __iter__(self):
indices = np.zeros(self.N).astype(int)
c = 0
for tar in self.im_per_target:
indices[c: c + len(self.im_per_target[tar])] = self.im_per_target[tar]
c = c + len(self.im_per_target[tar])
if self.shuffle:
np.random.shuffle(indices)
return iter(indices)
def __len__(self):
return self.N
class KFold():
"""Class to perform k-fold cross-validation.
Args:
im_per_target (Dict): key (target), value (list of data with this target)
i (int): index of the round of cross validation to perform
K (int): dataset randomly partitioned into K equal sized subsamples
Attributes:
val (KFoldSampler): validation sampler
train (KFoldSampler): training sampler
"""
def __init__(self, im_per_target, i, K):
assert(i<K)
per_target = {}
for tar in im_per_target:
per_target[tar] = int(len(im_per_target[tar]) // K)
im_per_target_train = {}
im_per_target_val = {}
for k in range(K):
for L in im_per_target:
if k==i:
im_per_target_val[L] = im_per_target[L][k * per_target[L]: (k + 1) * per_target[L]]
else:
if not L in im_per_target_train:
im_per_target_train[L] = []
im_per_target_train[L] = im_per_target_train[L] + im_per_target[L][k * per_target[L]: (k + 1) * per_target[L]]
self.val = KFoldSampler(im_per_target_val, False)
self.train = KFoldSampler(im_per_target_train, True)
def per_target(imgs):
"""Arrange samples per target.
Args:
imgs (list): List of (_, target) tuples.
Returns:
dict: key (target), value (list of data with this target)
"""
res = {}
for index in range(len(imgs)):
_, target = imgs[index]
if target not in res:
res[target] = []
res[target].append(index)
return res
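# --- Illustrative usage sketch (not part of the original file) ---
# Minimal, self-contained demo of per_target + KFold with synthetic (path, target)
# pairs; with a real dataset, `fake_imgs` would be ImageFolder(...).imgs.
if __name__ == '__main__':
    fake_imgs = [('img_%d.jpg' % i, i % 3) for i in range(30)]  # 3 classes, 10 images each
    folds = KFold(per_target(fake_imgs), i=0, K=5)
    print('train size:', len(folds.train), 'val size:', len(folds.val))  # 24 / 6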
# ---- file: src/data/loader.py (repo: DeeperCluster-main) ----
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import zipfile
import numpy as np
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import torch.utils.data as data
def loader(path_zip, file_img):
"""
Load imagefile from zip.
"""
with zipfile.ZipFile(path_zip, 'r') as myzip:
img = Image.open(myzip.open(file_img))
return img.convert('RGB')
class YFCC100M_dataset(data.Dataset):
"""
YFCC100M dataset.
"""
def __init__(self, root, size, flickr_unique_ids=True, transform=None):
self.root = root
self.transform = transform
self.sub_classes = None
# remove data with uniform color and data we didn't manage to download
if flickr_unique_ids:
self.indexes = np.load(os.path.join(os.path.dirname(os.path.realpath(__file__)), 'flickr_unique_ids.npy'))
self.indexes = self.indexes[:min(size, len(self.indexes))]
else:
self.indexes = np.arange(size)
# for subsets
self.subset_indexes = None
def __getitem__(self, ind):
index = ind
if self.subset_indexes is not None:
index = self.subset_indexes[ind]
index = self.indexes[index]
index = format(index, "0>8d")
repo = index[:2]
z = index[2: 5]
file_img = index[5:] + '.jpg'
path_zip = os.path.join(self.root, repo, z) + '.zip'
# load the image
img = loader(path_zip, file_img)
# apply transformation
if self.transform is not None:
img = self.transform(img)
# id of cluster
sub_class = -100
if self.sub_classes is not None:
sub_class = self.sub_classes[ind]
return img, sub_class
def __len__(self):
if self.subset_indexes is not None:
return len(self.subset_indexes)
return len(self.indexes)
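# --- Illustrative note (not part of the original file) ---
# __getitem__ decodes each 8-digit, zero-padded index into a path inside the
# sharded zip archives. For example, index 12345678 gives:
#   repo = '12', z = '345', file_img = '678.jpg'
#   -> image '678.jpg' inside '<root>/12/345.zip'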
# ---- file: src/data/YFCC100M.py (repo: DeeperCluster-main) ----
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import glob
import os
from collections import defaultdict
from PIL import Image
from PIL import ImageFile
ImageFile.LOAD_TRUNCATED_IMAGES = True
import numpy as np
import torch.utils.data as data
class VOC2007_dataset(data.Dataset):
def __init__(self, voc_dir, split='train', transform=None):
# Find the image sets
image_set_dir = os.path.join(voc_dir, 'ImageSets', 'Main')
image_sets = glob.glob(os.path.join(image_set_dir, '*_' + split + '.txt'))
assert len(image_sets) == 20
# Read the labels
self.n_labels = len(image_sets)
images = defaultdict(lambda:-np.ones(self.n_labels, dtype=np.uint8))
for k, s in enumerate(sorted(image_sets)):
for l in open(s, 'r'):
name, lbl = l.strip().split()
lbl = int(lbl)
# Switch the ignore label and 0 label (in VOC -1: not present, 0: ignore)
if lbl < 0:
lbl = 0
elif lbl == 0:
lbl = 255
images[os.path.join(voc_dir, 'JPEGImages', name + '.jpg')][k] = lbl
self.images = [(k, images[k]) for k in images.keys()]
np.random.shuffle(self.images)
self.transform = transform
def __len__(self):
return len(self.images)
def __getitem__(self, i):
img = Image.open(self.images[i][0])
img = img.convert('RGB')
if self.transform is not None:
img = self.transform(img)
return img, self.images[i][1]
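# --- Illustrative note (not part of the original file) ---
# The VOC image-set files mark each class as -1 (not present), 0 (difficult/ignore)
# or 1 (present); the remapping above stores 0 = not present, 255 = ignore, e.g.:
#   raw VOC labels:    [-1,   0, 1]
#   stored label vec:  [ 0, 255, 1]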
# ---- file: src/data/VOC2007.py (repo: DeeperCluster-main) ----
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import time
import os
from six.moves import cPickle
import traceback
from collections import defaultdict
import captioning.utils.opts as opts
import captioning.models as models
from captioning.data.dataloader import DataLoader
import skimage.io
import captioning.utils.eval_utils_joint as eval_utils
import captioning.utils.misc as utils
from captioning.utils.rewards import init_scorer, get_self_critical_reward
from captioning.modules.loss_wrapper_joint import LossWrapper
def add_summary_value(writer, key, value, iteration):
if writer:
writer.add_scalar(key, value, iteration)
def train(opt):
################################
# Build dataloader
################################
loader = DataLoader(opt)
opt.vocab_size = loader.vocab_size
opt.seq_length = loader.seq_length
##########################
# Initialize infos
##########################
infos = {
'iter': 0,
'epoch': 0,
'loader_state_dict': None,
'vocab': loader.get_vocab(),
}
    # Load old infos (if any) and check that the saved model options are compatible
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'infos_'+opt.id+'.pkl')):
with open(os.path.join(opt.start_from, 'infos_'+opt.id+'.pkl'), 'rb') as f:
infos = utils.pickle_load(f)
saved_model_opt = infos['opt']
need_be_same=["caption_model", "rnn_type", "rnn_size", "num_layers"]
for checkme in need_be_same:
assert getattr(saved_model_opt, checkme) == getattr(opt, checkme), "Command line argument and saved model disagree on '%s' " % checkme
infos['opt'] = opt
#########################
# Build logger
#########################
# naive dict logger
histories = defaultdict(dict)
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl')):
with open(os.path.join(opt.start_from, 'histories_'+opt.id+'.pkl'), 'rb') as f:
histories.update(utils.pickle_load(f))
# tensorboard logger
tb_summary_writer = SummaryWriter(opt.checkpoint_path)
##########################
# Build model
##########################
opt.vocab = loader.get_vocab()
model = models.setup(opt).cuda()
del opt.vocab
# Load pretrained weights:
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from, 'model.pth')):
model.load_state_dict(torch.load(os.path.join(opt.start_from, 'model.pth')))
    # Wrap the generation model with the loss function (used for training).
    # This allows the loss to be computed separately on each device.
lw_model = LossWrapper(model, opt)
# Wrap with dataparallel
dp_model = torch.nn.DataParallel(model)
dp_model.vocab = getattr(model, 'vocab', None) # nasty
dp_lw_model = torch.nn.DataParallel(lw_model)
##########################
# Build optimizer
##########################
if opt.noamopt:
assert opt.caption_model in ['transformer', 'bert', 'm2transformer'], 'noamopt can only work with transformer'
optimizer = utils.get_std_opt(model, optim_func=opt.optim, factor=opt.noamopt_factor, warmup=opt.noamopt_warmup)
elif opt.reduce_on_plateau:
optimizer = utils.build_optimizer(model.parameters(), opt)
optimizer = utils.ReduceLROnPlateau(optimizer,
factor=opt.reduce_on_plateau_factor,
patience=opt.reduce_on_plateau_patience)
else:
optimizer = utils.build_optimizer(model.parameters(), opt)
# Load the optimizer
if opt.start_from is not None and os.path.isfile(os.path.join(opt.start_from,"optimizer.pth")):
optimizer.load_state_dict(torch.load(os.path.join(opt.start_from, 'optimizer.pth')))
#########################
# Get ready to start
#########################
iteration = infos['iter']
epoch = infos['epoch']
    # For backward compatibility
if 'iterators' in infos:
infos['loader_state_dict'] = {split: {'index_list': infos['split_ix'][split], 'iter_counter': infos['iterators'][split]} for split in ['train', 'val', 'test']}
loader.load_state_dict(infos['loader_state_dict'])
    best_val_score = None
    if opt.load_best_score == 1:
        best_val_score = infos.get('best_val_score', None)
if opt.noamopt:
optimizer._step = iteration
# flag indicating finish of an epoch
# Always set to True at the beginning to initialize the lr or etc.
epoch_done = True
# Assure in training mode
dp_lw_model.train()
# Start training
try:
while True:
# Stop if reaching max epochs
if epoch >= opt.max_epochs and opt.max_epochs != -1:
break
if epoch_done:
if not opt.noamopt and not opt.reduce_on_plateau:
# Assign the learning rate
if epoch > opt.learning_rate_decay_start and opt.learning_rate_decay_start >= 0:
frac = (epoch - opt.learning_rate_decay_start) // opt.learning_rate_decay_every
decay_factor = opt.learning_rate_decay_rate ** frac
opt.current_lr = opt.learning_rate * decay_factor
else:
opt.current_lr = opt.learning_rate
utils.set_lr(optimizer, opt.current_lr) # set the decayed rate
# Assign the scheduled sampling prob
if epoch > opt.scheduled_sampling_start and opt.scheduled_sampling_start >= 0:
frac = (epoch - opt.scheduled_sampling_start) // opt.scheduled_sampling_increase_every
opt.ss_prob = min(opt.scheduled_sampling_increase_prob * frac, opt.scheduled_sampling_max_prob)
model.ss_prob = opt.ss_prob
# If start self critical training
if opt.self_critical_after != -1 and epoch >= opt.self_critical_after:
sc_flag = True
init_scorer(opt.cached_tokens)
else:
sc_flag = False
# If start structure loss training
if opt.structure_after != -1 and epoch >= opt.structure_after:
struc_flag = True
init_scorer(opt.cached_tokens)
else:
struc_flag = False
epoch_done = False
start = time.time()
if opt.use_warmup and (iteration < opt.noamopt_warmup):
opt.current_lr = opt.learning_rate * (iteration+1) / opt.noamopt_warmup
utils.set_lr(optimizer, opt.current_lr)
# Load data from train split (0)
data = loader.get_batch('train')
print('Read data:', time.time() - start)
torch.cuda.synchronize()
start = time.time()
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'], data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_ if _ is None else _.cuda() for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
optimizer.zero_grad()
model_out = dp_lw_model(fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks, data['gts'], torch.arange(0, len(data['gts'])), sc_flag, struc_flag)
loss = model_out['loss'].mean()
loss.backward()
if opt.grad_clip_value != 0:
getattr(torch.nn.utils, 'clip_grad_%s_' %(opt.grad_clip_mode))(model.parameters(), opt.grad_clip_value)
if not torch.isnan(loss):
if opt.language_eval == 1:
print('Doing final model evaluation, not updating model.')
else:
optimizer.step()
else:
print('Meet nan loss', data['gts'], model_out)
train_loss = loss.item()
torch.cuda.synchronize()
end = time.time()
if struc_flag:
print("iter {} (epoch {}), train_loss = {:.3f}, lm_loss = {:.3f}, struc_loss = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, train_loss, model_out['lm_loss'].mean().item(), model_out['struc_loss'].mean().item(), end - start))
elif not sc_flag:
print("iter {} (epoch {}), train_loss = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, train_loss, end - start))
else:
print("iter {} (epoch {}), avg_reward = {:.3f}, time/batch = {:.3f}" \
.format(iteration, epoch, model_out['reward'].mean(), end - start))
# Update the iteration and epoch
iteration += 1
if data['bounds']['wrapped']:
epoch += 1
epoch_done = True
# Write the training loss summary
if (iteration % opt.losses_log_every == 0):
tb_summary_writer.add_scalar('train_loss', train_loss, iteration)
if opt.noamopt:
opt.current_lr = optimizer.rate()
elif opt.reduce_on_plateau:
opt.current_lr = optimizer.current_lr
tb_summary_writer.add_scalar('learning_rate', opt.current_lr, iteration)
tb_summary_writer.add_scalar('scheduled_sampling_prob', model.ss_prob, iteration)
if sc_flag:
tb_summary_writer.add_scalar('avg_reward', model_out['reward'].mean(), iteration)
elif struc_flag:
tb_summary_writer.add_scalar('lm_loss', model_out['lm_loss'].mean().item(), iteration)
tb_summary_writer.add_scalar('struc_loss', model_out['struc_loss'].mean().item(), iteration)
tb_summary_writer.add_scalar('reward', model_out['reward'].mean().item(), iteration)
tb_summary_writer.add_scalar('reward_var', model_out['reward'].var(1).mean(), iteration)
histories['loss_history'][iteration] = train_loss if not sc_flag else model_out['reward'].mean()
histories['lr_history'][iteration] = opt.current_lr
histories['ss_prob_history'][iteration] = model.ss_prob
# update infos
infos['iter'] = iteration
infos['epoch'] = epoch
infos['loader_state_dict'] = loader.state_dict()
# make evaluation on validation set, and save model
if opt.language_eval == 1 or (iteration % opt.save_checkpoint_every == 0 and not opt.save_every_epoch) or \
(epoch_done and opt.save_every_epoch):
# eval model
eval_kwargs = {'split': 'val',
'dataset': opt.input_json}
eval_kwargs.update(vars(opt))
assert (opt.task in ['caption', 'c_joint_t'] and opt.eval_task == 'caption') or \
(opt.task in ['trace', 'c_joint_t'] and opt.eval_task == 'trace') or \
(opt.task == 'pred_both' and opt.eval_task == 'pred_both')
if opt.eval_task == 'caption':
val_loss, predictions, lang_stats = eval_utils.eval_split(dp_model, lw_model.crit_caption,
loader,
'caption', eval_kwargs)
elif opt.eval_task == 'trace':
val_loss = None
# This is a little time consuming due to the linear programming solve.
val_loss = eval_utils.eval_trace_generation(dp_model, lw_model.crit_trace, loader, window_size=0,
eval_kwargs=eval_kwargs) # Adjust the window_size as needed
lang_stats = None;
predictions = None;
elif opt.eval_task == 'pred_both':
val_loss, predictions, lang_stats = eval_utils.eval_split(dp_model, lw_model.crit_caption, loader,
'both', eval_kwargs) # caption generation
val_loss_trace = eval_utils.eval_trace_generation(dp_model, lw_model.crit_trace, loader, window_size=0,
eval_kwargs=eval_kwargs) # Adjust the window_size as needed
if opt.language_eval == 1:
break # The language eval is done during testing, after the training finishes.
if opt.reduce_on_plateau:
if 'CIDEr' in lang_stats:
optimizer.scheduler_step(-lang_stats['CIDEr'])
else:
optimizer.scheduler_step(val_loss)
# Write validation result into summary
tb_summary_writer.add_scalar('validation loss', val_loss, iteration)
if lang_stats is not None:
for k,v in lang_stats.items():
tb_summary_writer.add_scalar(k, v, iteration)
histories['val_result_history'][iteration] = {'loss': val_loss, 'lang_stats': lang_stats, 'predictions': predictions}
                # Save model if it is improving on the validation result
if opt.language_eval == 1:
current_score = lang_stats['CIDEr']
else:
current_score = - val_loss
best_flag = False
if best_val_score is None or current_score > best_val_score:
best_val_score = current_score
best_flag = True
                # Dump miscellaneous information
infos['best_val_score'] = best_val_score
# '''
utils.save_checkpoint(opt, model, infos, optimizer, histories)
if opt.save_history_ckpt:
utils.save_checkpoint(opt, model, infos, optimizer,
append=str(epoch) if opt.save_every_epoch else str(iteration))
if best_flag:
utils.save_checkpoint(opt, model, infos, optimizer, append='best')
# '''
except (RuntimeError, KeyboardInterrupt):
print('Save ckpt on exception ...')
# '''
utils.save_checkpoint(opt, model, infos, optimizer)
print('Save ckpt done.')
stack_trace = traceback.format_exc()
print(stack_trace)
# '''
opt = opts.parse_opt()
train(opt)
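# --- Illustrative note (not part of the original file) ---
# The step-wise decay inside train() computes, for hypothetical settings
# learning_rate=5e-4, learning_rate_decay_start=0, learning_rate_decay_every=3,
# learning_rate_decay_rate=0.8 and epoch=7:
#   frac = (7 - 0) // 3 = 2
#   current_lr = 5e-4 * 0.8**2 = 3.2e-4
# With use_warmup, the lr is instead scaled linearly with the iteration until
# noamopt_warmup iterations have passed.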
# ---- file: tools/train.py (repo: connect-caption-and-trace-main) ----

# ---- file: captioning/__init__.py (repo: connect-caption-and-trace-main) (empty file) ----
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import torch
import torch.nn as nn
import numpy as np
import torch.optim as optim
import os
import torch.nn.functional as F
import six
from six.moves import cPickle
bad_endings = ['with','in','on','of','a','at','to','for','an','this','his','her','that']
bad_endings += ['the']
def pickle_load(f):
""" Load a pickle.
Parameters
----------
f: file-like object
"""
if six.PY3:
return cPickle.load(f, encoding='latin-1')
else:
return cPickle.load(f)
def pickle_dump(obj, f):
""" Dump a pickle.
Parameters
----------
obj: pickled object
f: file-like object
"""
if six.PY3:
return cPickle.dump(obj, f, protocol=2)
else:
return cPickle.dump(obj, f)
# modified from https://github.com/facebookresearch/detectron2/blob/master/detectron2/utils/comm.py
def serialize_to_tensor(data):
device = torch.device("cpu")
buffer = cPickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to(device=device)
return tensor
def deserialize(tensor):
buffer = tensor.cpu().numpy().tobytes()
return cPickle.loads(buffer)
# Input: seq, N*D numpy array, with element 0 .. vocab_size. 0 is END token.
def decode_sequence(ix_to_word, seq):
N, D = seq.size()
out = []
for i in range(N):
txt = ''
for j in range(D):
ix = seq[i,j]
if ix > 0 :
if j >= 1:
txt = txt + ' '
txt = txt + ix_to_word[str(ix.item())]
else:
break
if int(os.getenv('REMOVE_BAD_ENDINGS', '0')):
flag = 0
words = txt.split(' ')
for j in range(len(words)):
if words[-j-1] not in bad_endings:
flag = -j
break
txt = ' '.join(words[0:len(words)+flag])
out.append(txt.replace('@@ ', ''))
return out
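# --- Illustrative example (not part of the original file) ---
# With a toy vocabulary, decode_sequence stops at the first 0 (END token):
#   ix_to_word = {'1': 'a', '2': 'dog', '3': 'runs'}
#   seq = torch.LongTensor([[1, 2, 3, 0, 0]])
#   decode_sequence(ix_to_word, seq)  # -> ['a dog runs']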
def save_checkpoint(opt, model, infos, optimizer, histories=None, append=''):
if len(append) > 0:
append = '-' + append
    # create checkpoint_path if it doesn't exist
    print('Saving checkpoint to', opt.checkpoint_path)
if not os.path.isdir(opt.checkpoint_path):
os.makedirs(opt.checkpoint_path)
checkpoint_path = os.path.join(opt.checkpoint_path, 'model%s.pth' %(append))
torch.save(model.state_dict(), checkpoint_path)
print("model saved to {}".format(checkpoint_path))
optimizer_path = os.path.join(opt.checkpoint_path, 'optimizer%s.pth' %(append))
torch.save(optimizer.state_dict(), optimizer_path)
with open(os.path.join(opt.checkpoint_path, 'infos_'+opt.id+'%s.pkl' %(append)), 'wb') as f:
pickle_dump(infos, f)
if histories:
with open(os.path.join(opt.checkpoint_path, 'histories_'+opt.id+'%s.pkl' %(append)), 'wb') as f:
pickle_dump(histories, f)
def set_lr(optimizer, lr):
for group in optimizer.param_groups:
group['lr'] = lr
def get_lr(optimizer):
for group in optimizer.param_groups:
return group['lr']
def build_optimizer(params, opt):
if opt.optim == 'rmsprop':
return optim.RMSprop(params, opt.learning_rate, opt.optim_alpha, opt.optim_epsilon, weight_decay=opt.weight_decay)
elif opt.optim == 'adagrad':
return optim.Adagrad(params, opt.learning_rate, weight_decay=opt.weight_decay)
elif opt.optim == 'sgd':
return optim.SGD(params, opt.learning_rate, weight_decay=opt.weight_decay)
elif opt.optim == 'sgdm':
return optim.SGD(params, opt.learning_rate, opt.optim_alpha, weight_decay=opt.weight_decay)
elif opt.optim == 'sgdmom':
return optim.SGD(params, opt.learning_rate, opt.optim_alpha, weight_decay=opt.weight_decay, nesterov=True)
elif opt.optim == 'adam':
return optim.Adam(params, opt.learning_rate, (opt.optim_alpha, opt.optim_beta), opt.optim_epsilon, weight_decay=opt.weight_decay)
elif opt.optim == 'adamw':
return optim.AdamW(params, opt.learning_rate, (opt.optim_alpha, opt.optim_beta), opt.optim_epsilon, weight_decay=opt.weight_decay)
else:
raise Exception("bad option opt.optim: {}".format(opt.optim))
def penalty_builder(penalty_config):
if penalty_config == '':
return lambda x,y: y
pen_type, alpha = penalty_config.split('_')
alpha = float(alpha)
if pen_type == 'wu':
return lambda x,y: length_wu(x,y,alpha)
if pen_type == 'avg':
return lambda x,y: length_average(x,y,alpha)
def length_wu(length, logprobs, alpha=0.):
"""
NMT length re-ranking score from
"Google's Neural Machine Translation System" :cite:`wu2016google`.
"""
modifier = (((5 + length) ** alpha) /
((5 + 1) ** alpha))
return (logprobs / modifier)
def length_average(length, logprobs, alpha=0.):
"""
Returns the average probability of tokens in a sequence.
"""
return logprobs / length
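# --- Illustrative note (not part of the original file) ---
# penalty_builder('wu_0.9') returns a callable applying the GNMT length penalty
# with alpha=0.9, while penalty_builder('') leaves the logprobs unchanged, e.g.:
#   pen = penalty_builder('wu_0.9')
#   pen(length, logprobs)  # == length_wu(length, logprobs, 0.9)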
class NoamOpt(object):
"Optim wrapper that implements rate."
def __init__(self, model_size, factor, warmup, optimizer):
self.optimizer = optimizer
self._step = 0
self.warmup = warmup
self.factor = factor
self.model_size = model_size
self._rate = 0
def step(self):
"Update parameters and rate"
self._step += 1
rate = self.rate()
for p in self.optimizer.param_groups:
p['lr'] = rate
self._rate = rate
self.optimizer.step()
def rate(self, step = None):
"Implement `lrate` above"
if step is None:
step = self._step
return self.factor * \
(self.model_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
def __getattr__(self, name):
return getattr(self.optimizer, name)
def state_dict(self):
state_dict = self.optimizer.state_dict()
state_dict['_step'] = self._step
return state_dict
def load_state_dict(self, state_dict):
if '_step' in state_dict:
self._step = state_dict['_step']
del state_dict['_step']
self.optimizer.load_state_dict(state_dict)
class ReduceLROnPlateau(object):
"Optim wrapper that implements rate."
def __init__(self, optimizer, mode='min', factor=0.1, patience=10, verbose=False, threshold=0.0001, threshold_mode='rel', cooldown=0, min_lr=0, eps=1e-08):
self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, mode, factor, patience, verbose, threshold, threshold_mode, cooldown, min_lr, eps)
self.optimizer = optimizer
self.current_lr = get_lr(optimizer)
def step(self):
"Update parameters and rate"
self.optimizer.step()
def scheduler_step(self, val):
self.scheduler.step(val)
self.current_lr = get_lr(self.optimizer)
def state_dict(self):
return {'current_lr':self.current_lr,
'scheduler_state_dict': self.scheduler.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict()}
def load_state_dict(self, state_dict):
if 'current_lr' not in state_dict:
# it's normal optimizer
self.optimizer.load_state_dict(state_dict)
            set_lr(self.optimizer, self.current_lr) # use the lr from the option
else:
            # it's a scheduler
self.current_lr = state_dict['current_lr']
self.scheduler.load_state_dict(state_dict['scheduler_state_dict'])
self.optimizer.load_state_dict(state_dict['optimizer_state_dict'])
# current_lr is actually useless in this case
def rate(self, step = None):
"Implement `lrate` above"
if step is None:
step = self._step
return self.factor * \
(self.model_size ** (-0.5) *
min(step ** (-0.5), step * self.warmup ** (-1.5)))
def __getattr__(self, name):
return getattr(self.optimizer, name)
def get_std_opt(model, optim_func='adam', factor=1, warmup=2000):
# return NoamOpt(model.tgt_embed[0].d_model, 2, 4000,
# torch.optim.Adam(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
optim_func = dict(adam=torch.optim.Adam,
adamw=torch.optim.AdamW)[optim_func]
return NoamOpt(model.d_model, factor, warmup,
optim_func(model.parameters(), lr=0, betas=(0.9, 0.98), eps=1e-9))
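# --- Illustrative note (not part of the original file) ---
# The Noam schedule above sets lr = factor * model_size**-0.5 * min(step**-0.5, step * warmup**-1.5),
# i.e. a linear warmup followed by inverse-sqrt decay. For example, with factor=1,
# model_size=512 and warmup=2000, the peak lr at step=2000 is roughly
# 1 / (sqrt(512) * sqrt(2000)) ~= 9.9e-4:
#   noam = get_std_opt(model, optim_func='adam', factor=1, warmup=2000)  # model must expose .d_model
#   noam.rate(step=2000)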
# ---- file: captioning/utils/misc.py (repo: connect-caption-and-trace-main) ----
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['labels'], data['masks'], data['att_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, labels, masks, att_masks = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
loss = crit(model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
seq, seq_logprobs = model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, att_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
            print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 1
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, att_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
        for k in range(fc_feats.shape[0]):  # note: the original referenced loader.batch_size, but loader is not in scope here
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, att_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
# ---- file: captioning/utils/eval_utils_orig.py (repo: connect-caption-and-trace-main) ----
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from eval_utils import getCOCO
from .div_utils import compute_div_n, compute_global_div_n
import sys
try:
sys.path.append("coco-caption")
annFile = 'coco-caption/annotations/captions_val2014.json'
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
from pycocoevalcap.eval_spice import COCOEvalCapSpice
from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer
from pycocoevalcap.bleu.bleu import Bleu
sys.path.append("cider")
from pyciderevalcap.cider.cider import Cider
except:
print('Warning: requirements for eval_multi not satisfied')
def eval_allspice(dataset, preds_n, model_id, split):
coco = getCOCO(dataset)
valids = coco.getImgIds()
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
# filter results to only those in MSCOCO validation set (will be about a third)
preds_filt_n = [p for p in preds_n if p['image_id'] in valids]
print('using %d/%d predictions_n' % (len(preds_filt_n), len(preds_n)))
cache_path_n = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
json.dump(preds_filt_n, open(cache_path_n, 'w')) # serialize to temporary json file. Sigh, COCO API...
# Eval AllSPICE
cocoRes_n = coco.loadRes(cache_path_n)
cocoEvalAllSPICE = COCOEvalCapSpice(coco, cocoRes_n)
cocoEvalAllSPICE.params['image_id'] = cocoRes_n.getImgIds()
cocoEvalAllSPICE.evaluate()
out = {}
for metric, score in cocoEvalAllSPICE.eval.items():
out['All'+metric] = score
imgToEvalAllSPICE = cocoEvalAllSPICE.imgToEval
# collect SPICE_sub_score
for k in list(imgToEvalAllSPICE.values())[0]['SPICE'].keys():
if k != 'All':
out['AllSPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEvalAllSPICE.values()])
out['AllSPICE_'+k] = (out['AllSPICE_'+k][out['AllSPICE_'+k]==out['AllSPICE_'+k]]).mean()
for p in preds_filt_n:
image_id, caption = p['image_id'], p['caption']
imgToEvalAllSPICE[image_id]['caption'] = capsById[image_id]
return {'overall': out, 'imgToEvalAllSPICE': imgToEvalAllSPICE}
def eval_oracle(dataset, preds_n, model_id, split):
cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
sample_n = capsById[list(capsById.keys())[0]]
for i in range(len(capsById[list(capsById.keys())[0]])):
preds = [_[i] for _ in capsById.values()]
json.dump(preds, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
imgToEval = cocoEval.imgToEval
for img_id in capsById.keys():
tmp = imgToEval[img_id]
for k in tmp['SPICE'].keys():
if k != 'All':
tmp['SPICE_'+k] = tmp['SPICE'][k]['f']
if tmp['SPICE_'+k] != tmp['SPICE_'+k]: # nan
tmp['SPICE_'+k] = -100
tmp['SPICE'] = tmp['SPICE']['All']['f']
if tmp['SPICE'] != tmp['SPICE']: tmp['SPICE'] = -100
capsById[img_id][i]['scores'] = imgToEval[img_id]
out = {'overall': {}, 'ImgToEval': {}}
for img_id in capsById.keys():
out['ImgToEval'][img_id] = {}
for metric in capsById[img_id][0]['scores'].keys():
if metric == 'image_id': continue
out['ImgToEval'][img_id]['oracle_'+metric] = max([_['scores'][metric] for _ in capsById[img_id]])
out['ImgToEval'][img_id]['avg_'+metric] = sum([_['scores'][metric] for _ in capsById[img_id]]) / len(capsById[img_id])
out['ImgToEval'][img_id]['captions'] = capsById[img_id]
for metric in list(out['ImgToEval'].values())[0].keys():
if metric == 'captions':
continue
tmp = np.array([_[metric] for _ in out['ImgToEval'].values()])
tmp = tmp[tmp!=-100]
out['overall'][metric] = tmp.mean()
return out
def eval_div_stats(dataset, preds_n, model_id, split):
tokenizer = PTBTokenizer()
capsById = {}
for i, d in enumerate(preds_n):
d['id'] = i
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
n_caps_perimg = len(capsById[list(capsById.keys())[0]])
print(n_caps_perimg)
_capsById = capsById # save the untokenized version
capsById = tokenizer.tokenize(capsById)
div_1, adiv_1 = compute_div_n(capsById,1)
div_2, adiv_2 = compute_div_n(capsById,2)
globdiv_1, _= compute_global_div_n(capsById,1)
print('Diversity Statistics are as follows: \n Div1: %.2f, Div2: %.2f, gDiv1: %d\n'%(div_1,div_2, globdiv_1))
# compute mbleu
scorer = Bleu(4)
all_scrs = []
scrperimg = np.zeros((n_caps_perimg, len(capsById)))
for i in range(n_caps_perimg):
tempRefsById = {}
candsById = {}
for k in capsById:
tempRefsById[k] = capsById[k][:i] + capsById[k][i+1:]
candsById[k] = [capsById[k][i]]
score, scores = scorer.compute_score(tempRefsById, candsById)
all_scrs.append(score)
scrperimg[i,:] = scores[1]
all_scrs = np.array(all_scrs)
out = {}
out['overall'] = {'Div1': div_1, 'Div2': div_2, 'gDiv1': globdiv_1}
for k, score in zip(range(4), all_scrs.mean(axis=0).tolist()):
out['overall'].update({'mBLeu_%d'%(k+1): score})
imgToEval = {}
for i,imgid in enumerate(capsById.keys()):
imgToEval[imgid] = {'mBleu_2' : scrperimg[:,i].mean()}
imgToEval[imgid]['individuals'] = []
for j, d in enumerate(_capsById[imgid]):
imgToEval[imgid]['individuals'].append(preds_n[d['id']])
imgToEval[imgid]['individuals'][-1]['mBleu_2'] = scrperimg[j,i]
out['ImgToEval'] = imgToEval
print('Mean mutual Bleu scores on this set is:\nmBLeu_1, mBLeu_2, mBLeu_3, mBLeu_4')
print(all_scrs.mean(axis=0))
return out
def eval_self_cider(dataset, preds_n, model_id, split):
cache_path = os.path.join('eval_results/', model_id + '_' + split + '_n.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# Get Cider_scorer
Cider_scorer = Cider(df='corpus')
tokenizer = PTBTokenizer()
gts = {}
for imgId in valids:
gts[imgId] = coco.imgToAnns[imgId]
gts = tokenizer.tokenize(gts)
for imgId in valids:
Cider_scorer.cider_scorer += (None, gts[imgId])
Cider_scorer.cider_scorer.compute_doc_freq()
Cider_scorer.cider_scorer.ref_len = np.log(float(len(Cider_scorer.cider_scorer.crefs)))
# Prepare captions
capsById = {}
for d in preds_n:
capsById[d['image_id']] = capsById.get(d['image_id'], []) + [d]
capsById = tokenizer.tokenize(capsById)
imgIds = list(capsById.keys())
scores = Cider_scorer.my_self_cider([capsById[_] for _ in imgIds])
def get_div(eigvals):
eigvals = np.clip(eigvals, 0, None)
return -np.log(np.sqrt(eigvals[-1]) / (np.sqrt(eigvals).sum())) / np.log(len(eigvals))
sc_scores = [get_div(np.linalg.eigvalsh(_/10)) for _ in scores]
score = np.mean(np.array(sc_scores))
imgToEval = {}
for i, image_id in enumerate(imgIds):
imgToEval[image_id] = {'self_cider': sc_scores[i], 'self_cider_mat': scores[i].tolist()}
    return {'overall': {'self_cider': score}, 'imgToEval': imgToEval}
# ---- file: captioning/utils/eval_multi.py (repo: connect-caption-and-trace-main) ----
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
# Copy from fvcore
import logging
import os
from typing import Any
import yaml
from yacs.config import CfgNode as _CfgNode
import io as PathManager  # stand-in for fvcore's PathManager: only .open() is used below
BASE_KEY = "_BASE_"
class CfgNode(_CfgNode):
"""
Our own extended version of :class:`yacs.config.CfgNode`.
It contains the following extra features:
1. The :meth:`merge_from_file` method supports the "_BASE_" key,
which allows the new CfgNode to inherit all the attributes from the
base configuration file.
2. Keys that start with "COMPUTED_" are treated as insertion-only
"computed" attributes. They can be inserted regardless of whether
the CfgNode is frozen or not.
3. With "allow_unsafe=True", it supports pyyaml tags that evaluate
expressions in config. See examples in
https://pyyaml.org/wiki/PyYAMLDocumentation#yaml-tags-and-python-types
Note that this may lead to arbitrary code execution: you must not
load a config file from untrusted sources before manually inspecting
the content of the file.
"""
@staticmethod
def load_yaml_with_base(filename, allow_unsafe = False):
"""
Just like `yaml.load(open(filename))`, but inherit attributes from its
`_BASE_`.
Args:
filename (str): the file name of the current config. Will be used to
find the base config file.
allow_unsafe (bool): whether to allow loading the config file with
`yaml.unsafe_load`.
Returns:
(dict): the loaded yaml
"""
with PathManager.open(filename, "r") as f:
try:
cfg = yaml.safe_load(f)
except yaml.constructor.ConstructorError:
if not allow_unsafe:
raise
logger = logging.getLogger(__name__)
logger.warning(
"Loading config {} with yaml.unsafe_load. Your machine may "
"be at risk if the file contains malicious content.".format(
filename
)
)
f.close()
with open(filename, "r") as f:
cfg = yaml.unsafe_load(f)
def merge_a_into_b(a, b):
# merge dict a into dict b. values in a will overwrite b.
for k, v in a.items():
if isinstance(v, dict) and k in b:
assert isinstance(
b[k], dict
), "Cannot inherit key '{}' from base!".format(k)
merge_a_into_b(v, b[k])
else:
b[k] = v
if BASE_KEY in cfg:
base_cfg_file = cfg[BASE_KEY]
if base_cfg_file.startswith("~"):
base_cfg_file = os.path.expanduser(base_cfg_file)
if not any(
map(base_cfg_file.startswith, ["/", "https://", "http://"])
):
# the path to base cfg is relative to the config file itself.
base_cfg_file = os.path.join(
os.path.dirname(filename), base_cfg_file
)
base_cfg = CfgNode.load_yaml_with_base(
base_cfg_file, allow_unsafe=allow_unsafe
)
del cfg[BASE_KEY]
merge_a_into_b(cfg, base_cfg)
return base_cfg
return cfg
def merge_from_file(self, cfg_filename, allow_unsafe = False):
"""
Merge configs from a given yaml file.
Args:
cfg_filename: the file name of the yaml config.
allow_unsafe: whether to allow loading the config file with
`yaml.unsafe_load`.
"""
loaded_cfg = CfgNode.load_yaml_with_base(
cfg_filename, allow_unsafe=allow_unsafe
)
loaded_cfg = type(self)(loaded_cfg)
self.merge_from_other_cfg(loaded_cfg)
# Forward the following calls to base, but with a check on the BASE_KEY.
def merge_from_other_cfg(self, cfg_other):
"""
Args:
cfg_other (CfgNode): configs to merge from.
"""
assert (
BASE_KEY not in cfg_other
), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
return super().merge_from_other_cfg(cfg_other)
def merge_from_list(self, cfg_list):
"""
Args:
cfg_list (list): list of configs to merge from.
"""
keys = set(cfg_list[0::2])
assert (
BASE_KEY not in keys
), "The reserved key '{}' can only be used in files!".format(BASE_KEY)
return super().merge_from_list(cfg_list)
def __setattr__(self, name, val):
if name.startswith("COMPUTED_"):
if name in self:
old_val = self[name]
if old_val == val:
return
raise KeyError(
"Computed attributed '{}' already exists "
"with a different value! old={}, new={}.".format(
name, old_val, val
)
)
self[name] = val
else:
super().__setattr__(name, val)
if __name__ == '__main__':
cfg = CfgNode.load_yaml_with_base('configs/updown_long.yml')
print(cfg)
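# --- Illustrative example (not part of the original file) ---
# With a hypothetical pair of config files:
#   base.yml:           MODEL:
#                         RNN_SIZE: 512
#                         NUM_LAYERS: 1
#   updown_long.yml:    _BASE_: base.yml
#                       MODEL:
#                         NUM_LAYERS: 2
# load_yaml_with_base('updown_long.yml') resolves _BASE_ relative to the file,
# merges the child config over the base, and returns
#   {'MODEL': {'RNN_SIZE': 512, 'NUM_LAYERS': 2}}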
# ---- file: captioning/utils/config.py (repo: connect-caption-and-trace-main) ----
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .local_optimal_transport import local_OT
from ..models import utils as utils_models
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
# annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, task='caption', eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# assert task
assert task in ['caption', 'trace', 'both', 'show']
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
    grounding_quality_loss = [] # for downstream task 2
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
print(num_images)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks'], data['show_labels'], data['show_trace_feats'], data['show_trace_masks'], data['show_masks'], data['show_gate_labels']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, \
trace_masks, show_labels, show_trace_feats, show_trace_masks, show_masks, show_gate_labels = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
if task == 'caption':
loss = 0
# loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, task=task), labels[..., 1:], masks[..., 1:]).item()
elif task == 'show':
loss = crit(
model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1], att_masks,
show_trace_masks, show_gate_labels, task=task), show_labels[..., 1:], show_masks[..., 1:]).item()
elif task == 'both':
loss = crit(
model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks,
task=task)[0], labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
test_grounding_quality = True
test_baseline = False
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
### repeat att feats
if test_grounding_quality:
fc_feats, att_feats, att_masks, box_feats = utils_models.repeat_tensors(5,
[fc_feats, att_feats, att_masks, box_feats]
)
#############################
if task == 'both':
seq, seq_logprobs, _ = model(fc_feats, att_feats, show_trace_feats[:att_feats.shape[0]], box_feats, att_masks, show_trace_masks[:att_feats.shape[0]],
show_gate_labels[:att_feats.shape[0]], task, opt=tmp_eval_kwargs, mode='sample')
# use gt-truth to get prediction
_, trace_output = model(fc_feats, att_feats, show_trace_feats[:,:17],
box_feats,
show_labels[..., :-1].squeeze(1),
att_masks, show_masks.squeeze(1)[:,:17],
task='both')
# ### debug try using trace to give trace output
# print(show_trace_feats.shape, show_labels.shape, show_masks.shape)
# trace_output = model(fc_feats, att_feats, show_trace_feats[:, :17], box_feats,
# show_labels[..., :-1].squeeze(1),
# att_masks, show_masks.squeeze(1), task='trace')
else:
if test_baseline is True and task == 'caption':
seq, seq_logprobs, word_box_attn = model(fc_feats, att_feats, show_trace_feats[:att_feats.shape[0]], box_feats,
att_masks, show_trace_masks[:att_feats.shape[0]],
show_gate_labels[:att_feats.shape[0]], task, opt=tmp_eval_kwargs,
mode='sample')
else:
seq, seq_logprobs = model(fc_feats, att_feats, show_trace_feats[:att_feats.shape[0]], box_feats,
att_masks, show_trace_masks[:att_feats.shape[0]], show_gate_labels[:att_feats.shape[0]], task, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
### log which caption has no bounding box
ids_no_box = (show_trace_feats[:, 0, 4] == 1).float()
# only focus on the gt-words
if test_grounding_quality:
batch_grounding_loss = []
if test_baseline:
word_box_attn = torch.argmax(word_box_attn, dim=-1)
# match the generated word with the show-caption
show_labels = show_labels[:, :, 1:-1]
for i in range(seq.shape[0]):
# for j in range(seq.shape[1]):
for k in range(show_labels.shape[2]):
if show_trace_feats[i, k, 4] != 1 and show_labels[i, 0, k] != 0:
# if show_trace_feats[i, k, 4] != 1 and show_labels[i, 0, k] != 0 \
# and seq[i, k] == show_labels[
# i, 0, k]: # the word match with the key word and show_labels[i,0,k] != 1
gt_box = show_trace_feats[i, k] # get the grounding box
if test_baseline:
pred_box_idx = word_box_attn[i, k].long()
pred_box = box_feats[i, pred_box_idx] # get the predicted box
else:
pred_box = trace_output[i, k]
# print(gt_box, pred_box, seq[i,j])
tmp_loss = torch.mean(torch.abs(gt_box[:4] - pred_box[:4]))
batch_grounding_loss.append(tmp_loss.item())
### compute the grounding quality
# if test_grounding_quality:
# batch_grounding_loss = []
# if test_baseline:
# word_box_attn = torch.argmax(word_box_attn, dim=-1)
# # match the generated word with the show-caption
# for i in range(seq.shape[0]):
# for j in range(seq.shape[1]):
# for k in range(show_labels.shape[2]):
# if show_trace_feats[i,k,4]!=1 and show_labels[i,0,k] != 0 \
# and seq[i,j] == show_labels[i,0,k]: # the word match with the key word and show_labels[i,0,k] != 1
# gt_box = show_trace_feats[i, k] # get the grounding box
# if test_baseline:
# pred_box_idx = word_box_attn[i,j].long()
# pred_box = box_feats[i, pred_box_idx] # get the predicted box
# else:
# pred_box = trace_output[i, j]
# # print(gt_box, pred_box, seq[i,j])
# tmp_loss = torch.mean(torch.abs(gt_box[:4] - pred_box[:4]))
# batch_grounding_loss.append(tmp_loss.item())
# else:
# assert task == 'both'
# for i in range(seq.shape[0]):
# for j in range(seq.shape[1]):
# for k in range(show_labels.shape[2]):
# if seq[i, j] != 0 and seq[i, j] == show_labels[i, 0, k]: # the word match with the key word
# gt_box = show_trace_feats[i, k] # get the grounding box
# pred_box = trace_output[i,j]
# tmp_loss = torch.mean(torch.abs(gt_box[:4] - pred_box[:4]))
# batch_grounding_loss.append(tmp_loss.item())
grounding_quality_loss.append(np.mean(np.array(batch_grounding_loss)))
print('Visual grounding quality running ave: ', np.mean(np.array(grounding_quality_loss)))
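            # Because the visual features were repeated 5x above, five captions were
            # sampled per image; keep only the first caption of each group of five for
            # the standard caption metrics (the reshape assumes a max decode length of 20).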
seq = seq.reshape([-1, 5, 20])[:,0,:]
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
            # entry to evaluate show-control-tell: separate the 5 predictions per image
# if ids_no_box[k]==1:
# continue
# entry = {'image_id': data['infos'][k//5]['id'] + 1000000 * (k%5), 'caption': sent, 'perplexity': perplexity[k].item(),
# 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
            print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
print('Total visual grounding quality loss:', np.mean(np.array(grounding_quality_loss)))
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 0
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
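    # Dispatch on sample_n_method: 'bs' runs beam search and keeps the top sample_n
    # beams, 'sample'/'gumbel'/'top*' draw sample_n independent samples, 'dbs' runs
    # diverse beam search with sample_n groups, and anything else falls through to
    # group sampling with the leading character stripped from the method name.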
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(loader.batch_size):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
# print(data['infos'][0]['id'])
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
loss_prev_gt_list = []
acc_list = []
with torch.no_grad():
use_local_OT = False
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
# trace_class_label = trace_feats[:,:,5] - 1
# pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
# prev_gt_correct
prev_gt_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')
prev_gt_out = prev_gt_out * trace_masks.unsqueeze(2)
# print(prev_gt_out[0, :, :5])
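            # Valid positions for the L1 loss: inside the trace mask and not the dummy
            # placeholder box (whose area column, index 4, equals 1, i.e. the full-image box).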
loss_prev_gt_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
loss_prev_gt = ((torch.abs(prev_gt_out[:, :, :4] - trace_feats[:, :, :4]) * loss_prev_gt_mask).sum() / (
loss_prev_gt_mask.sum() * 4)).item()
loss_prev_gt_list.append(loss_prev_gt)
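            # Autoregressive trace decoding: at step i the model predicts box i from the
            # previously predicted boxes, the area column (index 4) is recomputed from
            # the predicted coordinates, and the result is written back into
            # tmp_trace_feats so that later steps condition on it.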
for i in range(trace_feats.shape[1]):
# for regression
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:, i]
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
tmp_trace_feats[:, i, :5] = curr_out
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks) # for non-iteratively
# break # for non-iteratively
# tmp_trace_feats = curr_out # for non-iteratively
# ### save for visualization # for visualization of trace_generation
# vis_img_id = data['infos'][0]['id']
# np.save('./vis/trace_generation_2/pred_trace_'+str(vis_img_id), tmp_trace_feats[:,:,:4].detach().cpu().numpy())
# np.save('./vis/trace_generation_2/gt_trace_' + str(vis_img_id), trace_feats[:,:,:4].detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open('./vis/trace_generation_2/info.txt', 'a') as f:
# f.write('img_id:%d, l1-loss: %f\n'%(vis_img_id,(crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()))
# f.close()
# ############################
# tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
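            # Optional local optimal-transport evaluation: D holds the pairwise L1 cost
            # between predicted and ground-truth boxes, local_OT returns an alignment T,
            # and the loss is the masked L1 error after transporting the predictions;
            # the plain (unaligned) L1 error is printed alongside it for reference.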
if use_local_OT:
D = torch.abs(tmp_trace_feats[:, :, :4].unsqueeze(2) - trace_feats[:, :, :4].unsqueeze(1)).mean(dim=-1)
T = local_OT(D).to(tmp_trace_feats.device)
loss = ((torch.abs(torch.matmul(tmp_trace_feats[:, :, :4].transpose(1, 2), T).transpose(1, 2) -
trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
print('loss', loss, 'loss_orig', (
(torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
loss_mask.sum() * 4)).item())
else:
loss = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
# loss = (crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; prev_gt_loss:', loss_prev_gt)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
val_loss_prev_gt = np.mean(np.array(loss_prev_gt_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; prev_gt_loss:', val_loss_prev_gt)
model.train()
return val_loss
def eval_trace_generation_classification(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
acc_list = []
with torch.no_grad():
# get the loss in terms of cross-entropy
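            # Classification variant: the model outputs a distribution over the candidate
            # region boxes at every trace step; the target index is stored in
            # trace_feats[:, :, 5] (shifted by 1 so that 0 denotes padding, which becomes
            # -1 and is ignored by the NLL loss).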
model_outputs = model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks,
trace_masks, 'trace')
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
trace_class_label = trace_feats[:, :, 5] - 1
trace_class_label = trace_class_label.view(-1).long()
loss_ce = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1).item()
loss_ce_list.append(loss_ce)
gt_prev_acc = (((model_outputs.argmax(dim=1) == trace_class_label)*(trace_class_label!=-1).float()).sum() / \
(trace_class_label != -1).float().sum()).item()
gt_prev_acc_list.append(gt_prev_acc)
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats
trace_class_label = trace_feats[:,:,5] - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
# for classification
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:,i]
curr_out = curr_out.argmax(dim=-1)
pred_class_label[:, i] = curr_out
curr_out = box_feats[np.arange(box_feats.shape[0]), curr_out]
tmp_trace_feats[:, i, :5] = curr_out
print('prev_gt_class_label', model_outputs.argmax(dim=1).view(pred_class_label.shape)[0])
print('pred_class_label',pred_class_label[0])
classification_acc = ((pred_class_label == trace_class_label) * trace_masks).sum() / trace_masks.sum()
acc_list.append(classification_acc.item())
tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss = crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; loss_ce:', loss_ce, '; classification-acc:', classification_acc.item(), '; gt_prev_acc:', gt_prev_acc)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss_ce = np.mean(np.array(loss_ce_list))
val_gt_prev_acc = np.mean(np.array(gt_prev_acc_list))
val_loss = np.mean(np.array(loss_list))
val_acc = np.mean(np.array(acc_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; loss_ce:', val_loss_ce, '; classification-acc:', val_acc, '; gt_prev_acc:', val_gt_prev_acc)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils_for_coco_caption.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .local_optimal_transport import local_OT
from ..models import utils as utils_models
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval_show_control_tell import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
# annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
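    # Per-category SPICE scores: collect the per-image f-scores and average them, using
    # the x[x == x] trick to drop NaN entries (NaN != NaN) before taking the mean.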
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, task='caption', eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# assert task
assert task in ['caption', 'trace', 'both', 'show']
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
print(num_images)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks'], data['show_labels'], data['show_trace_feats'], data['show_trace_masks'], data['show_masks'], data['show_gate_labels']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, \
trace_masks, show_labels, show_trace_feats, show_trace_masks, show_masks, show_gate_labels = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
if task == 'caption':
loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, task=task), labels[..., 1:], masks[..., 1:]).item()
elif task == 'show':
loss = crit(
model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1], att_masks,
show_trace_masks, show_gate_labels, task=task), show_labels[..., 1:], show_masks[..., 1:]).item()
elif task == 'both':
loss = crit(
model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks,
task=task)[0], labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
### repeat att feats
fc_feats, att_feats, att_masks, box_feats = utils_models.repeat_tensors(5,
[fc_feats, att_feats, att_masks, box_feats]
)
#############################
seq, seq_logprobs = model(fc_feats, att_feats, show_trace_feats, box_feats, att_masks, show_trace_masks, show_gate_labels, task, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
### log which caption has no bounding box
ids_no_box = (show_trace_feats[:, 0, 4] == 1).float()
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
# entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
            # entry to evaluate show-control-tell: separate the 5 predictions per image
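            # Each image contributes five controlled captions (one per show trace);
            # captions whose trace has no real box are skipped, and the rest get a
            # distinct synthetic image_id by adding 1,000,000 times the trace index.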
if ids_no_box[k]==1:
continue
entry = {'image_id': data['infos'][k//5]['id'] + 1000000 * (k%5), 'caption': sent, 'perplexity': perplexity[k].item(),
'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
            print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 0
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(loader.batch_size):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
# print(data['infos'][0]['id'])
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
loss_prev_gt_list = []
acc_list = []
with torch.no_grad():
use_local_OT = False
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
# trace_class_label = trace_feats[:,:,5] - 1
# pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
# prev_gt_correct
prev_gt_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')
prev_gt_out = prev_gt_out * trace_masks.unsqueeze(2)
# print(prev_gt_out[0, :, :5])
loss_prev_gt_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
loss_prev_gt = ((torch.abs(prev_gt_out[:, :, :4] - trace_feats[:, :, :4]) * loss_prev_gt_mask).sum() / (
loss_prev_gt_mask.sum() * 4)).item()
loss_prev_gt_list.append(loss_prev_gt)
for i in range(trace_feats.shape[1]):
# for regression
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:, i]
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
tmp_trace_feats[:, i, :5] = curr_out
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks) # for non-iteratively
# break # for non-iteratively
# tmp_trace_feats = curr_out # for non-iteratively
# ### save for visualization # for visualization of trace_generation
# vis_img_id = data['infos'][0]['id']
# np.save('./vis/trace_generation_2/pred_trace_'+str(vis_img_id), tmp_trace_feats[:,:,:4].detach().cpu().numpy())
# np.save('./vis/trace_generation_2/gt_trace_' + str(vis_img_id), trace_feats[:,:,:4].detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open('./vis/trace_generation_2/info.txt', 'a') as f:
# f.write('img_id:%d, l1-loss: %f\n'%(vis_img_id,(crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()))
# f.close()
# ############################
# tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
if use_local_OT:
D = torch.abs(tmp_trace_feats[:, :, :4].unsqueeze(2) - trace_feats[:, :, :4].unsqueeze(1)).mean(dim=-1)
T = local_OT(D).to(tmp_trace_feats.device)
loss = ((torch.abs(torch.matmul(tmp_trace_feats[:, :, :4].transpose(1, 2), T).transpose(1, 2) -
trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
print('loss', loss, 'loss_orig', (
(torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
loss_mask.sum() * 4)).item())
else:
loss = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
# loss = (crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; prev_gt_loss:', loss_prev_gt)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
val_loss_prev_gt = np.mean(np.array(loss_prev_gt_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; prev_gt_loss:', val_loss_prev_gt)
model.train()
return val_loss
def eval_trace_generation_classification(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
acc_list = []
with torch.no_grad():
# get the loss in terms of cross-entropy
model_outputs = model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks,
trace_masks, 'trace')
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
trace_class_label = trace_feats[:, :, 5] - 1
trace_class_label = trace_class_label.view(-1).long()
loss_ce = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1).item()
loss_ce_list.append(loss_ce)
gt_prev_acc = (((model_outputs.argmax(dim=1) == trace_class_label)*(trace_class_label!=-1).float()).sum() / \
(trace_class_label != -1).float().sum()).item()
gt_prev_acc_list.append(gt_prev_acc)
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats
trace_class_label = trace_feats[:,:,5] - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
# for classification
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:,i]
curr_out = curr_out.argmax(dim=-1)
pred_class_label[:, i] = curr_out
curr_out = box_feats[np.arange(box_feats.shape[0]), curr_out]
tmp_trace_feats[:, i, :5] = curr_out
print('prev_gt_class_label', model_outputs.argmax(dim=1).view(pred_class_label.shape)[0])
print('pred_class_label',pred_class_label[0])
classification_acc = ((pred_class_label == trace_class_label) * trace_masks).sum() / trace_masks.sum()
acc_list.append(classification_acc.item())
tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss = crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; loss_ce:', loss_ce, '; classification-acc:', classification_acc.item(), '; gt_prev_acc:', gt_prev_acc)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss_ce = np.mean(np.array(loss_ce_list))
val_gt_prev_acc = np.mean(np.array(gt_prev_acc_list))
val_loss = np.mean(np.array(loss_list))
val_acc = np.mean(np.array(acc_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; loss_ce:', val_loss_ce, '; classification-acc:', val_acc, '; gt_prev_acc:', val_gt_prev_acc)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils_show_control_tell.py
|
connect-caption-and-trace-main
|
captioning/utils/__init__.py
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from captioning.utils import misc as utils
# from .local_optimal_transport import local_OT
# load coco-caption if available
try:
sys.path.append("coco-caption")
sys.path.append("/home/zihang/Research/Localized_Narratives/ImageCaptioning.pytorch")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap #COCOEvalCap_spice
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
# annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set
elif 'flickr30k' in dataset or 'f30k' in dataset or 'flk30k' in dataset:
annFile = 'coco-caption/annotations/captions_flk30k_LN_test.json'
elif 'ade20k' in dataset:
annFile = 'coco-caption/annotations/captions_ade20k_LN_test.json'
elif 'openimg' in dataset:
annFile = 'coco-caption/annotations/captions_openimg_LN_test.json'
print(annFile)
return COCO(annFile)
cache_path = '/home/zihang/Research/Localized_Narratives/ImageCaptioning.pytorch/eval_results/zihang_transformer_LN_try804_openimg_twolayer_joint_cycle_b_val.json'
score_list = []
l = len(json.load(open(cache_path)))
size_per_split = 1000000
num_splits = (l//size_per_split) + (1 if (l%size_per_split)!=0 else 0)
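# Evaluate the cached predictions in chunks: num_splits is the ceiling of
# l / size_per_split, and each chunk is scored independently so that a failure in
# one split does not abort the whole run.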
for i in range(num_splits):
coco = getCOCO('openimg')
valids = coco.getImgIds()
cocoRes = coco.loadRes(cache_path)#, split=i, size_per_split = size_per_split)
cocoEval = COCOEvalCap(coco, cocoRes) #_spice
cocoEval.params['image_id'] = cocoRes.getImgIds()
try:
cocoEval.evaluate()
except:
        print('this split failed: #', i)
continue
out = {}
for metric, score in cocoEval.eval.items():
out[metric] = score
score_list.append(score)
print(i, '-th current_split:', score, 'Overall ave:', sum(score_list) / len(score_list))
print(score_list)
print(sum(score_list) / len(score_list))
# # Add mean perplexity
# out['perplexity'] = mean_perplexity
# out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_' + k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_' + k] = (out['SPICE_' + k][out['SPICE_' + k] == out['SPICE_' + k]]).mean()
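# NOTE: the lines below are carried over from language_eval and reference
# preds_filt / model_id / split, which are not defined in this standalone debug
# script; they would need to be supplied before this part can run.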
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
|
connect-caption-and-trace-main
|
captioning/utils/for_debug_eval_spice.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class myResnet(nn.Module):
def __init__(self, resnet):
super(myResnet, self).__init__()
self.resnet = resnet
def forward(self, img, att_size=14):
x = img.unsqueeze(0)
x = self.resnet.conv1(x)
x = self.resnet.bn1(x)
x = self.resnet.relu(x)
x = self.resnet.maxpool(x)
x = self.resnet.layer1(x)
x = self.resnet.layer2(x)
x = self.resnet.layer3(x)
x = self.resnet.layer4(x)
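        # Two outputs: `fc` is the global feature (spatial mean of the layer4 map) and
        # `att` is an att_size x att_size grid of local features, permuted to (H, W, C).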
fc = x.mean(3).mean(2).squeeze()
att = F.adaptive_avg_pool2d(x,[att_size,att_size]).squeeze().permute(1, 2, 0)
return fc, att
|
connect-caption-and-trace-main
|
captioning/utils/resnet_utils.py
|
import torch
import torch.nn as nn
import torch.utils.model_zoo as model_zoo  # used by the pretrained=True branches below
import torchvision.models.resnet
from torchvision.models.resnet import BasicBlock, Bottleneck, model_urls  # model_urls exists in older torchvision releases
class ResNet(torchvision.models.resnet.ResNet):
def __init__(self, block, layers, num_classes=1000):
super(ResNet, self).__init__(block, layers, num_classes)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True) # change
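        # The loop below moves the stride-2 downsampling of the first block in layers
        # 2-4 from conv2 to conv1; together with the ceil_mode max-pool above, this
        # appears to mirror the Caffe-style ResNet commonly used for caption features.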
for i in range(2, 5):
getattr(self, 'layer%d'%i)[0].conv1.stride = (2,2)
getattr(self, 'layer%d'%i)[0].conv2.stride = (1,1)
def resnet18(pretrained=False):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
return model
def resnet34(pretrained=False):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet34']))
return model
def resnet50(pretrained=False):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
return model
def resnet101(pretrained=False):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet101']))
return model
def resnet152(pretrained=False):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3])
if pretrained:
model.load_state_dict(model_zoo.load_url(model_urls['resnet152']))
return model
|
connect-caption-and-trace-main
|
captioning/utils/resnet.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .local_optimal_transport import local_OT
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
annFile = 'coco-caption/annotations/captions_coco_LN_test.json'
elif 'flickr30k' in dataset or 'f30k' in dataset or 'flk30k' in dataset:
annFile = 'coco-caption/annotations/captions_flk30k_LN_test.json'
elif 'ade20k' in dataset:
annFile = 'coco-caption/annotations/captions_ade20k_LN_test.json'
elif 'openimg' in dataset:
annFile = 'coco-caption/annotations/captions_openimg_LN_test.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, task='caption', eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# assert task
assert task in ['caption', 'trace', 'both']
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
trace_cost = []
while True:
data = loader.get_batch(split)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'], data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
if task == 'caption':
loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, task=task), labels[..., 1:], masks[..., 1:]).item()
elif task == 'both':
loss = crit(
model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks,
task=task)[0], labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
if task == 'both':
seq, seq_logprobs, trace_predicted = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks,
task=task, opt=tmp_eval_kwargs, mode='sample')
else:
try:
seq, seq_logprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, task=task, opt=tmp_eval_kwargs, mode='sample')
except:
                    print('evaluation encountered an error')
continue
seq = seq.data
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
if task == 'both':
### compute the loss for trace
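            # Trace quality for the joint ('both') task: the predicted trace (up to the
            # generated caption length) is compared with the ground-truth trace (dummy
            # full-image boxes removed). The shorter trace is cut into segments of up to
            # 20 boxes, each segment is aligned with a proportionally scaled window of
            # the longer trace via local_OT, and the mean transport cost is reported as
            # the LBM distance.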
for k in range(trace_predicted.shape[0]):
tmp_gt_length = trace_masks[k].sum().long()
tmp_gt_trace = trace_feats[k, :tmp_gt_length]
tmp_pred_length = (seq[k]>0).sum().long()
tmp_pred_trace = trace_predicted[k, :tmp_pred_length]
# choose only boxes not [0,0,1,1,1] in the ground truth
nonzero_idx = torch.nonzero(tmp_gt_trace[:, 4] != 1).squeeze()
tmp_gt_trace = tmp_gt_trace[nonzero_idx]
if len(tmp_gt_trace.shape) < 2: # if there is only one chosen box in this trace
tmp_gt_trace = tmp_gt_trace.unsqueeze(0)
tmp_gt_trace = tmp_gt_trace.unsqueeze(0)
tmp_pred_trace = tmp_pred_trace.unsqueeze(0)
if tmp_pred_trace.shape[1] <= tmp_gt_trace.shape[1]:
tmp_trace1 = tmp_pred_trace
tmp_trace2 = tmp_gt_trace
else:
tmp_trace1 = tmp_gt_trace
tmp_trace2 = tmp_pred_trace
# processing in terms of segments of length 20
seg_loss_list = []
for seg_idx in range(np.ceil(tmp_trace1.shape[1] / 20).astype(int)):
tmp_const = 20. * tmp_trace2.shape[1] / tmp_trace1.shape[1]
seg_tmp_trace1 = tmp_trace1[:, seg_idx * 20:(seg_idx + 1) * 20, :4]
seg_tmp_trace2 = tmp_trace2[:, np.floor(seg_idx * tmp_const).astype(int): np.ceil(
(seg_idx + 1) * tmp_const).astype(int), :4]
D = torch.abs(seg_tmp_trace1.unsqueeze(2) - seg_tmp_trace2.unsqueeze(1)).mean(dim=-1)
seg_tmp_T = local_OT(D, window = 0)
seg_tmp_cost = (seg_tmp_T * D).sum() / seg_tmp_trace1.shape[1]
if not torch.isnan(seg_tmp_cost):
seg_loss_list.append(seg_tmp_cost.item())
tmp_cost = np.mean(np.array(seg_loss_list))
if not np.isnan(tmp_cost):
trace_cost.append(tmp_cost)
print('trace LBM distance:', tmp_cost)
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
print('both trace running ave LBM loss :', np.mean(np.array(trace_cost)))
# ### save for visualization # for visualization of trace_generation
# for i in range(len(sents)):
# vis_img_id = data['infos'][i]['id']
# with open('./vis/both_generation_supplement/pred_caption/pred_caption_' + str(vis_img_id)+'.txt', 'w') as f:
# f.write(sents[i])
# np.save('./vis/both_generation_supplement/pred_trace/pred_trace_' + str(vis_img_id),
# trace_predicted[i, :, :4].detach().cpu().numpy())
# print(vis_img_id, trace_feats.shape)
# with open('./vis/both_generation_supplement/info.txt', 'a') as f:
# f.write('img_id:%d\n' %vis_img_id)
# f.close()
# ############################
# ### save for visualization # for visualization of caption_generation
# for i in range(len(sents)):
# vis_img_id = data['infos'][i]['id']
# tmp_dir = './vis/caption_generation_' + eval_kwargs['dataset_choice']
# if not os.path.exists(tmp_dir):
# os.makedirs(tmp_dir)
# os.makedirs(tmp_dir + '/pred_caption')
# os.makedirs(tmp_dir + '/gt_trace')
# with open('./vis/caption_generation_'+ eval_kwargs['dataset_choice'] +'/pred_caption/pred_caption_' + str(vis_img_id) + '.txt',
# 'w') as f:
# f.write(sents[i])
# np.save('./vis/caption_generation_'+ eval_kwargs['dataset_choice'] +'/gt_trace/gt_trace_' + str(vis_img_id),
# trace_feats[i, :, :4].detach().cpu().numpy())
# print(vis_img_id, trace_feats.shape)
# with open('./vis/caption_generation_'+ eval_kwargs['dataset_choice'] +'/info.txt', 'a') as f:
# f.write('img_id:%s\n' % str(vis_img_id))
# f.close()
# ############################
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
ix1 = data['bounds']['it_max']
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
for i in range(n - ix1):
predictions.pop()
if verbose:
            print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
if task == 'both':
print('both trace total LBM loss:', np.mean(np.array(trace_cost)))
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 0
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(loader.batch_size):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, window_size=0, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
loss_list = []
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
with torch.no_grad():
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
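            # Roll the trace out step by step: positions already decoded in tmp_trace_feats
            # hold model predictions while later positions still hold the ground truth, and
            # the 5th channel of each prediction is re-derived as the area of the predicted box.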
for i in range(trace_feats.shape[1]):
# for regression
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')[:, i]
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
tmp_trace_feats[:, i, :5] = curr_out
# # ### save for visualization # for visualization of trace_generation
# sents = utils.decode_sequence(model.vocab, labels[:, 0, 1:])
# print(sents)
# loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
# vis_img_id = data['infos'][0]['id']
# tmp_dir = './vis/trace_generation_' + eval_kwargs['dataset_choice']
# if not os.path.exists(tmp_dir):
# os.makedirs(tmp_dir)
# os.makedirs(tmp_dir + '/pred_trace')
# os.makedirs(tmp_dir + '/gt_trace')
# os.makedirs(tmp_dir + '/gt_caption')
# with open(tmp_dir + '/gt_caption/' + str(vis_img_id)+'.txt', 'w') as f:
# f.write(sents[0])
# # np.save('./vis/trace_generation_11_14/pred_caption_' + str(vis_img_id),
# # labels[..., 1:].detach().cpu().numpy())
# np.save(tmp_dir + '/pred_trace/' +str(vis_img_id), tmp_trace_feats[:,:,:4].detach().cpu().numpy())
# np.save(tmp_dir + '/gt_trace/' + str(vis_img_id), trace_feats[:,:,:4].detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open(tmp_dir + '/info.txt', 'a') as f:
# f.write('img_id:%s, l1-loss: %f\n'%(str(vis_img_id),((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()))
# f.close()
# # ############################
use_local_OT = True #
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
if use_local_OT:
batch_loss_list = []
for idx_trace in range(trace_feats.shape[0]):
tmp_gt_length = trace_masks[idx_trace].sum().long()
single_tmp_trace_feats = tmp_trace_feats[idx_trace, :tmp_gt_length]
single_trace_feats = trace_feats[idx_trace, :tmp_gt_length]
# choose only boxes not [0,0,1,1,1] in the ground truth
nonzero_idx = torch.nonzero(single_trace_feats[:,4]!=1).squeeze()
single_trace_feats = single_trace_feats[nonzero_idx]
if len(single_trace_feats.shape) < 2: # if there is only one chosen box in this trace
single_trace_feats = single_trace_feats.unsqueeze(0)
single_tmp_trace_feats = single_tmp_trace_feats.unsqueeze(0)
single_trace_feats = single_trace_feats.unsqueeze(0)
if single_tmp_trace_feats.shape[1] <= single_trace_feats.shape[1]:
tmp_trace1 = single_tmp_trace_feats
tmp_trace2 = single_trace_feats
else:
tmp_trace1 = single_trace_feats
tmp_trace2 = single_tmp_trace_feats
# processing in terms of segments of length 20
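                    # Each 20-step chunk of the shorter trace (tmp_trace1) is matched against the
                    # proportionally corresponding span of the longer trace (tmp_trace2, scaled by
                    # tmp_const = 20 * len2 / len1) using the windowed transport plan from local_OT.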
seg_loss_list = []
for seg_idx in range(np.ceil(tmp_trace1.shape[1]/20).astype(int)):
tmp_const = 20. * tmp_trace2.shape[1] / tmp_trace1.shape[1]
seg_tmp_trace1 = tmp_trace1[:, seg_idx*20:(seg_idx+1)*20, :4]
seg_tmp_trace2 = tmp_trace2[:, np.floor(seg_idx*tmp_const).astype(int) : np.ceil((seg_idx+1)*tmp_const).astype(int) , :4]
D = torch.abs(seg_tmp_trace1.unsqueeze(2) - seg_tmp_trace2.unsqueeze(1)).mean(dim=-1)
seg_tmp_T = local_OT(D, window = window_size)
seg_tmp_cost = (seg_tmp_T * D ).sum() / seg_tmp_trace1.shape[1]
if not torch.isnan(seg_tmp_cost):
seg_loss_list.append(seg_tmp_cost.item())
if len(seg_loss_list) != 0:
batch_loss_list.append(np.mean(np.array(seg_loss_list)))
loss = np.mean(np.array(batch_loss_list))
# loss_orig = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
else:
loss = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
# loss_orig = loss
if not np.isnan(loss):
loss_list.append(loss)
# loss_orig_list.append(loss_orig)
print('Running ave l1 loss:', np.mean(np.array(loss_list))) #, np.mean(np.array(loss_orig_list)))
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
print('Validation evaluation:', 'l1-loss:', val_loss)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils_joint.py
|
from __future__ import print_function
import argparse
def if_use_feat(caption_model):
    # Decide whether to load fc / attention features according to the caption model
if caption_model in ['show_tell', 'all_img', 'fc', 'newfc']:
use_att, use_fc = False, True
elif caption_model == 'language_model':
use_att, use_fc = False, False
elif caption_model in ['updown', 'topdown']:
use_fc, use_att = True, True
else:
use_att, use_fc = True, False
return use_fc, use_att
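# For reference, a few mappings implied by the branches above:
#   if_use_feat('fc')          -> (use_fc=True,  use_att=False)
#   if_use_feat('updown')      -> (use_fc=True,  use_att=True)
#   if_use_feat('transformer') -> (use_fc=False, use_att=True)   # falls into the default branch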
def parse_opt():
parser = argparse.ArgumentParser()
# training task: caption / trace / c_joint_t (joint training of controlled caption/trace generation)
# / pred_both (the task of predicting both caption and trace at the same time)
parser.add_argument('--task', type=str, default='c_joint_t',
help='The task to train on. Choose from caption/trace/c_joint_t/pred_both')
parser.add_argument('--eval_task', type=str, default='caption',
help='The task to evaluate on. Choose from caption/trace/pred_both')
# Data input settings
parser.add_argument('--input_json', type=str, default='data/coco.json',
help='path to the json file containing additional info and vocab')
parser.add_argument('--input_fc_dir', type=str, default='data/cocotalk_fc',
help='path to the directory containing the preprocessed fc feats')
parser.add_argument('--input_att_dir', type=str, default='data/cocotalk_att',
help='path to the directory containing the preprocessed att feats')
parser.add_argument('--input_box_dir', type=str, default='data/cocotalk_box',
help='path to the directory containing the boxes of att feats')
parser.add_argument('--input_trace_dir', type=str, default='data/cocoLN_trace_box_by_04second',
help='path to the directory containing the trace feats')
parser.add_argument('--input_trace_class_label_dir', type=str, default='data/trace_by_word_classification_label',
                    help='path to the directory containing the per-word trace classification labels')
parser.add_argument('--input_trace_feat_dir', type=str, default='/mnt/m2/Datasets/COCO/extracted_LN_trace_features_coco_by_word_correct_length',
help='path to trace feats (T*1024/2048)')
parser.add_argument('--input_label_h5', type=str, default='data/coco_label.h5',
help='path to the h5file containing the preprocessed dataset')
parser.add_argument('--data_in_memory', action='store_true',
help='True if we want to save the features in memory')
parser.add_argument('--start_from', type=str, default=None,
help="""continue training from saved model at this path. Path must contain files saved by previous training process:
'infos.pkl' : configuration;
'model.pth' : weights
""")
parser.add_argument('--cached_tokens', type=str, default='coco-train-idxs',
help='Cached token file for calculating cider score during self critical training.')
# Model settings
parser.add_argument('--caption_model', type=str, default="show_tell",
help='show_tell, show_attend_tell, all_img, fc, att2in, att2in2, att2all2, adaatt, adaattmo, updown, stackatt, denseatt, transformer')
parser.add_argument('--rnn_size', type=int, default=512,
help='size of the rnn in number of hidden nodes in each layer')
parser.add_argument('--num_layers', type=int, default=1,
help='number of layers in the RNN')
parser.add_argument('--rnn_type', type=str, default='lstm',
help='rnn, gru, or lstm')
parser.add_argument('--input_encoding_size', type=int, default=512,
help='the encoding size of each token in the vocabulary, and the image.')
parser.add_argument('--att_hid_size', type=int, default=512,
help='the hidden size of the attention MLP; only useful in show_attend_tell; 0 if not using hidden layer')
parser.add_argument('--fc_feat_size', type=int, default=2048,
help='2048 for resnet, 4096 for vgg')
parser.add_argument('--att_feat_size', type=int, default=2048,
help='2048 for resnet, 512 for vgg')
parser.add_argument('--logit_layers', type=int, default=1,
                    help='number of layers used to compute the output word logits')
parser.add_argument('--use_bn', type=int, default=0,
help='If 1, then do batch_normalization first in att_embed, if 2 then do bn both in the beginning and the end of att_embed')
# feature manipulation
parser.add_argument('--norm_att_feat', type=int, default=0,
help='If normalize attention features')
parser.add_argument('--use_box', type=int, default=0,
help='If use box features')
parser.add_argument('--norm_box_feat', type=int, default=0,
help='If use box, do we normalize box feature')
# Add trace information from Localized Narratives: shape = Tx5
parser.add_argument('--use_trace', type=int, default=0,
help='If use trace features')
# Add trace feature: shape = Tx1024(2048 later)
parser.add_argument('--use_trace_feat', type=int, default=0,
                    help='If use pre-extracted trace feature vectors (T x 1024/2048)')
parser.add_argument('--dataset_choice', type=str, default='coco',
                    help='use coco or flickr30k or others?')
parser.add_argument('--trace_max_length', type=int, default=225,
help='max length of trace/sentences')
# Optimization: General
parser.add_argument('--max_epochs', type=int, default=-1,
help='number of epochs')
parser.add_argument('--batch_size', type=int, default=16,
help='minibatch size')
parser.add_argument('--grad_clip_mode', type=str, default='value',
help='value or norm')
parser.add_argument('--grad_clip_value', type=float, default=0.1,
help='clip gradients at this value/max_norm, 0 means no clipping')
parser.add_argument('--drop_prob_lm', type=float, default=0.5,
help='strength of dropout in the Language Model RNN')
parser.add_argument('--self_critical_after', type=int, default=-1,
                    help='After what epoch do we start self-critical (RL) training? (-1 = disable)')
parser.add_argument('--seq_per_img', type=int, default=5,
help='number of captions to sample for each image during training. Done for efficiency since CNN forward pass is expensive. E.g. coco has 5 sents/image')
# Sample related
add_eval_sample_opts(parser)
#Optimization: for the Language Model
parser.add_argument('--optim', type=str, default='adam',
help='what update to use? rmsprop|sgd|sgdmom|adagrad|adam|adamw')
parser.add_argument('--learning_rate', type=float, default=4e-4,
help='learning rate')
parser.add_argument('--learning_rate_decay_start', type=int, default=-1,
help='at what iteration to start decaying learning rate? (-1 = dont) (in epoch)')
parser.add_argument('--learning_rate_decay_every', type=int, default=3,
                    help='every how many epochs thereafter to drop the learning rate')
    parser.add_argument('--learning_rate_decay_rate', type=float, default=0.8,
                    help='multiplicative factor applied to the learning rate at each decay step')
parser.add_argument('--optim_alpha', type=float, default=0.9,
help='alpha for adam')
parser.add_argument('--optim_beta', type=float, default=0.999,
help='beta used for adam')
parser.add_argument('--optim_epsilon', type=float, default=1e-8,
help='epsilon that goes into denominator for smoothing')
parser.add_argument('--weight_decay', type=float, default=0,
help='weight_decay')
# Transformer
parser.add_argument('--label_smoothing', type=float, default=0,
help='')
parser.add_argument('--noamopt', action='store_true',
help='')
parser.add_argument('--noamopt_warmup', type=int, default=2000,
help='')
parser.add_argument('--noamopt_factor', type=float, default=1,
help='')
parser.add_argument('--reduce_on_plateau', action='store_true',
help='')
parser.add_argument('--reduce_on_plateau_factor', type=float, default=0.5,
help='')
parser.add_argument('--reduce_on_plateau_patience', type=int, default=3,
help='')
parser.add_argument('--cached_transformer', action='store_true',
help='')
parser.add_argument('--use_warmup', action='store_true',
                    help='warm up the learning rate?')
parser.add_argument('--scheduled_sampling_start', type=int, default=-1,
help='at what iteration to start decay gt probability')
parser.add_argument('--scheduled_sampling_increase_every', type=int, default=5,
                    help='every how many iterations thereafter to increase gt probability')
parser.add_argument('--scheduled_sampling_increase_prob', type=float, default=0.05,
help='How much to update the prob')
parser.add_argument('--scheduled_sampling_max_prob', type=float, default=0.25,
help='Maximum scheduled sampling prob.')
# Evaluation/Checkpointing
parser.add_argument('--val_images_use', type=int, default=5000,
help='how many images to use when periodically evaluating the validation loss? (-1 = all)')
parser.add_argument('--save_checkpoint_every', type=int, default=2500,
help='how often to save a model checkpoint (in iterations)?')
parser.add_argument('--save_every_epoch', action='store_true',
help='Save checkpoint every epoch, will overwrite save_checkpoint_every')
parser.add_argument('--save_history_ckpt', type=int, default=0,
help='If save checkpoints at every save point')
parser.add_argument('--checkpoint_path', type=str, default=None,
help='directory to store checkpointed models')
parser.add_argument('--language_eval', type=int, default=0,
help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
parser.add_argument('--losses_log_every', type=int, default=25,
help='How often do we snapshot losses, for inclusion in the progress dump? (0 = disable)')
parser.add_argument('--load_best_score', type=int, default=1,
help='Do we load previous best score when resuming training.')
# misc
parser.add_argument('--id', type=str, default='',
help='an id identifying this run/job. used in cross-val and appended when writing progress files')
parser.add_argument('--train_only', type=int, default=0,
help='if true then use 80k, else use 110k')
# Reward
parser.add_argument('--cider_reward_weight', type=float, default=1,
help='The reward weight from cider')
parser.add_argument('--bleu_reward_weight', type=float, default=0,
help='The reward weight from bleu4')
# Structure_loss
parser.add_argument('--structure_loss_weight', type=float, default=1,
help='')
parser.add_argument('--structure_after', type=int, default=-1,
                    help='After what epoch do we start using the structure loss? (-1 = disable)')
parser.add_argument('--structure_loss_type', type=str, default='seqnll',
help='')
parser.add_argument('--struc_use_logsoftmax', action='store_true', help='')
parser.add_argument('--entropy_reward_weight', type=float, default=0,
help='Entropy reward, seems very interesting')
parser.add_argument('--self_cider_reward_weight', type=float, default=0,
help='self cider reward')
# Used for self critical or structure. Used when sampling is need during training
parser.add_argument('--train_sample_n', type=int, default=16,
                    help='number of captions to sample per image when sampling is needed during training')
parser.add_argument('--train_sample_method', type=str, default='sample',
help='')
parser.add_argument('--train_beam_size', type=int, default=1,
help='')
# Used for self critical
parser.add_argument('--sc_sample_method', type=str, default='greedy',
help='')
parser.add_argument('--sc_beam_size', type=int, default=1,
help='')
# For diversity evaluation during training
add_diversity_opts(parser)
# config
parser.add_argument('--cfg', type=str, default=None,
help='configuration; similar to what is used in detectron')
parser.add_argument(
'--set_cfgs', dest='set_cfgs',
        help='Set config keys. Key value sequence separated by whitespace. '
             'e.g. [key] [value] [key] [value]\n This has higher priority '
             'than cfg file but lower than other args. (You can only overwrite '
             'arguments that have already been defined in the config file.)',
default=[], nargs='+')
# How will config be used
# 1) read cfg argument, and load the cfg file if it's not None
# 2) Overwrite cfg argument with set_cfgs
# 3) parse config argument to args.
# 4) in the end, parse command line argument and overwrite args
# step 1: read cfg_fn
args = parser.parse_args()
if args.cfg is not None or args.set_cfgs is not None:
from .config import CfgNode
if args.cfg is not None:
cn = CfgNode(CfgNode.load_yaml_with_base(args.cfg))
else:
cn = CfgNode()
if args.set_cfgs is not None:
cn.merge_from_list(args.set_cfgs)
for k,v in cn.items():
if not hasattr(args, k):
print('Warning: key %s not in args' %k)
setattr(args, k, v)
args = parser.parse_args(namespace=args)
# Check if args are valid
assert args.rnn_size > 0, "rnn_size should be greater than 0"
assert args.num_layers > 0, "num_layers should be greater than 0"
assert args.input_encoding_size > 0, "input_encoding_size should be greater than 0"
assert args.batch_size > 0, "batch_size should be greater than 0"
assert args.drop_prob_lm >= 0 and args.drop_prob_lm < 1, "drop_prob_lm should be between 0 and 1"
assert args.seq_per_img > 0, "seq_per_img should be greater than 0"
assert args.beam_size > 0, "beam_size should be greater than 0"
assert args.save_checkpoint_every > 0, "save_checkpoint_every should be greater than 0"
assert args.losses_log_every > 0, "losses_log_every should be greater than 0"
assert args.language_eval == 0 or args.language_eval == 1, "language_eval should be 0 or 1"
    assert args.load_best_score == 0 or args.load_best_score == 1, "load_best_score should be 0 or 1"
    assert args.train_only == 0 or args.train_only == 1, "train_only should be 0 or 1"
# default value for start_from and checkpoint_path
args.checkpoint_path = args.checkpoint_path or './log_%s' %args.id
args.start_from = args.start_from or args.checkpoint_path
# Deal with feature things before anything
args.use_fc, args.use_att = if_use_feat(args.caption_model)
#if args.use_box: args.att_feat_size = args.att_feat_size + 5 # commented by zihang
return args
def add_eval_options(parser):
# Basic options
parser.add_argument('--batch_size', type=int, default=0,
help='if > 0 then overrule, otherwise load from checkpoint.')
parser.add_argument('--num_images', type=int, default=-1,
help='how many images to use when periodically evaluating the loss? (-1 = all)')
parser.add_argument('--language_eval', type=int, default=0,
help='Evaluate language as well (1 = yes, 0 = no)? BLEU/CIDEr/METEOR/ROUGE_L? requires coco-caption code from Github.')
parser.add_argument('--dump_images', type=int, default=1,
help='Dump images into vis/imgs folder for vis? (1=yes,0=no)')
parser.add_argument('--dump_json', type=int, default=1,
help='Dump json with predictions into vis folder? (1=yes,0=no)')
parser.add_argument('--dump_path', type=int, default=0,
help='Write image paths along with predictions into vis json? (1=yes,0=no)')
# Sampling options
add_eval_sample_opts(parser)
# For evaluation on a folder of images:
parser.add_argument('--image_folder', type=str, default='',
help='If this is nonempty then will predict on the images in this folder path')
parser.add_argument('--image_root', type=str, default='',
                        help='In case the image paths have to be prepended with a root path to an image folder')
# For evaluation on MSCOCO images from some split:
    parser.add_argument('--input_fc_dir', type=str, default='',
                        help='path to the directory containing the preprocessed fc feats')
    parser.add_argument('--input_att_dir', type=str, default='',
                        help='path to the directory containing the preprocessed att feats')
    parser.add_argument('--input_box_dir', type=str, default='',
                        help='path to the directory containing the boxes of att feats')
parser.add_argument('--input_trace_dir', type=str, default='',
help='path to trace boxes (T*5)') ## h5file??
parser.add_argument('--input_trace_feat_dir', type=str, default='',
help='path to trace feats (T*1024/2048)') ## h5file??
parser.add_argument('--input_label_h5', type=str, default='',
help='path to the h5file containing the preprocessed dataset')
parser.add_argument('--input_json', type=str, default='',
help='path to the json file containing additional info and vocab. empty = fetch from model checkpoint.')
parser.add_argument('--split', type=str, default='test',
help='if running on MSCOCO images, which split to use: val|test|train')
parser.add_argument('--coco_json', type=str, default='',
help='if nonempty then use this file in DataLoaderRaw (see docs there). Used only in MSCOCO test evaluation, where we have a specific json file of only test set images.')
# misc
parser.add_argument('--id', type=str, default='',
help='an id identifying this run/job. used only if language_eval = 1 for appending to intermediate files')
parser.add_argument('--verbose_beam', type=int, default=1,
help='if we need to print out all beam search beams.')
parser.add_argument('--verbose_loss', type=int, default=0,
help='If calculate loss using ground truth during evaluation')
def add_diversity_opts(parser):
parser.add_argument('--sample_n', type=int, default=1,
help='Diverse sampling')
parser.add_argument('--sample_n_method', type=str, default='sample',
help='sample, bs, dbs, gumbel, topk, dgreedy, dsample, dtopk, dtopp')
parser.add_argument('--eval_oracle', type=int, default=1,
help='if we need to calculate loss.')
# Sampling related options
def add_eval_sample_opts(parser):
parser.add_argument('--sample_method', type=str, default='greedy',
help='greedy; sample; gumbel; top<int>, top<0-1>')
parser.add_argument('--beam_size', type=int, default=1,
help='used when sample_method = greedy, indicates number of beams in beam search. Usually 2 or 3 works well. More is not better. Set this to 1 for faster runtime but a bit worse performance.')
parser.add_argument('--max_length', type=int, default=20,
help='Maximum length during sampling')
parser.add_argument('--length_penalty', type=str, default='',
help='wu_X or avg_X, X is the alpha')
parser.add_argument('--group_size', type=int, default=1,
help='used for diverse beam search. if group_size is 1, then it\'s normal beam search')
parser.add_argument('--diversity_lambda', type=float, default=0.5,
help='used for diverse beam search. Usually from 0.2 to 0.8. Higher value of lambda produces a more diverse list')
parser.add_argument('--temperature', type=float, default=1.0,
help='temperature when sampling from distributions (i.e. when sample_method = sample). Lower = "safer" predictions.')
parser.add_argument('--decoding_constraint', type=int, default=0,
help='If 1, not allowing same word in a row')
parser.add_argument('--block_trigrams', type=int, default=0,
help='block repeated trigram.')
parser.add_argument('--remove_bad_endings', type=int, default=0,
help='Remove bad endings')
parser.add_argument('--suppress_UNK', type=int, default=1,
help='Not predicting UNK')
if __name__ == '__main__':
import sys
sys.argv = [sys.argv[0]]
args = parse_opt()
print(args)
print()
sys.argv = [sys.argv[0], '--cfg', 'configs/updown_long.yml']
args1 = parse_opt()
print(dict(set(vars(args1).items()) - set(vars(args).items())))
print()
sys.argv = [sys.argv[0], '--cfg', 'configs/updown_long.yml', '--caption_model', 'att2in2']
args2 = parse_opt()
print(dict(set(vars(args2).items()) - set(vars(args1).items())))
|
connect-caption-and-trace-main
|
captioning/utils/opts.py
|
import torch
import scipy.optimize
import numpy as np
def local_OT(D, window = 0):
p = D.shape[1]; m = D.shape[2] # p < m, e.g., p = 10, m = 20
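    # The constraints built below define a linear program  min_T <T, D>  over transport
    # plans T of shape (p, m):
    #   A, b              : each of the p rows of T must sum to 1 (every step of the shorter
    #                       trace is fully matched)
    #   G, h              : each of the m columns of T is capped at 1
    #   A_local, b_local  : entries outside a diagonal band of half-width `window` (in units
    #                       of m/p) are forced to 0, keeping the matching roughly local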
    # construct c, A_eq/b_eq and G_ub/h_ub for scipy.optimize.linprog
A = torch.zeros([p,p*m])
b = torch.ones([p])
for i in range(p):
A[i, (i)*m:(i+1)*m] = 1
G = torch.zeros([m, p*m])
for i in range(m):
for j in range(p):
G[i, j*m+i] = 1
h = torch.ones([m])
A_local = torch.zeros([p, p, m])
for i in range(p):
# left = np.floor((i - window) * (m*1.0/p))
# right = (i + window) * (m*1.0/p)
left = (i - window) * (m * 1.0 / p)
right = (i + 1 + window) * (m * 1.0 / p)
for j in range(m):
# if j < left or j >= right:
if j < left or j >= right:
A_local[i, i, j] = 1
# if i+window+1<=m-1:
# A_local[i, i, i+(window+1):] = 1
# if i-(window+1) >=0:
# A_local[i, i, :i-window] = 1
A_local = A_local.view([p, p*m])
b_local = torch.zeros([p])
A = torch.cat([A, A_local], 0).numpy()
b = torch.cat([b, b_local], 0).numpy()
G = G.numpy()
h = h.numpy()
T_list = []
for i in range(D.shape[0]):
c = D[i].view(-1).detach().cpu().numpy()
try:
sol = scipy.optimize.linprog(c, A_ub = G, b_ub = h, A_eq = A, b_eq = b, bounds=(0, 1)) #options={'maxiter': 200, 'sym_pos':False}
sol_x = torch.from_numpy(sol.x).view([p,m]).float()
except:
sol_x = torch.cat([torch.eye(p), torch.zeros(p, m-p)], 1)
T_list.append(sol_x)
T = torch.stack(T_list, 0)
return T.to(D.device) #(D * T.cuda()).sum() / p #(T>0.5).float() # binarize it
### quick sanity check for local_OT
if __name__ == '__main__':
    D = torch.rand([1, 10, 20])
    cost_orig = torch.diag(D[0]).sum()
    T = local_OT(D)
    cost_new = (D * T).sum()
    print(cost_orig, cost_new)
|
connect-caption-and-trace-main
|
captioning/utils/local_optimal_transport.py
|
from random import uniform
import numpy as np
from collections import OrderedDict, defaultdict
from itertools import tee
import time
# -----------------------------------------------
def find_ngrams(input_list, n):
return zip(*[input_list[i:] for i in range(n)])
def compute_div_n(caps,n=1):
aggr_div = []
for k in caps:
all_ngrams = set()
lenT = 0.
for c in caps[k]:
tkns = c.split()
lenT += len(tkns)
ng = find_ngrams(tkns, n)
all_ngrams.update(ng)
aggr_div.append(float(len(all_ngrams))/ (1e-6 + float(lenT)))
return np.array(aggr_div).mean(), np.array(aggr_div)
def compute_global_div_n(caps,n=1):
aggr_div = []
all_ngrams = set()
lenT = 0.
for k in caps:
for c in caps[k]:
tkns = c.split()
lenT += len(tkns)
ng = find_ngrams(tkns, n)
all_ngrams.update(ng)
if n == 1:
aggr_div.append(float(len(all_ngrams)))
else:
aggr_div.append(float(len(all_ngrams))/ (1e-6 + float(lenT)))
return aggr_div[0], np.repeat(np.array(aggr_div),len(caps))
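# A small usage sketch (not part of the original repo): `caps` maps an image id to a list of
# sampled captions; Div-n is the per-image ratio of distinct n-grams to generated tokens,
# averaged over images.
if __name__ == '__main__':
    caps = {
        'img1': ['a dog runs on the grass', 'a dog is running on grass'],
        'img2': ['two people ride bikes', 'two people riding their bikes'],
    }
    for n in (1, 2):
        mean_div, _ = compute_div_n(caps, n)
        print('Div-%d: %.3f' % (n, mean_div))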
|
connect-caption-and-trace-main
|
captioning/utils/div_utils.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import time
from collections import OrderedDict
import torch
import sys
try:
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD import CiderD
from pyciderevalcap.cider.cider import Cider
sys.path.append("coco-caption")
from pycocoevalcap.bleu.bleu import Bleu
except:
print('cider or coco-caption missing')
CiderD_scorer = None
Cider_scorer = None
Bleu_scorer = None
#CiderD_scorer = CiderD(df='corpus')
def init_scorer(cached_tokens):
global CiderD_scorer
CiderD_scorer = CiderD_scorer or CiderD(df=cached_tokens)
global Cider_scorer
Cider_scorer = Cider_scorer or Cider(df=cached_tokens)
global Bleu_scorer
Bleu_scorer = Bleu_scorer or Bleu(4)
def array_to_str(arr):
out = ''
for i in range(len(arr)):
out += str(arr[i]) + ' '
if arr[i] == 0:
break
return out.strip()
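# Note: the sequence is truncated at (but keeps) the first 0 token, which marks EOS,
# e.g. array_to_str([3, 7, 0, 2]) == '3 7 0'.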
def get_self_critical_reward(greedy_res, data_gts, gen_result, opt):
batch_size = len(data_gts)
gen_result_size = gen_result.shape[0]
seq_per_img = gen_result_size // len(data_gts) # gen_result_size = batch_size * seq_per_img
assert greedy_res.shape[0] == batch_size
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
greedy_res = greedy_res.data.cpu().numpy()
for i in range(gen_result_size):
res[i] = [array_to_str(gen_result[i])]
for i in range(batch_size):
res[gen_result_size + i] = [array_to_str(greedy_res[i])]
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
res_ = [{'image_id':i, 'caption': res[i]} for i in range(len(res))]
res__ = {i: res[i] for i in range(len(res_))}
gts_ = {i: gts[i // seq_per_img] for i in range(gen_result_size)}
gts_.update({i+gen_result_size: gts[i] for i in range(batch_size)})
if opt.cider_reward_weight > 0:
_, cider_scores = CiderD_scorer.compute_score(gts_, res_)
print('Cider scores:', _)
else:
cider_scores = 0
if opt.bleu_reward_weight > 0:
_, bleu_scores = Bleu_scorer.compute_score(gts_, res__)
bleu_scores = np.array(bleu_scores[3])
print('Bleu scores:', _[3])
else:
bleu_scores = 0
scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
scores = scores[:gen_result_size].reshape(batch_size, seq_per_img) - scores[-batch_size:][:, np.newaxis]
scores = scores.reshape(gen_result_size)
rewards = np.repeat(scores[:, np.newaxis], gen_result.shape[1], 1)
return rewards
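# Illustration with made-up numbers (a sketch, not part of the original pipeline): the sampled
# captions' scores are reshaped to (batch_size, seq_per_img), the greedy caption's score for the
# same image is subtracted as a baseline, and the resulting advantage is repeated over time steps.
def _demo_baseline_subtraction():
    batch_size, seq_per_img, seq_len = 2, 2, 3
    scores = np.array([0.9, 0.7, 1.1, 0.5, 0.8, 0.6])  # 4 sampled + 2 greedy scores (hypothetical)
    adv = scores[:batch_size * seq_per_img].reshape(batch_size, seq_per_img) \
          - scores[-batch_size:][:, np.newaxis]
    rewards = np.repeat(adv.reshape(-1)[:, np.newaxis], seq_len, 1)
    return rewards  # shape (4, 3); e.g. rewards[0, 0] == 0.9 - 0.8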
def get_scores(data_gts, gen_result, opt):
batch_size = gen_result.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
res = OrderedDict()
gen_result = gen_result.data.cpu().numpy()
for i in range(batch_size):
res[i] = [array_to_str(gen_result[i])]
gts = OrderedDict()
for i in range(len(data_gts)):
gts[i] = [array_to_str(data_gts[i][j]) for j in range(len(data_gts[i]))]
res_ = [{'image_id':i, 'caption': res[i]} for i in range(batch_size)]
res__ = {i: res[i] for i in range(batch_size)}
gts = {i: gts[i // seq_per_img] for i in range(batch_size)}
if opt.cider_reward_weight > 0:
_, cider_scores = CiderD_scorer.compute_score(gts, res_)
print('Cider scores:', _)
else:
cider_scores = 0
if opt.bleu_reward_weight > 0:
_, bleu_scores = Bleu_scorer.compute_score(gts, res__)
bleu_scores = np.array(bleu_scores[3])
print('Bleu scores:', _[3])
else:
bleu_scores = 0
scores = opt.cider_reward_weight * cider_scores + opt.bleu_reward_weight * bleu_scores
return scores
def get_self_cider_scores(data_gts, gen_result, opt):
batch_size = gen_result.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
res = []
gen_result = gen_result.data.cpu().numpy()
for i in range(batch_size):
res.append(array_to_str(gen_result[i]))
scores = []
for i in range(len(data_gts)):
tmp = Cider_scorer.my_self_cider([res[i*seq_per_img:(i+1)*seq_per_img]])
def get_div(eigvals):
eigvals = np.clip(eigvals, 0, None)
return -np.log(np.sqrt(eigvals[-1]) / (np.sqrt(eigvals).sum())) / np.log(len(eigvals))
scores.append(get_div(np.linalg.eigvalsh(tmp[0]/10)))
scores = np.array(scores)
return scores
|
connect-caption-and-trace-main
|
captioning/utils/rewards.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
from .local_optimal_transport import local_OT
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
# annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set # use on 11/01
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
print(num_images)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'], data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'caption'), labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
seq, seq_logprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
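        # Both statistics are per-caption averages over generated (non-padding) tokens:
        # 'entropy' is the mean entropy of the softmax output distribution and 'perplexity'
        # is the mean negative log-probability of the sampled tokens (kept in log space,
        # not exponentiated).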
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
            print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
# Only run when sample_n > 1
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
        for k in range(fc_feats.shape[0]):  # no dataloader is in scope here; iterate over the batch
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
# print(data['infos'][0]['id'])
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
loss_prev_gt_list = []
acc_list = []
with torch.no_grad():
use_local_OT = False
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
# trace_class_label = trace_feats[:,:,5] - 1
# pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
# prev_gt_correct
prev_gt_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
prev_gt_out = prev_gt_out * trace_masks.unsqueeze(2)
# print(prev_gt_out[0, :, :5])
loss_prev_gt_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
loss_prev_gt = ((torch.abs(prev_gt_out[:, :, :4] - trace_feats[:, :, :4]) * loss_prev_gt_mask).sum() / (
loss_prev_gt_mask.sum() * 4)).item()
loss_prev_gt_list.append(loss_prev_gt)
for i in range(trace_feats.shape[1]):
# for regression
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
tmp_trace_feats[:, i, :5] = curr_out
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks) # for non-iteratively
# break # for non-iteratively
# tmp_trace_feats = curr_out # for non-iteratively
# ### save for visualization # for visualization of trace_generation
# vis_img_id = data['infos'][0]['id']
# np.save('./vis/trace_generation_2/pred_trace_'+str(vis_img_id), tmp_trace_feats[:,:,:4].detach().cpu().numpy())
# np.save('./vis/trace_generation_2/gt_trace_' + str(vis_img_id), trace_feats[:,:,:4].detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open('./vis/trace_generation_2/info.txt', 'a') as f:
# f.write('img_id:%d, l1-loss: %f\n'%(vis_img_id,(crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()))
# f.close()
# ############################
# tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2) #
if use_local_OT:
D = torch.abs(tmp_trace_feats[:, :, :4].unsqueeze(2) - trace_feats[:, :, :4].unsqueeze(1)).mean(dim=-1)
T = local_OT(D).to(tmp_trace_feats.device)
loss = ((torch.abs(torch.matmul(tmp_trace_feats[:, :, :4].transpose(1, 2), T).transpose(1, 2) -
trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
print('loss', loss, 'loss_orig', (
(torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
loss_mask.sum() * 4)).item())
else:
loss = ((torch.abs(tmp_trace_feats[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)).item()
# loss = (crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; prev_gt_loss:', loss_prev_gt)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
val_loss_prev_gt = np.mean(np.array(loss_prev_gt_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; prev_gt_loss:', val_loss_prev_gt)
model.train()
return val_loss
def eval_trace_generation_classification(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
acc_list = []
with torch.no_grad():
# get the loss in terms of cross-entropy
model_outputs = model(fc_feats, att_feats, trace_feats[:,:,1:], box_feats, labels[..., :-1], att_masks,
trace_masks)
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
trace_class_label = trace_feats[:, :, 0] * (trace_feats[:, :, 5] != 1).float() - 1
trace_class_label = trace_class_label.view(-1).long()
loss_ce = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1).item()
loss_ce_list.append(loss_ce)
gt_prev_acc = (((model_outputs.argmax(dim=1) == trace_class_label)*(trace_class_label!=-1).float()).sum() / \
(trace_class_label != -1).float().sum()).item()
gt_prev_acc_list.append(gt_prev_acc)
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats.clone()
trace_class_label = trace_feats[:, :, 0] * (trace_feats[:, :, 5] != 1).float() - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
# for classification
curr_out = model(fc_feats, att_feats, tmp_trace_feats[:,:,1:], box_feats, labels[..., :-1], att_masks, trace_masks)[:,i]
curr_out = curr_out.argmax(dim=-1)
pred_class_label[:, i] = curr_out
curr_out = box_feats[np.arange(box_feats.shape[0]), curr_out]
tmp_trace_feats[:, i, 1:6] = curr_out
# print('prev_gt_class_label', model_outputs.argmax(dim=1).view(pred_class_label.shape)[0])
# print('pred_class_label',pred_class_label[0])
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 5] != 1))
classification_acc = ((pred_class_label == trace_class_label) * loss_mask).sum().float() / loss_mask.sum()
acc_list.append(classification_acc.item())
loss_mask = loss_mask.unsqueeze(2)
loss = ((torch.abs(tmp_trace_feats[:, :, 1:5] - trace_feats[:, :, 1:5]) * loss_mask).sum() / (
loss_mask.sum() * 4)).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; loss_ce:', loss_ce, '; classification-acc:', classification_acc.item(), '; gt_prev_acc:', gt_prev_acc)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss_ce = np.mean(np.array(loss_ce_list))
val_gt_prev_acc = np.mean(np.array(gt_prev_acc_list))
val_loss = np.mean(np.array(loss_list))
val_acc = np.mean(np.array(acc_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; loss_ce:', val_loss_ce, '; classification-acc:', val_acc, '; gt_prev_acc:', val_gt_prev_acc)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import json
from json import encoder
import random
import string
import time
import os
import sys
from . import misc as utils
# load coco-caption if available
try:
sys.path.append("coco-caption")
from pycocotools.coco import COCO
from pycocoevalcap.eval import COCOEvalCap
except:
print('Warning: coco-caption not available')
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def count_bad(sen):
sen = sen.split(' ')
if sen[-1] in bad_endings:
return 1
else:
return 0
def getCOCO(dataset):
if 'coco' in dataset:
#annFile = 'coco-caption/annotations/captions_val2014.json'
# annFile = 'coco-caption/annotations/captions_LN_val2014_norepeat.json' # load localized narratives for evaluation
annFile = 'coco-caption/annotations/captions_LN_8kval.json' # load 8k LN validation set
elif 'flickr30k' in dataset or 'f30k' in dataset:
annFile = 'data/f30k_captions4eval.json'
return COCO(annFile)
def language_eval(dataset, preds, preds_n, eval_kwargs, split):
model_id = eval_kwargs['id']
eval_oracle = eval_kwargs.get('eval_oracle', 0)
# create output dictionary
out = {}
if len(preds_n) > 0:
# vocab size and novel sentences
if 'coco' in dataset:
dataset_file = 'data/dataset_coco.json'
elif 'flickr30k' in dataset or 'f30k' in dataset:
dataset_file = 'data/dataset_flickr30k.json'
training_sentences = set([' '.join(__['tokens']) for _ in json.load(open(dataset_file))['images'] if not _['split'] in ['val', 'test'] for __ in _['sentences']])
generated_sentences = set([_['caption'] for _ in preds_n])
novels = generated_sentences - training_sentences
out['novel_sentences'] = float(len(novels)) / len(preds_n)
tmp = [_.split() for _ in generated_sentences]
words = []
for _ in tmp:
words += _
out['vocab_size'] = len(set(words))
# encoder.FLOAT_REPR = lambda o: format(o, '.3f')
cache_path = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '.json')
coco = getCOCO(dataset)
valids = coco.getImgIds()
# filter results to only those in MSCOCO validation set
preds_filt = [p for p in preds if p['image_id'] in valids]
mean_perplexity = sum([_['perplexity'] for _ in preds_filt]) / len(preds_filt)
mean_entropy = sum([_['entropy'] for _ in preds_filt]) / len(preds_filt)
print('using %d/%d predictions' % (len(preds_filt), len(preds)))
json.dump(preds_filt, open(cache_path, 'w')) # serialize to temporary json file. Sigh, COCO API...
cocoRes = coco.loadRes(cache_path)
cocoEval = COCOEvalCap(coco, cocoRes)
cocoEval.params['image_id'] = cocoRes.getImgIds()
cocoEval.evaluate()
for metric, score in cocoEval.eval.items():
out[metric] = score
# Add mean perplexity
out['perplexity'] = mean_perplexity
out['entropy'] = mean_entropy
imgToEval = cocoEval.imgToEval
for k in list(imgToEval.values())[0]['SPICE'].keys():
if k != 'All':
out['SPICE_'+k] = np.array([v['SPICE'][k]['f'] for v in imgToEval.values()])
out['SPICE_'+k] = (out['SPICE_'+k][out['SPICE_'+k]==out['SPICE_'+k]]).mean()
for p in preds_filt:
image_id, caption = p['image_id'], p['caption']
imgToEval[image_id]['caption'] = caption
if len(preds_n) > 0:
from . import eval_multi
cache_path_n = os.path.join('eval_results/', '.cache_'+ model_id + '_' + split + '_n.json')
allspice = eval_multi.eval_allspice(dataset, preds_n, model_id, split)
out.update(allspice['overall'])
div_stats = eval_multi.eval_div_stats(dataset, preds_n, model_id, split)
out.update(div_stats['overall'])
if eval_oracle:
oracle = eval_multi.eval_oracle(dataset, preds_n, model_id, split)
out.update(oracle['overall'])
else:
oracle = None
self_cider = eval_multi.eval_self_cider(dataset, preds_n, model_id, split)
out.update(self_cider['overall'])
with open(cache_path_n, 'w') as outfile:
json.dump({'allspice': allspice, 'div_stats': div_stats, 'oracle': oracle, 'self_cider': self_cider}, outfile)
out['bad_count_rate'] = sum([count_bad(_['caption']) for _ in preds_filt]) / float(len(preds_filt))
outfile_path = os.path.join('eval_results/', model_id + '_' + split + '.json')
with open(outfile_path, 'w') as outfile:
json.dump({'overall': out, 'imgToEval': imgToEval}, outfile)
return out
def eval_split(model, crit, loader, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
verbose_beam = eval_kwargs.get('verbose_beam', 0)
verbose_loss = eval_kwargs.get('verbose_loss', 1)
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
split = eval_kwargs.get('split', 'val')
lang_eval = eval_kwargs.get('language_eval', 0)
dataset = eval_kwargs.get('dataset', 'coco')
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
remove_bad_endings = eval_kwargs.get('remove_bad_endings', 0)
os.environ["REMOVE_BAD_ENDINGS"] = str(remove_bad_endings) # Use this nasty way to make other code clean since it's a global configuration
device = eval_kwargs.get('device', 'cuda')
# Make sure in the evaluation mode
model.eval()
loader.reset_iterator(split)
n = 0
loss = 0
loss_sum = 0
loss_evals = 1e-8
predictions = []
n_predictions = [] # when sample_n > 1
while True:
data = loader.get_batch(split)
# print('In eval_utils:', split)###zihang
print(num_images)
n = n + len(data['infos'])
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'], data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
if labels is not None and verbose_loss:
# forward the model to get loss
with torch.no_grad():
loss = crit(model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks), labels[..., 1:], masks[..., 1:]).item()
loss_sum = loss_sum + loss
loss_evals = loss_evals + 1
# forward the model to also get generated samples for each image
with torch.no_grad():
tmp_eval_kwargs = eval_kwargs.copy()
tmp_eval_kwargs.update({'sample_n': 1})
seq, seq_logprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
seq = seq.data
entropy = - (F.softmax(seq_logprobs, dim=2) * seq_logprobs).sum(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
perplexity = - seq_logprobs.gather(2, seq.unsqueeze(2)).squeeze(2).sum(1) / ((seq>0).to(seq_logprobs).sum(1)+1)
# Print beam search
if beam_size > 1 and verbose_beam:
for i in range(fc_feats.shape[0]):
print('\n'.join([utils.decode_sequence(model.vocab, _['seq'].unsqueeze(0))[0] for _ in model.done_beams[i]]))
print('--' * 10)
sents = utils.decode_sequence(model.vocab, seq)
for k, sent in enumerate(sents):
entry = {'image_id': data['infos'][k]['id'], 'caption': sent, 'perplexity': perplexity[k].item(), 'entropy': entropy[k].item()}
if eval_kwargs.get('dump_path', 0) == 1:
entry['file_name'] = data['infos'][k]['file_path']
predictions.append(entry)
if eval_kwargs.get('dump_images', 0) == 1:
# dump the raw image to vis/ folder
cmd = 'cp "' + os.path.join(eval_kwargs['image_root'], data['infos'][k]['file_path']) + '" vis/imgs/img' + str(len(predictions)) + '.jpg' # bit gross
print(cmd)
os.system(cmd)
if verbose:
print('image %s: %s' %(entry['image_id'], entry['caption']))
if sample_n > 1:
eval_split_n(model, n_predictions, [fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data], eval_kwargs)
# ix0 = data['bounds']['it_pos_now']
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
# print('len:', len(predictions), n, ix1, split, num_images) ###zihang
for i in range(n - ix1):
predictions.pop()
if verbose:
print('evaluating validation performance... %d/%d (%f)' %(n, ix1, loss))
if num_images >= 0 and n >= num_images:
break
lang_stats = None
if len(n_predictions) > 0 and 'perplexity' in n_predictions[0]:
n_predictions = sorted(n_predictions, key=lambda x: x['perplexity'])
if not os.path.isdir('eval_results'):
os.mkdir('eval_results')
torch.save((predictions, n_predictions), os.path.join('eval_results/', '.saved_pred_'+ eval_kwargs['id'] + '_' + split + '.pth'))
if lang_eval == 1:
lang_stats = language_eval(dataset, predictions, n_predictions, eval_kwargs, split)
# Switch back to training mode
model.train()
return loss_sum/loss_evals, predictions, lang_stats
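# --- illustrative sketch (not part of the original file) ---
# A typical eval_kwargs dict for eval_split, assuming a trained model, a
# criterion and a dataloader are already available; every key below is read
# back with .get() (or indexed directly, e.g. 'id') inside eval_split and
# language_eval. The values are only an example configuration.
_example_eval_kwargs = {
    'id': 'demo_model',       # used to name the saved prediction / result files
    'split': 'val',
    'num_images': 500,        # -1 evaluates the whole split
    'dataset': 'coco',
    'language_eval': 1,       # run the caption metrics in language_eval
    'beam_size': 3,
    'sample_n': 1,
    'verbose': True,
    'device': 'cuda',
}
# loss, predictions, lang_stats = eval_split(model, crit, loader, _example_eval_kwargs)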
# Only run when sample_n > 1
def eval_split_n(model, n_predictions, input_data, eval_kwargs={}):
verbose = eval_kwargs.get('verbose', True)
beam_size = eval_kwargs.get('beam_size', 1)
sample_n = eval_kwargs.get('sample_n', 1)
sample_n_method = eval_kwargs.get('sample_n_method', 'sample')
fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, data = input_data
tmp_eval_kwargs = eval_kwargs.copy()
if sample_n_method == 'bs':
# case 1 sample_n == beam size
tmp_eval_kwargs.update({'sample_n': 1, 'beam_size': sample_n, 'group_size': 1}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(fc_feats.shape[0]):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(sample_n)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
# case 2 sample / gumbel / topk sampling/ nucleus sampling
elif sample_n_method == 'sample' or \
sample_n_method == 'gumbel' or \
sample_n_method.startswith('top'):
tmp_eval_kwargs.update({'sample_n': sample_n, 'sample_method': sample_n_method, 'beam_size': 1}) # randomness from sample
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
_perplexity = - _sampleLogprobs.gather(2, _seq.unsqueeze(2)).squeeze(2).sum(1) / ((_seq>0).to(_sampleLogprobs).sum(1)+1)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent, 'perplexity': _perplexity[k].item()}
n_predictions.append(entry)
elif sample_n_method == 'dbs':
# Use diverse beam search
tmp_eval_kwargs.update({'beam_size': sample_n * beam_size, 'group_size': sample_n}) # randomness from softmax
with torch.no_grad():
model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
for k in range(loader.batch_size):
_sents = utils.decode_sequence(model.vocab, torch.stack([model.done_beams[k][_]['seq'] for _ in range(0, sample_n*beam_size, beam_size)]))
for sent in _sents:
entry = {'image_id': data['infos'][k]['id'], 'caption': sent}
n_predictions.append(entry)
else:
tmp_eval_kwargs.update({'sample_method': sample_n_method[1:], 'group_size': sample_n, 'beam_size':1}) # randomness from softmax
with torch.no_grad():
_seq, _sampleLogprobs = model(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt=tmp_eval_kwargs, mode='sample')
_sents = utils.decode_sequence(model.vocab, _seq)
for k, sent in enumerate(_sents):
entry = {'image_id': data['infos'][k // sample_n]['id'], 'caption': sent}
n_predictions.append(entry)
if verbose:
for entry in sorted(n_predictions[-fc_feats.shape[0] * sample_n:], key=lambda x: x['image_id']):
print('image %s: %s' %(entry['image_id'], entry['caption']))
def eval_trace_generation(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
print(data['infos'][0]['id'])
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
loss_prev_gt_list = []
acc_list = []
with torch.no_grad():
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats
trace_class_label = trace_feats[:,:,5] - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
# prev_gt_correct
prev_gt_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
prev_gt_out = prev_gt_out * trace_masks.unsqueeze(2)
loss_prev_gt = crit(prev_gt_out[:, :, :4], trace_feats[:, :, :4]).item()
loss_prev_gt_list.append(loss_prev_gt)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks, 'trace')
# tmp_trace_feats[:, i, :5] = curr_out
break
tmp_trace_feats = curr_out
### save intermediate results for visualization of trace_generation
# vis_img_id = data['infos'][0]['id']
# np.save('./vis/trace_generation/pred_trace_'+str(vis_img_id), curr_out.detach().cpu().numpy())
# np.save('./vis/trace_generation/gt_trace_' + str(vis_img_id), trace_feats.detach().cpu().numpy())
# print(vis_img_id, crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item(), trace_feats.shape)
# with open('./vis/trace_generation/info.txt', 'a') as f:
# f.write('img_id:%d, l1-loss: %f\n'%(vis_img_id,(crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()))
# f.close()
############################
tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss = (torch.abs(tmp_trace_feats[:,:,:4] - trace_feats[:,:,:4]).sum() / ((trace_masks!=0).sum() * 4)).item()
# loss = (crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]) * trace_masks.shape[0]*trace_masks.shape[1] / (trace_masks!=0).sum()).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; prev_gt_loss:', loss_prev_gt)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss = np.mean(np.array(loss_list))
val_loss_prev_gt = np.mean(np.array(loss_prev_gt_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; prev_gt_loss:', val_loss_prev_gt)
model.train()
return val_loss
def eval_trace_generation_classification(model, crit, loader, eval_kwargs={}):
model.eval()
count = 0
split = 'val'
loader.reset_iterator(split)
device = 'cuda'
num_images = eval_kwargs.get('num_images', eval_kwargs.get('val_images_use', -1))
while True:
data = loader.get_batch(split)
ix1 = data['bounds']['it_max']
# print('ix1', ix1)###zihang
if num_images != -1:
ix1 = min(ix1, num_images)
else:
num_images = ix1
tmp = [data['fc_feats'], data['att_feats'], data['trace_feats'], data['box_feats'], data['labels'],
data['masks'], data['att_masks'], data['trace_masks']]
tmp = [_.to(device) if _ is not None else _ for _ in tmp]
fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks = tmp
loss_ce_list = []
gt_prev_acc_list = []
loss_list = []
acc_list = []
with torch.no_grad():
# get the loss in terms of cross-entropy
model_outputs = model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks,
trace_masks)
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
trace_class_label = trace_feats[:, :, 5] - 1
trace_class_label = trace_class_label.view(-1).long()
loss_ce = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1).item()
loss_ce_list.append(loss_ce)
gt_prev_acc = (((model_outputs.argmax(dim=1) == trace_class_label)*(trace_class_label!=-1).float()).sum() / \
(trace_class_label != -1).float().sum()).item()
gt_prev_acc_list.append(gt_prev_acc)
# get the loss for l1-loss and classification accuracy
tmp_trace_feats = trace_feats
trace_class_label = trace_feats[:,:,5] - 1
pred_class_label = torch.zeros_like(trace_class_label).to(trace_class_label.device)
for i in range(trace_feats.shape[1]):
# for regression
# curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:, i]
# for classification
curr_out = model(fc_feats, att_feats, tmp_trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)[:,i]
curr_out = curr_out.argmax(dim=-1)
pred_class_label[:, i] = curr_out
curr_out = box_feats[np.arange(box_feats.shape[0]), curr_out]
tmp_trace_feats[:, i, :5] = curr_out
print('prev_gt_class_label', model_outputs.argmax(dim=1).view(pred_class_label.shape)[0])
print('pred_class_label',pred_class_label[0])
classification_acc = ((pred_class_label == trace_class_label) * trace_masks).sum() / trace_masks.sum()
acc_list.append(classification_acc.item())
tmp_trace_feats = tmp_trace_feats * trace_masks.unsqueeze(2)
loss = crit(tmp_trace_feats[:,:,:4], trace_feats[:,:,:4]).item()
loss_list.append(loss)
count += att_feats.shape[0]
print('Validation evaluation(%d/%d):'%(count, num_images), 'l1-loss:', loss, '; loss_ce:', loss_ce, '; classification-acc:', classification_acc.item(), '; gt_prev_acc:', gt_prev_acc)
if count >= num_images: ### currently use 5000 in validation set
break
val_loss_ce = np.mean(np.array(loss_ce_list))
val_gt_prev_acc = np.mean(np.array(gt_prev_acc_list))
val_loss = np.mean(np.array(loss_list))
val_acc = np.mean(np.array(acc_list))
print('Validation evaluation: loss', 'l1-loss:', val_loss, '; loss_ce:', val_loss_ce, '; classification-acc:', val_acc, '; gt_prev_acc:', val_gt_prev_acc)
model.train()
return val_loss
|
connect-caption-and-trace-main
|
captioning/utils/eval_utils_caption_generation.py
|
# This file contains Transformer network
# Most of the code is copied from http://nlp.seas.harvard.edu/2018/04/03/attention.html
# The cfg name correspondence:
# N=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel_both import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator_caption, generator_trace, d_model, dropout):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator_caption = generator_caption
self.generator_trace = generator_trace
# self.decode_layernorm = nn.LayerNorm(d_model, elementwise_affine=True)
# self.dropout = nn.Dropout(dropout)
self.trace_layernorm_caption = nn.LayerNorm(d_model, elementwise_affine=True)
self.trace_layernorm_trace = nn.LayerNorm(d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(d_model, 0) # no dropout inside the positional encoding here
self.trace_embed = nn.Sequential(*(
(nn.Linear(5, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5)) ))
self.trace_feat_embed = nn.Sequential(*(
(nn.Linear(2048, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5))))
def forward(self, src, tgt, src_mask, tgt_mask, trace_feat, trace_masks, task):
"Take in and process masked src and target sequences."
memory = self.encode(src, src_mask)
return self.decode(memory, src_mask, tgt, tgt_mask, trace_feat, trace_masks, task), memory
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask, trace_feats, trace_masks, task):
# if task == 'trace':
### get trace_feat
# trace_grid_feats = trace_feats[:, :, 5:]
trace_feats = trace_feats[:, :, :5]
# trace_grid_feats = self.trace_feat_embed(trace_grid_feats)
trace_feats = self.trace_embed(trace_feats)
trace_feats = self.trace_layernorm_trace(self.position_encoder(trace_feats))
### embed the tgt and then add the trace_grid_feat: add trace_feat in the beginning
tgt_emd = self.tgt_embed(tgt, task) #, task
# if tgt.shape[1] > trace_feats.shape[1]:
# trace_feats = torch.cat([trace_feats, torch.zeros([trace_feats.shape[0], tgt_emd.shape[1]-trace_feats.shape[1],
# trace_feats.shape[2]]).to(trace_feats.device)], 1)
# else:
# trace_feats = trace_feats[:, :tgt_emd.shape[1], :]
# tgt_emd = self.dropout(self.decode_layernorm(tgt_emd + trace_feat))
return self.decoder(tgt_emd, trace_feats, memory, src_mask, tgt_mask, trace_masks, task)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
self.norm_2 = LayerNorm(layer.size)
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
for layer in self.layers:
x = layer(x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task)
if task == 'both':
return self.norm(x[0]), self.norm_2(x[1])
else:
return self.norm(x)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, caption_trace_attn, trace_caption_attn, trace_self_attn, trace_src_attn,
feed_forward_caption, feed_forward_trace, both_caption_trace_attn, both_trace_caption_attn,
both_feed_forward_caption, both_feed_forward_trace,dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward_caption = feed_forward_caption
self.feed_forward_trace = feed_forward_trace
# self.sublayer = clones(SublayerConnection(size, dropout), 3)
self.sublayer = clones(SublayerConnection(size, dropout), 8+4)
###
self.caption_trace_attn = caption_trace_attn
self.trace_caption_attn = trace_caption_attn
self.trace_self_attn = trace_self_attn
self.trace_src_attn = trace_src_attn
### both attn
self.both_caption_trace_attn = both_caption_trace_attn
self.both_trace_caption_attn = both_trace_caption_attn
self.both_feed_forward_caption = both_feed_forward_caption
self.both_feed_forward_trace = both_feed_forward_trace
###########
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
"Follow Figure 1 (right) for connections."
m = memory
if task == 'trace' or task == 'cycle_trace':
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
### add a layer for x to attend to the trace features
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat, trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
# trace_feat = self.sublayer[6](trace_feat, lambda trace_feat: self.trace_caption_attn(trace_feat, x, x, tgt_mask))
################################################
return self.sublayer[7](trace_feat, self.feed_forward_trace)
elif task == 'caption':
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
### add a layer for x to attend to the trace features
# trace_mask = tgt_mask[:, -1, :].unsqueeze(1).long()
trace_masks = trace_masks.unsqueeze(1)
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat,
trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
x = self.sublayer[4](x, lambda x: self.caption_trace_attn(x, trace_feat, trace_feat, trace_masks))
################################################
return self.sublayer[5](x, self.feed_forward_caption)
elif task == 'both':
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
### add a layer for x to attend to the trace features
# trace_mask = tgt_mask[:, -1, :].unsqueeze(1).long()
# trace_masks = trace_masks.unsqueeze(1)
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat,
trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
trace_masks_for_caption = torch.cat([trace_masks,
trace_masks[:, -1, :].unsqueeze(1).repeat(1,tgt_mask.shape[1]-trace_masks.shape[1],1)], 1)
tgt_mask_for_trace = tgt_mask[:, :trace_masks.shape[1], :]
x_out = self.sublayer[8](x, lambda x: self.both_caption_trace_attn(x, trace_feat, trace_feat, trace_masks_for_caption))
trace_feat_out = self.sublayer[9](trace_feat,
lambda trace_feat: self.both_trace_caption_attn(trace_feat, x, x, tgt_mask_for_trace))
return self.sublayer[10](x_out, self.both_feed_forward_caption), self.sublayer[11](trace_feat_out, self.both_feed_forward_trace)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
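# --- illustrative sketch (not part of the original file) ---
# A minimal check of subsequent_mask, assuming a target length of 3: it returns
# a (1, size, size) boolean tensor that is True where a position is allowed to
# attend (itself and earlier positions), i.e. a lower-triangular pattern.
def _demo_subsequent_mask():
    mask = subsequent_mask(3)
    # tensor([[[ True, False, False],
    #          [ True,  True, False],
    #          [ True,  True,  True]]])
    return mask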
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, float('-inf'))
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
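# --- illustrative sketch (not part of the original file) ---
# Shape bookkeeping for MultiHeadedAttention, assuming d_model=512 and h=8:
# (batch, seq_len, d_model) inputs are projected, split into h heads of size
# d_k = d_model / h, attended over, and concatenated back to d_model.
def _demo_multi_headed_attention():
    mha = MultiHeadedAttention(h=8, d_model=512)
    q = torch.randn(2, 5, 512)         # (batch, query_len, d_model)
    kv = torch.randn(2, 7, 512)        # (batch, key_len, d_model)
    mask = torch.ones(2, 1, 7).long()  # every key position is attendable
    out = mha(q, kv, kv, mask)         # -> (2, 5, 512)
    return out.shape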
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
self.vocab = vocab
# self.layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
def forward(self, x, task=None):
if task != 'cycle_trace':
return self.lut(x) * math.sqrt(self.d_model)
else:
# use Gumbel softmax with tau = 1
x = torch.nn.functional.softmax(torch.log(x) -
torch.log(-torch.log(torch.rand([x.shape[2]]))).unsqueeze(0).unsqueeze(0).to(x.device),
dim=-1)
return torch.matmul(x, self.lut(torch.arange(self.vocab).to(x.device))) \
* math.sqrt(self.d_model)
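# --- illustrative sketch (not part of the original file) ---
# For the 'cycle_trace' task the input is assumed to be a probability
# distribution over the vocabulary (batch, seq_len, vocab): Gumbel noise is
# added in log space (tau = 1) and the resulting soft one-hot is matmul'ed
# with the embedding table, giving a differentiable "soft embedding". For any
# other task the input is ordinary token ids.
def _demo_soft_embedding():
    vocab, d_model = 10, 16
    emb = Embeddings(d_model, vocab)
    probs = F.softmax(torch.randn(2, 4, vocab), dim=-1)  # fake word distributions
    ids = torch.randint(0, vocab, (2, 4))                # ordinary token ids
    soft_out = emb(probs, task='cycle_trace')            # (2, 4, d_model)
    hard_out = emb(ids)                                  # (2, 4, d_model)
    return soft_out.shape, hard_out.shape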
class caption_Embeddings(nn.Module):
def __init__(self, d_model, vocab, position_encoder):
super(caption_Embeddings, self).__init__()
self.position_encoder = position_encoder
self.embed = Embeddings(d_model, vocab)
def forward(self, x, task):
x = self.embed(x, task)
x = self.position_encoder(x)
return x
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
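# --- illustrative sketch (not part of the original file) ---
# A quick sanity check of PositionalEncoding, assuming d_model=16: the module
# adds a fixed sinusoidal table (truncated to the sequence length) to its
# input and applies dropout, so the output shape matches the input shape.
def _demo_positional_encoding():
    pe = PositionalEncoding(d_model=16, dropout=0.0)
    x = torch.zeros(2, 5, 16)
    out = pe(x)  # (2, 5, 16); equals pe.pe[:, :5] when the input is all zeros
    return out.shape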
class TransformerModel(AttModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model, dropout)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
# position_nodropout = PositionalEncoding(d_model, 0)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N_enc),
Decoder(DecoderLayer(d_model, c(attn), c(attn), c(attn), c(attn), c(attn), c(attn),
c(ff), c(ff), c(attn), c(attn), c(ff), c(ff), dropout), N_dec),
lambda x:x, # nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
caption_Embeddings(d_model, tgt_vocab, c(position)), #nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)), #
Generator(d_model, tgt_vocab), nn.Sequential(nn.Linear(d_model, 5), nn.Sigmoid()),
d_model,dropout)
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def __init__(self, opt):
super(TransformerModel, self).__init__(opt)
self.opt = opt
# self.config = yaml.load(open(opt.config_file))
self.N_enc = getattr(opt, 'N_enc', opt.num_layers)
self.N_dec = getattr(opt, 'N_dec', opt.num_layers)
self.d_model = getattr(opt, 'd_model', opt.input_encoding_size)
self.d_ff = getattr(opt, 'd_ff', opt.rnn_size)
self.h = getattr(opt, 'num_att_heads', 8)
self.dropout = getattr(opt, 'dropout', 0.1)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
delattr(self, 'att_embed')
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
# define trace embedding and layernorm
# self.trace_embed = nn.Linear(5, self.d_model)
self.box_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.trace_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ())+
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
self.trace_feat_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(2048, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.box_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.box_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm3 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm4 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.att_layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(self.d_model, self.dropout)
delattr(self, 'embed')
self.embed = lambda x : x
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
delattr(self, 'logit')
del self.ctx2att
tgt_vocab = self.vocab_size + 1
print(self.N_enc, self.N_dec, self.d_model, self.d_ff, self.h, self.dropout)
self.model = self.make_model(0, tgt_vocab,
N_enc=self.N_enc,
N_dec=self.N_dec,
d_model=self.d_model,
d_ff=self.d_ff,
h=self.h,
dropout=self.dropout)
c = copy.deepcopy
# attn = MultiHeadedAttention(h, self.d_model, self.dropout)
# ff = PositionwiseFeedForward(self.d_model, self.d_ff, self.dropout)
position = PositionalEncoding(self.d_model, self.dropout)
self.caption_embed = caption_Embeddings(self.d_model, tgt_vocab, c(position))
def logit(self, x): # unsafe way
return self.model.generator_caption.proj(x)
def init_hidden(self, bsz):
return []
def _prepare_feature(self, fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks):
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
if self.opt.use_trace_feat:
trace_grid_feats = trace_feats[:, :, 5:]
trace_feats = trace_feats[:, :, :5]
trace_grid_feats = self.trace_layernorm3(self.trace_feat_embed(trace_grid_feats))
# trace_grid_feats = self.position_encoder(trace_grid_feats)
# trace_grid_feats = self.trace_layernorm4(trace_grid_feats)
trace_feats = self.trace_layernorm1(self.trace_embed(trace_feats))
if self.opt.use_trace_feat:
trace_feats = trace_feats + trace_grid_feats
# trace_feats_to_decoder = trace_feats
trace_feats = self.position_encoder(trace_feats) # add positional embedding
trace_feats = self.trace_layernorm2(trace_feats)
### commented out for testing: trace feats come only from the decoder, not from the encoder
# att_feats = torch.cat([att_feats, trace_feats], 1) # concat with trace feats
# att_masks = torch.cat([att_masks, trace_masks.unsqueeze(1)], 2)
###########################
memory = self.model.encode(att_feats, att_masks)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks, trace_feats_to_decoder
def _prepare_feature_forward(self, att_feats, box_feats, att_masks=None, seq=None):
# comment for classification
# att_feats, box_feats, att_masks = self.clip_att(att_feats, box_feats, att_masks)
# original version by ruotian
# att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# my version: without pack and pad
att_feats = self.att_embed(att_feats)
if att_masks is None:
att_masks = att_feats.new_ones(att_feats.shape[:2], dtype=torch.long)
att_masks = att_masks.unsqueeze(-2)
if seq is not None:
# crop the last one
# seq = seq[:,:-1]
seq_mask = (seq.data != self.eos_idx) & (seq.data != self.pad_idx)
seq_mask[:,0] = 1 # bos
seq_mask = seq_mask.unsqueeze(-2)
seq_mask = seq_mask & subsequent_mask(seq.size(-1)).to(seq_mask)
seq_per_img = seq.shape[0] // att_feats.shape[0]
if seq_per_img > 1:
att_feats, att_masks = utils.repeat_tensors(seq_per_img,
[att_feats, att_masks]
)
else:
seq_mask = None
return att_feats, box_feats, seq, att_masks, seq_mask
def _forward(self, fc_feats, att_feats, trace_feats, box_feats, seq, att_masks=None, trace_masks=None, task = None):
assert task == 'trace' or task == 'caption' or task == 'both' or task == 'cycle_trace'
if task != 'cycle_trace':
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
if task == 'both':
### get the original caption input
tmp_seq = seq[:, :trace_masks.shape[1]]
_, _, _, _, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
## prepare the shifted trace
shifted_trace = torch.cat(
[torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(shifted_trace.device).unsqueeze(0).unsqueeze(1)
# out = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks)
(out_caption, out_trace), memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask,
task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs_caption = self.model.generator_caption(out_caption)
outputs_trace = self.model.generator_trace(out_trace)
return outputs_caption, outputs_trace
elif task == 'trace' or task == 'cycle_trace':
# for classification
trace_feats = trace_feats[:, :, :5]
### get the original caption input
tmp_seq = torch.ones([trace_masks.shape[0], trace_masks.shape[1]]).to(trace_masks.device) # seq[:, :trace_masks.shape[1]]
seq = seq[:, 1:trace_masks.shape[1]+1] # crop the seq to real length
seq_mask = trace_masks.unsqueeze(1)
att_feats, box_feats, tmp_seq, att_masks, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
## prepare the shifted trace
shifted_trace = torch.cat([torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
# if torch.rand(1) > 0.5: # half [0,0,1,1,1], half random
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(
shifted_trace.device).unsqueeze(0).unsqueeze(1)
# else:
# tmp_1 = torch.rand([shifted_trace.shape[0], shifted_trace.shape[1], 2]).sort(dim=2)[0]
# tmp_2 = torch.rand([shifted_trace.shape[0], shifted_trace.shape[1], 2]).sort(dim=2)[0]
# tmp = torch.stack([tmp_1[:, :, 0], tmp_2[:, :, 0], tmp_1[:, :, 1], tmp_2[:, :, 1],
# (tmp_1[:, :, 1] - tmp_1[:, :, 0]) * (tmp_2[:, :, 1] - tmp_2[:, :, 0])], 2)
# shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
# (1 - random_mask) * tmp.to(shifted_trace.device)
# concat the caption into visual features
seq_emd = self.caption_embed(seq, task)
att_feats = torch.cat([att_feats, seq_emd], 1)
att_masks = torch.cat([att_masks, seq_mask], 2)
# att_masks = torch.ones([att_feats.shape[0], 1, att_feats.shape[1]]).to(att_feats.device)
# out = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks)
out, memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask, task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs = self.model.generator_trace(out)
# for classification, use (masked) dot product to provide logits
# out = out / torch.norm(out, dim=2).unsqueeze(2)
# memory = memory / torch.norm(memory, dim=2).unsqueeze(2)
# outputs = torch.matmul(out, memory.transpose(1,2))
# memory_mask = att_masks
# outputs = outputs.masked_fill(memory_mask == 0, float('-inf'))
#
# outputs = F.softmax(outputs, dim=-1)
# outputs = (outputs.unsqueeze(3) * box_feats.unsqueeze(1)).sum(dim=2)
# print('transformer_out',outputs.argmax(dim=-1)[0])
return outputs
elif task == 'caption':
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
out, _ = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks, task)
outputs = self.model.generator_caption(out)
return outputs
# return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask, trace_feats_to_decoder, trace_masks, task):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
if task == 'caption':
out = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, trace_masks, 'caption')
return out[:, -1], [ys.unsqueeze(0)]
elif task == 'both':
out_caption, out_trace = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, subsequent_mask(ys.size(1)).to(memory.device), 'both')
return out_caption[:, -1], [ys.unsqueeze(0)], out_trace
|
connect-caption-and-trace-main
|
captioning/models/TransformerModel_trace_generation_caption_to_encoder.py
|
# This file contains Transformer network
# Most of the code is copied from http://nlp.seas.harvard.edu/2018/04/03/attention.html
# The cfg name correspondence:
# N=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Take in and process masked src and target sequences."
return self.decode(self.encode(src, src_mask), src_mask,
tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask, past=None):
return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask, past=past)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
_x = sublayer(self.norm(x))
if type(_x) is tuple: # for multi-head attention that returns past
return x + self.dropout(_x[0]), _x[1]
return x + self.dropout(_x)
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, memory, src_mask, tgt_mask, past=None):
if past is not None:
present = [[], []]
x = x[:, -1:]
tgt_mask = tgt_mask[:, -1:] if tgt_mask is not None else None
past = list(zip(past[0].split(2, dim=0), past[1].split(2, dim=0)))
else:
past = [None] * len(self.layers)
for i, (layer, layer_past) in enumerate(zip(self.layers, past)):
x = layer(x, memory, src_mask, tgt_mask,
layer_past)
if layer_past is not None:
present[0].append(x[1][0])
present[1].append(x[1][1])
x = x[0]
if past[0] is None:
return self.norm(x)
else:
return self.norm(x), [torch.cat(present[0], 0), torch.cat(present[1], 0)]
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 3)
def forward(self, x, memory, src_mask, tgt_mask, layer_past=None):
"Follow Figure 1 (right) for connections."
m = memory
if layer_past is None:
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
return self.sublayer[2](x, self.feed_forward)
else:
present = [None, None]
x, present[0] = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask, layer_past[0]))
x, present[1] = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask, layer_past[1]))
return self.sublayer[2](x, self.feed_forward), present
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, float('-inf'))
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None, layer_past=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# The past works differently here. For self-attn, the cached key and value are updated incrementally.
# For src_attn the past is fixed.
# For src_attn, once the layer past is ready, the cached key/value are reused directly.
if layer_past is not None and layer_past.shape[2] == key.shape[1] > 1: # assume the memory size is always greater than 1
query = self.linears[0](query)
key, value = layer_past[0], layer_past[1]
present = torch.stack([key, value])
else:
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x) for l, x in zip(self.linears, (query, key, value))]
# self attn + past OR the first time step of src attn
if layer_past is not None and not (layer_past.shape[2] == key.shape[1] > 1):
past_key, past_value = layer_past[0], layer_past[1]
key = torch.cat((past_key, key), dim=1)
value = torch.cat((past_value, value), dim=1)
present = torch.stack([key, value])
query, key, value = \
[x.view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for x in [query, key, value]]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
if layer_past is not None:
return self.linears[-1](x), present
else:
return self.linears[-1](x)
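# --- illustrative sketch (not part of the original file) ---
# How layer_past is assumed to be used during incremental decoding: at each
# step only the newest token is fed as the query, the freshly projected
# key/value are concatenated onto the cached ones, and the stacked (key, value)
# pair is returned as `present` so the caller can reuse it at the next step.
def _demo_cached_self_attention():
    mha = MultiHeadedAttention(h=8, d_model=512)
    empty_past = torch.zeros(2, 1, 0, 512)   # (key/value, batch, cached_len, d_model)
    x1 = torch.randn(1, 1, 512)              # first decoded token
    out1, present1 = mha(x1, x1, x1, mask=None, layer_past=empty_past)
    x2 = torch.randn(1, 1, 512)              # next decoded token
    out2, present2 = mha(x2, x2, x2, mask=None, layer_past=present1)
    return present1.shape, present2.shape    # (2, 1, 1, 512), (2, 1, 2, 512)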
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
def forward(self, x):
return self.lut(x) * math.sqrt(self.d_model)
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
class TransformerModel(AttModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model, dropout)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N_enc),
Decoder(DecoderLayer(d_model, c(attn), c(attn),
c(ff), dropout), N_dec),
lambda x:x, # nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)),
Generator(d_model, tgt_vocab))
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def __init__(self, opt):
super(TransformerModel, self).__init__(opt)
self.opt = opt
# self.config = yaml.load(open(opt.config_file))
self.N_enc = getattr(opt, 'N_enc', opt.num_layers)
self.N_dec = getattr(opt, 'N_dec', opt.num_layers)
self.d_model = getattr(opt, 'd_model', opt.input_encoding_size)
self.d_ff = getattr(opt, 'd_ff', opt.rnn_size)
self.h = getattr(opt, 'num_att_heads', 8)
self.dropout = getattr(opt, 'dropout', 0.1)
delattr(self, 'att_embed')
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
delattr(self, 'embed')
self.embed = lambda x : x
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
delattr(self, 'logit')
del self.ctx2att
tgt_vocab = self.vocab_size + 1
self.model = self.make_model(0, tgt_vocab,
N_enc=self.N_enc,
N_dec=self.N_dec,
d_model=self.d_model,
d_ff=self.d_ff,
h=self.h,
dropout=self.dropout)
def logit(self, x): # unsafe way
return self.model.generator.proj(x)
def init_hidden(self, bsz):
return []
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, att_masks)
memory = self.model.encode(att_feats, att_masks)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks
def _prepare_feature_forward(self, att_feats, att_masks=None, seq=None):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
if att_masks is None:
att_masks = att_feats.new_ones(att_feats.shape[:2], dtype=torch.long)
att_masks = att_masks.unsqueeze(-2)
if seq is not None:
# crop the last one
# seq = seq[:,:-1]
seq_mask = (seq.data != self.eos_idx) & (seq.data != self.pad_idx)
seq_mask[:,0] = 1 # bos
seq_mask = seq_mask.unsqueeze(-2)
seq_mask = seq_mask & subsequent_mask(seq.size(-1)).to(seq_mask)
seq_per_img = seq.shape[0] // att_feats.shape[0]
if seq_per_img > 1:
att_feats, att_masks = utils.repeat_tensors(seq_per_img,
[att_feats, att_masks]
)
else:
seq_mask = None
return att_feats, seq, att_masks, seq_mask
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
att_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, att_masks, seq)
out = self.model(att_feats, seq, att_masks, seq_mask)
outputs = self.model.generator(out)
return outputs
# return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask):
"""
state is the precomputed key/value. N_dec x seq_len x d_model
Note: due to the layer norm, it's not equivalent to the stateless version,
but it seems to behave similarly.
"""
# state is tokens + past
if len(state) == 0:
ys = it.unsqueeze(1)
# basically empty state, just to let it know to return past
# The second dim has to be batch_size, for beam search purpose
past = [fc_feats_ph.new_zeros(self.N_dec * 2, fc_feats_ph.shape[0], 0, self.d_model), # self
fc_feats_ph.new_zeros(self.N_dec * 2, fc_feats_ph.shape[0], 0, self.d_model)] # src
# 2 for self attn, 2 for src attn
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
past = state[1:]
out, past = self.model.decode(memory, mask,
ys, # We still feed all the past words, because the position embedding needs them to know the position ids
subsequent_mask(ys.size(1))
.to(memory.device),
past=past)
return out[:, -1], [ys.unsqueeze(0)] + past
|
connect-caption-and-trace-main
|
captioning/models/cachedTransformer.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from . import utils
from .CaptionModel import CaptionModel
class ShowTellModel(CaptionModel):
def __init__(self, opt):
super(ShowTellModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = opt.seq_length
self.fc_feat_size = opt.fc_feat_size
self.ss_prob = 0.0 # Schedule sampling probability
self.img_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.core = getattr(nn, self.rnn_type.upper())(self.input_encoding_size, self.rnn_size, self.num_layers, bias=False, dropout=self.drop_prob_lm)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
def init_hidden(self, bsz):
weight = self.logit.weight
if self.rnn_type == 'lstm':
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
else:
return weight.new_zeros(self.num_layers, bsz, self.rnn_size)
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
batch_size = fc_feats.size(0)
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = []
if seq_per_img > 1:
fc_feats = utils.repeat_tensors(seq_per_img, fc_feats)
for i in range(seq.size(1) + 1):
if i == 0:
xt = self.img_embed(fc_feats)
else:
if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.data.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i-1].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i-1].data.clone()
#prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
#it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i-1].clone()
# break if all the sequences end
if i >= 2 and seq[:, i-1].data.sum() == 0:
break
xt = self.embed(it)
output, state = self.core(xt.unsqueeze(0), state)
output = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
outputs.append(output)
return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
def get_logprobs_state(self, it, state):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt.unsqueeze(0), state)
logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
return logprobs, state
def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = fc_feats.size(0)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
for t in range(2):
if t == 0:
xt = self.img_embed(fc_feats[k:k+1]).expand(beam_size, self.input_encoding_size)
elif t == 1: # input <bos>
it = fc_feats.data.new(beam_size).long().zero_()
xt = self.embed(it)
output, state = self.core(xt.unsqueeze(0), state)
logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
self.done_beams[k] = self.beam_search(state, logprobs, opt=opt)
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
def _sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self.sample_beam(fc_feats, att_feats, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
seq = fc_feats.new_zeros(batch_size, self.seq_length, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size, self.seq_length)
for t in range(self.seq_length + 2):
if t == 0:
xt = self.img_embed(fc_feats)
else:
if t == 1: # input <bos>
it = fc_feats.data.new(batch_size).long().zero_()
xt = self.embed(it)
output, state = self.core(xt.unsqueeze(0), state)
logprobs = F.log_softmax(self.logit(self.dropout(output.squeeze(0))), dim=1)
# sample the next word
if t == self.seq_length + 1: # skip if we achieve maximum length
break
if sample_method == 'greedy':
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
else:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
it = torch.multinomial(prob_prev, 1).to(logprobs.device)
sampleLogprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
if t >= 1:
# stop when all finished
if t == 1:
unfinished = it > 0
else:
unfinished = unfinished & (it > 0)
it = it * unfinished.type_as(it)
seq[:,t-1] = it #seq[t] the input of t+2 time step
seqLogprobs[:,t-1] = sampleLogprobs.view(-1)
if unfinished.sum() == 0:
break
return seq, seqLogprobs
|
connect-caption-and-trace-main
|
captioning/models/ShowTellModel.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding is the same as what in adaatt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce  # needed for the multi-layer logit head below
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
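# pack_wrapper applies `module` only to the valid (non-padded) region features:
# it packs att_feats by the per-image lengths in att_masks, runs the module on the
# packed data, and unpacks back to the original padded layout and order.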
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
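        # bad_endings_ix holds the vocabulary indices of these function words; when
        # remove_bad_endings is set at sampling time, the log-probability of index 0
        # (the default eos_idx) is forced to -inf whenever the previous word is one of
        # them, so a caption cannot end on such a word.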
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
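        # Teacher forcing with scheduled sampling: with probability ss_prob, the
        # ground-truth word at step i is replaced by a word drawn from the model's
        # predicted distribution at the previous step.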
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task)
else:
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks,
trace_feats_to_decoder, trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
trace_feats_to_decoder, trace_masks, show_gate_labels, task, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, show_gate_labels, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
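            # For the joint caption+trace task, trace boxes are generated step by step:
            # the box predicted at each step is appended to tmp_trace_feats (with its
            # area recomputed in column 4) and fed back to the decoder at the next step.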
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=output_logsoftmax)
elif task == 'both':
                logprobs, state, output_trace = self.get_logprobs_state(
                    it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
                    tmp_trace_feats, trace_masks, show_gate_labels, task, state,
                    output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
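            # Every generated trigram (w_{t-3}, w_{t-2}, w_{t-1}) is recorded per image;
            # when the current two-word prefix would complete a seen trigram, the
            # offending word's log-probability is reduced by alpha * ln(2) per prior
            # occurrence, i.e. its probability is multiplied by 2^(-alpha * count).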
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
        ### for decoder evaluation: cut at the ground-truth caption length
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
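                    # Group-wise diverse sampling (in the spirit of diverse beam search):
                    # subtract diversity_lambda from the log-probabilities of tokens that
                    # earlier groups already picked at this time step, pushing later
                    # groups towards different words.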
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to generate bad endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
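    # forward runs one step of a (possibly maxout) multi-layer LSTM and, alongside the
    # top hidden state, emits a "fake region" (the visual sentinel of AdaAtt) computed
    # by an extra sigmoid gate applied to tanh of the top layer's cell state.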
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume the first mask entry is one, so the fake-region slot stays unmasked.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
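    # Two-layer top-down decoder: the attention LSTM sees [previous language-LSTM hidden
    # state, global image feature, word embedding], its hidden state queries the
    # attention module, and the language LSTM consumes [attended feature, attention-LSTM
    # hidden state] to produce the output used for word prediction.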
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
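    # Att2in-style cell: the attended feature is injected only into the candidate cell
    # input (via a2c, followed by a maxout over the two halves), while the LSTM gates
    # are driven by the word input and the previous hidden state alone.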
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate the att2all model in the self-critical paper.
However, this is not actually a correct replication. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
        # cases to handle:
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_both_backup_2020_11_11.py
|
# This file contains the Att2in2, AdaAtt, AdaAttMO, and UpDown models
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of Att2in
# in which the image feature embedding and the word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce  # needed for the multi-layer logit head below
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, _ = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None,opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=output_logsoftmax)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
        ### for decoder evaluation: cut at the ground-truth caption length
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to generate bad endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
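# AdaAtt_attention: adaptive attention that attends jointly over the spatial conv
# features and the fake region (sentinel); the attended vector is added to the
# projected hidden state and passed through att2h to produce the output.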
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume the mask is one at the first time step.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
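# UpDownCore: the two-LSTM "bottom-up top-down" decoder cell. The attention LSTM sees
# [previous language hidden state, fc image feature, word embedding]; its hidden state
# queries the Attention module, and the language LSTM consumes [attended feature,
# attention-LSTM hidden state] to produce the output.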
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
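# Attention: standard additive attention. The hidden state is projected to att_hid_size,
# added to the pre-projected attention features (p_att_feats), squashed with tanh and
# scored by alpha_net; the softmax weights (optionally masked and renormalized) give a
# weighted sum of the raw attention features.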
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
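# Att2in2Core: single-layer LSTM cell in which the attended image feature is injected
# only into the input transform (via a2c) and combined through a maxout over the two
# halves of the 2*rnn_size chunk, following the Att2in design of the self-critical paper.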
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note: this is my attempt to replicate the att2all model from the self-critical paper.
However, it is not actually a correct replication. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel.py
|
# This file contains the Att2in2, AdaAtt, AdaAttMO, and UpDown models
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of Att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
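# AttModel: shared base class for the attention captioning models below. It owns the
# word/fc/att embeddings, the ctx2att projection and the output logit layer, and
# implements teacher-forced _forward (with scheduled sampling), sampling and beam search
# (here extended with trace inputs); concrete models only plug in their own self.core cell.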
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
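# One decoding step: embed the previous word index `it`, run self.core for the given
# task, and return log-probabilities over the vocabulary plus the updated recurrent
# state; for task == 'both' a predicted trace box is also returned via generator_trace.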
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task)
else:
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks,
trace_feats_to_decoder, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam searching, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
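# Batched beam search: feed <bos> once for the whole batch, repeat the prepared
# features beam_size times, and delegate to self.beam_search; the top beam (or all
# sample_n beams) per image is then copied into the padded output tensors.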
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam searching, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
trace_feats_to_decoder, trace_masks, show_gate_labels, task, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
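# Autoregressive sampling (greedy or with temperature). Optional flags reproduce the
# usual decoding tricks: decoding_constraint forbids repeating the previous word,
# remove_bad_endings prevents ending the caption right after a listed function word,
# and block_trigrams penalizes trigrams that have already been generated. For
# task == 'both' the predicted trace boxes are rolled out alongside the caption.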
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, show_gate_labels, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
tmp_trace_feats, trace_masks, show_gate_labels, task, state,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copied from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
### For decoder evaluation: cut at the ground-truth caption length, since in controlled caption generation we assume the caption length is known
if task != 'both':
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
if task != 'both':
return seq, seqLogprobs
else:
tmp_trace_feats = tmp_trace_feats[:, 1:-1]
return seq, seqLogprobs, torch.cat([tmp_trace_feats,
torch.zeros([seq.shape[0], seq.shape[1]-tmp_trace_feats.shape[1], tmp_trace_feats.shape[2]]).to(seq.device)], 1)
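# Diverse sampling with group_size groups decoded with a one-step stagger: at each
# time step, tokens already chosen by earlier groups are penalized by diversity_lambda
# before sampling, which pushes the groups toward different captions.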
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume the mask is one at the first time step.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note: this is my attempt to replicate the att2all model from the self-critical paper.
However, it is not actually a correct replication. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_both.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import numpy as np
import torch
from .ShowTellModel import ShowTellModel
from .FCModel import FCModel
from .AttModel_both import *
from .TransformerModel_mitr import TransformerModel
# from .cachedTransformer import TransformerModel as cachedTransformer
# from .BertCapModel import BertCapModel
# from .M2Transformer import M2TransformerModel
# from .AoAModel import AoAModel
def setup(opt):
if opt.caption_model in ['fc', 'show_tell']:
print('Warning: %s model is mostly deprecated; many new features are not supported.' %opt.caption_model)
if opt.caption_model == 'fc':
print('Use newfc instead of fc')
if opt.caption_model == 'fc':
model = FCModel(opt)
elif opt.caption_model == 'language_model':
model = LMModel(opt)
elif opt.caption_model == 'newfc':
model = NewFCModel(opt)
elif opt.caption_model == 'show_tell':
model = ShowTellModel(opt)
# Att2in model in self-critical
elif opt.caption_model == 'att2in':
model = Att2inModel(opt)
# Att2in model with two-layer MLP img embedding and word embedding
elif opt.caption_model == 'att2in2':
model = Att2in2Model(opt)
elif opt.caption_model == 'att2all2':
print('Warning: this is not a correct implementation of the att2all model in the original paper.')
model = Att2all2Model(opt)
# Adaptive Attention model from Knowing when to look
elif opt.caption_model == 'adaatt':
model = AdaAttModel(opt)
# Adaptive Attention with maxout lstm
elif opt.caption_model == 'adaattmo':
model = AdaAttMOModel(opt)
# Top-down attention model
elif opt.caption_model in ['topdown', 'updown']:
model = UpDownModel(opt)
# StackAtt
elif opt.caption_model == 'stackatt':
model = StackAttModel(opt)
# DenseAtt
elif opt.caption_model == 'denseatt':
model = DenseAttModel(opt)
# Transformer
elif opt.caption_model == 'transformer':
if getattr(opt, 'cached_transformer', False):
model = cachedTransformer(opt)
else:
print(TransformerModel)
model = TransformerModel(opt)
# AoANet
elif opt.caption_model == 'aoa':
model = AoAModel(opt)
elif opt.caption_model == 'bert':
model = BertCapModel(opt)
elif opt.caption_model == 'm2transformer':
model = M2TransformerModel(opt)
else:
raise Exception("Caption model not supported: {}".format(opt.caption_model))
return model
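# Illustrative sketch (editor's addition, not part of the original repo): setup() only
# dispatches on opt.caption_model; the selected class then reads the remaining options.
# The namespace below is hypothetical and deliberately incomplete -- it just exercises the
# unsupported-model branch.
def _demo_setup_dispatch():
    from types import SimpleNamespace
    try:
        setup(SimpleNamespace(caption_model='no_such_model'))
    except Exception as e:
        print(e)  # Caption model not supported: no_such_model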
|
connect-caption-and-trace-main
|
captioning/models/__init__.py
|
# This file contains our mirrored Transformer network
# The branch for extracted visual features is implemented in "encoder",
# and then branches for trace and caption are implemented in "decoder"
# The cfg name correspondence:
# N_layer=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel_both import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator_caption, generator_trace, d_model, dropout):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator_caption = generator_caption
self.generator_trace = generator_trace
self.trace_layernorm_caption = nn.LayerNorm(d_model, elementwise_affine=True)
self.trace_layernorm_trace = nn.LayerNorm(d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(d_model, 0) # no dropout inside the positional encoding here
self.trace_embed = nn.Sequential(*(
(nn.Linear(5, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5)) ))
def forward(self, src, tgt, src_mask, tgt_mask, trace_feat, trace_masks, task):
"Take in and process masked src and target sequences."
memory = self.encode(src, src_mask)
return self.decode(memory, src_mask, tgt, tgt_mask, trace_feat, trace_masks, task), memory
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask, trace_feats, trace_masks, task):
# if task == 'trace':
trace_feats = trace_feats[:, :, :5]
trace_feats = self.trace_embed(trace_feats)
trace_feats = self.trace_layernorm_trace(self.position_encoder(trace_feats))
### embed the tgt
tgt_emd = self.tgt_embed(tgt, task)
return self.decoder(tgt_emd, trace_feats, memory, src_mask, tgt_mask, trace_masks, task)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
def __init__(self, layer):
super(Decoder, self).__init__()
self.layer = layer
self.norm = LayerNorm(layer.size)
self.norm_2 = LayerNorm(layer.size)
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
x = self.layer(x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task)
if task == 'both':
return self.norm(x[0]), self.norm_2(x[1])
else:
return self.norm(x)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, caption_trace_attn, trace_caption_attn, trace_self_attn, trace_src_attn,
feed_forward_caption, feed_forward_trace, both_caption_trace_attn, both_trace_caption_attn,
both_feed_forward_caption, both_feed_forward_trace,
dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward_caption = feed_forward_caption
self.feed_forward_trace = feed_forward_trace
self.sublayer = clones(SublayerConnection(size, dropout), 8+4*len(self.self_attn)) # 4 for each additional layer
### caption / trace generation
self.caption_trace_attn = caption_trace_attn
self.trace_caption_attn = trace_caption_attn
self.trace_self_attn = trace_self_attn
self.trace_src_attn = trace_src_attn
### pred both
self.both_caption_trace_attn = both_caption_trace_attn
self.both_trace_caption_attn = both_trace_caption_attn
self.both_feed_forward_caption = both_feed_forward_caption
self.both_feed_forward_trace = both_feed_forward_trace
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
"Follow Figure 1 (right) for connections."
m = memory
if task == 'trace' or task == 'cycle_trace':
for i in range(len(self.self_attn)):
x = self.sublayer[8 + 0 + 4 * i](x, lambda x: self.self_attn[i](x, x, x, tgt_mask))
x = self.sublayer[8 + 1 + 4 * i](x, lambda x: self.src_attn[i](x, m, m, src_mask))
trace_feat = self.sublayer[8 + 2 + 4 * i](trace_feat,
lambda trace_feat: self.trace_self_attn[i](trace_feat,
trace_feat,
trace_feat,
trace_masks))
trace_feat = self.sublayer[8 + 3 + 4 * i](trace_feat,
lambda trace_feat: self.trace_src_attn[i](trace_feat, m, m,
src_mask))
trace_feat = self.sublayer[0](trace_feat, lambda trace_feat: self.trace_caption_attn(trace_feat, x, x, tgt_mask))
return self.sublayer[1](trace_feat, self.feed_forward_trace)
elif task == 'caption':
trace_masks = trace_masks.unsqueeze(1)
for i in range(len(self.self_attn)):
x = self.sublayer[8 + 0 + 4 * i](x, lambda x: self.self_attn[i](x, x, x, tgt_mask))
x = self.sublayer[8 + 1 + 4 * i](x, lambda x: self.src_attn[i](x, m, m, src_mask))
trace_feat = self.sublayer[8 + 2 + 4 * i](trace_feat,
lambda trace_feat: self.trace_self_attn[i](trace_feat,
trace_feat,
trace_feat,
trace_masks))
trace_feat = self.sublayer[8 + 3 + 4 * i](trace_feat,
lambda trace_feat: self.trace_src_attn[i](trace_feat, m, m,
src_mask))
x = self.sublayer[2](x, lambda x: self.caption_trace_attn(x, trace_feat, trace_feat, trace_masks))
return self.sublayer[3](x, self.feed_forward_caption)
elif task == 'both':
for i in range(len(self.self_attn)):
x = self.sublayer[8 + 0 + 4 * i](x, lambda x: self.self_attn[i](x, x, x, tgt_mask))
x = self.sublayer[8 + 1 + 4 * i](x, lambda x: self.src_attn[i](x, m, m, src_mask))
trace_feat = self.sublayer[8 + 2 + 4 * i](trace_feat,
lambda trace_feat: self.trace_self_attn[i](trace_feat,
trace_feat,
trace_feat,
trace_masks))
trace_feat = self.sublayer[8 + 3 + 4 * i](trace_feat,
lambda trace_feat: self.trace_src_attn[i](trace_feat, m, m,
src_mask))
trace_masks_for_caption = torch.cat([trace_masks,
trace_masks[:, -1, :].unsqueeze(1).repeat(1,tgt_mask.shape[1]-trace_masks.shape[1],1)], 1)
tgt_mask_for_trace = tgt_mask[:, :trace_masks.shape[1], :]
x_out = self.sublayer[4](x, lambda x: self.both_caption_trace_attn(x, trace_feat, trace_feat, trace_masks_for_caption))
trace_feat_out = self.sublayer[5](trace_feat,
lambda trace_feat: self.both_trace_caption_attn(trace_feat, x, x, tgt_mask_for_trace))
return self.sublayer[6](x_out, self.both_feed_forward_caption), self.sublayer[7](trace_feat_out, self.both_feed_forward_trace)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
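# Illustrative sketch (editor's addition, not part of the original file): subsequent_mask(size)
# returns a (1, size, size) boolean tensor whose lower triangle is True, so position i can only
# attend to positions <= i during autoregressive decoding.
def _demo_subsequent_mask():
    m = subsequent_mask(3)
    assert m.shape == (1, 3, 3)
    assert m[0].tolist() == [[True, False, False],
                             [True, True, False],
                             [True, True, True]]
    return m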
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, float('-inf'))
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
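# Illustrative sketch (editor's addition): scaled dot-product attention returns the weighted
# values together with the attention matrix; with a causal mask, masked positions get zero
# weight and every row still sums to 1.
def _demo_attention():
    q = torch.randn(2, 4, 8)  # (batch, length, d_k)
    out, p_attn = attention(q, q, q, mask=subsequent_mask(4))
    assert out.shape == (2, 4, 8) and p_attn.shape == (2, 4, 4)
    assert torch.allclose(p_attn.sum(-1), torch.ones(2, 4))
    return out, p_attn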
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
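# Illustrative sketch (editor's addition): MultiHeadedAttention projects q/k/v into h heads of
# size d_model // h, runs attention() per head, and concatenates the heads back to d_model, so
# the output has the same shape as the query.
def _demo_multi_headed_attention():
    mha = MultiHeadedAttention(h=8, d_model=64)
    x = torch.randn(2, 5, 64)  # (batch, length, d_model)
    out = mha(x, x, x, mask=subsequent_mask(5))
    assert out.shape == (2, 5, 64)
    return out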
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
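# Illustrative sketch (editor's addition): the position-wise FFN is applied independently to
# every position, expanding d_model -> d_ff and projecting back to d_model.
def _demo_positionwise_feed_forward():
    ff = PositionwiseFeedForward(d_model=64, d_ff=256)
    x = torch.randn(2, 5, 64)
    assert ff(x).shape == (2, 5, 64)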
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
self.vocab = vocab
# self.layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
def forward(self, x, task=None):
if task != 'cycle_trace':
return self.lut(x) * math.sqrt(self.d_model)
else:
# # use gumbel softmax with \tau = 1
x = torch.nn.functional.softmax(torch.log(x) -
torch.log(-torch.log(torch.rand([x.shape[2]]))).unsqueeze(0).unsqueeze(0).to(x.device),
dim=-1)
return torch.matmul(x, self.lut(torch.arange(self.vocab).to(x.device))) \
* math.sqrt(self.d_model)
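# Illustrative sketch (editor's addition): for task == 'cycle_trace' the input is expected to be
# a probability distribution over the vocabulary rather than token ids; Gumbel noise is added
# (tau = 1) and the soft one-hot is matrix-multiplied with the embedding table, which keeps the
# lookup differentiable.
def _demo_soft_embedding():
    emb = Embeddings(d_model=16, vocab=10)
    ids = torch.randint(0, 10, (2, 3))
    probs = F.softmax(torch.randn(2, 3, 10), dim=-1)
    hard = emb(ids)                        # standard lookup path
    soft = emb(probs, task='cycle_trace')  # differentiable path
    assert hard.shape == soft.shape == (2, 3, 16)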
class caption_Embeddings(nn.Module):
def __init__(self, d_model, vocab, position_encoder):
super(caption_Embeddings, self).__init__()
self.position_encoder = position_encoder
self.embed = Embeddings(d_model, vocab)
def forward(self, x, task):
x = self.embed(x, task)
x = self.position_encoder(x)
return x
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
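# Illustrative sketch (editor's addition): the sinusoidal table is precomputed for max_len
# positions and simply added (broadcast over the batch) to the first seq_len rows of the input.
def _demo_positional_encoding():
    pe = PositionalEncoding(d_model=16, dropout=0.0)
    x = torch.zeros(2, 7, 16)
    y = pe(x)
    assert y.shape == (2, 7, 16)
    assert torch.allclose(y[0], pe.pe[0, :7])  # zero input returns the table itself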
class TransformerModel(AttModel):
def make_model(self, src_vocab, tgt_vocab, N_layer=1,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model, dropout)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
# position_nodropout = PositionalEncoding(d_model, 0)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N_layer),
Decoder(DecoderLayer(d_model, clones(attn, N_layer), clones(attn, N_layer), c(attn), c(attn), clones(attn, N_layer), clones(attn, N_layer),
c(ff), c(ff), c(attn), c(attn), c(ff), c(ff),
dropout)),
lambda x:x, # nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
caption_Embeddings(d_model, tgt_vocab, c(position)), #nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)), #
Generator(d_model, tgt_vocab), nn.Sequential(nn.Linear(d_model, 5), nn.Sigmoid()),
d_model,dropout)
# This initialization was important in the original code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def __init__(self, opt):
super(TransformerModel, self).__init__(opt)
self.opt = opt
# self.config = yaml.load(open(opt.config_file))
self.N_layer = getattr(opt, 'N_layer', opt.num_layers) # number of layers
self.d_model = getattr(opt, 'd_model', opt.input_encoding_size)
self.d_ff = getattr(opt, 'd_ff', opt.rnn_size)
self.h = getattr(opt, 'num_att_heads', 8)
self.dropout = getattr(opt, 'dropout', 0.1)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
delattr(self, 'att_embed')
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
# define trace embedding and layernorm
self.box_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.box_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.box_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.att_layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(self.d_model, self.dropout)
delattr(self, 'embed')
self.embed = lambda x : x
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
delattr(self, 'logit')
del self.ctx2att
tgt_vocab = self.vocab_size + 1
self.model = self.make_model(0, tgt_vocab,
N_layer=self.N_layer,
d_model=self.d_model,
d_ff=self.d_ff,
h=self.h,
dropout=self.dropout)
def logit(self, x): # unsafe way
return self.model.generator_caption.proj(x)
def init_hidden(self, bsz):
return []
def _prepare_feature(self, fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks):
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats)
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
###########################
memory = self.model.encode(att_feats, att_masks)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks, trace_feats_to_decoder
def _prepare_feature_forward(self, att_feats, box_feats, att_masks=None, seq=None):
att_feats = self.att_embed(att_feats)
if att_masks is None:
att_masks = att_feats.new_ones(att_feats.shape[:2], dtype=torch.long)
att_masks = att_masks.unsqueeze(-2)
if seq is not None:
seq_mask = (seq.data != self.eos_idx) & (seq.data != self.pad_idx)
seq_mask[:,0] = 1 # bos
seq_mask = seq_mask.unsqueeze(-2)
seq_mask = seq_mask & subsequent_mask(seq.size(-1)).to(seq_mask)
seq_per_img = seq.shape[0] // att_feats.shape[0]
if seq_per_img > 1:
att_feats, att_masks = utils.repeat_tensors(seq_per_img,
[att_feats, att_masks]
)
else:
seq_mask = None
return att_feats, box_feats, seq, att_masks, seq_mask
def _forward(self, fc_feats, att_feats, trace_feats, box_feats, seq, att_masks=None, trace_masks=None, task = None):
assert task == 'trace' or task == 'caption' or task == 'both' or task == 'cycle_trace'
if task != 'cycle_trace':
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
if task == 'both':
### get the original caption input
tmp_seq = seq[:, :trace_masks.shape[1]]
_, _, _, _, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
## prepare the shifted trace
shifted_trace = torch.cat(
[torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0.5
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(shifted_trace.device).unsqueeze(0).unsqueeze(1)
(out_caption, out_trace), memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask,
task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs_caption = self.model.generator_caption(out_caption)
outputs_trace = self.model.generator_trace(out_trace)
return outputs_caption, outputs_trace
elif task == 'trace' or task == 'cycle_trace':
# for classification
trace_feats = trace_feats[:, :, :5]
### get the original caption input
tmp_seq = torch.ones([trace_masks.shape[0], trace_masks.shape[1]]).to(trace_masks.device) # seq[:, :trace_masks.shape[1]]
seq = seq[:, 1:trace_masks.shape[1]+1] # crop the seq to real length
seq_mask = trace_masks.unsqueeze(1)
att_feats, box_feats, tmp_seq, att_masks, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
## prepare the shifted trace
shifted_trace = torch.cat([torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(
shifted_trace.device).unsqueeze(0).unsqueeze(1)
out, memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask, task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs = self.model.generator_trace(out)
return outputs
elif task == 'caption':
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
out, _ = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks, task)
outputs = self.model.generator_caption(out)
return outputs
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask, trace_feats_to_decoder, trace_masks, task):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
if task == 'caption':
out = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, trace_masks, 'caption')
return out[:, -1], [ys.unsqueeze(0)]
elif task == 'both':
out_caption, out_trace = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, subsequent_mask(ys.size(1)).to(memory.device), 'both')
return out_caption[:, -1], [ys.unsqueeze(0)], out_trace
|
connect-caption-and-trace-main
|
captioning/models/TransformerModel_mitr.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the image feature embedding and the word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
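# Illustrative sketch (editor's addition): pack_wrapper applies a module only to the valid
# (unpadded) timesteps given by att_masks, then restores the padded layout and the original
# batch order; padded positions come back as zeros from pad_packed_sequence.
def _demo_pack_wrapper():
    lin = nn.Linear(4, 6)
    feats = torch.randn(2, 5, 4)
    masks = torch.tensor([[1., 1., 1., 0., 0.],
                          [1., 1., 1., 1., 1.]])
    out = pack_wrapper(lin, feats, masks)
    assert out.shape == (2, 5, 6)
    assert out[0, 3:].abs().sum() == 0  # masked positions stay zero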
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, output_logsoftmax=output_logsoftmax)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad), 0] = float('-inf') # bool indexing (prev_bad is already a boolean array)
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # this keeps eos_idx from being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad endings
tmp[torch.from_numpy(prev_bad), 0] = float('-inf') # bool indexing (prev_bad is already a boolean array)
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build an LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_next_c = torch.tanh(next_c)
next_h = out_gate * tanh_next_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_next_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume att_masks has a one in the first position (covering the fake/sentinel region)
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
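# Illustrative sketch (editor's addition): the additive attention above expects p_att_feats to
# already be att_feats projected into att_hid_size (ctx2att does this once in _prepare_feature)
# and returns one convex combination of att_feats rows per hidden state. The opt below is a
# hypothetical minimal namespace, not the project's real option object.
def _demo_attention_module():
    from types import SimpleNamespace
    att = Attention(SimpleNamespace(rnn_size=8, att_hid_size=6))
    h = torch.randn(2, 8)
    att_feats = torch.randn(2, 4, 8)
    p_att_feats = nn.Linear(8, 6)(att_feats)  # stand-in for the ctx2att projection
    res = att(h, att_feats, p_att_feats)
    assert res.shape == (2, 8)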
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build an LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not a correct replication actually. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build an LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_encoder_trace.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, a slightly modified version of Att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
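# Minimal illustrative sketch of what pack_wrapper is expected to do: apply a
# module only over the valid (unpadded) region positions of variable-length
# attention features, then pad back to the batch's max length. The shapes and
# the toy linear module below are hypothetical, chosen only for illustration.
def _pack_wrapper_demo():
    embed = nn.Linear(8, 4)                              # stands in for att_embed
    att_feats = torch.randn(2, 5, 8)                     # 2 images, up to 5 regions each
    att_masks = torch.tensor([[1., 1., 1., 0., 0.],
                              [1., 1., 1., 1., 1.]])     # 3 and 5 valid regions
    out = pack_wrapper(embed, att_feats, att_masks)
    assert out.shape == (2, 5, 4)                        # padded back to max length
    return out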
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
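# Minimal illustrative sketch of the scheduled-sampling step used in the loop
# above: with probability ss_prob each example feeds back a word drawn from the
# model's previous output distribution instead of the ground-truth word. The
# toy tensors and vocabulary size below are hypothetical.
def _scheduled_sampling_demo(ss_prob=0.25):
    gt = torch.tensor([3, 7, 2, 9])                        # ground-truth words at step i
    prev_logprobs = torch.log_softmax(torch.randn(4, 11), dim=1)
    sample_mask = torch.rand(4) < ss_prob
    it = gt.clone()
    if sample_mask.any():
        sample_ind = sample_mask.nonzero().view(-1)
        prob_prev = torch.exp(prev_logprobs)               # N x (vocab_size + 1)
        it.index_copy_(0, sample_ind,
                       torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
    return it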
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, task, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state,
trace_feats_to_decoder, trace_masks, task)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, trace_feats_to_decoder, trace_masks, task, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state,
tmp_trace_feats, trace_masks, task,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # pad finished sequences with pad_idx so eos_idx is not forced to be 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
### for decoder evaluation: cut at the ground-truth caption length
if task != 'both':
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
if task != 'both':
return seq, seqLogprobs
else:
tmp_trace_feats = tmp_trace_feats[:, 1:-1]
return seq, seqLogprobs, torch.cat([tmp_trace_feats,
torch.zeros([seq.shape[0], seq.shape[1] - tmp_trace_feats.shape[1],
tmp_trace_feats.shape[2]]).to(seq.device)], 1)
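# For reference, a hypothetical example of the `opt` dict that _sample reads;
# only the keys are taken from the code above, and the values shown are just
# the defaults it falls back to.
_example_sample_opt = {
    'sample_method': 'greedy',    # sampling strategy passed to sample_next_word
    'beam_size': 1,               # > 1 with greedy/beam_search dispatches to _sample_beam
    'temperature': 1.0,
    'sample_n': 1,                # number of samples per image
    'group_size': 1,              # > 1 dispatches to _diverse_sample
    'output_logsoftmax': 1,
    'decoding_constraint': 0,     # forbid repeating the previous word
    'block_trigrams': 0,          # penalize already-used trigrams
    'remove_bad_endings': 0,      # forbid ending a caption on a word in bad_endings
}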
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume the mask is one at the first (fake region) position.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
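# Minimal illustrative sketch of driving UpDownCore for one step. The opt values
# and tensor shapes below are hypothetical toy settings, not the repository's
# configuration; they only show which inputs the two-LSTM step expects.
def _updown_core_demo():
    from argparse import Namespace
    opt = Namespace(input_encoding_size=16, rnn_size=16, drop_prob_lm=0.5,
                    att_hid_size=8)
    core = UpDownCore(opt)
    B, R = 2, 5
    xt = torch.randn(B, 16)                       # word embedding
    fc_feats = torch.randn(B, 16)                 # projected mean image feature
    att_feats = torch.randn(B, R, 16)             # region features (already embedded)
    p_att_feats = torch.randn(B, R, 8)            # region features projected by ctx2att
    state = (torch.zeros(2, B, 16), torch.zeros(2, B, 16))
    output, state = core(xt, fc_feats, att_feats, p_att_feats, state)
    assert output.shape == (B, 16) and state[0].shape == (2, B, 16)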
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
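# Minimal illustrative sketch of calling the additive attention above on toy
# inputs; the sizes are hypothetical. p_att_feats is assumed to be att_feats
# already projected to att_hid_size (as done by ctx2att in AttModel).
def _attention_demo():
    from argparse import Namespace
    attn = Attention(Namespace(rnn_size=16, att_hid_size=8))
    B, R = 2, 4
    h = torch.randn(B, 16)                        # current hidden state
    att_feats = torch.randn(B, R, 16)             # region features
    p_att_feats = torch.randn(B, R, 8)            # pre-projected region features
    att_masks = torch.tensor([[1., 1., 1., 0.],
                              [1., 1., 1., 1.]])  # first image has 3 valid regions
    att_res = attn(h, att_feats, p_att_feats, att_masks)
    assert att_res.shape == (B, 16)               # one attended vector per image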
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note: this is my attempt to replicate the att2all model from the self-critical paper.
However, this is not an exact replication; will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_standard_enco_deco_both.py
|
"""
Instruction to use meshed_memory_transformer (https://arxiv.org/abs/1912.08226)
pip install git+https://github.com/ruotianluo/meshed-memory-transformer.git
Note:
Currently m2transformer is not performing as well as original transformer. Not sure why? Still investigating.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
try:
from m2transformer.models.transformer import Transformer, MemoryAugmentedEncoder, MeshedDecoder, ScaledDotProductAttentionMemory
except:
print('meshed-memory-transformer not installed; please run `pip install git+https://github.com/ruotianluo/meshed-memory-transformer.git`')
from .TransformerModel import subsequent_mask, TransformerModel
class M2TransformerModel(TransformerModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
encoder = MemoryAugmentedEncoder(N_enc, 0, attention_module=ScaledDotProductAttentionMemory,
attention_module_kwargs={'m': 40})
# Another implementation is to use MultiLevelEncoder + att_embed
decoder = MeshedDecoder(tgt_vocab, 54, N_dec, -1) # -1 is padding;
model = Transformer(0, encoder, decoder) # 0 is bos
return model
def __init__(self, opt):
super(M2TransformerModel, self).__init__(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x: x # The visual embed is in the MAEncoder
# Notes: The dropout in MAEncoder is different from my att_embed, mine is 0.5?
# Also the attention mask seems wrong in MAEncoder too... interesting
def logit(self, x): # unsafe way
return x # M2transformer always output logsoftmax
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, att_masks)
memory, att_masks = self.model.encoder(att_feats)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
att_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, att_masks, seq)
seq = seq.clone()
seq[~seq_mask.any(-2)] = -1 # Make padding to be -1 (my dataloader uses 0 as padding)
outputs = self.model(att_feats, seq)
return outputs
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
out = self.model.decoder(ys, memory, mask)
return out[:, -1], [ys.unsqueeze(0)]
def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
att_feats, _, __, ___ = self._prepare_feature_forward(att_feats, att_masks)
seq, logprobs, seqLogprobs = self.model.beam_search(att_feats, self.seq_length, 0,
beam_size, return_probs=True, out_size=beam_size)
seq = seq.reshape(-1, *seq.shape[2:])
seqLogprobs = seqLogprobs.reshape(-1, *seqLogprobs.shape[2:])
# if not (seqLogprobs.gather(-1, seq.unsqueeze(-1)).squeeze(-1) == logprobs.reshape(-1, logprobs.shape[-1])).all():
# import pudb;pu.db
# seqLogprobs = logprobs.reshape(-1, logprobs.shape[-1]).unsqueeze(-1).expand(-1,-1,seqLogprobs.shape[-1])
return seq, seqLogprobs
|
connect-caption-and-trace-main
|
captioning/models/M2Transformer.py
|
import torch
def repeat_tensors(n, x):
"""
For a tensor of size Bx..., we repeat it n times, and make it Bnx...
For collections, do nested repeat
"""
if torch.is_tensor(x):
x = x.unsqueeze(1) # Bx1x...
x = x.expand(-1, n, *([-1]*len(x.shape[2:]))) # Bxnx...
x = x.reshape(x.shape[0]*n, *x.shape[2:]) # Bnx...
elif type(x) is list or type(x) is tuple:
x = [repeat_tensors(n, _) for _ in x]
return x
def split_tensors(n, x):
if torch.is_tensor(x):
assert x.shape[0] % n == 0
x = x.reshape(x.shape[0] // n, n, *x.shape[1:]).unbind(1)
elif type(x) is list or type(x) is tuple:
x = [split_tensors(n, _) for _ in x]
elif x is None:
x = [None] * n
return x
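# Minimal illustrative sketch of the round trip between the two helpers above:
# repeat_tensors turns B x ... into B*n x ... with the n copies of each example
# adjacent, and split_tensors undoes it. The toy tensor below is hypothetical.
def _repeat_split_demo():
    x = torch.arange(6).reshape(3, 2)             # B = 3
    rep = repeat_tensors(2, x)                    # -> 6 x 2
    assert rep.shape == (6, 2) and torch.equal(rep[0], rep[1])
    parts = split_tensors(2, rep)                 # -> tuple of 2 tensors, each 3 x 2
    assert torch.equal(parts[0], x) and torch.equal(parts[1], x)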
|
connect-caption-and-trace-main
|
captioning/models/utils.py
|
# This file contains ShowAttendTell and AllImg model
# ShowAttendTell is from Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
# https://arxiv.org/abs/1502.03044
# AllImg is a model where
# img feature is concatenated with word embedding at every time step as the input of lstm
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from ..utils import misc as utils
from . import utils as model_utils
class CaptionModel(nn.Module):
def __init__(self):
super(CaptionModel, self).__init__()
# implements beam search
# calls beam_step and returns the final set of beams
# augments log-probabilities with diversity terms when number of groups > 1
def forward(self, *args, **kwargs):
mode = kwargs.get('mode', 'forward')
if 'mode' in kwargs:
del kwargs['mode']
return getattr(self, '_'+mode)(*args, **kwargs)
def beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobs, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobs = logprobs.clone()
batch_size = beam_seq_table[0].shape[0]
if divm > 0:
change = logprobs.new_zeros(batch_size, logprobs.shape[-1])
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][:, :, local_time] # Nxb
for prev_labels in range(bdash):
change.scatter_add_(1, prev_decisions[:, prev_labels].unsqueeze(-1), change.new_ones(batch_size, 1))
if local_time == 0:
logprobs = logprobs - change * diversity_lambda
else:
logprobs = logprobs - self.repeat_tensor(bdash, change) * diversity_lambda
return logprobs, unaug_logprobs
# does one step of classical beam search
def beam_step(logprobs, unaug_logprobs, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobs: probabilities augmented after diversity N*bxV
#beam_size: obvious
#t : time instant
#beam_seq : tensor containing the beams
#beam_seq_logprobs: tensor containing the beam logprobs
#beam_logprobs_sum: tensor containing joint logprobs
#OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions Nxbxl
#beam_seq_logprobs : log-probability of each decision made, NxbxlxV
#beam_logprobs_sum : joint log-probability of each beam Nxb
batch_size = beam_logprobs_sum.shape[0]
vocab_size = logprobs.shape[-1]
logprobs = logprobs.reshape(batch_size, -1, vocab_size) # NxbxV
if t == 0:
assert logprobs.shape[1] == 1
beam_logprobs_sum = beam_logprobs_sum[:, :1]
candidate_logprobs = beam_logprobs_sum.unsqueeze(-1) + logprobs # beam_logprobs_sum Nxb logprobs is NxbxV
ys, ix = torch.sort(candidate_logprobs.reshape(candidate_logprobs.shape[0], -1), -1, True)
ys, ix = ys[:,:beam_size], ix[:,:beam_size]
beam_ix = ix // vocab_size # Nxb which beam
selected_ix = ix % vocab_size # Nxb # which word
state_ix = (beam_ix + torch.arange(batch_size).type_as(beam_ix).unsqueeze(-1) * logprobs.shape[1]).reshape(-1) # N*b which in Nxb beams
if t > 0:
# gather according to beam_ix
assert (beam_seq.gather(1, beam_ix.unsqueeze(-1).expand_as(beam_seq)) == beam_seq.reshape(-1, beam_seq.shape[-1])[state_ix].view_as(beam_seq)).all()
beam_seq = beam_seq.gather(1, beam_ix.unsqueeze(-1).expand_as(beam_seq))
beam_seq_logprobs = beam_seq_logprobs.gather(1, beam_ix.unsqueeze(-1).unsqueeze(-1).expand_as(beam_seq_logprobs))
beam_seq = torch.cat([beam_seq, selected_ix.unsqueeze(-1)], -1) # beam_seq Nxbxl
beam_logprobs_sum = beam_logprobs_sum.gather(1, beam_ix) + \
logprobs.reshape(batch_size, -1).gather(1, ix)
assert (beam_logprobs_sum == ys).all()
_tmp_beam_logprobs = unaug_logprobs[state_ix].reshape(batch_size, -1, vocab_size)
beam_logprobs = unaug_logprobs.reshape(batch_size, -1, vocab_size).gather(1, beam_ix.unsqueeze(-1).expand(-1, -1, vocab_size)) # NxbxV
assert (_tmp_beam_logprobs == beam_logprobs).all()
beam_seq_logprobs = torch.cat([
beam_seq_logprobs,
beam_logprobs.reshape(batch_size, -1, 1, vocab_size)], 2)
new_state = [None for _ in state]
for _ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[_ix] = state[_ix][:, state_ix]
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state
# Start diverse_beam_search
opt = kwargs['opt']
temperature = opt.get('temperature', 1) # This should not affect beam search, but will affect dbs
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
suppress_UNK = opt.get('suppress_UNK', 0)
length_penalty = utils.penalty_builder(opt.get('length_penalty', ''))
bdash = beam_size // group_size # beam per group
batch_size = init_logprobs.shape[0]
device = init_logprobs.device
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(batch_size, bdash, 0).to(device) for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(batch_size, bdash, 0, self.vocab_size + 1).to(device) for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(batch_size, bdash).to(device) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size+1)
done_beams_table = [[[] for __ in range(group_size)] for _ in range(batch_size)]
# state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
# state_table = list(zip(*[_.reshape(-1, batch_size * bdash, group_size, *_.shape[2:]).chunk(group_size, 2) for _ in init_state]))
state_table = [[_.clone() for _ in init_state] for _ in range(group_size)]
# logprobs_table = list(init_logprobs.reshape(batch_size * bdash, group_size, -1).chunk(group_size, 0))
logprobs_table = [init_logprobs.clone() for _ in range(group_size)]
# END INIT
# Chunk elements in the args
args = list(args)
args = model_utils.split_tensors(group_size, args) # For each arg, turn (Bbg)x... to (Bb)x(g)x...
if self.__class__.__name__ == 'AttEnsemble':
args = [[[args[j][i][k] for i in range(len(self.models))] for j in range(len(args))] for k in range(group_size)] # group_name, arg_name, model_name
else:
args = [[args[i][j] for i in range(len(args))] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobs = logprobs_table[divm]
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobs.scatter_(1, beam_seq_table[divm][:, :, t-divm-1].reshape(-1, 1).to(device), float('-inf'))
if remove_bad_endings and t-divm > 0:
logprobs[torch.from_numpy(np.isin(beam_seq_table[divm][:, :, t-divm-1].cpu().numpy(), self.bad_endings_ix)).reshape(-1), 0] = float('-inf')
# suppress UNK tokens in the decoding
if suppress_UNK and hasattr(self, 'vocab') and self.vocab[str(logprobs.size(1)-1)] == 'UNK':
logprobs[:,logprobs.size(1)-1] = logprobs[:, logprobs.size(1)-1] - 1000
# diversity is added here
# the function directly modifies the logprobs values and hence, we need to return
# the unaugmented ones for sorting the candidates in the end. # for historical
# reasons :-)
logprobs, unaug_logprobs = add_diversity(beam_seq_table,logprobs,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm] = beam_step(logprobs,
unaug_logprobs,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for b in range(batch_size):
is_end = beam_seq_table[divm][b, :, t-divm] == self.eos_idx
assert beam_seq_table[divm].shape[-1] == t-divm+1
if t == self.seq_length + divm - 1:
is_end.fill_(1)
for vix in range(bdash):
if is_end[vix]:
final_beam = {
'seq': beam_seq_table[divm][b, vix].clone(),
'logps': beam_seq_logprobs_table[divm][b, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][b, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][b, vix].item()
}
final_beam['p'] = length_penalty(t-divm+1, final_beam['p'])
done_beams_table[b][divm].append(final_beam)
beam_logprobs_sum_table[divm][b, is_end] -= 1000
# move the current group one step forward in time
it = beam_seq_table[divm][:, :, t-divm].reshape(-1).to(logprobs.device)
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it, *(args[divm] + [state_table[divm]]))
logprobs_table[divm] = F.log_softmax(logprobs_table[divm] / temperature, dim=-1)
# all beams are sorted by their log-probabilities
done_beams_table = [[sorted(done_beams_table[b][i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)] for b in range(batch_size)]
done_beams = [sum(_, []) for _ in done_beams_table]
return done_beams
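# For reference, a hypothetical example of the options beam_search reads from
# kwargs['opt']; only the keys come from the code above and the values are the
# defaults it falls back to. Diverse beam search is enabled by group_size > 1.
_example_beam_opt = {
    'beam_size': 10,
    'group_size': 1,              # bdash = beam_size // group_size beams per group
    'diversity_lambda': 0.5,      # penalty for words already chosen by earlier groups
    'temperature': 1.0,           # only used to re-normalize logprobs between steps
    'decoding_constraint': 0,     # forbid repeating the previous word
    'remove_bad_endings': 0,      # forbid ending a caption on a word in bad_endings
    'suppress_UNK': 0,            # subtract 1000 from the UNK log-probability
    'length_penalty': '',         # spec string passed to utils.penalty_builder
}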
def old_beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobsf, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobsf = logprobsf.clone()
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][local_time]
for sub_beam in range(bdash):
for prev_labels in range(bdash):
logprobsf[sub_beam][prev_decisions[prev_labels]] = logprobsf[sub_beam][prev_decisions[prev_labels]] - diversity_lambda
return unaug_logprobsf
# does one step of classical beam search
def beam_step(logprobsf, unaug_logprobsf, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobsf: probabilities augmented after diversity
#beam_size: obvious
#t : time instant
#beam_seq : tensor containing the beams
#beam_seq_logprobs: tensor containing the beam logprobs
#beam_logprobs_sum: tensor containing joint logprobs
#OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions
#beam_seq_logprobs : log-probability of each decision made, same size as beam_seq
#beam_logprobs_sum : joint log-probability of each beam
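            # e.g. with beam_size=3 at t>0: 3 source beams x 3 best words give up to 9
            # candidates, each scored by beam_logprobs_sum[q] + ys[q,c]; the best beam_size
            # of them become the new beams (at t==0 only one source row is expanded,
            # since all beams are still identical)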
ys,ix = torch.sort(logprobsf,1,True)
candidates = []
cols = min(beam_size, ys.size(1))
rows = beam_size
if t == 0:
rows = 1
for c in range(cols): # for each column (word, essentially)
for q in range(rows): # for each beam expansion
#compute logprob of expanding beam q with word in (sorted) position c
local_logprob = ys[q,c].item()
candidate_logprob = beam_logprobs_sum[q] + local_logprob
# local_unaug_logprob = unaug_logprobsf[q,ix[q,c]]
candidates.append({'c':ix[q,c], 'q':q, 'p':candidate_logprob, 'r':unaug_logprobsf[q]})
candidates = sorted(candidates, key=lambda x: -x['p'])
new_state = [_.clone() for _ in state]
#beam_seq_prev, beam_seq_logprobs_prev
if t >= 1:
                #we'll need these as reference when we fork beams around
beam_seq_prev = beam_seq[:t].clone()
beam_seq_logprobs_prev = beam_seq_logprobs[:t].clone()
for vix in range(beam_size):
v = candidates[vix]
#fork beam index q into index vix
if t >= 1:
beam_seq[:t, vix] = beam_seq_prev[:, v['q']]
beam_seq_logprobs[:t, vix] = beam_seq_logprobs_prev[:, v['q']]
#rearrange recurrent states
for state_ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[state_ix][:, vix] = state[state_ix][:, v['q']] # dimension one is time step
#append new end terminal at the end of this beam
beam_seq[t, vix] = v['c'] # c'th word is the continuation
beam_seq_logprobs[t, vix] = v['r'] # the raw logprob here
beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state,candidates
# Start diverse_beam_search
opt = kwargs['opt']
temperature = opt.get('temperature', 1) # This should not affect beam search, but will affect dbs
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
suppress_UNK = opt.get('suppress_UNK', 0)
length_penalty = utils.penalty_builder(opt.get('length_penalty', ''))
bdash = beam_size // group_size # beam per group
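        # the beam_size beams are split into group_size groups of bdash beams each; the groups
        # are advanced in a staggered way below and interact only through the diversity penalty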
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(self.seq_length, bdash, self.vocab_size + 1).zero_() for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(bdash) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size+1)
done_beams_table = [[] for _ in range(group_size)]
# state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
state_table = list(zip(*[_.chunk(group_size, 1) for _ in init_state]))
logprobs_table = list(init_logprobs.chunk(group_size, 0))
# END INIT
# Chunk elements in the args
args = list(args)
if self.__class__.__name__ == 'AttEnsemble':
args = [[_.chunk(group_size) if _ is not None else [None]*group_size for _ in args_] for args_ in args] # arg_name, model_name, group_name
args = [[[args[j][i][k] for i in range(len(self.models))] for j in range(len(args))] for k in range(group_size)] # group_name, arg_name, model_name
else:
args = [_.chunk(group_size) if _ is not None else [None]*group_size for _ in args]
args = [[args[i][j] for i in range(len(args))] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobsf = logprobs_table[divm]
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobsf.scatter_(1, beam_seq_table[divm][t-divm-1].unsqueeze(1).to(logprobsf.device), float('-inf'))
if remove_bad_endings and t-divm > 0:
logprobsf[torch.from_numpy(np.isin(beam_seq_table[divm][t-divm-1].cpu().numpy(), self.bad_endings_ix)), 0] = float('-inf')
# suppress UNK tokens in the decoding
if suppress_UNK and hasattr(self, 'vocab') and self.vocab[str(logprobsf.size(1)-1)] == 'UNK':
logprobsf[:,logprobsf.size(1)-1] = logprobsf[:, logprobsf.size(1)-1] - 1000
# diversity is added here
                    # add_diversity modifies the logprobsf values in place, so it returns
                    # the unaugmented ones, which are used for sorting the candidates at the
                    # end (kept this way for historical reasons)
unaug_logprobsf = add_diversity(beam_seq_table,logprobsf,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm],\
candidates_divm = beam_step(logprobsf,
unaug_logprobsf,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for vix in range(bdash):
if beam_seq_table[divm][t-divm,vix] == self.eos_idx or t == self.seq_length + divm - 1:
final_beam = {
'seq': beam_seq_table[divm][:, vix].clone(),
'logps': beam_seq_logprobs_table[divm][:, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][:, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][vix].item()
}
final_beam['p'] = length_penalty(t-divm+1, final_beam['p'])
done_beams_table[divm].append(final_beam)
# don't continue beams from finished sequences
beam_logprobs_sum_table[divm][vix] = -1000
# move the current group one step forward in time
it = beam_seq_table[divm][t-divm].to(logprobsf.device)
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it, *(args[divm] + [state_table[divm]]))
logprobs_table[divm] = F.log_softmax(logprobs_table[divm] / temperature, dim=-1)
# all beams are sorted by their log-probabilities
done_beams_table = [sorted(done_beams_table[i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)]
done_beams = sum(done_beams_table, [])
return done_beams
def sample_next_word(self, logprobs, sample_method, temperature):
if sample_method == 'greedy':
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
elif sample_method == 'gumbel': # gumbel softmax
# ref: https://gist.github.com/yzh119/fd2146d2aeb329d067568a493b20172f
def sample_gumbel(shape, eps=1e-20):
U = torch.rand(shape).to(logprobs.device)
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
y = logits + sample_gumbel(logits.size())
return F.log_softmax(y / temperature, dim=-1)
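            # Gumbel-max trick: adding Gumbel(0, 1) noise to the logits and taking the argmax
            # draws a sample from softmax(logits); sampleLogprobs below are gathered from the
            # original, noise-free logprobs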
_logprobs = gumbel_softmax_sample(logprobs, temperature)
_, it = torch.max(_logprobs.data, 1)
sampleLogprobs = logprobs.gather(1, it.unsqueeze(1)) # gather the logprobs at sampled positions
else:
logprobs = logprobs / temperature
if sample_method.startswith('top'): # topk sampling
top_num = float(sample_method[3:])
if 0 < top_num < 1:
# nucleus sampling from # The Curious Case of Neural Text Degeneration
probs = F.softmax(logprobs, dim=1)
sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=1)
_cumsum = sorted_probs.cumsum(1)
mask = _cumsum < top_num
mask = torch.cat([torch.ones_like(mask[:,:1]), mask[:,:-1]], 1)
sorted_probs = sorted_probs * mask.to(sorted_probs)
sorted_probs = sorted_probs / sorted_probs.sum(1, keepdim=True)
logprobs.scatter_(1, sorted_indices, sorted_probs.log())
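                    # i.e. keep the smallest set of top tokens whose cumulative probability
                    # reaches top_num (always at least the single best token), renormalize,
                    # and scatter the renormalized log-probs back into vocabulary order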
else:
the_k = int(top_num)
tmp = torch.empty_like(logprobs).fill_(float('-inf'))
topk, indices = torch.topk(logprobs, the_k, dim=1)
tmp = tmp.scatter(1, indices, topk)
logprobs = tmp
it = torch.distributions.Categorical(logits=logprobs.detach()).sample()
sampleLogprobs = logprobs.gather(1, it.unsqueeze(1)) # gather the logprobs at sampled positions
return it, sampleLogprobs
def decode_sequence(self, seq):
return utils.decode_sequence(self.vocab, seq)
|
connect-caption-and-trace-main
|
captioning/models/CaptionModel_orig.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, a slightly different version of Att2in
# in which the image feature embedding and the word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import reduce  # used by the multi-layer logit head below
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
        self.ss_prob = 0.0 # Scheduled sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
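    # NOTE: _forward and _sample below pass trace_feats / box_feats into _prepare_feature,
    # which this base signature does not accept; presumably the subclasses used in this repo
    # override _prepare_feature (and clip_att) with the extended signatures.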
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
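                # scheduled sampling: with probability ss_prob per position, feed a token
                # sampled from the model's previous-step distribution instead of the
                # ground-truth word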
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam searching, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
        # let's process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam searching, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
        # let's process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, trace_feats_to_decoder, trace_masks, output_logsoftmax=output_logsoftmax)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                # Make it impossible to end the caption (token 0) right after a bad-ending word
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
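                # every previously generated completion of the current bigram has its
                # log-probability reduced by alpha * ln 2 per occurrence, so repeating a
                # trigram becomes roughly 2^(-alpha) times as likely each time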
# sample the next word
            if t == self.seq_length: # stop once we reach the maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                it[~unfinished] = self.pad_idx # pad out finished sequences (so eos_idx is not overwritten to 0)
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
        ### for decoder evaluation: truncate at the ground-truth caption length (given by trace_masks)
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to end the caption (token 0) right after a bad-ending word
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
            tanh_next_c = torch.tanh(next_c)
            next_h = out_gate * tanh_next_c
            if L == self.num_layers-1:
                if L == 0:
                    i2h = self.r_w2h(x) + self.r_v2h(img_fc)
                else:
                    i2h = self.r_i2h(x)
                n5 = i2h+self.r_h2h(prev_h)
                fake_region = torch.sigmoid(n5) * tanh_next_c
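                # fake_region corresponds to the visual sentinel of the AdaAtt paper: a gated
                # view of the memory cell that the attention module can attend to instead of
                # a real image region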
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
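        # PI[:, 0] is the attention weight on the visual sentinel (fake_region); PI[:, 1:]
        # are the weights on the att_size image regions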
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume the mask is one at the first (sentinel) position.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
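        # two-LSTM UpDown step: the attention LSTM sees the previous language-LSTM hidden
        # state, the fc image feature and the current word embedding; the language LSTM then
        # sees the attended feature concatenated with h_att (see forward below)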
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
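    # Additive (Bahdanau-style) attention: scores are alpha_net(tanh(p_att_feats + h2att(h))),
    # softmax-normalized over the regions (optionally masked), and the result is a weighted
    # sum of att_feats.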
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
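    # In Att2in the attended image feature enters the LSTM only through the cell-input
    # transform (self.a2c below); the gates are computed from the word embedding and the
    # previous hidden state alone.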
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate the att2all model in the self-critical paper.
However, it is not actually a correct replication. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
        # cases to handle:
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
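        # an all-zero state marks sequences that have not consumed the image yet; fc_feats is
        # fed through the LSTM once for exactly those sequences before processing xt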
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_caption_generation.py
|
# Implementation for paper 'Attention on Attention for Image Captioning'
# https://arxiv.org/abs/1908.06954
# RT: Code from original author's repo: https://github.com/husthuaan/AoANet/
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from .AttModel import pack_wrapper, AttModel, Attention
from .TransformerModel import LayerNorm, attention, clones, SublayerConnection, PositionwiseFeedForward
class MultiHeadedDotAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1, scale=1, project_k_v=1, use_output_layer=1, do_aoa=0, norm_q=0, dropout_aoa=0.3):
super(MultiHeadedDotAttention, self).__init__()
assert d_model * scale % h == 0
# We assume d_v always equals d_k
self.d_k = d_model * scale // h
self.h = h
# Do we need to do linear projections on K and V?
self.project_k_v = project_k_v
# normalize the query?
if norm_q:
self.norm = LayerNorm(d_model)
else:
self.norm = lambda x:x
self.linears = clones(nn.Linear(d_model, d_model * scale), 1 + 2 * project_k_v)
# output linear layer after the multi-head attention?
self.output_layer = nn.Linear(d_model * scale, d_model)
# apply aoa after attention?
self.use_aoa = do_aoa
if self.use_aoa:
self.aoa_layer = nn.Sequential(nn.Linear((1 + scale) * d_model, 2 * d_model), nn.GLU())
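            # AoA: a single linear layer over [attended result, query] produces 2*d_model
            # values that the GLU splits into an information vector and a sigmoid gate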
# dropout to the input of AoA layer
if dropout_aoa > 0:
self.dropout_aoa = nn.Dropout(p=dropout_aoa)
else:
self.dropout_aoa = lambda x:x
if self.use_aoa or not use_output_layer:
# AoA doesn't need the output linear layer
del self.output_layer
self.output_layer = lambda x:x
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, value, key, mask=None):
if mask is not None:
if len(mask.size()) == 2:
mask = mask.unsqueeze(-2)
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
single_query = 0
if len(query.size()) == 2:
single_query = 1
query = query.unsqueeze(1)
nbatches = query.size(0)
query = self.norm(query)
# Do all the linear projections in batch from d_model => h x d_k
if self.project_k_v == 0:
query_ = self.linears[0](query).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
key_ = key.view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
value_ = value.view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
else:
query_, key_, value_ = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# Apply attention on all the projected vectors in batch.
x, self.attn = attention(query_, key_, value_, mask=mask,
dropout=self.dropout)
# "Concat" using a view
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
if self.use_aoa:
# Apply AoA
x = self.aoa_layer(self.dropout_aoa(torch.cat([x, query], -1)))
x = self.output_layer(x)
if single_query:
query = query.squeeze(1)
x = x.squeeze(1)
return x
class AoA_Refiner_Layer(nn.Module):
def __init__(self, size, self_attn, feed_forward, dropout):
super(AoA_Refiner_Layer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.use_ff = 0
if self.feed_forward is not None:
self.use_ff = 1
self.sublayer = clones(SublayerConnection(size, dropout), 1+self.use_ff)
self.size = size
def forward(self, x, mask):
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[-1](x, self.feed_forward) if self.use_ff else x
class AoA_Refiner_Core(nn.Module):
def __init__(self, opt):
super(AoA_Refiner_Core, self).__init__()
attn = MultiHeadedDotAttention(opt.num_heads, opt.rnn_size, project_k_v=1, scale=opt.multi_head_scale, do_aoa=opt.refine_aoa, norm_q=0, dropout_aoa=getattr(opt, 'dropout_aoa', 0.3))
layer = AoA_Refiner_Layer(opt.rnn_size, attn, PositionwiseFeedForward(opt.rnn_size, 2048, 0.1) if opt.use_ff else None, 0.1)
self.layers = clones(layer, 6)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class AoA_Decoder_Core(nn.Module):
def __init__(self, opt):
super(AoA_Decoder_Core, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.d_model = opt.rnn_size
self.use_multi_head = opt.use_multi_head
self.multi_head_scale = opt.multi_head_scale
self.use_ctx_drop = getattr(opt, 'ctx_drop', 0)
self.out_res = getattr(opt, 'out_res', 0)
self.decoder_type = getattr(opt, 'decoder_type', 'AoA')
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size, opt.rnn_size) # we, fc, h^2_t-1
self.out_drop = nn.Dropout(self.drop_prob_lm)
if self.decoder_type == 'AoA':
# AoA layer
self.att2ctx = nn.Sequential(nn.Linear(self.d_model * opt.multi_head_scale + opt.rnn_size, 2 * opt.rnn_size), nn.GLU())
elif self.decoder_type == 'LSTM':
# LSTM layer
self.att2ctx = nn.LSTMCell(self.d_model * opt.multi_head_scale + opt.rnn_size, opt.rnn_size)
else:
# Base linear layer
self.att2ctx = nn.Sequential(nn.Linear(self.d_model * opt.multi_head_scale + opt.rnn_size, opt.rnn_size), nn.ReLU())
# if opt.use_multi_head == 1: # TODO, not implemented for now
# self.attention = MultiHeadedAddAttention(opt.num_heads, opt.d_model, scale=opt.multi_head_scale)
if opt.use_multi_head == 2:
self.attention = MultiHeadedDotAttention(opt.num_heads, opt.rnn_size, project_k_v=0, scale=opt.multi_head_scale, use_output_layer=0, do_aoa=0, norm_q=1)
else:
self.attention = Attention(opt)
if self.use_ctx_drop:
self.ctx_drop = nn.Dropout(self.drop_prob_lm)
else:
self.ctx_drop = lambda x :x
def forward(self, xt, mean_feats, att_feats, p_att_feats, state, att_masks=None):
# state[0][1] is the context vector at the last step
h_att, c_att = self.att_lstm(torch.cat([xt, mean_feats + self.ctx_drop(state[0][1])], 1), (state[0][0], state[1][0]))
if self.use_multi_head == 2:
att = self.attention(h_att, p_att_feats.narrow(2, 0, self.multi_head_scale * self.d_model), p_att_feats.narrow(2, self.multi_head_scale * self.d_model, self.multi_head_scale * self.d_model), att_masks)
else:
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
ctx_input = torch.cat([att, h_att], 1)
if self.decoder_type == 'LSTM':
output, c_logic = self.att2ctx(ctx_input, (state[0][1], state[1][1]))
state = (torch.stack((h_att, output)), torch.stack((c_att, c_logic)))
else:
output = self.att2ctx(ctx_input)
# save the context vector to state[0][1]
state = (torch.stack((h_att, output)), torch.stack((c_att, state[1][1])))
if self.out_res:
# add residual connection
output = output + h_att
output = self.out_drop(output)
return output, state
class AoAModel(AttModel):
def __init__(self, opt):
super(AoAModel, self).__init__(opt)
self.num_layers = 2
# mean pooling
self.use_mean_feats = getattr(opt, 'mean_feats', 1)
if opt.use_multi_head == 2:
del self.ctx2att
self.ctx2att = nn.Linear(opt.rnn_size, 2 * opt.multi_head_scale * opt.rnn_size)
if self.use_mean_feats:
del self.fc_embed
if opt.refine:
self.refiner = AoA_Refiner_Core(opt)
else:
self.refiner = lambda x,y : x
self.core = AoA_Decoder_Core(opt)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed att feats
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
att_feats = self.refiner(att_feats, att_masks)
if self.use_mean_feats:
            # mean pooling
if att_masks is None:
mean_feats = torch.mean(att_feats, dim=1)
else:
mean_feats = (torch.sum(att_feats * att_masks.unsqueeze(-1), 1) / torch.sum(att_masks.unsqueeze(-1), 1))
else:
mean_feats = self.fc_embed(fc_feats)
# Project the attention feats first to reduce memory and computation.
p_att_feats = self.ctx2att(att_feats)
return mean_feats, att_feats, p_att_feats, att_masks
|
connect-caption-and-trace-main
|
captioning/models/AoAModel.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, a slightly different version of Att2in
# in which the image feature embedding and the word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import reduce  # used by the multi-layer logit head below
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
        self.ss_prob = 0.0 # Scheduled sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
return att_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# let's process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# let's process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, att_masks, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size * sample_n dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, output_logsoftmax=output_logsoftmax)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('bool')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(seq.size(0)): # loop over batch_size * sample_n rows
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(seq.size(0)):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # positions that already finished get pad_idx, so the eos token itself is not overwritten with 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad endings
tmp[torch.from_numpy(prev_bad.astype('bool')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
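# --- Illustrative sketch (added for exposition; not part of the original code).
# It replays the trigram-blocking penalty used in _sample/_diverse_sample on a
# toy vocabulary: for each sample the decoder remembers which word followed each
# (w_{t-2}, w_{t-1}) pair, and at the next step it subtracts alpha * ln(2) from
# the log-prob of every previously used continuation. All sizes and token ids
# below are assumptions for illustration only.
def _sketch_trigram_blocking():
    vocab_size = 5
    logprobs = torch.zeros(1, vocab_size)            # one sample, flat distribution
    trigrams = [{(2, 3): [4]}]                       # earlier, the pair "2 3" was followed by "4"
    prev_two = (2, 3)                                # the last two generated words form "2 3" again
    mask = torch.zeros_like(logprobs)
    for j in trigrams[0].get(prev_two, []):
        mask[0, j] += 1
    alpha = 2.0
    blocked = logprobs + mask * (-0.693) * alpha     # word 4 is pushed down by roughly 2*ln(2)
    assert blocked[0, 4] < blocked[0, 0]
    return blocked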
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build an LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_next_c = torch.tanh(next_c)
next_h = out_gate * tanh_next_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_next_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.att_hid_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume the first mask entry is one, so the fake-region slot is always kept
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
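# --- Illustrative sketch (added for exposition; not part of the original code).
# It spells out the shapes the Attention module above expects. `opt` is a
# stand-in namespace and `ctx2att` a local projection standing in for
# AttModel.ctx2att; all sizes are assumptions for illustration only.
def _sketch_attention_shapes():
    from types import SimpleNamespace
    opt = SimpleNamespace(rnn_size=16, att_hid_size=8)
    attn = Attention(opt)
    ctx2att = nn.Linear(16, 8)                       # projects region features to att_hid_size
    h = torch.randn(2, 16)                           # decoder hidden state (batch, rnn_size)
    att_feats = torch.randn(2, 5, 16)                # 5 embedded region features per image
    p_att_feats = ctx2att(att_feats)                 # pre-projected features, as _prepare_feature does
    att_res = attn(h, att_feats, p_att_feats)        # -> (2, 16) attention-weighted region feature
    assert tuple(att_res.shape) == (2, 16)
    return att_res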
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build an LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate the att2all model from the self-critical paper.
However, it is not actually a correct replication. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build an LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_orig.py
|
# This file contains Transformer network
# Most of the code is copied from http://nlp.seas.harvard.edu/2018/04/03/attention.html
# The cfg name correspondence:
# N=num_layers
# d_model=input_encoding_size
# d_ff=rnn_size
# h is always 8
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel_standard_enco_deco_both import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, src_embed, tgt_embed, generator_caption, generator_trace, d_model, dropout):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.src_embed = src_embed
self.tgt_embed = tgt_embed
self.generator_caption = generator_caption
self.generator_trace = generator_trace
# self.decode_layernorm = nn.LayerNorm(d_model, elementwise_affine=True)
# self.dropout = nn.Dropout(dropout)
self.trace_layernorm_caption = nn.LayerNorm(d_model, elementwise_affine=True)
self.trace_layernorm_trace = nn.LayerNorm(d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(d_model, 0) # no dropout inside the positional encoding here
self.trace_embed = nn.Sequential(*(
(nn.Linear(5, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5)) ))
self.trace_feat_embed = nn.Sequential(*(
(nn.Linear(2048, d_model),
nn.LayerNorm(d_model, elementwise_affine=True),
nn.ReLU(),
nn.Dropout(0.5))))
def forward(self, src, tgt, src_mask, tgt_mask, trace_feat, trace_masks, task):
"Take in and process masked src and target sequences."
memory = self.encode(src, src_mask)
return self.decode(memory, src_mask, tgt, tgt_mask, trace_feat, trace_masks, task), memory
def encode(self, src, src_mask):
return self.encoder(self.src_embed(src), src_mask)
def decode(self, memory, src_mask, tgt, tgt_mask, trace_feats, trace_masks, task):
# if task == 'trace':
### get trace_feat
# trace_grid_feats = trace_feats[:, :, 5:]
trace_feats = trace_feats[:, :, :5]
# trace_grid_feats = self.trace_feat_embed(trace_grid_feats)
trace_feats = self.trace_embed(trace_feats)
trace_feats = self.trace_layernorm_trace(self.position_encoder(trace_feats))
### embed the tgt and then add the trace_grid_feat: add trace_feat in the beginning
tgt_emd = self.tgt_embed(tgt, task) #, task
# if tgt.shape[1] > trace_feats.shape[1]:
# trace_feats = torch.cat([trace_feats, torch.zeros([trace_feats.shape[0], tgt_emd.shape[1]-trace_feats.shape[1],
# trace_feats.shape[2]]).to(trace_feats.device)], 1)
# else:
# trace_feats = trace_feats[:, :tgt_emd.shape[1], :]
# tgt_emd = self.dropout(self.decode_layernorm(tgt_emd + trace_feat))
return self.decoder(tgt_emd, trace_feats, memory, src_mask, tgt_mask, trace_masks, task)
class Generator(nn.Module):
"Define standard linear + softmax generation step."
def __init__(self, d_model, vocab):
super(Generator, self).__init__()
self.proj = nn.Linear(d_model, vocab)
def forward(self, x):
return F.log_softmax(self.proj(x), dim=-1)
def clones(module, N):
"Produce N identical layers."
return nn.ModuleList([copy.deepcopy(module) for _ in range(N)])
class Encoder(nn.Module):
"Core encoder is a stack of N layers"
def __init__(self, layer, N):
super(Encoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
def forward(self, x, mask):
"Pass the input (and mask) through each layer in turn."
for layer in self.layers:
x = layer(x, mask)
return self.norm(x)
class LayerNorm(nn.Module):
"Construct a layernorm module (See citation for details)."
def __init__(self, features, eps=1e-6):
super(LayerNorm, self).__init__()
self.a_2 = nn.Parameter(torch.ones(features))
self.b_2 = nn.Parameter(torch.zeros(features))
self.eps = eps
def forward(self, x):
mean = x.mean(-1, keepdim=True)
std = x.std(-1, keepdim=True)
return self.a_2 * (x - mean) / (std + self.eps) + self.b_2
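# --- Illustrative sketch (added for exposition; not part of the original code).
# The LayerNorm above normalises over the last dimension like nn.LayerNorm,
# except that it uses the Bessel-corrected std and adds eps to the std rather
# than to the variance. The dummy tensor below is an assumption for illustration.
def _sketch_layernorm():
    ln = LayerNorm(8)
    x = torch.randn(2, 3, 8) * 5 + 2                 # arbitrary scale and shift
    y = ln(x)
    assert torch.allclose(y.mean(-1), torch.zeros(2, 3), atol=1e-5)
    return y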
class SublayerConnection(nn.Module):
"""
A residual connection followed by a layer norm.
Note for code simplicity the norm is first as opposed to last.
"""
def __init__(self, size, dropout):
super(SublayerConnection, self).__init__()
self.norm = LayerNorm(size)
self.dropout = nn.Dropout(dropout)
def forward(self, x, sublayer):
"Apply residual connection to any sublayer with the same size."
return x + self.dropout(sublayer(self.norm(x)))
class EncoderLayer(nn.Module):
"Encoder is made up of self-attn and feed forward (defined below)"
def __init__(self, size, self_attn, feed_forward, dropout):
super(EncoderLayer, self).__init__()
self.self_attn = self_attn
self.feed_forward = feed_forward
self.sublayer = clones(SublayerConnection(size, dropout), 2)
self.size = size
def forward(self, x, mask):
"Follow Figure 1 (left) for connections."
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, mask))
return self.sublayer[1](x, self.feed_forward)
class Decoder(nn.Module):
"Generic N layer decoder with masking."
def __init__(self, layer, N):
super(Decoder, self).__init__()
self.layers = clones(layer, N)
self.norm = LayerNorm(layer.size)
self.norm_2 = LayerNorm(layer.size)
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
for layer in self.layers:
x = layer(x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task)
if task == 'both':
return self.norm(x[0]), self.norm_2(x[1])
else:
return self.norm(x)
class DecoderLayer(nn.Module):
"Decoder is made of self-attn, src-attn, and feed forward (defined below)"
def __init__(self, size, self_attn, src_attn, caption_trace_attn, trace_caption_attn, trace_self_attn, trace_src_attn,
feed_forward_caption, feed_forward_trace, both_caption_trace_attn, both_trace_caption_attn,
both_feed_forward_caption, both_feed_forward_trace,dropout):
super(DecoderLayer, self).__init__()
self.size = size
self.self_attn = self_attn
self.src_attn = src_attn
self.feed_forward_caption = feed_forward_caption
self.feed_forward_trace = feed_forward_trace
# self.sublayer = clones(SublayerConnection(size, dropout), 3)
self.sublayer = clones(SublayerConnection(size, dropout), 8+4)
###
self.caption_trace_attn = caption_trace_attn
self.trace_caption_attn = trace_caption_attn
self.trace_self_attn = trace_self_attn
self.trace_src_attn = trace_src_attn
### both attn
self.both_caption_trace_attn = both_caption_trace_attn
self.both_trace_caption_attn = both_trace_caption_attn
self.both_feed_forward_caption = both_feed_forward_caption
self.both_feed_forward_trace = both_feed_forward_trace
###########
def forward(self, x, trace_feat, memory, src_mask, tgt_mask, trace_masks, task):
"Follow Figure 1 (right) for connections."
m = memory
if task == 'trace' or task == 'cycle_trace':
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
### add a layer for x to attend to the trace features
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat, trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
trace_feat = self.sublayer[6](trace_feat, lambda trace_feat: self.trace_caption_attn(trace_feat, x, x, tgt_mask))
################################################
return self.sublayer[7](trace_feat, self.feed_forward_trace)
elif task == 'caption':
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
### add a layer for x to attend to the trace features
# trace_mask = tgt_mask[:, -1, :].unsqueeze(1).long()
trace_masks = trace_masks.unsqueeze(1)
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat,
trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
x = self.sublayer[4](x, lambda x: self.caption_trace_attn(x, trace_feat, trace_feat, trace_masks))
################################################
return self.sublayer[5](x, self.feed_forward_caption)
elif task == 'both':
m = memory
x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, tgt_mask))
x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, src_mask))
### add a layer for x to attend to the trace features
# trace_mask = tgt_mask[:, -1, :].unsqueeze(1).long()
# trace_masks = trace_masks.unsqueeze(1)
trace_feat = self.sublayer[2](trace_feat,
lambda trace_feat: self.trace_self_attn(trace_feat, trace_feat, trace_feat,
trace_masks))
trace_feat = self.sublayer[3](trace_feat,
lambda trace_feat: self.trace_src_attn(trace_feat, m, m, src_mask))
trace_masks_for_caption = torch.cat([trace_masks,
trace_masks[:, -1, :].unsqueeze(1).repeat(1,tgt_mask.shape[1]-trace_masks.shape[1],1)], 1)
tgt_mask_for_trace = tgt_mask[:, :trace_masks.shape[1], :]
# x_out = self.sublayer[8](x, lambda x: self.both_caption_trace_attn(x, trace_feat, trace_feat, trace_masks_for_caption))
# trace_feat_out = self.sublayer[9](trace_feat,
# lambda trace_feat: self.both_trace_caption_attn(trace_feat, x, x, tgt_mask_for_trace))
return self.sublayer[10](x, self.both_feed_forward_caption), self.sublayer[11](trace_feat, self.both_feed_forward_trace)
def subsequent_mask(size):
"Mask out subsequent positions."
attn_shape = (1, size, size)
subsequent_mask = np.triu(np.ones(attn_shape), k=1).astype('uint8')
return torch.from_numpy(subsequent_mask) == 0
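# --- Illustrative sketch (added for exposition; not part of the original code).
# subsequent_mask(3) yields a lower-triangular boolean mask, so position t can
# only attend to positions <= t during autoregressive decoding.
def _sketch_subsequent_mask():
    m = subsequent_mask(3)[0]
    expected = torch.tensor([[True,  False, False],
                             [True,  True,  False],
                             [True,  True,  True]])
    assert torch.equal(m, expected)
    return m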
def attention(query, key, value, mask=None, dropout=None):
"Compute 'Scaled Dot Product Attention'"
d_k = query.size(-1)
scores = torch.matmul(query, key.transpose(-2, -1)) \
/ math.sqrt(d_k)
if mask is not None:
scores = scores.masked_fill(mask == 0, float('-inf'))
p_attn = F.softmax(scores, dim = -1)
if dropout is not None:
p_attn = dropout(p_attn)
return torch.matmul(p_attn, value), p_attn
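# --- Illustrative sketch (added for exposition; not part of the original code).
# It runs the attention() helper on dummy tensors: with no mask and no dropout
# each output row is a convex combination of the value rows, so the attention
# weights sum to one. The shapes below are assumptions for illustration.
def _sketch_scaled_dot_product_attention():
    q = torch.randn(2, 4, 8)                         # (batch, queries, d_k)
    k = torch.randn(2, 6, 8)                         # (batch, keys, d_k)
    v = torch.randn(2, 6, 8)                         # (batch, keys, d_v)
    out, p_attn = attention(q, k, v)
    assert tuple(out.shape) == (2, 4, 8)
    assert torch.allclose(p_attn.sum(-1), torch.ones(2, 4))
    return out, p_attn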
class MultiHeadedAttention(nn.Module):
def __init__(self, h, d_model, dropout=0.1):
"Take in model size and number of heads."
super(MultiHeadedAttention, self).__init__()
assert d_model % h == 0
# We assume d_v always equals d_k
self.d_k = d_model // h
self.h = h
self.linears = clones(nn.Linear(d_model, d_model), 4)
self.attn = None
self.dropout = nn.Dropout(p=dropout)
def forward(self, query, key, value, mask=None):
"Implements Figure 2"
if mask is not None:
# Same mask applied to all h heads.
mask = mask.unsqueeze(1)
nbatches = query.size(0)
# 1) Do all the linear projections in batch from d_model => h x d_k
query, key, value = \
[l(x).view(nbatches, -1, self.h, self.d_k).transpose(1, 2)
for l, x in zip(self.linears, (query, key, value))]
# 2) Apply attention on all the projected vectors in batch.
x, self.attn = attention(query, key, value, mask=mask,
dropout=self.dropout)
# 3) "Concat" using a view and apply a final linear.
x = x.transpose(1, 2).contiguous() \
.view(nbatches, -1, self.h * self.d_k)
return self.linears[-1](x)
class PositionwiseFeedForward(nn.Module):
"Implements FFN equation."
def __init__(self, d_model, d_ff, dropout=0.1):
super(PositionwiseFeedForward, self).__init__()
self.w_1 = nn.Linear(d_model, d_ff)
self.w_2 = nn.Linear(d_ff, d_model)
self.dropout = nn.Dropout(dropout)
def forward(self, x):
return self.w_2(self.dropout(F.relu(self.w_1(x))))
class Embeddings(nn.Module):
def __init__(self, d_model, vocab):
super(Embeddings, self).__init__()
self.lut = nn.Embedding(vocab, d_model)
self.d_model = d_model
self.vocab = vocab
# self.layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
def forward(self, x, task=None):
if task != 'cycle_trace':
return self.lut(x) * math.sqrt(self.d_model)
else:
# # use gumbel softmax with \tau = 1
x = torch.nn.functional.softmax(torch.log(x) -
torch.log(-torch.log(torch.rand([x.shape[2]]))).unsqueeze(0).unsqueeze(0).to(x.device),
dim=-1)
return torch.matmul(x, self.lut(torch.arange(self.vocab).to(x.device))) \
* math.sqrt(self.d_model)
class caption_Embeddings(nn.Module):
def __init__(self, d_model, vocab, position_encoder):
super(caption_Embeddings, self).__init__()
self.position_encoder = position_encoder
self.embed = Embeddings(d_model, vocab)
def forward(self, x, task):
x = self.embed(x, task)
x = self.position_encoder(x)
return x
class PositionalEncoding(nn.Module):
"Implement the PE function."
def __init__(self, d_model, dropout, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
# Compute the positional encodings once in log space.
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len).unsqueeze(1).float()
div_term = torch.exp(torch.arange(0, d_model, 2).float() *
-(math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:, :x.size(1)]
return self.dropout(x)
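# --- Illustrative sketch (added for exposition; not part of the original code).
# With dropout=0, PositionalEncoding simply adds the fixed sin/cos table to its
# input, so feeding zeros returns the encoding of each position. The sizes below
# are assumptions for illustration.
def _sketch_positional_encoding():
    pe = PositionalEncoding(d_model=8, dropout=0.0)
    x = torch.zeros(1, 4, 8)
    y = pe(x)                                        # -> x + pe.pe[:, :4]
    assert torch.allclose(y, pe.pe[:, :4])
    return y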
class TransformerModel(AttModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
c = copy.deepcopy
attn = MultiHeadedAttention(h, d_model, dropout)
ff = PositionwiseFeedForward(d_model, d_ff, dropout)
position = PositionalEncoding(d_model, dropout)
# position_nodropout = PositionalEncoding(d_model, 0)
model = EncoderDecoder(
Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N_enc),
Decoder(DecoderLayer(d_model, c(attn), c(attn), c(attn), c(attn), c(attn), c(attn),
c(ff), c(ff), c(attn), c(attn), c(ff), c(ff), dropout), N_dec),
lambda x:x, # nn.Sequential(Embeddings(d_model, src_vocab), c(position)),
caption_Embeddings(d_model, tgt_vocab, c(position)), #nn.Sequential(Embeddings(d_model, tgt_vocab), c(position)), #
Generator(d_model, tgt_vocab), nn.Sequential(nn.Linear(d_model, 5), nn.Sigmoid()),
d_model,dropout)
# This was important from their code.
# Initialize parameters with Glorot / fan_avg.
for p in model.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
return model
def __init__(self, opt):
super(TransformerModel, self).__init__(opt)
self.opt = opt
# self.config = yaml.load(open(opt.config_file))
self.N_enc = getattr(opt, 'N_enc', opt.num_layers)
self.N_dec = getattr(opt, 'N_dec', opt.num_layers)
self.d_model = getattr(opt, 'd_model', opt.input_encoding_size)
self.d_ff = getattr(opt, 'd_ff', opt.rnn_size)
self.h = getattr(opt, 'num_att_heads', 8)
self.dropout = getattr(opt, 'dropout', 0.1)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
delattr(self, 'att_embed')
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
# define trace embedding and layernorm
# self.trace_embed = nn.Linear(5, self.d_model)
self.box_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.trace_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ())+
(nn.Linear(5, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.d_model),) if self.use_bn==2 else ())))
self.trace_feat_embed = nn.Sequential(*(
((nn.BatchNorm1d(5),) if self.use_bn else ()) +
(nn.Linear(2048, self.d_model),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm)) +
((nn.BatchNorm1d(self.d_model),) if self.use_bn == 2 else ())))
self.box_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.box_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm1 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm2 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm3 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.trace_layernorm4 = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.att_layernorm = nn.LayerNorm(self.d_model, elementwise_affine=True)
self.position_encoder = PositionalEncoding(self.d_model, self.dropout)
delattr(self, 'embed')
self.embed = lambda x : x
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
delattr(self, 'logit')
del self.ctx2att
tgt_vocab = self.vocab_size + 1
print(self.N_enc, self.N_dec, self.d_model, self.d_ff, self.h, self.dropout)
self.model = self.make_model(0, tgt_vocab,
N_enc=self.N_enc,
N_dec=self.N_dec,
d_model=self.d_model,
d_ff=self.d_ff,
h=self.h,
dropout=self.dropout)
def logit(self, x): # unsafe way: reach into the generator to return raw (pre-log-softmax) logits
return self.model.generator_caption.proj(x)
def init_hidden(self, bsz):
return []
def _prepare_feature(self, fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks):
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
if self.opt.use_trace_feat:
trace_grid_feats = trace_feats[:, :, 5:]
trace_feats = trace_feats[:, :, :5]
trace_grid_feats = self.trace_layernorm3(self.trace_feat_embed(trace_grid_feats))
# trace_grid_feats = self.position_encoder(trace_grid_feats)
# trace_grid_feats = self.trace_layernorm4(trace_grid_feats)
trace_feats = self.trace_layernorm1(self.trace_embed(trace_feats))
if self.opt.use_trace_feat:
trace_feats = trace_feats + trace_grid_feats
# trace_feats_to_decoder = trace_feats
trace_feats = self.position_encoder(trace_feats) # add positional embedding
trace_feats = self.trace_layernorm2(trace_feats)
### comment to test: trace feat not from encoder, only from decoder
# att_feats = torch.cat([att_feats, trace_feats], 1) # concat with trace feats
# att_masks = torch.cat([att_masks, trace_masks.unsqueeze(1)], 2)
###########################
memory = self.model.encode(att_feats, att_masks)
return fc_feats[...,:0], att_feats[...,:0], memory, att_masks, trace_feats_to_decoder
def _prepare_feature_forward(self, att_feats, box_feats, att_masks=None, seq=None):
# comment for classification
# att_feats, box_feats, att_masks = self.clip_att(att_feats, box_feats, att_masks)
# original version by ruotian
# att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# my version: without pack and pad
att_feats = self.att_embed(att_feats)
if att_masks is None:
att_masks = att_feats.new_ones(att_feats.shape[:2], dtype=torch.long)
att_masks = att_masks.unsqueeze(-2)
if seq is not None:
# crop the last one
# seq = seq[:,:-1]
seq_mask = (seq.data != self.eos_idx) & (seq.data != self.pad_idx)
seq_mask[:,0] = 1 # bos
seq_mask = seq_mask.unsqueeze(-2)
seq_mask = seq_mask & subsequent_mask(seq.size(-1)).to(seq_mask)
seq_per_img = seq.shape[0] // att_feats.shape[0]
if seq_per_img > 1:
att_feats, att_masks = utils.repeat_tensors(seq_per_img,
[att_feats, att_masks]
)
else:
seq_mask = None
return att_feats, box_feats, seq, att_masks, seq_mask
def _forward(self, fc_feats, att_feats, trace_feats, box_feats, seq, att_masks=None, trace_masks=None, task = None):
assert task == 'trace' or task == 'caption' or task == 'both' or task == 'cycle_trace'
if task != 'cycle_trace':
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
if task == 'both':
### get the original caption input
tmp_seq = seq[:, :trace_masks.shape[1]]
_, _, _, _, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
## prepare the shifted trace
shifted_trace = torch.cat(
[torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(shifted_trace.device).unsqueeze(0).unsqueeze(1)
# out = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks)
(out_caption, out_trace), memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask,
task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs_caption = self.model.generator_caption(out_caption)
outputs_trace = self.model.generator_trace(out_trace)
return outputs_caption, outputs_trace
elif task == 'trace' or task == 'cycle_trace':
# for classification
trace_feats = trace_feats[:, :, :5]
### get the original caption input
tmp_seq = torch.ones([trace_masks.shape[0], trace_masks.shape[1]]).to(trace_masks.device) # seq[:, :trace_masks.shape[1]]
seq = seq[:, 1:trace_masks.shape[1]+1] # crop the seq to real length
seq_mask = trace_masks.unsqueeze(1)
att_feats, box_feats, tmp_seq, att_masks, tmp_seq_mask = self._prepare_feature_forward(att_feats, box_feats, att_masks, tmp_seq)
## prepare the shifted trace
shifted_trace = torch.cat([torch.zeros(trace_feats.shape[0], 1, trace_feats.shape[2]).to(trace_feats.device), trace_feats], 1)
shifted_trace = shifted_trace[:, :-1, :] # ignore the last segment in shifted trace
shifted_trace_mask = tmp_seq_mask
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
# randomly mask part of trace_feats
if self.training:
random_mask_rate = 0
else:
random_mask_rate = 0
random_mask = (torch.rand(
[shifted_trace.shape[0], shifted_trace.shape[1]]) > random_mask_rate).float().unsqueeze(2).to(
shifted_trace.device)
# if torch.rand(1) > 0.5: # half [0,0,1,1,1], half random
shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
(1 - random_mask) * torch.tensor([0., 0., 1., 1., 1.]).to(
shifted_trace.device).unsqueeze(0).unsqueeze(1)
# else:
# tmp_1 = torch.rand([shifted_trace.shape[0], shifted_trace.shape[1], 2]).sort(dim=2)[0]
# tmp_2 = torch.rand([shifted_trace.shape[0], shifted_trace.shape[1], 2]).sort(dim=2)[0]
# tmp = torch.stack([tmp_1[:, :, 0], tmp_2[:, :, 0], tmp_1[:, :, 1], tmp_2[:, :, 1],
# (tmp_1[:, :, 1] - tmp_1[:, :, 0]) * (tmp_2[:, :, 1] - tmp_2[:, :, 0])], 2)
# shifted_trace[:, :, :5] = shifted_trace[:, :, :5] * random_mask + \
# (1 - random_mask) * tmp.to(shifted_trace.device)
# out = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks)
out, memory = self.model(att_feats, seq, att_masks, seq_mask, shifted_trace, shifted_trace_mask, task) # trace generation
# for regression, use generator which is a linear layer and sigmoid
outputs = self.model.generator_trace(out)
# for classification, use (masked) dot product to provide logits
# out = out / torch.norm(out, dim=2).unsqueeze(2)
# memory = memory / torch.norm(memory, dim=2).unsqueeze(2)
# outputs = torch.matmul(out, memory.transpose(1,2))
# memory_mask = att_masks
# outputs = outputs.masked_fill(memory_mask == 0, float('-inf'))
#
# outputs = F.softmax(outputs, dim=-1)
# outputs = (outputs.unsqueeze(3) * box_feats.unsqueeze(1)).sum(dim=2)
# print('transformer_out',outputs.argmax(dim=-1)[0])
return outputs
elif task == 'caption':
att_feats, box_feats, seq, att_masks, seq_mask = self._prepare_feature_forward(att_feats, box_feats,
att_masks, seq)
# Localized Narratives: insert trace features into att_feats, trace_masks into att_masks
att_feats = self.att_layernorm(att_feats) # normalize att feat
if self.opt.use_box:
box_feats = self.box_layernorm1(self.box_embed(box_feats))
att_feats = self.box_layernorm2(att_feats + box_feats)
if self.opt.use_trace:
trace_feats_to_decoder = trace_feats
out, _ = self.model(att_feats, seq, att_masks, seq_mask, trace_feats_to_decoder, trace_masks, task)
outputs = self.model.generator_caption(out)
return outputs
# return torch.cat([_.unsqueeze(1) for _ in outputs], 1)
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask, trace_feats_to_decoder, trace_masks, task):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
if task == 'caption':
out = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, trace_masks, 'caption')
return out[:, -1], [ys.unsqueeze(0)]
elif task == 'both':
out_caption, out_trace = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1)).to(memory.device),
trace_feats_to_decoder, subsequent_mask(ys.size(1)).to(memory.device), 'both')
return out_caption[:, -1], [ys.unsqueeze(0)], out_trace
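# Note on the incremental `core` step above (illustration only): it relies on
# `subsequent_mask` to build a causal (lower-triangular) attention mask over the
# tokens decoded so far. That helper is defined elsewhere in this model code and
# is not shown here; the sketch below shows what such a mask is assumed to look
# like, following the standard "Annotated Transformer" formulation, and is not
# necessarily the repository's exact implementation.
import numpy as np
import torch

def subsequent_mask_sketch(size):
    # True where position i may attend to position j (j <= i), False above the diagonal
    attn_shape = (1, size, size)
    upper = np.triu(np.ones(attn_shape), k=1).astype('uint8')
    return torch.from_numpy(upper) == 0
# subsequent_mask_sketch(3) ->
# tensor([[[ True, False, False],
#          [ True,  True, False],
#          [ True,  True,  True]]])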
|
connect-caption-and-trace-main
|
captioning/models/TransformerModel_standard_enco_deco_both.py
|
# This file contains ShowAttendTell and AllImg model
# ShowAttendTell is from Show, Attend and Tell: Neural Image Caption Generation with Visual Attention
# https://arxiv.org/abs/1502.03044
# AllImg is a model where
# img feature is concatenated with word embedding at every time step as the input of lstm
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from ..utils import misc as utils
from . import utils as model_utils
class CaptionModel(nn.Module):
def __init__(self):
super(CaptionModel, self).__init__()
# implements beam search
# calls beam_step and returns the final set of beams
# augments log-probabilities with diversity terms when number of groups > 1
def forward(self, *args, **kwargs):
mode = kwargs.get('mode', 'forward')
if 'mode' in kwargs:
del kwargs['mode']
return getattr(self, '_'+mode)(*args, **kwargs)
def beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobs, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobs = logprobs.clone()
batch_size = beam_seq_table[0].shape[0]
if divm > 0:
change = logprobs.new_zeros(batch_size, logprobs.shape[-1])
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][:, :, local_time] # Nxb
for prev_labels in range(bdash):
change.scatter_add_(1, prev_decisions[:, prev_labels].unsqueeze(-1), change.new_ones(batch_size, 1))
if local_time == 0:
logprobs = logprobs - change * diversity_lambda
else:
logprobs = logprobs - self.repeat_tensor(bdash, change) * diversity_lambda
return logprobs, unaug_logprobs
# does one step of classical beam search
def beam_step(logprobs, unaug_logprobs, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobs: probabilities augmented after diversity N*bxV
#beam_size: obvious
#t : time instant
            #beam_seq : tensor containing the beams
            #beam_seq_logprobs: tensor containing the beam logprobs
            #beam_logprobs_sum: tensor containing joint logprobs
            #OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions Nxbxl
#beam_seq_logprobs : log-probability of each decision made, NxbxlxV
#beam_logprobs_sum : joint log-probability of each beam Nxb
batch_size = beam_logprobs_sum.shape[0]
vocab_size = logprobs.shape[-1]
logprobs = logprobs.reshape(batch_size, -1, vocab_size) # NxbxV
if t == 0:
assert logprobs.shape[1] == 1
beam_logprobs_sum = beam_logprobs_sum[:, :1]
candidate_logprobs = beam_logprobs_sum.unsqueeze(-1) + logprobs # beam_logprobs_sum Nxb logprobs is NxbxV
ys, ix = torch.sort(candidate_logprobs.reshape(candidate_logprobs.shape[0], -1), -1, True)
ys, ix = ys[:,:beam_size], ix[:,:beam_size]
beam_ix = ix // vocab_size # Nxb which beam
            selected_ix = ix % vocab_size # Nxb # which word
state_ix = (beam_ix + torch.arange(batch_size).type_as(beam_ix).unsqueeze(-1) * logprobs.shape[1]).reshape(-1) # N*b which in Nxb beams
if t > 0:
# gather according to beam_ix
assert (beam_seq.gather(1, beam_ix.unsqueeze(-1).expand_as(beam_seq)) == beam_seq.reshape(-1, beam_seq.shape[-1])[state_ix].view_as(beam_seq)).all()
beam_seq = beam_seq.gather(1, beam_ix.unsqueeze(-1).expand_as(beam_seq))
beam_seq_logprobs = beam_seq_logprobs.gather(1, beam_ix.unsqueeze(-1).unsqueeze(-1).expand_as(beam_seq_logprobs))
beam_seq = torch.cat([beam_seq, selected_ix.unsqueeze(-1)], -1) # beam_seq Nxbxl
beam_logprobs_sum = beam_logprobs_sum.gather(1, beam_ix) + \
logprobs.reshape(batch_size, -1).gather(1, ix)
assert (beam_logprobs_sum == ys).all()
_tmp_beam_logprobs = unaug_logprobs[state_ix].reshape(batch_size, -1, vocab_size)
beam_logprobs = unaug_logprobs.reshape(batch_size, -1, vocab_size).gather(1, beam_ix.unsqueeze(-1).expand(-1, -1, vocab_size)) # NxbxV
assert (_tmp_beam_logprobs == beam_logprobs).all()
beam_seq_logprobs = torch.cat([
beam_seq_logprobs,
beam_logprobs.reshape(batch_size, -1, 1, vocab_size)], 2)
new_state = [None for _ in state]
for _ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[_ix] = state[_ix][:, state_ix]
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state
# Start diverse_beam_search
opt = kwargs['opt']
temperature = opt.get('temperature', 1) # This should not affect beam search, but will affect dbs
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
suppress_UNK = opt.get('suppress_UNK', 0)
length_penalty = utils.penalty_builder(opt.get('length_penalty', ''))
bdash = beam_size // group_size # beam per group
batch_size = init_logprobs.shape[0]
device = init_logprobs.device
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(batch_size, bdash, 0).to(device) for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(batch_size, bdash, 0, self.vocab_size + 1).to(device) for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(batch_size, bdash).to(device) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size+1)
done_beams_table = [[[] for __ in range(group_size)] for _ in range(batch_size)]
# state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
# state_table = list(zip(*[_.reshape(-1, batch_size * bdash, group_size, *_.shape[2:]).chunk(group_size, 2) for _ in init_state]))
state_table = [[_.clone() for _ in init_state] for _ in range(group_size)]
# logprobs_table = list(init_logprobs.reshape(batch_size * bdash, group_size, -1).chunk(group_size, 0))
logprobs_table = [init_logprobs.clone() for _ in range(group_size)]
# END INIT
# Chunk elements in the args
args = list(args)
args = model_utils.split_tensors(group_size, args) # For each arg, turn (Bbg)x... to (Bb)x(g)x...
if self.__class__.__name__ == 'AttEnsemble':
args = [[[args[j][i][k] for i in range(len(self.models))] for j in range(len(args))] for k in range(group_size)] # group_name, arg_name, model_name
else:
args = [[args[i][j] for i in range(len(args)-1)]+[args[-1]] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobs = logprobs_table[divm]
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobs.scatter_(1, beam_seq_table[divm][:, :, t-divm-1].reshape(-1, 1).to(device), float('-inf'))
if remove_bad_endings and t-divm > 0:
logprobs[torch.from_numpy(np.isin(beam_seq_table[divm][:, :, t-divm-1].cpu().numpy(), self.bad_endings_ix)).reshape(-1), 0] = float('-inf')
# suppress UNK tokens in the decoding
if suppress_UNK and hasattr(self, 'vocab') and self.vocab[str(logprobs.size(1)-1)] == 'UNK':
logprobs[:,logprobs.size(1)-1] = logprobs[:, logprobs.size(1)-1] - 1000
# diversity is added here
# the function directly modifies the logprobs values and hence, we need to return
# the unaugmented ones for sorting the candidates in the end. # for historical
# reasons :-)
logprobs, unaug_logprobs = add_diversity(beam_seq_table,logprobs,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm] = beam_step(logprobs,
unaug_logprobs,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for b in range(batch_size):
is_end = beam_seq_table[divm][b, :, t-divm] == self.eos_idx
assert beam_seq_table[divm].shape[-1] == t-divm+1
if t == self.seq_length + divm - 1:
is_end.fill_(1)
for vix in range(bdash):
if is_end[vix]:
final_beam = {
'seq': beam_seq_table[divm][b, vix].clone(),
'logps': beam_seq_logprobs_table[divm][b, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][b, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][b, vix].item()
}
final_beam['p'] = length_penalty(t-divm+1, final_beam['p'])
done_beams_table[b][divm].append(final_beam)
beam_logprobs_sum_table[divm][b, is_end] -= 1000
# move the current group one step forward in time
it = beam_seq_table[divm][:, :, t-divm].reshape(-1).to(logprobs.device)
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it, *(args[divm] + [state_table[divm]]))
logprobs_table[divm] = F.log_softmax(logprobs_table[divm] / temperature, dim=-1)
# all beams are sorted by their log-probabilities
done_beams_table = [[sorted(done_beams_table[b][i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)] for b in range(batch_size)]
done_beams = [sum(_, []) for _ in done_beams_table]
return done_beams
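    # Note on the diverse beam search above (descriptive comment only): with
    # group_size G the beam is split into G groups of bdash = beam_size // G
    # beams; group g starts g steps late, and add_diversity penalizes words
    # already chosen by earlier groups at the same local time step by
    # diversity_lambda per occurrence, in the spirit of Diverse Beam Search
    # (Vijayakumar et al., 2016). Each finished beam is stored as a dict with
    # keys 'seq', 'logps', 'unaug_p' and 'p' (length-penalized cumulative
    # logprob); for each image, the beams of every group are sorted by 'p' and
    # concatenated into done_beams.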
def old_beam_search(self, init_state, init_logprobs, *args, **kwargs):
# function computes the similarity score to be augmented
def add_diversity(beam_seq_table, logprobsf, t, divm, diversity_lambda, bdash):
local_time = t - divm
unaug_logprobsf = logprobsf.clone()
for prev_choice in range(divm):
prev_decisions = beam_seq_table[prev_choice][local_time]
for sub_beam in range(bdash):
for prev_labels in range(bdash):
logprobsf[sub_beam][prev_decisions[prev_labels]] = logprobsf[sub_beam][prev_decisions[prev_labels]] - diversity_lambda
return unaug_logprobsf
# does one step of classical beam search
def beam_step(logprobsf, unaug_logprobsf, beam_size, t, beam_seq, beam_seq_logprobs, beam_logprobs_sum, state):
#INPUTS:
#logprobsf: probabilities augmented after diversity
#beam_size: obvious
#t : time instant
            #beam_seq : tensor containing the beams
            #beam_seq_logprobs: tensor containing the beam logprobs
            #beam_logprobs_sum: tensor containing joint logprobs
            #OUTPUTS:
#beam_seq : tensor containing the word indices of the decoded captions
#beam_seq_logprobs : log-probability of each decision made, same size as beam_seq
#beam_logprobs_sum : joint log-probability of each beam
ys,ix = torch.sort(logprobsf,1,True)
candidates = []
cols = min(beam_size, ys.size(1))
rows = beam_size
if t == 0:
rows = 1
for c in range(cols): # for each column (word, essentially)
for q in range(rows): # for each beam expansion
#compute logprob of expanding beam q with word in (sorted) position c
local_logprob = ys[q,c].item()
candidate_logprob = beam_logprobs_sum[q] + local_logprob
# local_unaug_logprob = unaug_logprobsf[q,ix[q,c]]
candidates.append({'c':ix[q,c], 'q':q, 'p':candidate_logprob, 'r':unaug_logprobsf[q]})
candidates = sorted(candidates, key=lambda x: -x['p'])
new_state = [_.clone() for _ in state]
#beam_seq_prev, beam_seq_logprobs_prev
if t >= 1:
                #we'll need these as reference when we fork beams around
beam_seq_prev = beam_seq[:t].clone()
beam_seq_logprobs_prev = beam_seq_logprobs[:t].clone()
for vix in range(beam_size):
v = candidates[vix]
#fork beam index q into index vix
if t >= 1:
beam_seq[:t, vix] = beam_seq_prev[:, v['q']]
beam_seq_logprobs[:t, vix] = beam_seq_logprobs_prev[:, v['q']]
#rearrange recurrent states
for state_ix in range(len(new_state)):
# copy over state in previous beam q to new beam at vix
new_state[state_ix][:, vix] = state[state_ix][:, v['q']] # dimension one is time step
#append new end terminal at the end of this beam
beam_seq[t, vix] = v['c'] # c'th word is the continuation
beam_seq_logprobs[t, vix] = v['r'] # the raw logprob here
beam_logprobs_sum[vix] = v['p'] # the new (sum) logprob along this beam
state = new_state
return beam_seq,beam_seq_logprobs,beam_logprobs_sum,state,candidates
# Start diverse_beam_search
opt = kwargs['opt']
temperature = opt.get('temperature', 1) # This should not affect beam search, but will affect dbs
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
suppress_UNK = opt.get('suppress_UNK', 0)
length_penalty = utils.penalty_builder(opt.get('length_penalty', ''))
bdash = beam_size // group_size # beam per group
# INITIALIZATIONS
beam_seq_table = [torch.LongTensor(self.seq_length, bdash).zero_() for _ in range(group_size)]
beam_seq_logprobs_table = [torch.FloatTensor(self.seq_length, bdash, self.vocab_size + 1).zero_() for _ in range(group_size)]
beam_logprobs_sum_table = [torch.zeros(bdash) for _ in range(group_size)]
# logprobs # logprobs predicted in last time step, shape (beam_size, vocab_size+1)
done_beams_table = [[] for _ in range(group_size)]
# state_table = [list(torch.unbind(_)) for _ in torch.stack(init_state).chunk(group_size, 2)]
state_table = list(zip(*[_.chunk(group_size, 1) for _ in init_state]))
logprobs_table = list(init_logprobs.chunk(group_size, 0))
# END INIT
# Chunk elements in the args
args = list(args)
if self.__class__.__name__ == 'AttEnsemble':
args = [[_.chunk(group_size) if _ is not None else [None]*group_size for _ in args_] for args_ in args] # arg_name, model_name, group_name
args = [[[args[j][i][k] for i in range(len(self.models))] for j in range(len(args))] for k in range(group_size)] # group_name, arg_name, model_name
else:
args = [_.chunk(group_size) if _ is not None else [None]*group_size for _ in args]
args = [[args[i][j] for i in range(len(args))] for j in range(group_size)]
for t in range(self.seq_length + group_size - 1):
for divm in range(group_size):
if t >= divm and t <= self.seq_length + divm - 1:
# add diversity
logprobsf = logprobs_table[divm]
# suppress previous word
if decoding_constraint and t-divm > 0:
logprobsf.scatter_(1, beam_seq_table[divm][t-divm-1].unsqueeze(1).to(logprobsf.device), float('-inf'))
if remove_bad_endings and t-divm > 0:
logprobsf[torch.from_numpy(np.isin(beam_seq_table[divm][t-divm-1].cpu().numpy(), self.bad_endings_ix)), 0] = float('-inf')
# suppress UNK tokens in the decoding
if suppress_UNK and hasattr(self, 'vocab') and self.vocab[str(logprobsf.size(1)-1)] == 'UNK':
logprobsf[:,logprobsf.size(1)-1] = logprobsf[:, logprobsf.size(1)-1] - 1000
# diversity is added here
# the function directly modifies the logprobsf values and hence, we need to return
# the unaugmented ones for sorting the candidates in the end. # for historical
# reasons :-)
unaug_logprobsf = add_diversity(beam_seq_table,logprobsf,t,divm,diversity_lambda,bdash)
# infer new beams
beam_seq_table[divm],\
beam_seq_logprobs_table[divm],\
beam_logprobs_sum_table[divm],\
state_table[divm],\
candidates_divm = beam_step(logprobsf,
unaug_logprobsf,
bdash,
t-divm,
beam_seq_table[divm],
beam_seq_logprobs_table[divm],
beam_logprobs_sum_table[divm],
state_table[divm])
# if time's up... or if end token is reached then copy beams
for vix in range(bdash):
if beam_seq_table[divm][t-divm,vix] == self.eos_idx or t == self.seq_length + divm - 1:
final_beam = {
'seq': beam_seq_table[divm][:, vix].clone(),
'logps': beam_seq_logprobs_table[divm][:, vix].clone(),
'unaug_p': beam_seq_logprobs_table[divm][:, vix].sum().item(),
'p': beam_logprobs_sum_table[divm][vix].item()
}
final_beam['p'] = length_penalty(t-divm+1, final_beam['p'])
done_beams_table[divm].append(final_beam)
# don't continue beams from finished sequences
beam_logprobs_sum_table[divm][vix] = -1000
# move the current group one step forward in time
it = beam_seq_table[divm][t-divm].to(logprobsf.device)
logprobs_table[divm], state_table[divm] = self.get_logprobs_state(it, *(args[divm] + [state_table[divm]]))
logprobs_table[divm] = F.log_softmax(logprobs_table[divm] / temperature, dim=-1)
# all beams are sorted by their log-probabilities
done_beams_table = [sorted(done_beams_table[i], key=lambda x: -x['p'])[:bdash] for i in range(group_size)]
done_beams = sum(done_beams_table, [])
return done_beams
def sample_next_word(self, logprobs, sample_method, temperature):
if sample_method == 'greedy':
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
elif sample_method == 'gumbel': # gumbel softmax
# ref: https://gist.github.com/yzh119/fd2146d2aeb329d067568a493b20172f
def sample_gumbel(shape, eps=1e-20):
U = torch.rand(shape).to(logprobs.device)
return -torch.log(-torch.log(U + eps) + eps)
def gumbel_softmax_sample(logits, temperature):
y = logits + sample_gumbel(logits.size())
return F.log_softmax(y / temperature, dim=-1)
_logprobs = gumbel_softmax_sample(logprobs, temperature)
_, it = torch.max(_logprobs.data, 1)
sampleLogprobs = logprobs.gather(1, it.unsqueeze(1)) # gather the logprobs at sampled positions
else:
logprobs = logprobs / temperature
if sample_method.startswith('top'): # topk sampling
top_num = float(sample_method[3:])
if 0 < top_num < 1:
# nucleus sampling from # The Curious Case of Neural Text Degeneration
probs = F.softmax(logprobs, dim=1)
sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=1)
_cumsum = sorted_probs.cumsum(1)
mask = _cumsum < top_num
mask = torch.cat([torch.ones_like(mask[:,:1]), mask[:,:-1]], 1)
sorted_probs = sorted_probs * mask.to(sorted_probs)
sorted_probs = sorted_probs / sorted_probs.sum(1, keepdim=True)
logprobs.scatter_(1, sorted_indices, sorted_probs.log())
else:
the_k = int(top_num)
tmp = torch.empty_like(logprobs).fill_(float('-inf'))
topk, indices = torch.topk(logprobs, the_k, dim=1)
tmp = tmp.scatter(1, indices, topk)
logprobs = tmp
it = torch.distributions.Categorical(logits=logprobs.detach()).sample()
sampleLogprobs = logprobs.gather(1, it.unsqueeze(1)) # gather the logprobs at sampled positions
return it, sampleLogprobs
def decode_sequence(self, seq):
return utils.decode_sequence(self.vocab, seq)
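# Illustration only: a self-contained sketch of the nucleus (top-p) step used in
# `sample_next_word` above (the branch where 0 < top_num < 1). The function name
# and variables below are local to this sketch, not part of the model's API.
import torch
import torch.nn.functional as F

def nucleus_renormalize_sketch(logprobs, top_p):
    # keep the smallest set of words whose cumulative probability exceeds top_p
    # (always retaining the single most probable word), then renormalize; the
    # discarded words end up with probability 0, i.e. logprob -inf
    probs = F.softmax(logprobs, dim=1)
    sorted_probs, sorted_indices = torch.sort(probs, descending=True, dim=1)
    cumsum = sorted_probs.cumsum(1)
    mask = cumsum < top_p
    mask = torch.cat([torch.ones_like(mask[:, :1]), mask[:, :-1]], 1)
    sorted_probs = sorted_probs * mask.to(sorted_probs)
    sorted_probs = sorted_probs / sorted_probs.sum(1, keepdim=True)
    return logprobs.scatter(1, sorted_indices, sorted_probs.log())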
|
connect-caption-and-trace-main
|
captioning/models/CaptionModel.py
|
# This file is the implementation for ensemble evaluation.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from .CaptionModel import CaptionModel
from .AttModel import pack_wrapper, AttModel
class AttEnsemble(AttModel):
def __init__(self, models, weights=None):
CaptionModel.__init__(self)
# super(AttEnsemble, self).__init__()
self.models = nn.ModuleList(models)
self.vocab_size = models[0].vocab_size
self.seq_length = models[0].seq_length
self.bad_endings_ix = models[0].bad_endings_ix
self.ss_prob = 0
weights = weights or [1.0] * len(self.models)
self.register_buffer('weights', torch.tensor(weights))
def init_hidden(self, batch_size):
state = [m.init_hidden(batch_size) for m in self.models]
return self.pack_state(state)
def pack_state(self, state):
self.state_lengths = [len(_) for _ in state]
return sum([list(_) for _ in state], [])
def unpack_state(self, state):
out = []
for l in self.state_lengths:
out.append(state[:l])
state = state[l:]
return out
def embed(self, it):
return [m.embed(it) for m in self.models]
def core(self, *args):
return zip(*[m.core(*_) for m, _ in zip(self.models, zip(*args))])
def get_logprobs_state(self, it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
state = self.unpack_state(state)
output, state = self.core(xt, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, state, tmp_att_masks)
logprobs = torch.stack([F.softmax(m.logit(output[i]), dim=1) for i,m in enumerate(self.models)], 2).mul(self.weights).div(self.weights.sum()).sum(-1).log()
return logprobs, self.pack_state(state)
def _prepare_feature(self, *args):
return tuple(zip(*[m._prepare_feature(*args) for m in self.models]))
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = fc_feats.size(0)
fc_feats, att_feats, p_att_feats, att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats = [fc_feats[i][k:k+1].expand(beam_size, fc_feats[i].size(1)) for i,m in enumerate(self.models)]
tmp_att_feats = [att_feats[i][k:k+1].expand(*((beam_size,)+att_feats[i].size()[1:])).contiguous() for i,m in enumerate(self.models)]
tmp_p_att_feats = [p_att_feats[i][k:k+1].expand(*((beam_size,)+p_att_feats[i].size()[1:])).contiguous() for i,m in enumerate(self.models)]
tmp_att_masks = [att_masks[i][k:k+1].expand(*((beam_size,)+att_masks[i].size()[1:])).contiguous() if att_masks[i] is not None else att_masks[i] for i,m in enumerate(self.models)]
it = fc_feats[0].data.new(beam_size).long().zero_()
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
# return the samples and their log likelihoods
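# Illustration only: `get_logprobs_state` above averages the per-model word
# distributions (softmax of each member's logits), weighted by `self.weights`,
# and takes the log of the mixture. The minimal sketch below reproduces that
# averaging on plain tensors; the names are local to this sketch.
import torch
import torch.nn.functional as F

def ensemble_logprobs_sketch(logits_per_model, weights):
    # logits_per_model: list of (batch, vocab) tensors, one per ensemble member
    probs = torch.stack([F.softmax(l, dim=1) for l in logits_per_model], 2)  # (batch, vocab, n_models)
    w = torch.tensor(weights, dtype=probs.dtype)
    return probs.mul(w).div(w.sum()).sum(-1).log()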
|
connect-caption-and-trace-main
|
captioning/models/AttEnsemble.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import *
from . import utils
from .CaptionModel import CaptionModel
class LSTMCore(nn.Module):
def __init__(self, opt):
super(LSTMCore, self).__init__()
self.input_encoding_size = opt.input_encoding_size
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
# Build a LSTM
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
def forward(self, xt, state):
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = torch.max(\
all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size),
all_input_sums.narrow(1, 4 * self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class FCModel(CaptionModel):
def __init__(self, opt):
super(FCModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = opt.seq_length
self.fc_feat_size = opt.fc_feat_size
self.ss_prob = 0.0 # Schedule sampling probability
self.img_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.core = LSTMCore(opt)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
def init_hidden(self, bsz):
weight = self.logit.weight
if self.rnn_type == 'lstm':
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
else:
return weight.new_zeros(self.num_layers, bsz, self.rnn_size)
def _forward(self, fc_feats, att_feats, seq, att_masks=None):
batch_size = fc_feats.size(0)
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = []
if seq_per_img > 1:
fc_feats = utils.repeat_tensors(seq_per_img, fc_feats)
for i in range(seq.size(1) + 1):
if i == 0:
xt = self.img_embed(fc_feats)
else:
                if self.training and i >= 2 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.data.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i-1].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i-1].data.clone()
#prob_prev = torch.exp(outputs[-1].data.index_select(0, sample_ind)) # fetch prev distribution: shape Nx(M+1)
#it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1))
prob_prev = torch.exp(outputs[-1].data) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i-1].clone()
# break if all the sequences end
if i >= 2 and seq[:, i-1].sum() == 0:
break
xt = self.embed(it)
output, state = self.core(xt, state)
output = F.log_softmax(self.logit(output), dim=1)
outputs.append(output)
return torch.cat([_.unsqueeze(1) for _ in outputs[1:]], 1).contiguous()
def get_logprobs_state(self, it, state):
        # 'it' contains a word index
xt = self.embed(it)
output, state = self.core(xt, state)
logprobs = F.log_softmax(self.logit(output), dim=1)
return logprobs, state
def _sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
batch_size = fc_feats.size(0)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = torch.LongTensor(self.seq_length, batch_size).zero_()
seqLogprobs = torch.FloatTensor(self.seq_length, batch_size, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
for t in range(2):
if t == 0:
xt = self.img_embed(fc_feats[k:k+1]).expand(beam_size, self.input_encoding_size)
elif t == 1: # input <bos>
it = fc_feats.data.new(beam_size).long().zero_()
xt = self.embed(it)
output, state = self.core(xt, state)
logprobs = F.log_softmax(self.logit(output), dim=1)
self.done_beams[k] = self.beam_search(state, logprobs, opt=opt)
seq[:, k] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[:, k] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq.transpose(0, 1), seqLogprobs.transpose(0, 1)
def _sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
            return self._sample_beam(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
seq = fc_feats.new_zeros(batch_size, self.seq_length, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size, self.seq_length, self.vocab_size + 1)
for t in range(self.seq_length + 2):
if t == 0:
xt = self.img_embed(fc_feats)
else:
if t == 1: # input <bos>
it = fc_feats.data.new(batch_size).long().zero_()
xt = self.embed(it)
output, state = self.core(xt, state)
logprobs = F.log_softmax(self.logit(output), dim=1)
# sample the next_word
if t == self.seq_length + 1: # skip if we achieve maximum length
break
if sample_method == 'greedy':
sampleLogprobs, it = torch.max(logprobs.data, 1)
it = it.view(-1).long()
else:
if temperature == 1.0:
prob_prev = torch.exp(logprobs.data).cpu() # fetch prev distribution: shape Nx(M+1)
else:
# scale logprobs by temperature
prob_prev = torch.exp(torch.div(logprobs.data, temperature)).cpu()
it = torch.multinomial(prob_prev, 1).to(logprobs.device)
sampleLogprobs = logprobs.gather(1, it) # gather the logprobs at sampled positions
it = it.view(-1).long() # and flatten indices for downstream processing
if t >= 1:
# stop when all finished
if t == 1:
unfinished = it > 0
else:
unfinished = unfinished & (it > 0)
it = it * unfinished.type_as(it)
                seq[:,t-1] = it # seq[t] is the input of the t+2 time step
seqLogprobs[:,t-1] = sampleLogprobs.view(-1)
if unfinished.sum() == 0:
break
return seq, seqLogprobs
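# Note on LSTMCore above (illustration only): i2h/h2h project to 5 * rnn_size.
# The first 3 * rnn_size units are passed through a sigmoid and split into the
# input, forget and output gates; the remaining 2 * rnn_size units form two
# candidate write vectors combined with an element-wise max (a maxout
# non-linearity) instead of the usual tanh. The helper below only restates that
# chunking; its name is local to this sketch.
import torch

def lstm_core_chunks_sketch(all_input_sums, rnn_size):
    gates = torch.sigmoid(all_input_sums.narrow(1, 0, 3 * rnn_size))
    in_gate = gates.narrow(1, 0, rnn_size)
    forget_gate = gates.narrow(1, rnn_size, rnn_size)
    out_gate = gates.narrow(1, 2 * rnn_size, rnn_size)
    in_transform = torch.max(all_input_sums.narrow(1, 3 * rnn_size, rnn_size),
                             all_input_sums.narrow(1, 4 * rnn_size, rnn_size))
    return in_gate, forget_gate, out_gate, in_transform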
|
connect-caption-and-trace-main
|
captioning/models/FCModel.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of Att2in,
# in which the img feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
        # For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, task, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search, sample_n == 1 or beam search'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state,
trace_feats_to_decoder, trace_masks, task)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, trace_feats_to_decoder, trace_masks, task, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state,
tmp_trace_feats, trace_masks, task,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                it[~unfinished] = self.pad_idx # pad finished sequences (rather than writing 0) so eos_idx is not overwritten
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
        ### for decoder evaluation: cut at the ground-truth caption length
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
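# Note on the trigram blocking in `_sample` / `_diverse_sample` above: each time
# a bigram prefix was already completed by word j, `mask[i, j]` gains one count,
# and `logprobs + mask * -0.693 * alpha` subtracts roughly alpha * ln(2) per
# count, i.e. the word's probability is scaled by about (1/2) ** alpha for every
# previous occurrence of that trigram. A tiny check of that identity
# (illustration only; underscore names are local to this sketch):
import math

_alpha_sketch = 2.0
_factor_sketch = math.exp(-0.693 * _alpha_sketch)   # exp(count * -0.693 * alpha) with count = 1
assert abs(_factor_sketch - 0.5 ** _alpha_sketch) < 1e-3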
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_nex_c = torch.tanh(next_c)
next_h = out_gate * tanh_nex_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_nex_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assumes the first mask entry is one (reused for the fake region)
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
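# Descriptive note on AdaAtt_attention above: the "fake region" plays the role
# of the visual sentinel from the AdaAtt paper. It is prepended to the att_size
# image regions before the softmax, so the attention distribution PI has
# att_size + 1 columns; mass on that first column lets the decoder rely on its
# language-model state rather than the image for the current word.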
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
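# Descriptive note on UpDownCore above: this is the two-LSTM decoder of the
# Up-Down model. The attention LSTM consumes [previous language-LSTM hidden
# state, mean image feature fc_feats, word embedding xt]; its hidden state h_att
# queries the Attention module; the language LSTM then consumes [attended
# feature, h_att], and its (dropped-out) hidden state is used to predict the word.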
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
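# A minimal, self-contained sketch (not from the original repo) of the additive attention
# computed by the Attention module above, using toy tensors so the shape bookkeeping in
# the comments is easy to verify. All sizes are illustrative assumptions.
def _additive_attention_sketch():
    import torch
    import torch.nn.functional as F
    B, K, rnn_size, att_hid = 2, 5, 8, 6
    h = torch.randn(B, rnn_size)                              # decoder hidden state
    att_feats = torch.randn(B, K, rnn_size)                   # region features
    p_att_feats = torch.randn(B, K, att_hid)                  # pre-projected regions (ctx2att output)
    h2att = torch.nn.Linear(rnn_size, att_hid)
    alpha_net = torch.nn.Linear(att_hid, 1)
    dot = torch.tanh(p_att_feats + h2att(h).unsqueeze(1))     # B x K x att_hid
    weight = F.softmax(alpha_net(dot).squeeze(-1), dim=1)     # B x K
    att_res = torch.bmm(weight.unsqueeze(1), att_feats).squeeze(1)  # B x rnn_size
    return att_res.shape                                      # torch.Size([2, 8])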
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
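# A small, self-contained sketch (toy sizes, not from the original repo) of how the
# 5 * rnn_size pre-activation above is sliced with narrow() into the three sigmoid gates
# plus a two-half maxout candidate.
def _gate_split_sketch():
    import torch
    rnn_size = 4
    pre = torch.randn(2, 5 * rnn_size)                        # i2h(xt) + h2h(h_{t-1}) (+ a2c(att_res))
    gates = torch.sigmoid(pre.narrow(1, 0, 3 * rnn_size))
    in_gate = gates.narrow(1, 0, rnn_size)
    forget_gate = gates.narrow(1, rnn_size, rnn_size)
    out_gate = gates.narrow(1, 2 * rnn_size, rnn_size)
    cand = pre.narrow(1, 3 * rnn_size, 2 * rnn_size)          # maxout candidate, two halves
    in_transform = torch.max(cand.narrow(1, 0, rnn_size),
                             cand.narrow(1, rnn_size, rnn_size))
    return in_gate.shape, forget_gate.shape, out_gate.shape, in_transform.shape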
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not a correct replication actually. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# Cases to handle:
# - normal MLE training
# - sampling
# - beam search (and diverse beam search)
# - fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
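# A tiny, self-contained sketch (toy sizes, not from the original repo) of the
# all-zero-state test used in NewFCModel.core above to decide which samples are at their
# first step and therefore still need the image feature fed into the LSTM.
def _first_step_mask_sketch():
    import torch
    num_layers, B, rnn_size = 1, 3, 4
    h = torch.zeros(num_layers, B, rnn_size)
    h[:, 1] = 1.0                                             # sample 1 already has history
    is_first_step = (h == 0).all(2).all(0)                    # size: B
    return is_first_step.tolist()                             # [True, False, True]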
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_both_backup_2020_11_07.py
|
"""
BertCapModel uses the huggingface transformers BERT model as a seq2seq model.
The result is not as good as the original transformer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
try:
from transformers import BertModel, BertConfig
except ImportError:
    print('Huggingface transformers not installed; please visit https://github.com/huggingface/transformers')
from .TransformerModel import subsequent_mask, TransformerModel, Generator
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Take in and process masked src and target sequences."
return self.decode(self.encode(src, src_mask), src_mask,
tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(inputs_embeds=src,
attention_mask=src_mask)[0]
def decode(self, memory, src_mask, tgt, tgt_mask):
return self.decoder(input_ids=tgt,
attention_mask=tgt_mask,
encoder_hidden_states=memory,
encoder_attention_mask=src_mask)[0]
class BertCapModel(TransformerModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
enc_config = BertConfig(vocab_size=1,
hidden_size=d_model,
num_hidden_layers=N_enc,
num_attention_heads=h,
intermediate_size=d_ff,
hidden_dropout_prob=dropout,
attention_probs_dropout_prob=dropout,
max_position_embeddings=1,
type_vocab_size=1)
dec_config = BertConfig(vocab_size=tgt_vocab,
hidden_size=d_model,
num_hidden_layers=N_dec,
num_attention_heads=h,
intermediate_size=d_ff,
hidden_dropout_prob=dropout,
attention_probs_dropout_prob=dropout,
max_position_embeddings=17,
type_vocab_size=1,
is_decoder=True)
encoder = BertModel(enc_config)
def return_embeds(*args, **kwargs):
return kwargs['inputs_embeds']
del encoder.embeddings; encoder.embeddings = return_embeds
decoder = BertModel(dec_config)
model = EncoderDecoder(
encoder,
decoder,
Generator(d_model, tgt_vocab))
return model
def __init__(self, opt):
super(BertCapModel, self).__init__(opt)
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
out = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1))
.to(memory.device))
return out[:, -1], [ys.unsqueeze(0)]
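# A short, self-contained sketch (not from the original repo) of the incremental decoding
# loop above: the generated prefix `ys` is kept in `state` and regrown by one token per
# step, with a causal mask over the prefix. torch.tril is used here only as a local
# stand-in for the imported subsequent_mask helper.
def _incremental_decode_sketch():
    import torch
    state = []
    steps = [torch.tensor([0, 0]),                            # hypothetical <bos> ids
             torch.tensor([5, 7]),
             torch.tensor([9, 3])]
    for it in steps:
        if len(state) == 0:
            ys = it.unsqueeze(1)
        else:
            ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
        causal = torch.tril(torch.ones(1, ys.size(1), ys.size(1), dtype=torch.bool))
        state = [ys.unsqueeze(0)]
    return ys.shape, causal.shape                             # (2, 3), (1, 3, 3)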
|
connect-caption-and-trace-main
|
captioning/models/BertCapModel.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
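# A minimal usage sketch (toy tensors, not from the original repo) of pack_wrapper: the
# module is applied only to the unpadded region features, and the result is padded back
# out to the longest valid length in the batch.
def _pack_wrapper_sketch():
    import torch
    att_feats = torch.randn(2, 4, 6)                          # B x max_regions x feat
    att_masks = torch.tensor([[1., 1., 1., 0.],
                              [1., 1., 0., 0.]])
    proj = torch.nn.Linear(6, 3)
    out = pack_wrapper(proj, att_feats, att_masks)
    return out.shape                                          # torch.Size([2, 3, 3])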
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Scheduled sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
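# A compact, self-contained sketch (toy values, not from the original repo) of the
# scheduled-sampling step in _forward above: with probability ss_prob, a position is fed
# the model's own sample from the previous step instead of the ground-truth token.
def _scheduled_sampling_sketch():
    import torch
    ss_prob, vocab = 0.25, 10
    gt = torch.tensor([3, 7, 2, 5])                           # ground-truth tokens at step i
    prev_logprobs = torch.log_softmax(torch.randn(4, vocab), dim=1)
    sample_mask = torch.rand(4) < ss_prob
    it = gt.clone()
    if sample_mask.sum() > 0:
        sample_ind = sample_mask.nonzero().view(-1)
        drawn = torch.multinomial(prev_logprobs.exp(), 1).view(-1)
        it.index_copy_(0, sample_ind, drawn.index_select(0, sample_ind))
    return it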
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task)
else:
output, state, word_box_attn = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks,
trace_feats_to_decoder, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
if task == 'caption':
return logprobs, state, word_box_attn
else:
return logprobs, state,
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
trace_feats_to_decoder, trace_masks, show_gate_labels, task, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, show_gate_labels, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state, word_box_attn = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
tmp_trace_feats, trace_masks, show_gate_labels, task, state,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
### for decoder evaluation: cut at the ground-truth caption length, since in controlled caption generation we assume the caption length is known
# if task != 'both':
# for i in range(trace_masks.shape[0]):
# tmp_num = trace_masks[i].sum().long()
# seq[i, tmp_num:] = 0
# seqLogprobs[i, tmp_num:, :] = 0
if task != 'both':
if task == 'caption':
return seq, seqLogprobs, torch.cat([word_box_attn,
torch.zeros([seq.shape[0], seq.shape[1]-word_box_attn.shape[1], word_box_attn.shape[2]]).to(seq.device)], 1)
else:
return seq, seqLogprobs
else:
tmp_trace_feats = tmp_trace_feats[:, 1:-1]
return seq, seqLogprobs, torch.cat([tmp_trace_feats,
torch.zeros([seq.shape[0], seq.shape[1]-tmp_trace_feats.shape[1], tmp_trace_feats.shape[2]]).to(seq.device)], 1)
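# A standalone sketch (toy values, not from the original repo) of the trigram-blocking
# trick used in _sample above: remember which words followed each (w_{t-2}, w_{t-1}) pair
# and down-weight them, multiplying their probability by roughly (1/2) ** alpha per
# previous occurrence.
def _block_trigrams_sketch():
    import torch
    vocab, alpha = 10, 2.0
    trigrams = {(4, 6): [2, 2]}                               # (prev two words) -> words already generated next
    prev_two = (4, 6)
    logprobs = torch.log_softmax(torch.randn(vocab), dim=0)
    mask = torch.zeros(vocab)
    for w in trigrams.get(prev_two, []):
        mask[w] += 1
    logprobs = logprobs + mask * (-0.693) * alpha             # ln(1/2) * alpha per repeat
    return int(logprobs.argmax())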
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx) # parenthesized: & binds tighter than !=
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
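# A small, self-contained sketch (toy values, not from the original repo) of the diversity
# penalty in _diverse_sample above: tokens already chosen by earlier groups at the same
# time step get diversity_lambda subtracted from their log-probabilities.
def _diversity_penalty_sketch():
    import torch
    diversity_lambda, vocab = 0.5, 10
    logprobs = torch.log_softmax(torch.randn(3, vocab), dim=1)   # current group, batch of 3
    prev_decisions = torch.tensor([2, 2, 7])                     # earlier group's picks at this step
    logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
    return logprobs.argmax(dim=1)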
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_next_c = torch.tanh(next_c)
next_h = out_gate * tanh_next_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_next_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume a 1 at the first time step.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not a correct replication actually. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# Cases to handle:
# - normal MLE training
# - sampling
# - beam search (and diverse beam search)
# - fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_for_coco_caption_baseline.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Scheduled sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
    def clip_att(self, att_feats, box_feats, att_masks):
        # Clip att_feats, box_feats (if given) and att_masks to the maximum mask length
        if att_masks is not None:
            max_len = att_masks.data.long().sum(1).max()
            att_feats = att_feats[:, :max_len].contiguous()
            att_masks = att_masks[:, :max_len].contiguous()
            if box_feats is not None:
                box_feats = box_feats[:, :max_len].contiguous()
        return att_feats, box_feats, att_masks
    def _prepare_feature(self, fc_feats, att_feats, att_masks):
        # Base version without trace/box features; model subclasses in this repo override it.
        att_feats, _, att_masks = self.clip_att(att_feats, None, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
        # Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
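        # Teacher forcing with scheduled sampling: with probability self.ss_prob, the input
        # word at step i is drawn from the model's own distribution at step i-1 instead of
        # being copied from the ground-truth sequence.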
for i in range(seq.size(1)):
            if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task)
else:
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks,
trace_feats_to_decoder, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search is used, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
        assert sample_n == 1 or sample_n == beam_size // group_size, 'when beam search is used, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
trace_feats_to_decoder, trace_masks, show_gate_labels, task, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, show_gate_labels, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
tmp_trace_feats, trace_masks, show_gate_labels, task, state,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
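                # With alpha = 2.0 each previous occurrence of a trigram multiplies its probability by
                # exp(-0.693 * 2) ~= 0.25 (mask counts occurrences), i.e. a soft rather than hard block.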
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
        ### for decoder evaluation: cut at the ground-truth caption length, since in controlled caption generation we assume the caption length is known
# if task != 'both':
# for i in range(trace_masks.shape[0]):
# tmp_num = trace_masks[i].sum().long()
# seq[i, tmp_num:] = 0
# seqLogprobs[i, tmp_num:, :] = 0
if task != 'both':
return seq, seqLogprobs
else:
tmp_trace_feats = tmp_trace_feats[:, 1:-1]
return seq, seqLogprobs, torch.cat([tmp_trace_feats,
torch.zeros([seq.shape[0], seq.shape[1]-tmp_trace_feats.shape[1], tmp_trace_feats.shape[2]]).to(seq.device)], 1)
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
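        # Diverse sampling: group divm starts decoding at global step tt = divm, so the groups
        # are staggered by one step, and tokens already chosen by earlier groups at the same
        # position have their log-probabilities reduced by diversity_lambda before sampling.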
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
                        # Make it impossible to generate bad endings
tmp[torch.from_numpy(prev_bad.astype('uint8')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
                        unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
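# AdaAtt_lstm is the LSTM used by the adaptive-attention decoder in the style of
# "Knowing When to Look" (Lu et al., 2017): in addition to the usual gates it emits a
# "fake region" (visual sentinel) computed from the cell state, which the attention
# module can attend to when a word needs little visual evidence.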
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
            tanh_next_c = torch.tanh(next_c)
            next_h = out_gate * tanh_next_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
                fake_region = torch.sigmoid(n5) * tanh_next_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
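# AdaAtt_attention attends over the att_size image regions plus the sentinel ("fake
# region") as one extra slot, so the softmax PI below runs over att_size + 1 entries;
# the attended vector is combined with the projected hidden state to produce h.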
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
        # view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
            PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assume the first mask entry is one, reused for the sentinel slot
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
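# UpDownCore implements the two-LSTM decoder of Bottom-Up and Top-Down attention
# (Anderson et al., 2018): the attention LSTM sees the previous language-LSTM state,
# the embedded global image feature and the word embedding; its hidden state drives
# attention over the region features, and the language LSTM produces the output.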
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
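# Attention is standard additive (Bahdanau-style) attention: the pre-projected region
# features p_att_feats are summed with the projected hidden state, passed through tanh
# and a scalar alpha_net, softmax-normalized over the regions (and re-normalized after
# masking), and used as weights for a weighted sum of the raw region features.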
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
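# Att2in2Core follows the att2in LSTM of the self-critical paper (Rennie et al., 2017):
# the attended image feature enters only the cell input (through a2c), not the gates,
# and the write input uses a maxout over two halves of the projection.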
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not a correct replication actually. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_for_coco_caption_task.py
|
import torch
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit = losses.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit = losses.LanguageModelCriterion()
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
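    # forward() dispatches between three training regimes:
    #  - struc_flag: mixes the word-level LM loss with a sequence-level structure loss on samples
    #  - not sc_flag: standard cross-entropy (teacher forcing) against the shifted labels
    #  - sc_flag: self-critical sequence training (Rennie et al., 2017), where sampled captions
    #    are rewarded relative to the greedy-decoded baseline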
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks, gts, gt_indices,
sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
lm_loss = self.crit(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
loss = self.crit(self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks), labels[..., 1:], masks[..., 1:])
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_caption_generation.py
|
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit_caption = losses.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit_caption = losses.LanguageModelCriterion()
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
self.show_gate_crit = torch.nn.CrossEntropyLoss()
# regression loss for trace generation
self.crit_trace = torch.nn.L1Loss()
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks,
show_labels, show_trace_feats, show_trace_masks, show_masks, show_gate_labels,
gts, gt_indices, sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
                lm_loss = self.crit_caption(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
# train generating both caption and trace
# caption_outputs_both, trace_outputs_both = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='both')
# loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
# loss_both_trace = (torch.abs(trace_outputs_both[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
# loss_mask.sum() * 4)
# loss_both_caption = self.crit_caption(caption_outputs_both, labels[..., 1:], masks[..., 1:])
# loss_both = loss_both_caption + loss_both_trace
# # #
# # # # for caption generation
# caption_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# loss_caption = self.crit_caption(caption_outputs, labels[..., 1:], masks[..., 1:])
# #
# # # for trace generation - regression
# trace_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='trace')
# loss_mask = ((trace_masks!=0) * (trace_feats[:,:,4]!=1)).unsqueeze(2) # for those words without labels ([0,0,1,1,1]), don't calculate the loss
# loss_trace = (torch.abs(trace_outputs[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# # for show-control-tell 2 layer with gate prediction
# show_caption_outputs, show_gate_outputs = self.model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1],
# att_masks, show_trace_masks, show_gate_labels=None, task='show')
# loss_show_caption = self.crit_caption(show_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# loss_show_gate = self.show_gate_crit(show_gate_outputs.reshape(-1, show_gate_outputs.shape[-1]),
# show_gate_labels[..., 1:].reshape(-1))
# # for show control tell 1 layer, without gate prediction
show_caption_outputs = self.model(fc_feats, att_feats, show_trace_feats, box_feats,
show_labels[..., :-1],
att_masks, show_trace_masks,
task='show')
loss_show_caption = self.crit_caption(show_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# # # for cycle trace and caption
# # trace_outputs_both = trace_outputs_both.detach()
# # caption_outputs_cycle = self.model(fc_feats, att_feats, trace_outputs_both, box_feats, labels[..., :-1],
# # att_masks, trace_masks, task='caption')
#
# caption_outputs_cycle_1 = torch.exp(caption_outputs) # get the logits before log (only after softmax)
# trace_outputs_cycle_1 = self.model(fc_feats, att_feats, trace_feats, box_feats, caption_outputs_cycle_1,
# att_masks, trace_masks, task='cycle_trace')
# loss_cycle_trace = (torch.abs(trace_outputs_cycle_1[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# trace_outputs_cycle_2 = trace_outputs
# caption_outputs_cycle_2 = self.model(fc_feats, att_feats, trace_outputs_cycle_2, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# loss_cycle_caption = self.crit_caption(caption_outputs_cycle_2, labels[..., 1:], masks[..., 1:])
# sum the loss of caption and trace generation
loss = loss_show_caption # loss_caption + loss_trace + loss_both # + (loss_cycle_caption + loss_cycle_trace) * 0.5 + loss_caption + loss_trace
# for trace generation - classification
# model_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
# model_outputs = F.log_softmax(model_outputs, dim=-1)
# model_outputs = model_outputs.view(-1, model_outputs.shape[2])
# trace_class_label = trace_feats[:,:,5] - 1
# trace_class_label = trace_class_label.view(-1).long()
# loss = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1)
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_show_control_tell.py
|
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
from ..utils.local_optimal_transport import local_OT
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
# if opt.label_smoothing > 0:
# self.crit = losses.LabelSmoothing(smoothing=opt.label_smoothing)
# else:
# self.crit = losses.LanguageModelCriterion()
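        # NOTE: the struc_flag branch in forward() below still references self.crit; re-enable
        # the criterion above (or define self.crit) before training with structure losses.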
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
# regression loss for trace generation
self.crit_trace = torch.nn.L1Loss()
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks, gts, gt_indices,
sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
lm_loss = self.crit(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
# for caption generation
# loss = self.crit(self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks), labels[..., 1:], masks[..., 1:])
# for trace generation - regression
# outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
# loss_mask = ((trace_masks!=0) * (trace_feats[:,:,4]!=1)).unsqueeze(2) # for those words without labels ([0,0,1,1,1]), don't calculate the loss
# loss = (torch.abs(outputs[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
# construct the localized optimal transport
# D = torch.abs(outputs[:,:,:4].unsqueeze(2) - trace_feats[:,:,:4].unsqueeze(1)).mean(dim=-1)
# T = local_OT(D).to(outputs.device)
# loss = (torch.abs(torch.matmul(outputs[:, :, :4].transpose(1,2), T).transpose(1,2) -
# trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)
# for trace generation - classification
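            # Channel 0 of trace_feats holds a 1-based class index and channel 5 flags words without
            # a trace label; flagged words get label -1 and are ignored by nll_loss below.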
trace_class_label = trace_feats[:, :, 0] * (trace_feats[:, :, 5] != 1).float() - 1
trace_class_label = trace_class_label.view(-1).long()
model_outputs = self.model(fc_feats, att_feats, trace_feats[:,:,1:], box_feats, labels[..., :-1], att_masks, trace_masks)
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
loss = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1)
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_trace_generation.py
|
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
import numpy as np
import random
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit_caption = losses.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit_caption = losses.LanguageModelCriterion()
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
# regression loss for trace generation
self.crit_trace = torch.nn.L1Loss()
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks, gts, gt_indices,
sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
                lm_loss = self.crit_caption(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
if self.opt.task == 'pred_both':
# train generating both caption and trace
caption_outputs_both, trace_outputs_both = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
att_masks, trace_masks, task='both')
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
loss_both_trace = (torch.abs(trace_outputs_both[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
loss_mask.sum() * 4)
loss_both_caption = self.crit_caption(caption_outputs_both, labels[..., 1:], masks[..., 1:])
loss_both = loss_both_caption + loss_both_trace # for baseline training
if self.opt.task in ['caption', 'c_joint_t']:
# for caption generation
caption_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
att_masks, trace_masks, task='caption')
loss_caption = self.crit_caption(caption_outputs, labels[..., 1:], masks[..., 1:])
if self.opt.task in ['trace', 'c_joint_t']:
# for trace generation - regression
trace_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
att_masks, trace_masks, task='trace')
loss_mask = ((trace_masks!=0) * (trace_feats[:,:,4]!=1)).unsqueeze(2) # for those words without labels ([0,0,1,1,1]), don't calculate the loss
loss_trace = (torch.abs(trace_outputs[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
# # for cycle trace and caption
# trace_outputs_both = trace_outputs_both.detach()
# caption_outputs_cycle = self.model(fc_feats, att_feats, trace_outputs_both, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# caption_outputs_cycle_1 = torch.exp(caption_outputs) # get the logits before log (only after softmax)
# trace_outputs_cycle_1 = self.model(fc_feats, att_feats, trace_feats, box_feats, caption_outputs_cycle_1,
# att_masks, trace_masks, task='cycle_trace')
# loss_cycle_trace = (torch.abs(trace_outputs_cycle_1[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# trace_outputs_cycle_2 = trace_outputs
# caption_outputs_cycle_2 = self.model(fc_feats, att_feats, trace_outputs_cycle_2, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# loss_cycle_caption = self.crit_caption(caption_outputs_cycle_2, labels[..., 1:], masks[..., 1:])
################ random permute cycle loss ###################
### random permute trace within its segments
# permute_trace_list = []
# for i in range(trace_feats.shape[0]):
# tmp_gt_length = trace_masks[i].sum().long().item()
# tmp_trace = trace_feats[i, :tmp_gt_length]
# segment_list = []
# tmp_const = np.ceil(tmp_gt_length / 5).astype(int)
# for j in range(5):
# segment_list.append(tmp_trace[j * tmp_const: (j + 1) * tmp_const])
# random.shuffle(segment_list)
# tmp_permute_trace = torch.cat(segment_list, 0)
# if tmp_permute_trace.shape[0] < trace_masks.shape[1]:
# tmp_permute_trace = torch.cat([tmp_permute_trace,
# torch.zeros([trace_masks.shape[1]-tmp_permute_trace.shape[0], tmp_permute_trace.shape[1]]).to(trace_masks.device)])
# permute_trace_list.append(tmp_permute_trace)
# permute_trace_feats = torch.stack(permute_trace_list, 0)
#
if self.opt.task == 'c_joint_t':
#### random exchange trace within batch
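                # Cycle term for 'c_joint_t': captions are generated with traces shuffled across the
                # batch, their word distributions are fed back through the model in 'cycle_trace' mode,
                # and the regenerated trace is regressed against the original (un-shuffled) trace.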
random_idx = np.arange(trace_feats.shape[0])
np.random.shuffle(random_idx)
rnd_trace_feats = trace_feats[random_idx]
# construct the loss
rnd_caption_outputs = self.model(fc_feats, att_feats, rnd_trace_feats, box_feats, labels[..., :-1],
att_masks, trace_masks, task='caption')
caption_outputs_cycle_1 = torch.exp(rnd_caption_outputs)
## caption_outputs_cycle_1 = torch.exp(caption_outputs) # get the logits before log (only after softmax)
trace_outputs_cycle_1 = self.model(fc_feats, att_feats, trace_feats, box_feats, caption_outputs_cycle_1,
att_masks, trace_masks, task='cycle_trace')
loss_cycle_trace = (torch.abs(
trace_outputs_cycle_1[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)
if self.opt.task == 'pred_both':
loss = loss_both
elif self.opt.task == 'caption':
loss = loss_caption
            elif self.opt.task == 'trace':
loss = loss_trace
elif self.opt.task == 'c_joint_t':
loss = loss_trace + 0.3 * (loss_caption) + 0.1 * (loss_cycle_trace)
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_joint.py
|
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit_caption = losses.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit_caption = losses.LanguageModelCriterion()
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
self.show_gate_crit = torch.nn.CrossEntropyLoss()
# regression loss for trace generation
self.crit_trace = torch.nn.L1Loss()
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks,
show_labels, show_trace_feats, show_trace_masks, show_masks, show_gate_labels,
gts, gt_indices, sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
                lm_loss = self.crit_caption(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
# train generating both caption and trace
# caption_outputs_both, trace_outputs_both = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='both')
# loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
# loss_both_trace = (torch.abs(trace_outputs_both[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
# loss_mask.sum() * 4)
# loss_both_caption = self.crit_caption(caption_outputs_both, labels[..., 1:], masks[..., 1:])
# loss_both = loss_both_caption + loss_both_trace
# # # #
# # # # for caption generation
# # caption_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# # att_masks, trace_masks, task='caption')
# # loss_caption = self.crit_caption(caption_outputs, labels[..., 1:], masks[..., 1:])
# #
# # # for trace generation - regression
# trace_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='trace')[:, :-1]
# loss_mask = ((trace_masks!=0) * (trace_feats[:,:,4]!=1)).unsqueeze(2) # for those words without labels ([0,0,1,1,1]), don't calculate the loss
# loss_trace = (torch.abs(trace_outputs[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# for coco-caption training
### run inference first to get a predicted trace for the coco caption
with torch.no_grad():
tmp_trace_feats = show_trace_feats[:, :1]
for i in range(show_labels.shape[2]-2):
# for regression
tmp_trace_feats_input = torch.cat(
[tmp_trace_feats, torch.zeros(tmp_trace_feats.shape[0], 1, tmp_trace_feats.shape[2]).to(tmp_trace_feats.device)], 1)
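# pad the partial trace with one zero row so the decoder input is one step
# longer than the boxes generated so far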
_, curr_out = self.model(fc_feats, att_feats, tmp_trace_feats_input, box_feats,
show_labels[..., :-1].squeeze(1),
att_masks, show_masks.squeeze(1)[:, :tmp_trace_feats_input.shape[1]], task='both')
curr_out = curr_out[:, i]
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
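# the 5th channel of a trace step holds the box area, matching the
# [x1, y1, x2, y2, area] layout used for the box/trace features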
if i == 0:
tmp_trace_feats = curr_out.unsqueeze(1)
else:
tmp_trace_feats = torch.cat([tmp_trace_feats, curr_out.unsqueeze(1)], 1)
coco_trace_outputs = tmp_trace_feats.detach()
coco_caption_outputs, coco_trace_outputs_both = self.model(fc_feats, att_feats, coco_trace_outputs, box_feats,
show_labels[..., :-1].squeeze(1),
att_masks, show_masks.squeeze(1)[:, :coco_trace_outputs.shape[1]], task='both')
loss_coco_caption = self.crit_caption(coco_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# # for coco-caption-baseline
# baseline_caption_outputs = self.model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1],
# att_masks, show_masks, task='caption')
# loss_coco_caption_baseline = self.crit_caption(baseline_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# # # for show-control-tell
# show_caption_outputs, show_gate_outputs = self.model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1],
# att_masks, show_trace_masks, show_gate_labels=show_gate_labels, task='show')
# loss_show_caption = self.crit_caption(show_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# loss_show_gate = self.show_gate_crit(show_gate_outputs.reshape(-1, show_gate_outputs.shape[-1]),
# show_gate_labels[..., 1:].reshape(-1))
# # # for cycle trace and caption
# # trace_outputs_both = trace_outputs_both.detach()
# # caption_outputs_cycle = self.model(fc_feats, att_feats, trace_outputs_both, box_feats, labels[..., :-1],
# # att_masks, trace_masks, task='caption')
#
# caption_outputs_cycle_1 = torch.exp(caption_outputs) # get the logits before log (only after softmax)
# trace_outputs_cycle_1 = self.model(fc_feats, att_feats, trace_feats, box_feats, caption_outputs_cycle_1,
# att_masks, trace_masks, task='cycle_trace')
# loss_cycle_trace = (torch.abs(trace_outputs_cycle_1[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# trace_outputs_cycle_2 = trace_outputs
# caption_outputs_cycle_2 = self.model(fc_feats, att_feats, trace_outputs_cycle_2, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# loss_cycle_caption = self.crit_caption(caption_outputs_cycle_2, labels[..., 1:], masks[..., 1:])
# sum the loss of caption and trace generation
loss = loss_coco_caption #loss_both + loss_trace #+ loss_caption # loss_coco_caption # loss_caption + loss_trace + loss_both # + (loss_cycle_caption + loss_cycle_trace) * 0.5 + loss_caption + loss_trace
# for trace generation - classification
# model_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
# model_outputs = F.log_softmax(model_outputs, dim=-1)
# model_outputs = model_outputs.view(-1, model_outputs.shape[2])
# trace_class_label = trace_feats[:,:,5] - 1
# trace_class_label = trace_class_label.view(-1).long()
# loss = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1)
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_for_coco_caption.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..utils.rewards import get_scores, get_self_cider_scores
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward):
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
input = input.reshape(-1)
reward = reward.reshape(-1)
mask = (seq>0).to(input)
mask = torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1).reshape(-1)
output = - input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
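# Illustrative usage sketch (not part of the original repo; shapes and the
# vocab size are assumptions): RewardCriterion expects per-token log-probs of
# the sampled captions, the sampled word ids (0 = padding) and a per-token
# reward, and returns the REINFORCE-style loss -logp * reward averaged over
# the non-padded positions.
#   crit = RewardCriterion()
#   logprobs = torch.randn(4, 16, 9487).log_softmax(-1)  # (batch, T, vocab)
#   seq = torch.randint(1, 9487, (4, 16))                 # sampled word ids
#   reward = torch.rand(4, 1).expand(4, 16)               # same reward per token
#   loss = crit(logprobs, seq, reward)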
class StructureLosses(nn.Module):
"""
This loss is inspired by Classical Structured Prediction Losses for Sequence to Sequence Learning (Edunov et al., 2018).
"""
def __init__(self, opt):
super(StructureLosses, self).__init__()
self.opt = opt
self.loss_type = opt.structure_loss_type
def forward(self, input, seq, data_gts):
"""
Input is either logits or log softmax
"""
out = {}
batch_size = input.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
assert seq_per_img == self.opt.train_sample_n, seq_per_img
mask = (seq>0).to(input)
mask = torch.cat([mask.new_full((mask.size(0), 1), 1), mask[:, :-1]], 1)
scores = get_scores(data_gts, seq, self.opt)
scores = torch.from_numpy(scores).type_as(input).view(-1, seq_per_img)
out['reward'] = scores #.mean()
if self.opt.entropy_reward_weight > 0:
entropy = - (F.softmax(input, dim=2) * F.log_softmax(input, dim=2)).sum(2).data
entropy = (entropy * mask).sum(1) / mask.sum(1)
print('entropy', entropy.mean().item())
scores = scores + self.opt.entropy_reward_weight * entropy.view(-1, seq_per_img)
# rescale cost to [0,1]
costs = - scores
if self.loss_type == 'risk' or self.loss_type == 'softmax_margin':
costs = costs - costs.min(1, keepdim=True)[0]
costs = costs / costs.max(1, keepdim=True)[0]
# In principle only the 'risk' loss needs this rescaling;
# the margin-based losses should be fine without it.
# Gather input: BxTxD -> BxT
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
if self.loss_type == 'seqnll':
# input is logsoftmax
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
target = costs.min(1)[1]
output = F.cross_entropy(input, target)
elif self.loss_type == 'risk':
# input is logsoftmax
input = input * mask
input = input.sum(1)
input = input.view(-1, seq_per_img)
output = (F.softmax(input.exp()) * costs).sum(1).mean()
# test
# avg_scores = input
# probs = F.softmax(avg_scores.exp_())
# loss = (probs * costs.type_as(probs)).sum() / input.size(0)
# print(output.item(), loss.item())
elif self.loss_type == 'max_margin':
# input is logits
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
_, __ = costs.min(1, keepdim=True)
costs_star = _
input_star = input.gather(1, __)
output = F.relu(costs - costs_star - input_star + input).max(1)[0] / 2
output = output.mean()
# sanity test
# avg_scores = input + costs
# scores_with_high_target = avg_scores.clone()
# scores_with_high_target.scatter_(1, costs.min(1)[1].view(-1, 1), 1e10)
# target_and_offender_index = scores_with_high_target.sort(1, True)[1][:, 0:2]
# avg_scores = avg_scores.gather(1, target_and_offender_index)
# target_index = avg_scores.new_zeros(avg_scores.size(0), dtype=torch.long)
# loss = F.multi_margin_loss(avg_scores, target_index, size_average=True, margin=0)
# print(loss.item() * 2, output.item())
elif self.loss_type == 'multi_margin':
# input is logits
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
_, __ = costs.min(1, keepdim=True)
costs_star = _
input_star = input.gather(1, __)
output = F.relu(costs - costs_star - input_star + input)
output = output.mean()
# sanity test
# avg_scores = input + costs
# loss = F.multi_margin_loss(avg_scores, costs.min(1)[1], margin=0)
# print(output, loss)
elif self.loss_type == 'softmax_margin':
# input is logsoftmax
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
input = input + costs
target = costs.min(1)[1]
output = F.cross_entropy(input, target)
elif self.loss_type == 'real_softmax_margin':
# input is logits
# This is what was originally defined in Kevin's paper
# The result should be equivalent to softmax_margin
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
input = input + costs
target = costs.min(1)[1]
output = F.cross_entropy(input, target)
elif self.loss_type == 'new_self_critical':
"""
A different self critical
Self critical uses greedy decoding score as baseline;
This setting uses the average score of the rest samples as baseline
(suppose c1...cn n samples, reward1 = score1 - 1/(n-1)(score2+..+scoren) )
"""
baseline = (scores.sum(1, keepdim=True) - scores) / (scores.shape[1] - 1)
scores = scores - baseline
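# e.g. with train_sample_n = 3 and per-image scores [0.9, 0.5, 0.1], the
# leave-one-out baselines are [0.3, 0.5, 0.7], so the centred rewards used
# below become [0.6, 0.0, -0.6] (illustrative numbers only)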
# self cider used as reward to promote diversity (not working that much in this way)
if getattr(self.opt, 'self_cider_reward_weight', 0) > 0:
_scores = get_self_cider_scores(data_gts, seq, self.opt)
_scores = torch.from_numpy(_scores).type_as(scores).view(-1, 1)
_scores = _scores.expand_as(scores - 1)
scores += self.opt.self_cider_reward_weight * _scores
output = - input * mask * scores.view(-1, 1)
output = torch.sum(output) / torch.sum(mask)
out['loss'] = output
return out
class LanguageModelCriterion(nn.Module):
def __init__(self):
super(LanguageModelCriterion, self).__init__()
def forward(self, input, target, mask):
if target.ndim == 3:
target = target.reshape(-1, target.shape[2])
mask = mask.reshape(-1, mask.shape[2])
# truncate to the same size
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)].to(input)
output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask
# Average over each token
output = torch.sum(output) / torch.sum(mask)
return output
class LabelSmoothing(nn.Module):
"Implement label smoothing."
def __init__(self, size=0, padding_idx=0, smoothing=0.0):
super(LabelSmoothing, self).__init__()
self.criterion = nn.KLDivLoss(size_average=False, reduce=False)
# self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
# self.size = size
self.true_dist = None
def forward(self, input, target, mask):
if target.ndim == 3:
target = target.reshape(-1, target.shape[2])
mask = mask.reshape(-1, mask.shape[2])
# truncate to the same size
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)]
input = input.reshape(-1, input.size(-1))
target = target.reshape(-1)
mask = mask.reshape(-1).to(input)
# assert x.size(1) == self.size
self.size = input.size(1)
# true_dist = x.data.clone()
true_dist = input.data.clone()
# true_dist.fill_(self.smoothing / (self.size - 2))
true_dist.fill_(self.smoothing / (self.size - 1))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
# true_dist[:, self.padding_idx] = 0
# mask = torch.nonzero(target.data == self.padding_idx)
# self.true_dist = true_dist
return (self.criterion(input, true_dist).sum(1) * mask).sum() / mask.sum()
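# Minimal usage sketch (not from the original repo; shapes and the vocab size
# are assumptions): both LanguageModelCriterion and LabelSmoothing take
# per-token log-probs, integer targets and a float mask over valid positions.
#   crit = LabelSmoothing(smoothing=0.1)
#   logp = torch.randn(2, 17, 9487).log_softmax(-1)  # (batch, T, vocab)
#   target = torch.randint(1, 9487, (2, 17))          # gold word ids
#   mask = torch.ones(2, 17)                          # 1 for real tokens
#   loss = crit(logp, target, mask)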
|
connect-caption-and-trace-main
|
captioning/modules/losses.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import lmdb
import os
import numpy as np
import numpy.random as npr
import random
import torch
import torch.utils.data as data
import multiprocessing
import six
class HybridLoader:
"""
If db_path is a directory, use normal file loading.
If it is an lmdb file, load from lmdb.
The loading method depends on the extension.
in_memory: if in_memory is True, we keep all the features in memory.
For individual .npy/.npz files we don't need to, because the OS file
cache already does this for us. Should be useful for lmdb or h5.
(Copied this idea from vilbert)
"""
def __init__(self, db_path, ext, in_memory=False):
self.db_path = db_path
self.ext = ext
if self.ext == '.npy':
self.loader = lambda x: np.load(six.BytesIO(x))
else:
self.loader = lambda x: np.load(six.BytesIO(x))['feat']
if db_path.endswith('.lmdb'):
self.db_type = 'lmdb'
env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False,
map_size=1099511627776 * 2,)
self.db_txn = env.begin(write=False)
elif db_path.endswith('.pth'): # Assume a key,value dictionary
self.db_type = 'pth'
self.feat_file = torch.load(db_path)
self.loader = lambda x: x
print('HybridLoader: ext is ignored')
elif db_path.endswith('h5'):
self.db_type = 'h5'
self.loader = lambda x: np.array(x).astype('float32')
else:
self.db_type = 'dir'
self.in_memory = in_memory
if self.in_memory:
self.features = {}
def __getstate__(self):
state = self.__dict__
if self.db_type == 'lmdb':
state["db_txn"] = None
return state
def __setstate__(self, state):
self.__dict__ = state
if self.db_type == 'lmdb':
env = lmdb.open(self.db_path, subdir=os.path.isdir(self.db_path),
readonly=True, lock=False,
readahead=False, meminit=False,
map_size=1099511627776 * 2,)
self.db_txn = env.begin(write=False)
def get(self, key):
if self.in_memory and key in self.features:
# We save f_input because we want to save the
# compressed bytes to save memory
f_input = self.features[key]
elif self.db_type == 'lmdb':
byteflow = self.db_txn.get(key.encode())
f_input = byteflow
elif self.db_type == 'pth':
f_input = self.feat_file[key]
elif self.db_type == 'h5':
f_input = h5py.File(self.db_path, 'r')[key]
else:
f_input = open(os.path.join(self.db_path, key + self.ext), 'rb').read()
if self.in_memory and key not in self.features:
self.features[key] = f_input
# load image
feat = self.loader(f_input)
return feat
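# Illustrative usage sketch (the path and key are assumptions, not from the
# repo): with a directory of per-image .npz files, HybridLoader reads
# <db_path>/<key><ext> and returns the decoded numpy array.
#   att_loader = HybridLoader('data/cocotalk_att', '.npz')
#   att_feat = att_loader.get('391895')  # -> (num_regions, feat_dim) ndarray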
class CaptionDataset(data.Dataset):
def get_vocab_size(self):
return self.vocab_size
def get_vocab(self):
return self.ix_to_word
def get_seq_length(self):
return self.seq_length
def __init__(self, opt):
self.opt = opt
self.seq_per_img = opt.seq_per_img
# feature related options
self.use_fc = getattr(opt, 'use_fc', True)
self.use_att = getattr(opt, 'use_att', True)
self.use_box = getattr(opt, 'use_box', 0)
self.norm_att_feat = getattr(opt, 'norm_att_feat', 0)
self.norm_box_feat = getattr(opt, 'norm_box_feat', 0)
# load the json file which contains additional information about the dataset
print('DataLoader loading json file: ', opt.input_json)
self.info = json.load(open(self.opt.input_json))
if 'ix_to_word' in self.info:
self.ix_to_word = self.info['ix_to_word']
self.vocab_size = len(self.ix_to_word)
print('vocab size is ', self.vocab_size)
# open the hdf5 file
print('DataLoader loading h5 file: ', opt.input_fc_dir, opt.input_att_dir, opt.input_box_dir, opt.input_label_h5)
"""
Setting input_label_h5 to none is used when only doing generation.
For example, when you need to test on coco test set.
"""
if self.opt.input_label_h5 != 'none':
self.h5_label_file = h5py.File(self.opt.input_label_h5, 'r', driver='core')
# load in the sequence data
seq_size = self.h5_label_file['labels'].shape
self.label = self.h5_label_file['labels'][:]
self.seq_length = seq_size[1]
print('max sequence length in data is', self.seq_length)
# load the pointers in full to RAM (should be small enough)
self.label_start_ix = self.h5_label_file['label_start_ix'][:]
self.label_end_ix = self.h5_label_file['label_end_ix'][:]
else:
self.seq_length = 1
self.data_in_memory = getattr(opt, 'data_in_memory', False)
self.fc_loader = HybridLoader(self.opt.input_fc_dir, '.npy', in_memory=self.data_in_memory)
self.att_loader = HybridLoader(self.opt.input_att_dir, '.npz', in_memory=self.data_in_memory)
self.box_loader = HybridLoader(self.opt.input_box_dir, '.npy', in_memory=self.data_in_memory)
self.num_images = len(self.info['images']) # self.label_start_ix.shape[0]
print('read %d image features' %(self.num_images))
# separate out indexes for each of the provided splits
self.split_ix = {'train': [], 'val': [], 'test': []}
for ix in range(len(self.info['images'])):
img = self.info['images'][ix]
if not 'split' in img:
self.split_ix['train'].append(ix)
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif img['split'] == 'train':
self.split_ix['train'].append(ix)
elif img['split'] == 'val':
self.split_ix['val'].append(ix)
elif img['split'] == 'test':
self.split_ix['test'].append(ix)
elif opt.train_only == 0: # restval
self.split_ix['train'].append(ix)
print('assigned %d images to split train' %len(self.split_ix['train']))
print('assigned %d images to split val' %len(self.split_ix['val']))
print('assigned %d images to split test' %len(self.split_ix['test']))
def get_captions(self, ix, seq_per_img):
# fetch the sequence labels
ix1 = self.label_start_ix[ix] - 1 #label_start_ix starts from 1
ix2 = self.label_end_ix[ix] - 1
ncap = ix2 - ix1 + 1 # number of captions available for this image
assert ncap > 0, 'an image does not have any label. this can be handled but right now isn\'t'
if ncap < seq_per_img:
# we need to subsample (with replacement)
seq = np.zeros([seq_per_img, self.seq_length], dtype = 'int')
for q in range(seq_per_img):
ixl = random.randint(ix1,ix2)
seq[q, :] = self.label[ixl, :self.seq_length]
else:
ixl = random.randint(ix1, ix2 - seq_per_img + 1)
seq = self.label[ixl: ixl + seq_per_img, :self.seq_length]
return seq
def collate_func(self, batch):
seq_per_img = self.seq_per_img
fc_batch = []
att_batch = []
label_batch = []
wrapped = False
infos = []
gts = []
for sample in batch:
# fetch image
tmp_fc, tmp_att, tmp_seq, \
ix = sample
fc_batch.append(tmp_fc)
att_batch.append(tmp_att)
tmp_label = np.zeros([seq_per_img, self.seq_length + 2], dtype = 'int')
if hasattr(self, 'h5_label_file'):
# if there is ground truth
tmp_label[:, 1 : self.seq_length + 1] = tmp_seq
label_batch.append(tmp_label)
# Used for reward evaluation
if hasattr(self, 'h5_label_file'):
# if there is ground truth
gts.append(self.label[self.label_start_ix[ix] - 1: self.label_end_ix[ix]])
else:
gts.append([])
# record associated info as well
info_dict = {}
info_dict['ix'] = ix
info_dict['id'] = self.info['images'][ix]['id']
info_dict['file_path'] = self.info['images'][ix].get('file_path', '')
infos.append(info_dict)
# #sort by att_feat length
# fc_batch, att_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, np.vsplit(label_batch, batch_size), gts, infos), key=lambda x: len(x[1]), reverse=True))
fc_batch, att_batch, label_batch, gts, infos = \
zip(*sorted(zip(fc_batch, att_batch, label_batch, gts, infos), key=lambda x: 0, reverse=True))
data = {}
data['fc_feats'] = np.stack(fc_batch)
# merge att_feats
max_att_len = max([_.shape[0] for _ in att_batch])
data['att_feats'] = np.zeros([len(att_batch), max_att_len, att_batch[0].shape[1]], dtype = 'float32')
for i in range(len(att_batch)):
data['att_feats'][i, :att_batch[i].shape[0]] = att_batch[i]
data['att_masks'] = np.zeros(data['att_feats'].shape[:2], dtype='float32')
for i in range(len(att_batch)):
data['att_masks'][i, :att_batch[i].shape[0]] = 1
# set att_masks to None if attention features have same length
if data['att_masks'].sum() == data['att_masks'].size:
data['att_masks'] = None
data['labels'] = np.vstack(label_batch)
# generate mask
nonzeros = np.array(list(map(lambda x: (x != 0).sum()+2, data['labels'])))
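# the +2 covers the leading BOS slot and one end-of-sequence position, so the
# stop token is also included in the loss mask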
mask_batch = np.zeros([data['labels'].shape[0], self.seq_length + 2], dtype = 'float32')
for ix, row in enumerate(mask_batch):
row[:nonzeros[ix]] = 1
data['masks'] = mask_batch
data['labels'] = data['labels'].reshape(len(batch), seq_per_img, -1)
data['masks'] = data['masks'].reshape(len(batch), seq_per_img, -1)
data['gts'] = gts # all ground truth captions of each image
data['infos'] = infos
data = {k:torch.from_numpy(v) if type(v) is np.ndarray else v for k,v in data.items()} # Turn all ndarray to torch tensor
return data
def __getitem__(self, ix):
"""This function returns a tuple that is further passed to collate_fn
"""
if self.use_att:
att_feat = self.att_loader.get(str(self.info['images'][ix]['id']))
# Reshape to K x C
att_feat = att_feat.reshape(-1, att_feat.shape[-1])
if self.norm_att_feat:
att_feat = att_feat / np.linalg.norm(att_feat, 2, 1, keepdims=True)
if self.use_box:
box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
# divided by image width and height
x1,y1,x2,y2 = np.hsplit(box_feat, 4)
h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
if self.norm_box_feat:
box_feat = box_feat / np.linalg.norm(box_feat, 2, 1, keepdims=True)
att_feat = np.hstack([att_feat, box_feat])
# sort the features by the size of boxes
att_feat = np.stack(sorted(att_feat, key=lambda x:x[-1], reverse=True))
else:
att_feat = np.zeros((0,0), dtype='float32')
if self.use_fc:
try:
fc_feat = self.fc_loader.get(str(self.info['images'][ix]['id']))
except:
# Use average of attention when there is no fc provided (For bottomup feature)
fc_feat = att_feat.mean(0)
else:
fc_feat = np.zeros((0), dtype='float32')
if hasattr(self, 'h5_label_file'):
seq = self.get_captions(ix, self.seq_per_img)
else:
seq = None
return (fc_feat,
att_feat, seq,
ix)
def __len__(self):
return len(self.info['images'])
if __name__ == '__main__':
from captioning.utils.misc import pickle_load
x = pickle_load(open('log_trans/infos_trans.pkl', 'rb'))
dataset = CaptionDataset(x['opt'])
ds = torch.utils.data.Subset(dataset, dataset.split_ix['train'])
import pudb; pudb.set_trace()
|
connect-caption-and-trace-main
|
captioning/data/pth_loader.py
|
connect-caption-and-trace-main
|
captioning/data/__init__.py
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import os
import numpy as np
import random
import torch
import skimage
import skimage.io
import scipy.misc
from torchvision import transforms as trn
preprocess = trn.Compose([
#trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
from ..utils.resnet_utils import myResnet
from ..utils import resnet
class DataLoaderRaw():
def __init__(self, opt):
self.opt = opt
self.coco_json = opt.get('coco_json', '')
self.folder_path = opt.get('folder_path', '')
self.batch_size = opt.get('batch_size', 1)
self.seq_per_img = 1
# Load resnet
self.cnn_model = opt.get('cnn_model', 'resnet101')
self.my_resnet = getattr(resnet, self.cnn_model)()
self.my_resnet.load_state_dict(torch.load('./data/imagenet_weights/'+self.cnn_model+'.pth'))
self.my_resnet = myResnet(self.my_resnet)
self.my_resnet.cuda()
self.my_resnet.eval()
# load the json file which contains additional information about the dataset
print('DataLoaderRaw loading images from folder: ', self.folder_path)
self.files = []
self.ids = []
print(len(self.coco_json))
if len(self.coco_json) > 0:
print('reading from ' + opt.coco_json)
# read in filenames from the coco-style json file
self.coco_annotation = json.load(open(self.coco_json))
for k,v in enumerate(self.coco_annotation['images']):
fullpath = os.path.join(self.folder_path, v['file_name'])
self.files.append(fullpath)
self.ids.append(v['id'])
else:
# read in all the filenames from the folder
print('listing all images in directory ' + self.folder_path)
def isImage(f):
supportedExt = ['.jpg','.JPG','.jpeg','.JPEG','.png','.PNG','.ppm','.PPM']
for ext in supportedExt:
start_idx = f.rfind(ext)
if start_idx >= 0 and start_idx + len(ext) == len(f):
return True
return False
n = 1
for root, dirs, files in os.walk(self.folder_path, topdown=False):
for file in files:
fullpath = os.path.join(self.folder_path, file)
if isImage(fullpath):
self.files.append(fullpath)
self.ids.append(str(n)) # just order them sequentially
n = n + 1
self.N = len(self.files)
print('DataLoaderRaw found ', self.N, ' images')
self.iterator = 0
# Nasty
self.dataset = self # to fix the bug in eval
def get_batch(self, split, batch_size=None):
batch_size = batch_size or self.batch_size
# pick an index of the datapoint to load next
fc_batch = np.ndarray((batch_size, 2048), dtype = 'float32')
att_batch = np.ndarray((batch_size, 14, 14, 2048), dtype = 'float32')
max_index = self.N
wrapped = False
infos = []
for i in range(batch_size):
ri = self.iterator
ri_next = ri + 1
if ri_next >= max_index:
ri_next = 0
wrapped = True
# wrap back around
self.iterator = ri_next
img = skimage.io.imread(self.files[ri])
if len(img.shape) == 2:
img = img[:,:,np.newaxis]
img = np.concatenate((img, img, img), axis=2)
img = img[:,:,:3].astype('float32')/255.0
img = torch.from_numpy(img.transpose([2,0,1])).cuda()
img = preprocess(img)
with torch.no_grad():
tmp_fc, tmp_att = self.my_resnet(img)
fc_batch[i] = tmp_fc.data.cpu().float().numpy()
att_batch[i] = tmp_att.data.cpu().float().numpy()
info_struct = {}
info_struct['id'] = self.ids[ri]
info_struct['file_path'] = self.files[ri]
infos.append(info_struct)
data = {}
data['fc_feats'] = fc_batch
data['att_feats'] = att_batch.reshape(batch_size, -1, 2048)
data['labels'] = np.zeros([batch_size, 0])
data['masks'] = None
data['att_masks'] = None
data['bounds'] = {'it_pos_now': self.iterator, 'it_max': self.N, 'wrapped': wrapped}
data['infos'] = infos
data = {k:torch.from_numpy(v) if type(v) is np.ndarray else v for k,v in data.items()} # Turn all ndarray to torch tensor
return data
def reset_iterator(self, split):
self.iterator = 0
def get_vocab_size(self):
return len(self.ix_to_word)
def get_vocab(self):
return self.ix_to_word
|
connect-caption-and-trace-main
|
captioning/data/dataloaderraw.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import lmdb
import os
import numpy as np
import numpy.random as npr
import random
import torch
import torch.utils.data as data
import multiprocessing
import six
class HybridLoader:
"""
If db_path is a directory, use normal file loading.
If it is an lmdb file, load from lmdb.
The loading method depends on the extension.
in_memory: if in_memory is True, we keep all the features in memory.
For individual .npy/.npz files we don't need to, because the OS file
cache already does this for us. Should be useful for lmdb or h5.
(Copied this idea from vilbert)
"""
def __init__(self, db_path, ext, in_memory=False):
self.db_path = db_path
self.ext = ext
if self.ext == '.npy':
self.loader = lambda x: np.load(six.BytesIO(x))
else:
def load_npz(x):
x = np.load(six.BytesIO(x))
return x['feat'] if 'feat' in x else x['arr_0'] # normally the key is 'feat', but some archives were saved with numpy's default 'arr_0' key instead.
self.loader = load_npz
if db_path.endswith('.lmdb'):
self.db_type = 'lmdb'
self.env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
elif db_path.endswith('.pth'): # Assume a key,value dictionary
self.db_type = 'pth'
self.feat_file = torch.load(db_path)
self.loader = lambda x: x
print('HybridLoader: ext is ignored')
elif db_path.endswith('h5'):
self.db_type = 'h5'
self.loader = lambda x: np.array(x).astype('float32')
else:
self.db_type = 'dir'
self.in_memory = in_memory
if self.in_memory:
self.features = {}
def get(self, key):
if self.in_memory and key in self.features:
# We save f_input because we want to save the
# compressed bytes to save memory
f_input = self.features[key]
elif self.db_type == 'lmdb':
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(key.encode())
f_input = byteflow
elif self.db_type == 'pth':
f_input = self.feat_file[key]
elif self.db_type == 'h5':
f_input = h5py.File(self.db_path, 'r')[key]
else:
f_input = open(os.path.join(self.db_path, key + self.ext), 'rb').read()
if self.in_memory and key not in self.features:
self.features[key] = f_input
# load image
feat = self.loader(f_input)
return feat
class Dataset(data.Dataset):
def get_vocab_size(self):
return self.vocab_size
def get_vocab(self):
return self.ix_to_word
def get_seq_length(self):
return self.seq_length
def __init__(self, opt):
self.opt = opt
self.seq_per_img = opt.seq_per_img
# feature related options
self.use_fc = getattr(opt, 'use_fc', True)
self.use_att = getattr(opt, 'use_att', True)
self.use_box = getattr(opt, 'use_box', 0)
self.use_trace = getattr(opt, 'use_trace', 0)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
self.norm_att_feat = getattr(opt, 'norm_att_feat', 0)
self.norm_box_feat = getattr(opt, 'norm_box_feat', 0)
self.dataset_choice = getattr(opt, 'dataset_choice', 'coco')
self.trace_max_length = getattr(opt, 'trace_max_length', 225)
# load the json file which contains additional information about the dataset
print('DataLoader loading json file: ', opt.input_json)
self.info = json.load(open(self.opt.input_json))
if 'ix_to_word' in self.info:
self.ix_to_word = self.info['ix_to_word']
self.vocab_size = len(self.ix_to_word)
print('vocab size is ', self.vocab_size)
# open the hdf5 file
print('DataLoader loading h5 file: ', opt.input_fc_dir, opt.input_att_dir, opt.input_box_dir, opt.input_label_h5)
"""
Setting input_label_h5 to none is used when only doing generation.
For example, when you need to test on coco test set.
"""
if self.opt.input_label_h5 != 'none':
self.h5_label_file = h5py.File(self.opt.input_label_h5, 'r', driver='core')
# load in the sequence data
seq_size = self.h5_label_file['labels'].shape
self.label = self.h5_label_file['labels'][:]
self.seq_length = seq_size[1]
print('max sequence length in data is', self.seq_length)
# load the pointers in full to RAM (should be small enough)
self.label_start_ix = self.h5_label_file['label_start_ix'][:]
self.label_end_ix = self.h5_label_file['label_end_ix'][:]
else:
self.seq_length = 1
self.data_in_memory = getattr(opt, 'data_in_memory', False)
self.fc_loader = HybridLoader(self.opt.input_fc_dir, '.npy', in_memory=self.data_in_memory)
self.att_loader = HybridLoader(self.opt.input_att_dir, '.npz', in_memory=self.data_in_memory)
self.box_loader = HybridLoader(self.opt.input_box_dir, '.npy', in_memory=self.data_in_memory)
self.trace_loader = HybridLoader(self.opt.input_trace_dir, '.npy', in_memory=self.data_in_memory)
self.trace_class_label_loader = HybridLoader(self.opt.input_trace_class_label_dir, '.npy', in_memory=self.data_in_memory)
self.trace_feat_loader = HybridLoader(self.opt.input_trace_feat_dir, '.npy', in_memory=self.data_in_memory)
self.dataset_choice = getattr(opt, 'dataset_choice', 'coco')
self.num_images = len(self.info['images']) # self.label_start_ix.shape[0]
print('read %d image features' %(self.num_images))
# separate out indexes for each of the provided splits
self.split_ix = {'train': [], 'val': [], 'test': []}
for ix in range(len(self.info['images'])):
img = self.info['images'][ix]
if not 'split' in img:
self.split_ix['train'].append(ix)
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif img['split'] == 'train':
self.split_ix['train'].append(ix)
elif img['split'] == 'val' and (self.dataset_choice in ['coco', 'ade20k']): #
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif img['split'] == 'test' and (self.dataset_choice in ['coco', 'ade20k']):
pass
elif img['split'] == 'val' and self.dataset_choice in ['flk30k', 'openimg']: #
pass
elif img['split'] == 'test' and self.dataset_choice in ['flk30k', 'openimg']: #
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif opt.train_only == 0: # restval
self.split_ix['train'].append(ix)
print('assigned %d images to split train' %len(self.split_ix['train']))
print('assigned %d images to split val' %len(self.split_ix['val']))
print('assigned %d images to split test' %len(self.split_ix['test']))
def get_captions(self, ix, seq_per_img):
# fetch the sequence labels
ix1 = self.label_start_ix[ix] - 1 #label_start_ix starts from 1
ix2 = self.label_end_ix[ix] - 1
ncap = ix2 - ix1 + 1 # number of captions available for this image
assert ncap > 0, 'an image does not have any label. this can be handled but right now isn\'t'
if ncap < seq_per_img:
# we need to subsample (with replacement)
seq = np.zeros([seq_per_img, self.seq_length], dtype = 'int')
for q in range(seq_per_img):
ixl = random.randint(ix1,ix2)
seq[q, :] = self.label[ixl, :self.seq_length]
else:
ixl = random.randint(ix1, ix2 - seq_per_img + 1)
seq = self.label[ixl: ixl + seq_per_img, :self.seq_length]
return seq
def collate_func(self, batch, split):
seq_per_img = self.seq_per_img
fc_batch = []
att_batch = []
label_batch = []
trace_batch = []
box_batch = []
wrapped = False
infos = []
gts = []
for sample in batch:
# fetch image
tmp_fc, tmp_att, tmp_trace, tmp_box, tmp_seq, \
ix, it_pos_now, tmp_wrapped = sample
if tmp_wrapped:
wrapped = True
fc_batch.append(tmp_fc)
att_batch.append(tmp_att)
trace_batch.append(tmp_trace)
box_batch.append(tmp_box)
tmp_label = np.zeros([seq_per_img, self.seq_length + 2], dtype = 'int')
if hasattr(self, 'h5_label_file'):
# if there is ground truth
tmp_label[:, 1 : self.seq_length + 1] = tmp_seq
label_batch.append(tmp_label)
# Used for reward evaluation
if hasattr(self, 'h5_label_file'):
# if there is ground truth
gts.append(self.label[self.label_start_ix[ix] - 1: self.label_end_ix[ix]])
else:
gts.append([])
# record associated info as well
info_dict = {}
info_dict['ix'] = ix
info_dict['id'] = self.info['images'][ix]['id']
info_dict['file_path'] = self.info['images'][ix].get('file_path', '')
infos.append(info_dict)
# #sort by att_feat length
# fc_batch, att_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, np.vsplit(label_batch, batch_size), gts, infos), key=lambda x: len(x[1]), reverse=True))
# commented for classification
# fc_batch, att_batch, trace_batch, box_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, trace_batch, box_batch, label_batch, gts, infos), key=lambda x: 0, reverse=True))
data = {}
data['fc_feats'] = np.stack(fc_batch)
# merge att_feats
max_att_len = max([_.shape[0] for _ in att_batch])
data['att_feats'] = np.zeros([len(att_batch), max_att_len, att_batch[0].shape[1]], dtype = 'float32')
data['box_feats'] = np.zeros([len(box_batch), max_att_len, box_batch[0].shape[1]], dtype='float32')
assert att_batch[0].shape[0] == box_batch[0].shape[0], 'box should have same shape[0] with att'
for i in range(len(att_batch)):
data['att_feats'][i, :att_batch[i].shape[0]] = att_batch[i]
data['box_feats'][i, :box_batch[i].shape[0]] = box_batch[i]
data['att_masks'] = np.zeros(data['att_feats'].shape[:2], dtype='float32')
for i in range(len(att_batch)):
data['att_masks'][i, :att_batch[i].shape[0]] = 1
# merge trace_feats
max_trace_len = max([_.shape[0] for _ in trace_batch])
data['trace_feats'] = np.zeros([len(trace_batch), max_trace_len, trace_batch[0].shape[1]], dtype='float32')
for i in range(len(trace_batch)):
data['trace_feats'][i, :trace_batch[i].shape[0]] = trace_batch[i]
data['trace_masks'] = np.zeros(data['trace_feats'].shape[:2], dtype='float32')
for i in range(len(trace_batch)):
data['trace_masks'][i, :trace_batch[i].shape[0]] = 1
data['labels'] = np.vstack(label_batch)
# generate mask
nonzeros = np.array(list(map(lambda x: (x != 0).sum()+2, data['labels'])))
mask_batch = np.zeros([data['labels'].shape[0], self.seq_length + 2], dtype = 'float32')
for ix, row in enumerate(mask_batch):
row[:nonzeros[ix]] = 1
data['masks'] = mask_batch
data['labels'] = data['labels'].reshape(len(batch), seq_per_img, -1)
data['masks'] = data['masks'].reshape(len(batch), seq_per_img, -1)
data['gts'] = gts # all ground truth captions of each image
data['bounds'] = {'it_pos_now': it_pos_now, # the it_pos_now of the last sample
'it_max': len(self.split_ix[split]), 'wrapped': wrapped}
data['infos'] = infos
data = {k:torch.from_numpy(v) if type(v) is np.ndarray else v for k,v in data.items()} # Turn all ndarray to torch tensor
return data
def __getitem__(self, index):
"""This function returns a tuple that is further passed to collate_fn
"""
ix, it_pos_now, wrapped = index #self.split_ix[index]
if self.use_trace:
trace_feat = self.trace_loader.get(str(self.info['images'][ix]['id'])).astype('float32')[:self.trace_max_length]
# for classification
# trace_class_label = self.trace_class_label_loader.get(str(self.info['images'][ix]['id'])).astype('float32') + 1 # do -1 when using in loss
# trace_feat = np.concatenate([np.reshape(trace_class_label, [-1,1]), trace_feat], 1)
### for using grid level feature, commented for using trace box feature
if self.use_trace_feat:
trace_grid_feat = self.trace_feat_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# for gird feature with 14*14, 2048+2
# trace_grid_feat = np.reshape(trace_grid_feat, [14*14, 2048])
# grid_resolution = 14
# grid_x = torch.arange(grid_resolution).unsqueeze(0).repeat(grid_resolution, 1).view(-1).unsqueeze(1)
# grid_y = torch.arange(grid_resolution).unsqueeze(1).repeat(1, grid_resolution).view(-1).unsqueeze(1)
# trace_grid_feat = np.concatenate([trace_grid_feat, grid_x, grid_y], 1)
# if self.use_trace_feat: # then concat trace_feat
# grid_resolution = 14 # currently it's 14*14 grid
# trace_grid_feat = self.trace_feat_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# trace_grid_feat = np.transpose(np.reshape(trace_grid_feat, [2048, -1]))
# tmp_trace_center_xy = np.stack([(trace_feat[:, 0]+trace_feat[:, 2])/2,
# (trace_feat[:, 1]+trace_feat[:, 3])/2], 1)
# tmp_trace_center_xy = np.clip(tmp_trace_center_xy, 0., 1.)
# tmp_trace_grid_idx = np.clip(np.floor(tmp_trace_center_xy[:,1]*grid_resolution), 0, grid_resolution-1)*grid_resolution + \
# np.clip(np.floor(tmp_trace_center_xy[:,0]*grid_resolution), 0, grid_resolution-1)
# trace_grid_feat = trace_grid_feat[tmp_trace_grid_idx.astype(int), :] # [T, 1024/2048]
# extend the trace_feat by sigma=0.1
# sigma = 0.1
# trace_feat[:, 0] = trace_feat[:, 0] - sigma
# trace_feat[:, 1] = trace_feat[:, 1] - sigma
# trace_feat[:, 2] = trace_feat[:, 2] + sigma
# trace_feat[:, 3] = trace_feat[:, 3] + sigma
# trace_feat = np.clip(trace_feat, 0., 1.)
# trace_feat[:, 4] = (trace_feat[:,2] - trace_feat[:,0]) * (trace_feat[:,3] - trace_feat[:,1])
if self.use_trace_feat:
# concat location and grid feat
trace_feat = np.concatenate([trace_feat, trace_grid_feat], 1)
else:
trace_feat = np.zeros((0, 0), dtype='float32')
if self.use_att:
att_feat = self.att_loader.get(str(self.info['images'][ix]['id']))
# Reshape to K x C
att_feat = att_feat.reshape(-1, att_feat.shape[-1])
if self.norm_att_feat:
att_feat = att_feat / np.linalg.norm(att_feat, 2, 1, keepdims=True)
if self.use_box:
box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
if self.dataset_choice == 'flk30k':
x1, y1, x2, y2, w, h = np.hsplit(box_feat, 6)
box_feat = np.hstack((x1, y1, x2, y2, (x2 - x1) * (y2 - y1)))
elif self.dataset_choice == 'ade20k' or self.dataset_choice == 'openimg': # 4-d
x1, y1, x2, y2 = np.hsplit(box_feat, 4)
box_feat = np.hstack((x1, y1, x2, y2, (x2 - x1) * (y2 - y1)))
else:
# divided by image width and height
x1,y1,x2,y2 = np.hsplit(box_feat, 4)
h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
else:
box_feat = np.zeros((0, 0), dtype='float32')
# if self.use_box:
# box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
# # devided by image width and height
# x1,y1,x2,y2 = np.hsplit(box_feat, 4)
# h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
# box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
# if self.norm_box_feat:
# box_feat = box_feat / np.linalg.norm(box_feat, 2, 1, keepdims=True)
# att_feat = np.hstack([att_feat, box_feat])
# # sort the features by the size of boxes
# att_feat = np.stack(sorted(att_feat, key=lambda x:x[-1], reverse=True))
else:
att_feat = np.zeros((0,0), dtype='float32')
if self.use_fc:
try:
fc_feat = self.fc_loader.get(str(self.info['images'][ix]['id']))
except:
# Use average of attention when there is no fc provided (For bottomup feature)
fc_feat = att_feat.mean(0)
else:
fc_feat = np.zeros((0), dtype='float32')
if hasattr(self, 'h5_label_file'):
seq = self.get_captions(ix, self.seq_per_img)
else:
seq = None
return (fc_feat,
att_feat, trace_feat, box_feat, seq,
ix, it_pos_now, wrapped)
def __len__(self):
return len(self.info['images'])
class DataLoader:
def __init__(self, opt):
self.opt = opt
self.batch_size = self.opt.batch_size
self.dataset = Dataset(opt)
# Initialize loaders and iters
self.loaders, self.iters = {}, {}
for split in ['train', 'val', 'test']:
if split == 'train':
sampler = MySampler(self.dataset.split_ix[split], shuffle=True, wrap=True)
else:
sampler = MySampler(self.dataset.split_ix[split], shuffle=False, wrap=False)
self.loaders[split] = data.DataLoader(dataset=self.dataset,
batch_size=self.batch_size,
sampler=sampler,
pin_memory=True,
num_workers=4, # 4 is usually enough
collate_fn=lambda x: self.dataset.collate_func(x, split),
drop_last=False)
self.iters[split] = iter(self.loaders[split])
def get_batch(self, split):
try:
data = next(self.iters[split])
except StopIteration:
self.iters[split] = iter(self.loaders[split])
data = next(self.iters[split])
return data
def reset_iterator(self, split):
self.loaders[split].sampler._reset_iter()
self.iters[split] = iter(self.loaders[split])
def get_vocab_size(self):
return self.dataset.get_vocab_size()
@property
def vocab_size(self):
return self.get_vocab_size()
def get_vocab(self):
return self.dataset.get_vocab()
def get_seq_length(self):
return self.dataset.get_seq_length()
@property
def seq_length(self):
return self.get_seq_length()
def state_dict(self):
def get_prefetch_num(split):
if self.loaders[split].num_workers > 0:
return (self.iters[split]._send_idx - self.iters[split]._rcvd_idx) * self.batch_size
else:
return 0
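# the loader workers have already prefetched (_send_idx - _rcvd_idx) batches
# beyond what the training loop has consumed, so the sampler position saved in
# the checkpoint is rolled back by that many samples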
return {split: loader.sampler.state_dict(get_prefetch_num(split)) \
for split, loader in self.loaders.items()}
def load_state_dict(self, state_dict=None):
if state_dict is None:
return
for split in self.loaders.keys():
self.loaders[split].sampler.load_state_dict(state_dict[split])
class MySampler(data.sampler.Sampler):
def __init__(self, index_list, shuffle, wrap):
self.index_list = index_list
self.shuffle = shuffle
self.wrap = wrap
# if wrap is True, StopIteration is never raised (the sampler restarts instead)
# wrap=True is used during training, wrap=False during evaluation
self._reset_iter()
def __iter__(self):
return self
def __next__(self):
wrapped = False
if self.iter_counter == len(self._index_list):
self._reset_iter()
if self.wrap:
wrapped = True
else:
raise StopIteration()
if len(self._index_list) == 0: # overflow when 0 samples
return None
elem = (self._index_list[self.iter_counter], self.iter_counter+1, wrapped)
self.iter_counter += 1
return elem
def next(self):
return self.__next__()
def _reset_iter(self):
if self.shuffle:
rand_perm = npr.permutation(len(self.index_list))
self._index_list = [self.index_list[_] for _ in rand_perm]
else:
self._index_list = self.index_list
self.iter_counter = 0
def __len__(self):
return len(self.index_list)
def load_state_dict(self, state_dict=None):
if state_dict is None:
return
self._index_list = state_dict['index_list']
self.iter_counter = state_dict['iter_counter']
def state_dict(self, prefetched_num=None):
prefetched_num = prefetched_num or 0
return {
'index_list': self._index_list,
'iter_counter': self.iter_counter - prefetched_num
}
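# Illustrative sketch (not in the original repo): each element MySampler yields
# is an (index_into_dataset, position_in_epoch, wrapped) tuple, which is what
# Dataset.__getitem__ above unpacks.
#   s = MySampler([10, 11, 12], shuffle=False, wrap=False)
#   next(s)  # -> (10, 1, False)
#   next(s)  # -> (11, 2, False)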
|
connect-caption-and-trace-main
|
captioning/data/dataloader.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import lmdb
import os
import numpy as np
import numpy.random as npr
import random
import torch
import torch.utils.data as data
import multiprocessing
import six
class HybridLoader:
"""
If db_path is a directory, use normal file loading.
If it is an lmdb file, load from lmdb.
The loading method depends on the extension.
in_memory: if in_memory is True, we keep all the features in memory.
For individual .npy/.npz files we don't need to, because the OS file
cache already does this for us. Should be useful for lmdb or h5.
(Copied this idea from vilbert)
"""
def __init__(self, db_path, ext, in_memory=False):
self.db_path = db_path
self.ext = ext
if self.ext == '.npy':
self.loader = lambda x: np.load(six.BytesIO(x))
else:
def load_npz(x):
x = np.load(six.BytesIO(x))
return x['feat'] if 'feat' in x else x['z'] # normally it should be 'feat', but under cocotest_bu, the key is saved to be 'z' mistakenly.
self.loader = load_npz
if db_path.endswith('.lmdb'):
self.db_type = 'lmdb'
self.env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
elif db_path.endswith('.pth'): # Assume a key,value dictionary
self.db_type = 'pth'
self.feat_file = torch.load(db_path)
self.loader = lambda x: x
print('HybridLoader: ext is ignored')
elif db_path.endswith('h5'):
self.db_type = 'h5'
self.loader = lambda x: np.array(x).astype('float32')
else:
self.db_type = 'dir'
self.in_memory = in_memory
if self.in_memory:
self.features = {}
def get(self, key):
if self.in_memory and key in self.features:
# We save f_input because we want to save the
# compressed bytes to save memory
f_input = self.features[key]
elif self.db_type == 'lmdb':
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(key.encode())
f_input = byteflow
elif self.db_type == 'pth':
f_input = self.feat_file[key]
elif self.db_type == 'h5':
f_input = h5py.File(self.db_path, 'r')[key]
else:
f_input = open(os.path.join(self.db_path, key + self.ext), 'rb').read()
if self.in_memory and key not in self.features:
self.features[key] = f_input
# load image
feat = self.loader(f_input)
return feat
class Dataset(data.Dataset):
def get_vocab_size(self):
return self.vocab_size
def get_vocab(self):
return self.ix_to_word
def get_seq_length(self):
return self.seq_length
def __init__(self, opt):
self.opt = opt
self.seq_per_img = opt.seq_per_img
# feature related options
self.use_fc = getattr(opt, 'use_fc', True)
self.use_att = getattr(opt, 'use_att', True)
self.use_box = getattr(opt, 'use_box', 0)
self.use_trace = getattr(opt, 'use_trace', 0)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
self.norm_att_feat = getattr(opt, 'norm_att_feat', 0)
self.norm_box_feat = getattr(opt, 'norm_box_feat', 0)
# load the json file which contains additional information about the dataset
print('DataLoader loading json file: ', opt.input_json)
self.info = json.load(open(self.opt.input_json))
self.union_vocab_json = json.load(open('./data/coco_LN_union_vocab.json'))
if 'ix_to_word' in self.info:
self.ix_to_word = self.union_vocab_json['ix_to_word'] #self.info['ix_to_word']
self.vocab_size = len(self.ix_to_word)
print('vocab size is ', self.vocab_size)
# open the hdf5 file
print('DataLoader loading h5 file: ', opt.input_fc_dir, opt.input_att_dir, opt.input_box_dir, opt.input_label_h5)
"""
Setting input_label_h5 to none is used when only doing generation.
For example, when you need to test on coco test set.
"""
if self.opt.input_label_h5 != 'none':
### for show-control-tell dataset on coco caption
self.show_coco_h5_label_file = h5py.File('data/coco_in_LN_vocab_labels.h5', 'r', driver='core')
show_seq_size = self.show_coco_h5_label_file['labels'].shape
self.show_seq_length = show_seq_size[1]
self.show_label = self.show_coco_h5_label_file['labels'][:]
self.show_label_start_ix = self.show_coco_h5_label_file['label_start_ix'][:]
self.show_label_end_ix = self.show_coco_h5_label_file['label_end_ix'][:]
##############################################
self.h5_label_file = h5py.File(self.opt.input_label_h5, 'r', driver='core')
# load in the sequence data
seq_size = self.h5_label_file['labels'].shape
self.label = self.h5_label_file['labels'][:]
self.seq_length = seq_size[1]
print('max sequence length in data is', self.seq_length)
# load the pointers in full to RAM (should be small enough)
self.label_start_ix = self.h5_label_file['label_start_ix'][:]
self.label_end_ix = self.h5_label_file['label_end_ix'][:]
else:
self.seq_length = 1
self.data_in_memory = getattr(opt, 'data_in_memory', False)
self.fc_loader = HybridLoader(self.opt.input_fc_dir, '.npy', in_memory=self.data_in_memory)
self.att_loader = HybridLoader(self.opt.input_att_dir, '.npz', in_memory=self.data_in_memory)
self.box_loader = HybridLoader(self.opt.input_box_dir, '.npy', in_memory=self.data_in_memory)
self.trace_loader = HybridLoader(self.opt.input_trace_dir, '.npy', in_memory=self.data_in_memory)
self.show_trace_loader = HybridLoader('./data/show_control_tell_box_seq', '.npy', in_memory=self.data_in_memory)
self.show_gate_label_loader = HybridLoader('./data/show_control_tell_box_gate_label', '.npy', in_memory=self.data_in_memory)
self.trace_class_label_loader = HybridLoader(self.opt.input_trace_class_label_dir, '.npy', in_memory=self.data_in_memory)
self.trace_feat_loader = HybridLoader(self.opt.input_trace_feat_dir, '.npy', in_memory=self.data_in_memory)
self.num_images = len(self.info['images']) # self.label_start_ix.shape[0]
print('read %d image features' %(self.num_images))
# separate out indexes for each of the provided splits
self.split_ix = {'train': [], 'val': [], 'test': []}
### load the Kaparthy split
tmp_cocotalk_json = json.load(open('/home/zihang/Research/Localized_Narratives/ImageCaptioning.pytorch/data/cocotalk.json'))
for ix in range(len(tmp_cocotalk_json['images'])):
img = tmp_cocotalk_json['images'][ix]
if not 'split' in img:
self.split_ix['train'].append(ix)
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif img['split'] == 'train':
self.split_ix['train'].append(ix)
elif img['split'] == 'val': #
pass
elif img['split'] == 'test':
self.split_ix['val'].append(ix) ###zihang
self.split_ix['test'].append(ix)
#self.split_ix['test'].append(ix) ###zihang
#self.split_ix['test'].append(ix) ###zihang
elif opt.train_only == 0: # restval
self.split_ix['train'].append(ix)
# for ix in range(len(self.info['images'])):
# img = self.info['images'][ix]
# if not 'split' in img:
# self.split_ix['train'].append(ix)
# self.split_ix['val'].append(ix)
# self.split_ix['test'].append(ix)
# elif img['split'] == 'train':
# self.split_ix['train'].append(ix)
# elif img['split'] == 'val': #
# pass
# elif img['split'] == 'test':
# self.split_ix['val'].append(ix) ###zihang
# self.split_ix['test'].append(ix)
# #self.split_ix['test'].append(ix) ###zihang
# #self.split_ix['test'].append(ix) ###zihang
# elif opt.train_only == 0: # restval
# self.split_ix['train'].append(ix)
print('assigned %d images to split train' %len(self.split_ix['train']))
print('assigned %d images to split val' %len(self.split_ix['val']))
print('assigned %d images to split test' %len(self.split_ix['test']))
def get_captions(self, ix, seq_per_img):
# fetch the sequence labels
ix1 = self.label_start_ix[ix] - 1 #label_start_ix starts from 1
ix2 = self.label_end_ix[ix] - 1
ncap = ix2 - ix1 + 1 # number of captions available for this image
assert ncap > 0, 'an image does not have any label. this can be handled but right now isn\'t'
if ncap < seq_per_img:
# we need to subsample (with replacement)
seq = np.zeros([seq_per_img, self.seq_length], dtype = 'int')
for q in range(seq_per_img):
ixl = random.randint(ix1,ix2)
seq[q, :] = self.label[ixl, :self.seq_length]
else:
ixl = random.randint(ix1, ix2 - seq_per_img + 1)
seq = self.label[ixl: ixl + seq_per_img, :self.seq_length]
return seq
def get_captions_show(self, ix, seq_per_img):
# fetch the sequence labels
ix1 = self.show_label_start_ix[ix] - 1 # label_start_ix starts from 1
ix2 = self.show_label_end_ix[ix] - 1
ncap = ix2 - ix1 + 1 # number of captions available for this image
assert ncap > 0, 'an image does not have any label. this can be handled but right now isn\'t'
# if ncap < seq_per_img:
# # we need to subsample (with replacement)
# seq = np.zeros([seq_per_img, self.show_seq_length], dtype='int')
# for q in range(seq_per_img):
# ixl = random.randint(ix1, ix2)
# seq[q, :] = self.show_label[ixl, :self.show_seq_length]
# else:
# ixl = random.randint(ix1, ix2 - seq_per_img + 1)
# seq = self.show_label[ixl: ixl + seq_per_img, :self.show_seq_length]
### zihang: temporarily load all captions for the image
ixl = ix1 # get first 5 instead of random
seq = self.show_label[ixl: ixl + seq_per_img, :self.show_seq_length]
# seq = self.show_label[ix1:ix2+1, :self.show_seq_length]
return seq
def collate_func(self, batch, split):
seq_per_img = self.seq_per_img
fc_batch = []
att_batch = []
label_batch = []
trace_batch = []
box_batch = []
show_trace_feat_batch = []
show_label_batch = []
show_gate_label_batch = []
wrapped = False
infos = []
gts = []
for sample in batch:
# fetch image
tmp_fc, tmp_att, tmp_trace, tmp_box, tmp_seq, \
ix, it_pos_now, tmp_wrapped, tmp_show_seq, tmp_show_trace_feat, tmp_show_gate_label_orig = sample
if tmp_wrapped:
wrapped = True
fc_batch.append(tmp_fc)
att_batch.append(tmp_att)
trace_batch.append(tmp_trace)
box_batch.append(tmp_box)
# show-control-tell
for tmp_i in range(tmp_show_trace_feat.shape[0]):
show_trace_feat_batch.append(tmp_show_trace_feat[tmp_i]) # append the trace feats of one caption sentence
tmp_label = np.zeros([seq_per_img, self.seq_length + 2], dtype = 'int')
if hasattr(self, 'h5_label_file'):
# if there is ground truth
tmp_label[:, 1 : self.seq_length + 1] = tmp_seq
label_batch.append(tmp_label)
tmp_show_label = np.zeros([5, self.show_seq_length + 2], dtype='int')
tmp_show_label[:, 1: self.show_seq_length + 1] = tmp_show_seq
show_label_batch.append(tmp_show_label)
# for gate
tmp_show_gate_label = np.zeros([5, self.show_seq_length + 2], dtype='int')
tmp_show_gate_label[:, 1: self.show_seq_length + 1] = tmp_show_gate_label_orig[:5, :self.show_seq_length]
show_gate_label_batch.append(tmp_show_gate_label)
# Used for reward evaluation
if hasattr(self, 'h5_label_file'):
# if there is ground truth
gts.append(self.label[self.label_start_ix[ix] - 1: self.label_end_ix[ix]])
else:
gts.append([])
# record associated info as well
info_dict = {}
info_dict['ix'] = ix
info_dict['id'] = self.info['images'][ix]['id']
info_dict['file_path'] = self.info['images'][ix].get('file_path', '')
infos.append(info_dict)
# #sort by att_feat length
# fc_batch, att_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, np.vsplit(label_batch, batch_size), gts, infos), key=lambda x: len(x[1]), reverse=True))
# commented for classification
# fc_batch, att_batch, trace_batch, box_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, trace_batch, box_batch, label_batch, gts, infos), key=lambda x: 0, reverse=True))
data = {}
data['fc_feats'] = np.stack(fc_batch)
# merge att_feats
max_att_len = max([_.shape[0] for _ in att_batch])
data['att_feats'] = np.zeros([len(att_batch), max_att_len, att_batch[0].shape[1]], dtype = 'float32')
data['box_feats'] = np.zeros([len(box_batch), max_att_len, box_batch[0].shape[1]], dtype='float32')
assert att_batch[0].shape[0] == box_batch[0].shape[0], 'box should have same shape[0] with att'
for i in range(len(att_batch)):
data['att_feats'][i, :att_batch[i].shape[0]] = att_batch[i]
data['box_feats'][i, :box_batch[i].shape[0]] = box_batch[i]
data['att_masks'] = np.zeros(data['att_feats'].shape[:2], dtype='float32')
for i in range(len(att_batch)):
data['att_masks'][i, :att_batch[i].shape[0]] = 1
# set att_masks to None if attention features have same length #commented by zihang
# if data['att_masks'].sum() == data['att_masks'].size:
# data['att_masks'] = None
# merge trace_feats
max_trace_len = max([_.shape[0] for _ in trace_batch])
data['trace_feats'] = np.zeros([len(trace_batch), max_trace_len, trace_batch[0].shape[1]], dtype='float32')
for i in range(len(trace_batch)):
data['trace_feats'][i, :trace_batch[i].shape[0]] = trace_batch[i]
data['trace_masks'] = np.zeros(data['trace_feats'].shape[:2], dtype='float32')
for i in range(len(trace_batch)):
data['trace_masks'][i, :trace_batch[i].shape[0]] = 1
# set trace_masks to None if attention features have same length #commented by zihang
# if data['trace_masks'].sum() == data['trace_masks'].size:
# data['trace_masks'] = None
# merge show-control-tell trace feats
max_trace_len = max([_.shape[0] for _ in show_trace_feat_batch])
data['show_trace_feats'] = np.zeros([len(show_trace_feat_batch), max_trace_len, show_trace_feat_batch[0].shape[1]], dtype='float32')
for i in range(len(show_trace_feat_batch)):
data['show_trace_feats'][i, :show_trace_feat_batch[i].shape[0]] = show_trace_feat_batch[i]
data['show_trace_masks'] = np.zeros(data['show_trace_feats'].shape[:2], dtype='float32')
for i in range(len(show_trace_feat_batch)):
data['show_trace_masks'][i, :show_trace_feat_batch[i].shape[0]] = 1
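        # trace steps padded with negative coordinates are treated as invalid:
        # zero out their mask entries, then clamp the remaining box coordinates into [0, 1]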
for i in range(data['show_trace_feats'].shape[0]):
for j in range(data['show_trace_feats'].shape[1]):
if data['show_trace_feats'][i,j,0] < 0:
data['show_trace_masks'][i, j] = 0
data['show_trace_feats'] = np.clip(data['show_trace_feats'], 0., 1.)
data['labels'] = np.vstack(label_batch)
data['show_labels'] = np.expand_dims(np.vstack(show_label_batch), 1)
data['show_gate_labels'] = np.expand_dims(np.vstack(show_gate_label_batch), 1)
# generate mask
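        # each label row is self.seq_length + 2 wide, with the caption placed in columns 1..seq_length;
        # counting the nonzero tokens and adding 2 lets the mask cover the caption plus the
        # surrounding start/end slots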
nonzeros = np.array(list(map(lambda x: (x != 0).sum()+2, data['labels'])))
mask_batch = np.zeros([data['labels'].shape[0], self.seq_length + 2], dtype = 'float32')
for ix, row in enumerate(mask_batch):
row[:nonzeros[ix]] = 1
data['masks'] = mask_batch
data['labels'] = data['labels'].reshape(len(batch), seq_per_img, -1)
data['masks'] = data['masks'].reshape(len(batch), seq_per_img, -1)
# generate mask for show-control-tell
nonzeros = np.array(list(map(lambda x: (x != 0).sum() + 2, data['show_labels'])))
mask_batch = np.zeros([data['show_labels'].shape[0], self.show_seq_length + 2], dtype='float32')
for ix, row in enumerate(mask_batch):
row[:nonzeros[ix]] = 1
data['show_masks'] = np.expand_dims(mask_batch, 1)
        data['gts'] = gts # all ground truth captions of each image
data['bounds'] = {'it_pos_now': it_pos_now, # the it_pos_now of the last sample
'it_max': len(self.split_ix[split]), 'wrapped': wrapped}
# print('In dataloader', len(self.split_ix[split]), split, infos)###zihang
data['infos'] = infos
data = {k:torch.from_numpy(v) if type(v) is np.ndarray else v for k,v in data.items()} # Turn all ndarray to torch tensor
return data
def __getitem__(self, index):
"""This function returns a tuple that is further passed to collate_fn
"""
ix, it_pos_now, wrapped = index #self.split_ix[index]
show_trace_feat = self.show_trace_loader.get(str(self.info['images'][ix]['id'])).astype('float32')[:5]
show_gate_label = self.show_gate_label_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# print(show_trace_feat.shape, ix)
try:
assert show_trace_feat.shape[2] == 5
except:
print(show_trace_feat.shape, ix)
if self.use_trace:
trace_feat = self.trace_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# for classification
# trace_class_label = self.trace_class_label_loader.get(str(self.info['images'][ix]['id'])).astype('float32') + 1 # do -1 when using in loss
# trace_feat = np.concatenate([np.reshape(trace_class_label, [-1,1]), trace_feat], 1)
### for using grid level feature, commented for using trace box feature
if self.use_trace_feat:
trace_grid_feat = self.trace_feat_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
                # for grid feature with 14*14, 2048+2
# trace_grid_feat = np.reshape(trace_grid_feat, [14*14, 2048])
# grid_resolution = 14
# grid_x = torch.arange(grid_resolution).unsqueeze(0).repeat(grid_resolution, 1).view(-1).unsqueeze(1)
# grid_y = torch.arange(grid_resolution).unsqueeze(1).repeat(1, grid_resolution).view(-1).unsqueeze(1)
# trace_grid_feat = np.concatenate([trace_grid_feat, grid_x, grid_y], 1)
# if self.use_trace_feat: # then concat trace_feat
# grid_resolution = 14 # currently it's 14*14 grid
# trace_grid_feat = self.trace_feat_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# trace_grid_feat = np.transpose(np.reshape(trace_grid_feat, [2048, -1]))
# tmp_trace_center_xy = np.stack([(trace_feat[:, 0]+trace_feat[:, 2])/2,
# (trace_feat[:, 1]+trace_feat[:, 3])/2], 1)
# tmp_trace_center_xy = np.clip(tmp_trace_center_xy, 0., 1.)
# tmp_trace_grid_idx = np.clip(np.floor(tmp_trace_center_xy[:,1]*grid_resolution), 0, grid_resolution-1)*grid_resolution + \
# np.clip(np.floor(tmp_trace_center_xy[:,0]*grid_resolution), 0, grid_resolution-1)
# trace_grid_feat = trace_grid_feat[tmp_trace_grid_idx.astype(int), :] # [T, 1024/2048]
# extend the trace_feat by sigma=0.1
# sigma = 0.1
# trace_feat[:, 0] = trace_feat[:, 0] - sigma
# trace_feat[:, 1] = trace_feat[:, 1] - sigma
# trace_feat[:, 2] = trace_feat[:, 2] + sigma
# trace_feat[:, 3] = trace_feat[:, 3] + sigma
# trace_feat = np.clip(trace_feat, 0., 1.)
# trace_feat[:, 4] = (trace_feat[:,2] - trace_feat[:,0]) * (trace_feat[:,3] - trace_feat[:,1])
if self.use_trace_feat:
# concat location and grid feat
trace_feat = np.concatenate([trace_feat, trace_grid_feat], 1)
else:
trace_feat = np.zeros((0, 0), dtype='float32')
if self.use_att:
att_feat = self.att_loader.get(str(self.info['images'][ix]['id']))
# Reshape to K x C
att_feat = att_feat.reshape(-1, att_feat.shape[-1])
if self.norm_att_feat:
att_feat = att_feat / np.linalg.norm(att_feat, 2, 1, keepdims=True)
if self.use_box: # zihang updated version
box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
                # divided by image width and height
x1,y1,x2,y2 = np.hsplit(box_feat, 4)
h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
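                # box_feat is now [num_boxes, 5]: (x1, y1, x2, y2) normalized by image size,
                # plus the box area as a fraction of the image area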
else:
box_feat = np.zeros((0, 0), dtype='float32')
# if self.use_box:
# box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
# # devided by image width and height
# x1,y1,x2,y2 = np.hsplit(box_feat, 4)
# h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
# box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
# if self.norm_box_feat:
# box_feat = box_feat / np.linalg.norm(box_feat, 2, 1, keepdims=True)
# att_feat = np.hstack([att_feat, box_feat])
# # sort the features by the size of boxes
# att_feat = np.stack(sorted(att_feat, key=lambda x:x[-1], reverse=True))
else:
att_feat = np.zeros((0,0), dtype='float32')
if self.use_fc:
try:
fc_feat = self.fc_loader.get(str(self.info['images'][ix]['id']))
except:
# Use average of attention when there is no fc provided (For bottomup feature)
fc_feat = att_feat.mean(0)
else:
fc_feat = np.zeros((0), dtype='float32')
if hasattr(self, 'h5_label_file'):
seq = self.get_captions(ix, self.seq_per_img)
seq_show = self.get_captions_show(ix, 5)
        else:
            seq = None
            # keep the returned tuple consistent when no ground-truth labels are available
            seq_show = None
return (fc_feat,
att_feat, trace_feat, box_feat, seq,
ix, it_pos_now, wrapped, seq_show, show_trace_feat, show_gate_label)
def __len__(self):
return len(self.info['images'])
class DataLoader:
def __init__(self, opt):
self.opt = opt
self.batch_size = self.opt.batch_size
self.dataset = Dataset(opt)
# Initialize loaders and iters
self.loaders, self.iters = {}, {}
for split in ['train', 'val', 'test']:
if split == 'train':
sampler = MySampler(self.dataset.split_ix[split], shuffle=True, wrap=True)
else:
sampler = MySampler(self.dataset.split_ix[split], shuffle=False, wrap=False)
self.loaders[split] = data.DataLoader(dataset=self.dataset,
batch_size=self.batch_size,
sampler=sampler,
pin_memory=True,
num_workers=4, # 4 is usually enough
collate_fn=lambda x: self.dataset.collate_func(x, split),
drop_last=False)
self.iters[split] = iter(self.loaders[split])
def get_batch(self, split):
# print('In Dataloader, get_batch', split)###zihang
try:
data = next(self.iters[split])
except StopIteration:
self.iters[split] = iter(self.loaders[split])
data = next(self.iters[split])
return data
def reset_iterator(self, split):
self.loaders[split].sampler._reset_iter()
self.iters[split] = iter(self.loaders[split])
def get_vocab_size(self):
return self.dataset.get_vocab_size()
@property
def vocab_size(self):
return self.get_vocab_size()
def get_vocab(self):
return self.dataset.get_vocab()
def get_seq_length(self):
return self.dataset.get_seq_length()
@property
def seq_length(self):
return self.get_seq_length()
def state_dict(self):
def get_prefetch_num(split):
if self.loaders[split].num_workers > 0:
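                # _send_idx - _rcvd_idx counts batches already dispatched to worker processes
                # but not yet consumed; multiplying by batch_size approximates the number of
                # prefetched samples (this relies on private fields of the DataLoader iterator)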
return (self.iters[split]._send_idx - self.iters[split]._rcvd_idx) * self.batch_size
else:
return 0
return {split: loader.sampler.state_dict(get_prefetch_num(split)) \
for split, loader in self.loaders.items()}
def load_state_dict(self, state_dict=None):
if state_dict is None:
return
for split in self.loaders.keys():
self.loaders[split].sampler.load_state_dict(state_dict[split])
class MySampler(data.sampler.Sampler):
def __init__(self, index_list, shuffle, wrap):
self.index_list = index_list
self.shuffle = shuffle
self.wrap = wrap
        # if wrap is True, StopIteration is never raised (the sampler restarts itself);
        # wrap=True is used during training, wrap=False during validation/test
self._reset_iter()
def __iter__(self):
return self
def __next__(self):
wrapped = False
if self.iter_counter == len(self._index_list):
self._reset_iter()
if self.wrap:
wrapped = True
else:
raise StopIteration()
if len(self._index_list) == 0: # overflow when 0 samples
return None
elem = (self._index_list[self.iter_counter], self.iter_counter+1, wrapped)
self.iter_counter += 1
return elem
def next(self):
return self.__next__()
def _reset_iter(self):
if self.shuffle:
rand_perm = npr.permutation(len(self.index_list))
self._index_list = [self.index_list[_] for _ in rand_perm]
else:
self._index_list = self.index_list
self.iter_counter = 0
def __len__(self):
return len(self.index_list)
def load_state_dict(self, state_dict=None):
if state_dict is None:
return
self._index_list = state_dict['index_list']
self.iter_counter = state_dict['iter_counter']
def state_dict(self, prefetched_num=None):
prefetched_num = prefetched_num or 0
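        # rewind the counter by the samples that were prefetched by workers but never
        # consumed, so that resuming from this state replays them instead of skipping them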
return {
'index_list': self._index_list,
'iter_counter': self.iter_counter - prefetched_num
}
|
connect-caption-and-trace-main
|
captioning/data/dataloader_show_control_tell.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import base64
import numpy as np
import csv
import sys
import zlib
import time
import mmap
import argparse
parser = argparse.ArgumentParser()
# output_dir
parser.add_argument('--downloaded_feats', default='data/bu_data', help='downloaded feature directory')
parser.add_argument('--output_dir', default='data/cocobu', help='output feature files')
args = parser.parse_args()
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']
infiles = ['trainval/karpathy_test_resnet101_faster_rcnn_genome.tsv',
'trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv',\
'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.0', \
'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.1']
os.makedirs(args.output_dir+'_att', exist_ok=True)
os.makedirs(args.output_dir+'_fc', exist_ok=True)
os.makedirs(args.output_dir+'_box', exist_ok=True)
for infile in infiles:
print('Reading ' + infile)
with open(os.path.join(args.downloaded_feats, infile), "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['num_boxes'] = int(item['num_boxes'])
for field in ['boxes', 'features']:
                # base64.decodestring was removed in Python 3.9; decodebytes is the drop-in replacement
                item[field] = np.frombuffer(base64.decodebytes(item[field].encode('ascii')),
                        dtype=np.float32).reshape((item['num_boxes'],-1))
np.savez_compressed(os.path.join(args.output_dir+'_att', str(item['image_id'])), feat=item['features'])
np.save(os.path.join(args.output_dir+'_fc', str(item['image_id'])), item['features'].mean(0))
np.save(os.path.join(args.output_dir+'_box', str(item['image_id'])), item['boxes'])
|
connect-caption-and-trace-main
|
scripts/make_bu_data.py
|
"""
Preprocess a raw json dataset into feature files for use in data_loader.py
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}
This script reads this json, does some basic preprocessing on the captions
(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays
Output: two folders of features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
from six.moves import cPickle
import numpy as np
import torch
import torchvision.models as models
import skimage.io
from torchvision import transforms as trn
preprocess = trn.Compose([
#trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
from captioning.utils.resnet_utils import myResnet
import captioning.utils.resnet as resnet
def main(params):
net = getattr(resnet, params['model'])()
net.load_state_dict(torch.load(os.path.join(params['model_root'],params['model']+'.pth')))
my_resnet = myResnet(net)
my_resnet.cuda()
my_resnet.eval()
imgs = json.load(open(params['input_json'], 'r'))
imgs = imgs['images']
N = len(imgs)
seed(123) # make reproducible
dir_fc = params['output_dir']+'_fc'
dir_att = params['output_dir']+'_att'
if not os.path.isdir(dir_fc):
os.mkdir(dir_fc)
if not os.path.isdir(dir_att):
os.mkdir(dir_att)
for i,img in enumerate(imgs):
# load the image
I = skimage.io.imread(os.path.join(params['images_root'], img['filepath'], img['filename']))
# handle grayscale input images
if len(I.shape) == 2:
I = I[:,:,np.newaxis]
I = np.concatenate((I,I,I), axis=2)
I = I.astype('float32')/255.0
I = torch.from_numpy(I.transpose([2,0,1])).cuda()
I = preprocess(I)
with torch.no_grad():
tmp_fc, tmp_att = my_resnet(I, params['att_size'])
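            # tmp_fc: pooled global feature vector; tmp_att: att_size x att_size spatial
            # feature map (see myResnet in captioning.utils.resnet_utils)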
# write to pkl
np.save(os.path.join(dir_fc, str(img['cocoid'])), tmp_fc.data.cpu().float().numpy())
np.savez_compressed(os.path.join(dir_att, str(img['cocoid'])), feat=tmp_att.data.cpu().float().numpy())
if i % 1000 == 0:
print('processing %d/%d (%.2f%% done)' % (i, N, i*100.0/N))
print('wrote ', params['output_dir'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--output_dir', default='data', help='output h5 file')
# options
parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json')
parser.add_argument('--att_size', default=14, type=int, help='14x14 or 7x7')
parser.add_argument('--model', default='resnet101', type=str, help='resnet101, resnet152')
parser.add_argument('--model_root', default='./data/imagenet_weights', type=str, help='model root')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent = 2))
main(params)
|
connect-caption-and-trace-main
|
scripts/prepro_feats.py
|
# coding: utf-8
"""
Create a reference json file used for evaluation with `coco-caption` repo.
Used when reference json is not provided, (e.g., flickr30k, or you have your own split of train/val/test)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
import sys
import hashlib
from random import shuffle, seed
def main(params):
imgs = json.load(open(params['input_json'][0], 'r'))['images']
# tmp = []
# for k in imgs.keys():
# for img in imgs[k]:
# img['filename'] = img['image_id'] # k+'/'+img['image_id']
# img['image_id'] = int(
# int(hashlib.sha256(img['image_id']).hexdigest(), 16) % sys.maxint)
# tmp.append(img)
# imgs = tmp
# create output json file
    out = {
        'info': {
            'description': 'This is stable 1.0 version of the 2014 MS COCO dataset.',
            'url': 'http://mscoco.org',
            'version': '1.0',
            'year': 2014,
            'contributor': 'Microsoft COCO group',
            'date_created': '2015-01-27 09:11:52.357475'},
        'licenses': [
            {'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'},
            {'url': 'http://creativecommons.org/licenses/by-nc/2.0/', 'id': 2, 'name': 'Attribution-NonCommercial License'},
            {'url': 'http://creativecommons.org/licenses/by-nc-nd/2.0/', 'id': 3, 'name': 'Attribution-NonCommercial-NoDerivs License'},
            {'url': 'http://creativecommons.org/licenses/by/2.0/', 'id': 4, 'name': 'Attribution License'},
            {'url': 'http://creativecommons.org/licenses/by-sa/2.0/', 'id': 5, 'name': 'Attribution-ShareAlike License'},
            {'url': 'http://creativecommons.org/licenses/by-nd/2.0/', 'id': 6, 'name': 'Attribution-NoDerivs License'},
            {'url': 'http://flickr.com/commons/usage/', 'id': 7, 'name': 'No known copyright restrictions'},
            {'url': 'http://www.usa.gov/copyright.shtml', 'id': 8, 'name': 'United States Government Work'}],
        'type': 'captions'}
out.update({'images': [], 'annotations': []})
cnt = 0
empty_cnt = 0
for i, img in enumerate(imgs):
if img['split'] == 'train':
continue
out['images'].append(
{'id': img.get('cocoid', img['imgid'])})
for j, s in enumerate(img['sentences']):
            if len(s['tokens']) == 0:
continue
s = ' '.join(s['tokens'])
out['annotations'].append(
{'image_id': out['images'][-1]['id'], 'caption': s, 'id': cnt})
cnt += 1
json.dump(out, open(params['output_json'], 'w'))
print('wrote ', params['output_json'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', nargs='+', required=True,
help='input json file to process into hdf5')
parser.add_argument('--output_json', default='data.json',
help='output json file')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent=2))
main(params)
|
connect-caption-and-trace-main
|
scripts/prepro_reference_json.py
|
"""
Precompute ngram counts of captions, to accelerate cider computation during training time.
"""
import os
import json
import argparse
from six.moves import cPickle
import captioning.utils.misc as utils
from collections import defaultdict
import sys
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD_scorer import CiderScorer
def get_doc_freq(refs, params):
tmp = CiderScorer(df_mode="corpus")
for ref in refs:
tmp.cook_append(None, ref)
tmp.compute_doc_freq()
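    # document_frequency maps each n-gram to the number of reference sets (images) that
    # contain it; len(tmp.crefs) is the corpus size, stored later as ref_len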
return tmp.document_frequency, len(tmp.crefs)
def build_dict(imgs, wtoi, params):
wtoi['<eos>'] = 0
count_imgs = 0
refs_words = []
refs_idxs = []
for img in imgs:
if (params['split'] == img['split']) or \
(params['split'] == 'train' and img['split'] == 'restval') or \
(params['split'] == 'all'):
#(params['split'] == 'val' and img['split'] == 'restval') or \
ref_words = []
ref_idxs = []
for sent in img['sentences']:
                if 'bpe' in params:
                    sent['tokens'] = params['bpe'].segment(' '.join(sent['tokens'])).strip().split(' ')
tmp_tokens = sent['tokens'] + ['<eos>']
tmp_tokens = [_ if _ in wtoi else 'UNK' for _ in tmp_tokens]
ref_words.append(' '.join(tmp_tokens))
ref_idxs.append(' '.join([str(wtoi[_]) for _ in tmp_tokens]))
refs_words.append(ref_words)
refs_idxs.append(ref_idxs)
count_imgs += 1
print('total imgs:', count_imgs)
ngram_words, count_refs = get_doc_freq(refs_words, params)
ngram_idxs, count_refs = get_doc_freq(refs_idxs, params)
print('count_refs:', count_refs)
return ngram_words, ngram_idxs, count_refs
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
dict_json = json.load(open(params['dict_json'], 'r'))
itow = dict_json['ix_to_word']
wtoi = {w:i for i,w in itow.items()}
# Load bpe
if 'bpe' in dict_json:
        import tempfile
        import codecs
        from subword_nmt import apply_bpe
        codes_f = tempfile.NamedTemporaryFile(delete=False)
codes_f.close()
with open(codes_f.name, 'w') as f:
f.write(dict_json['bpe'])
with codecs.open(codes_f.name, encoding='UTF-8') as codes:
            bpe = apply_bpe.BPE(codes)
        params['bpe'] = bpe
imgs = imgs['images']
ngram_words, ngram_idxs, ref_len = build_dict(imgs, wtoi, params)
utils.pickle_dump({'document_frequency': ngram_words, 'ref_len': ref_len}, open(params['output_pkl']+'-words.p','wb'))
utils.pickle_dump({'document_frequency': ngram_idxs, 'ref_len': ref_len}, open(params['output_pkl']+'-idxs.p','wb'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', default='data/dataset_coco.json', help='input json file to process into hdf5')
parser.add_argument('--dict_json', default='data/cocotalk.json', help='output json file')
parser.add_argument('--output_pkl', default='data/coco-all', help='output pickle file')
parser.add_argument('--split', default='all', help='test, val, train, all')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
main(params)
|
connect-caption-and-trace-main
|
scripts/prepro_ngrams.py
|
import argparse
import h5py
import os
import numpy as np
import json
from tqdm import tqdm
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
imgs = imgs['images']
N = len(imgs)
if params['fc_input_dir'] is not None:
print('processing fc')
        # open in append mode explicitly (newer h5py versions default to read-only)
        with h5py.File(params['fc_output'], 'a') as file_fc:
for i, img in enumerate(tqdm(imgs)):
npy_fc_path = os.path.join(
params['fc_input_dir'],
str(img['cocoid']) + '.npy')
d_set_fc = file_fc.create_dataset(
str(img['cocoid']), data=np.load(npy_fc_path))
file_fc.close()
if params['att_input_dir'] is not None:
print('processing att')
        with h5py.File(params['att_output'], 'a') as file_att:
for i, img in enumerate(tqdm(imgs)):
npy_att_path = os.path.join(
params['att_input_dir'],
str(img['cocoid']) + '.npz')
d_set_att = file_att.create_dataset(
str(img['cocoid']),
data=np.load(npy_att_path)['feat'])
file_att.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--fc_output', default='data', help='output h5 filename for fc')
parser.add_argument('--att_output', default='data', help='output h5 file for att')
parser.add_argument('--fc_input_dir', default=None, help='input directory for numpy fc files')
parser.add_argument('--att_input_dir', default=None, help='input directory for numpy att files')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent=2))
main(params)
|
connect-caption-and-trace-main
|
scripts/dump_to_h5df.py
|
"""
Preprocess a raw json dataset into hdf5/json files for use in data_loader.py
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}
This script reads this json, does some basic preprocessing on the captions
(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays
Output: a json file and an hdf5 file
The hdf5 file contains several fields:
/labels is (M,max_length) uint32 array of encoded labels, zero padded
/label_start_ix and /label_end_ix are (N,) uint32 arrays of pointers to the
first and last indices (in range 1..M) of labels for each image
/label_length stores the length of the sequence for each of the M sequences
The json file has a dict that contains:
- an 'ix_to_word' field storing the vocab in form {ix:'word'}, where ix is 1-indexed
- an 'images' field that is a list holding auxiliary information for each image,
such as in particular the 'split' it was assigned to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
import numpy as np
import torch
import torchvision.models as models
import skimage.io
from PIL import Image
def build_vocab(imgs, params):
count_thr = params['word_count_threshold']
# count up the number of words
counts = {}
for img in imgs:
for sent in img['sentences']:
for w in sent['tokens']:
counts[w] = counts.get(w, 0) + 1
cw = sorted([(count,w) for w,count in counts.items()], reverse=True)
print('top words and their counts:')
print('\n'.join(map(str,cw[:20])))
# print some stats
total_words = sum(counts.values())
print('total words:', total_words)
bad_words = [w for w,n in counts.items() if n <= count_thr]
vocab = [w for w,n in counts.items() if n > count_thr]
bad_count = sum(counts[w] for w in bad_words)
print('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts)))
print('number of words in vocab would be %d' % (len(vocab), ))
print('number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words))
# lets look at the distribution of lengths as well
sent_lengths = {}
for img in imgs:
for sent in img['sentences']:
txt = sent['tokens']
nw = len(txt)
sent_lengths[nw] = sent_lengths.get(nw, 0) + 1
max_len = max(sent_lengths.keys())
print('max length sentence in raw data: ', max_len)
print('sentence length distribution (count, number of words):')
sum_len = sum(sent_lengths.values())
for i in range(max_len+1):
print('%2d: %10d %f%%' % (i, sent_lengths.get(i,0), sent_lengths.get(i,0)*100.0/sum_len))
# lets now produce the final annotations
if bad_count > 0:
# additional special UNK token we will use below to map infrequent words to
print('inserting the special UNK token')
vocab.append('UNK')
for img in imgs:
img['final_captions'] = []
for sent in img['sentences']:
txt = sent['tokens']
caption = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
img['final_captions'].append(caption)
return vocab
def encode_captions(imgs, params, wtoi):
"""
encode all captions into one large array, which will be 1-indexed.
also produces label_start_ix and label_end_ix which store 1-indexed
and inclusive (Lua-style) pointers to the first and last caption for
each image in the dataset.
"""
max_length = params['max_length']
N = len(imgs)
M = sum(len(img['final_captions']) for img in imgs) # total number of captions
label_arrays = []
label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed
label_end_ix = np.zeros(N, dtype='uint32')
label_length = np.zeros(M, dtype='uint32')
caption_counter = 0
counter = 1
for i,img in enumerate(imgs):
n = len(img['final_captions'])
assert n > 0, 'error: some image has no captions'
Li = np.zeros((n, max_length), dtype='uint32')
for j,s in enumerate(img['final_captions']):
label_length[caption_counter] = min(max_length, len(s)) # record the length of this sequence
caption_counter += 1
for k,w in enumerate(s):
if k < max_length:
Li[j,k] = wtoi[w]
# note: word indices are 1-indexed, and captions are padded with zeros
label_arrays.append(Li)
label_start_ix[i] = counter
label_end_ix[i] = counter + n - 1
counter += n
L = np.concatenate(label_arrays, axis=0) # put all the labels together
assert L.shape[0] == M, 'lengths don\'t match? that\'s weird'
assert np.all(label_length > 0), 'error: some caption had no words?'
print('encoded captions to array of size ', L.shape)
return L, label_start_ix, label_end_ix, label_length
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
imgs = imgs['images']
seed(123) # make reproducible
# create the vocab
vocab = build_vocab(imgs, params)
itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
# encode captions in large arrays, ready to ship to hdf5 file
L, label_start_ix, label_end_ix, label_length = encode_captions(imgs, params, wtoi)
# create output h5 file
N = len(imgs)
f_lb = h5py.File(params['output_h5']+'_label.h5', "w")
f_lb.create_dataset("labels", dtype='uint32', data=L)
f_lb.create_dataset("label_start_ix", dtype='uint32', data=label_start_ix)
f_lb.create_dataset("label_end_ix", dtype='uint32', data=label_end_ix)
f_lb.create_dataset("label_length", dtype='uint32', data=label_length)
f_lb.close()
# create output json file
out = {}
out['ix_to_word'] = itow # encode the (1-indexed) vocab
out['images'] = []
for i,img in enumerate(imgs):
jimg = {}
jimg['split'] = img['split']
if 'filename' in img: jimg['file_path'] = os.path.join(img.get('filepath', ''), img['filename']) # copy it over, might need
if 'cocoid' in img:
            jimg['id'] = img['cocoid'] # copy over & maintain an id, if present (e.g. coco ids, useful)
elif 'imgid' in img:
jimg['id'] = img['imgid']
if params['images_root'] != '':
with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
jimg['width'], jimg['height'] = _img.size
out['images'].append(jimg)
json.dump(out, open(params['output_json'], 'w'))
print('wrote ', params['output_json'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--output_json', default='data.json', help='output json file')
parser.add_argument('--output_h5', default='data', help='output h5 file')
parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json')
# options
parser.add_argument('--max_length', default=16, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.')
parser.add_argument('--word_count_threshold', default=5, type=int, help='only words that occur more than this number of times will be put in vocab')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent = 2))
main(params)
|
connect-caption-and-trace-main
|
scripts/prepro_labels.py
|
import torch
import scipy.optimize
import numpy as np
m = 10
pred = torch.rand([10, m, 4])
label = torch.rand([10, m, 4])
def local_OT(D):
p = D.shape[1]; m = D.shape[2]
# construct the cx, ax=b
x = torch.rand([10,m*m])
A = torch.zeros([m+m,m*m])
b = torch.ones([m+m])
for i in range(p):
A[i, (i)*m:(i+1)*m] = 1
for i in range(m):
for j in range(p):
A[p+i, j*m+i] = 1
A_local = torch.zeros([m, m, m])
for i in range(m):
if i+2<=m-1:
A_local[i, i, i+2:] = 1
if i-2 >=0:
A_local[i, i, :i-1] = 1
A_local = A_local.view([m, m*m])
b_local = torch.zeros([m])
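    # A/b encode the equality constraints: every row and every column of the m x m transport
    # plan must sum to 1. A_local/b_local additionally force zero mass outside the band
    # |i - j| <= 1, so the recovered plan stays close to the diagonal ("local" transport).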
A = torch.cat([A, A_local], 0).numpy()
b = torch.cat([b, b_local], 0).numpy()
T_list = []
for i in range(D.shape[0]):
c = D[i].view(-1).detach().cpu().numpy()
sol = scipy.optimize.linprog(c, A_eq = A, b_eq = b, bounds=(0, None))
sol_x = torch.from_numpy(sol.x).view([p,m])
T_list.append(sol_x)
T = torch.stack(T_list, 0)
return (T>0.5).float() # binarize it
T = local_OT(torch.rand([10,20,20]))
print('finish')
|
connect-caption-and-trace-main
|
scripts/my_local_optimal_transport.py
|
# copy from https://github.com/Lyken17/Efficient-PyTorch/tools
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import os, sys
import os.path as osp
from PIL import Image
import six
import string
import lmdb
import pickle
import tqdm
import numpy as np
import argparse
import json
import torch
import torch.utils.data as data
from torch.utils.data import DataLoader
from torchvision.datasets.folder import has_file_allowed_extension  # used by make_dataset below
import csv
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'status']
class FolderLMDB(data.Dataset):
def __init__(self, db_path, fn_list=None):
self.db_path = db_path
self.env = lmdb.open(db_path, subdir=osp.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
if fn_list is not None:
self.length = len(fn_list)
self.keys = fn_list
else:
            raise ValueError('fn_list must be provided')
def __getitem__(self, index):
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(self.keys[index].encode())
# load image
imgbuf = byteflow
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
if args.extension == '.npz':
feat = np.load(buf)['feat']
else:
feat = np.load(buf)
except Exception as e:
print(self.keys[index], e)
return None
return feat
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')'
def make_dataset(dir, extension):
images = []
dir = os.path.expanduser(dir)
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
if has_file_allowed_extension(fname, [extension]):
path = os.path.join(root, fname)
images.append(path)
return images
def raw_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
return bin_data
def raw_npz_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
try:
npz_data = np.load(six.BytesIO(bin_data))['feat']
except Exception as e:
print(path)
npz_data = None
print(e)
return bin_data, npz_data
def raw_npy_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
try:
npy_data = np.load(six.BytesIO(bin_data))
except Exception as e:
print(path)
npy_data = None
print(e)
return bin_data, npy_data
class Folder(data.Dataset):
def __init__(self, root, loader, extension, fn_list=None):
super(Folder, self).__init__()
self.root = root
if fn_list:
samples = [os.path.join(root, str(_)+extension) for _ in fn_list]
else:
samples = make_dataset(self.root, extension)
self.loader = loader
self.extension = extension
self.samples = samples
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (sample, target) where target is class_index of the target class.
"""
path = self.samples[index]
sample = self.loader(path)
return (path.split('/')[-1].split('.')[0],) + sample
def __len__(self):
return len(self.samples)
def folder2lmdb(dpath, fn_list, write_frequency=5000):
directory = osp.expanduser(osp.join(dpath))
print("Loading dataset from %s" % directory)
if args.extension == '.npz':
dataset = Folder(directory, loader=raw_npz_reader, extension='.npz',
fn_list=fn_list)
else:
dataset = Folder(directory, loader=raw_npy_reader, extension='.npy',
fn_list=fn_list)
data_loader = DataLoader(dataset, num_workers=16, collate_fn=lambda x: x)
# lmdb_path = osp.join(dpath, "%s.lmdb" % (directory.split('/')[-1]))
lmdb_path = osp.join("%s.lmdb" % (directory))
isdir = os.path.isdir(lmdb_path)
print("Generate LMDB to %s" % lmdb_path)
db = lmdb.open(lmdb_path, subdir=isdir,
map_size=1099511627776 * 2, readonly=False,
meminit=False, map_async=True)
txn = db.begin(write=True)
tsvfile = open(args.output_file, 'a')
writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
names = []
all_keys = []
for idx, data in enumerate(tqdm.tqdm(data_loader)):
# print(type(data), data)
name, byte, npz = data[0]
if npz is not None:
txn.put(name.encode(), byte)
all_keys.append(name)
names.append({'image_id': name, 'status': str(npz is not None)})
if idx % write_frequency == 0:
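            # commit periodically so the write transaction stays small and progress is
            # recorded in the tsv status file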
print("[%d/%d]" % (idx, len(data_loader)))
print('writing')
txn.commit()
txn = db.begin(write=True)
# write in tsv
for name in names:
writer.writerow(name)
names = []
tsvfile.flush()
print('writing finished')
# write all keys
txn.put("keys".encode(), pickle.dumps(all_keys))
# finish iterating through dataset
txn.commit()
for name in names:
writer.writerow(name)
tsvfile.flush()
tsvfile.close()
print("Flushing database ...")
db.sync()
db.close()
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
# parser.add_argument('--json)
parser.add_argument('--input_json', default='./data/dataset_coco.json', type=str)
parser.add_argument('--output_file', default='.dump_cache.tsv', type=str)
parser.add_argument('--folder', default='./data/cocobu_att', type=str)
parser.add_argument('--extension', default='.npz', type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
global args
args = parse_args()
args.output_file += args.folder.split('/')[-1]
if args.folder.find('/') > 0:
args.output_file = args.folder[:args.folder.rfind('/')+1]+args.output_file
print(args.output_file)
img_list = json.load(open(args.input_json, 'r'))['images']
fn_list = [str(_['cocoid']) for _ in img_list]
found_ids = set()
try:
with open(args.output_file, 'r') as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
if item['status'] == 'True':
found_ids.add(item['image_id'])
except:
pass
fn_list = [_ for _ in fn_list if _ not in found_ids]
folder2lmdb(args.folder, fn_list)
# Test existing.
found_ids = set()
with open(args.output_file, 'r') as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
if item['status'] == 'True':
found_ids.add(item['image_id'])
folder_dataset = FolderLMDB(args.folder+'.lmdb', list(found_ids))
data_loader = DataLoader(folder_dataset, num_workers=16, collate_fn=lambda x: x)
for data in tqdm.tqdm(data_loader):
assert data[0] is not None
|
connect-caption-and-trace-main
|
scripts/dump_to_lmdb.py
|
import numpy as np
import os
import h5py
import numpy as np
import jsonlines
import re
import json
# The first directory should point to the feature files extracted by Detectron; box_only and
# feats_only are the new folders for saving bounding boxes and features (used during training).
i = 0
for f in os.listdir('/mnt/m2/Datasets/ADE20k/full_images_feats/features/'):
i += 1
item = np.load('/mnt/m2/Datasets/ADE20k/full_images_feats/features/'+f)
id = f.split('.jpg')[0]
np.save('/mnt/m2/Datasets/ADE20k/full_images_feats/box_only/'+str(id), item['norm_bb'])
np.savez('/mnt/m2/Datasets/ADE20k/full_images_feats/feats_only/'+str(id), item['box_feats'])
if i % 1000 == 0:
print('Processing #', i)
print('finish!')
|
connect-caption-and-trace-main
|
scripts/prepare_feats_boxes_from_npz.py
|
"""
Preprocess a raw json dataset into hdf5/json files for use in data_loader.lua
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}
This script reads this json, does some basic preprocessing on the captions
(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays
Output: a json file and an hdf5 file
The hdf5 file contains several fields:
/labels is (M,max_length) uint32 array of encoded labels, zero padded
/label_start_ix and /label_end_ix are (N,) uint32 arrays of pointers to the
first and last indices (in range 1..M) of labels for each image
/label_length stores the length of the sequence for each of the M sequences
The json file has a dict that contains:
- an 'ix_to_word' field storing the vocab in form {ix:'word'}, where ix is 1-indexed
- an 'images' field that is a list holding auxiliary information for each image,
such as in particular the 'split' it was assigned to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
import numpy as np
import torch
import torchvision.models as models
import skimage.io
from PIL import Image
import codecs
import tempfile
from subword_nmt import learn_bpe, apply_bpe
# python scripts/build_bpe_subword_nmt.py --input_json data/dataset_coco.json --output_json data/cocotalkbpe.json --output_h5 data/cocotalkbpe
def build_vocab(imgs, params):
# count up the number of words
captions = []
for img in imgs:
for sent in img['sentences']:
captions.append(' '.join(sent['tokens']))
captions='\n'.join(captions)
all_captions = tempfile.NamedTemporaryFile(delete=False)
all_captions.close()
with open(all_captions.name, 'w') as txt_file:
txt_file.write(captions)
#
codecs_output = tempfile.NamedTemporaryFile(delete=False)
codecs_output.close()
with codecs.open(codecs_output.name, 'w', encoding='UTF-8') as output:
learn_bpe.learn_bpe(codecs.open(all_captions.name, encoding='UTF-8'), output, params['symbol_count'])
with codecs.open(codecs_output.name, encoding='UTF-8') as codes:
bpe = apply_bpe.BPE(codes)
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.close()
tmpout = codecs.open(tmp.name, 'w', encoding='UTF-8')
for _, img in enumerate(imgs):
img['final_captions'] = []
for sent in img['sentences']:
txt = ' '.join(sent['tokens'])
txt = bpe.segment(txt).strip()
img['final_captions'].append(txt.split(' '))
tmpout.write(txt)
tmpout.write('\n')
if _ < 20:
print(txt)
tmpout.close()
tmpin = codecs.open(tmp.name, encoding='UTF-8')
vocab = learn_bpe.get_vocabulary(tmpin)
vocab = sorted(vocab.keys(), key=lambda x: vocab[x], reverse=True)
# Always insert UNK
print('inserting the special UNK token')
vocab.append('UNK')
print('Vocab size:', len(vocab))
os.remove(all_captions.name)
with open(codecs_output.name, 'r') as codes:
bpe = codes.read()
os.remove(codecs_output.name)
os.remove(tmp.name)
return vocab, bpe
def encode_captions(imgs, params, wtoi):
"""
encode all captions into one large array, which will be 1-indexed.
also produces label_start_ix and label_end_ix which store 1-indexed
and inclusive (Lua-style) pointers to the first and last caption for
each image in the dataset.
"""
max_length = params['max_length']
N = len(imgs)
M = sum(len(img['final_captions']) for img in imgs) # total number of captions
label_arrays = []
label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed
label_end_ix = np.zeros(N, dtype='uint32')
label_length = np.zeros(M, dtype='uint32')
caption_counter = 0
counter = 1
for i,img in enumerate(imgs):
n = len(img['final_captions'])
assert n > 0, 'error: some image has no captions'
Li = np.zeros((n, max_length), dtype='uint32')
for j,s in enumerate(img['final_captions']):
label_length[caption_counter] = min(max_length, len(s)) # record the length of this sequence
caption_counter += 1
for k,w in enumerate(s):
if k < max_length:
Li[j,k] = wtoi[w]
# note: word indices are 1-indexed, and captions are padded with zeros
label_arrays.append(Li)
label_start_ix[i] = counter
label_end_ix[i] = counter + n - 1
counter += n
L = np.concatenate(label_arrays, axis=0) # put all the labels together
assert L.shape[0] == M, 'lengths don\'t match? that\'s weird'
assert np.all(label_length > 0), 'error: some caption had no words?'
print('encoded captions to array of size ', L.shape)
return L, label_start_ix, label_end_ix, label_length
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
imgs = imgs['images']
seed(123) # make reproducible
# create the vocab
vocab, bpe = build_vocab(imgs, params)
itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
# encode captions in large arrays, ready to ship to hdf5 file
L, label_start_ix, label_end_ix, label_length = encode_captions(imgs, params, wtoi)
# create output h5 file
N = len(imgs)
f_lb = h5py.File(params['output_h5']+'_label.h5', "w")
f_lb.create_dataset("labels", dtype='uint32', data=L)
f_lb.create_dataset("label_start_ix", dtype='uint32', data=label_start_ix)
f_lb.create_dataset("label_end_ix", dtype='uint32', data=label_end_ix)
f_lb.create_dataset("label_length", dtype='uint32', data=label_length)
f_lb.close()
# create output json file
out = {}
out['ix_to_word'] = itow # encode the (1-indexed) vocab
out['images'] = []
out['bpe'] = bpe
for i,img in enumerate(imgs):
jimg = {}
jimg['split'] = img['split']
if 'filename' in img: jimg['file_path'] = os.path.join(img['filepath'], img['filename']) # copy it over, might need
        if 'cocoid' in img: jimg['id'] = img['cocoid'] # copy over & maintain an id, if present (e.g. coco ids, useful)
if params['images_root'] != '':
with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
jimg['width'], jimg['height'] = _img.size
out['images'].append(jimg)
json.dump(out, open(params['output_json'], 'w'))
print('wrote ', params['output_json'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--output_json', default='data.json', help='output json file')
parser.add_argument('--output_h5', default='data', help='output h5 file')
parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json')
# options
parser.add_argument('--max_length', default=16, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.')
    parser.add_argument('--symbol_count', default=10000, type=int, help='number of BPE merge operations (symbols) to learn')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent = 2))
main(params)
|
connect-caption-and-trace-main
|
scripts/build_bpe_subword_nmt.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This is the main script used for training Classy Vision jobs.
This can be used for training on your local machine, using CPU or GPU, and
for distributed training. This script also supports Tensorboard, Visdom and
checkpointing.
Example:
For training locally, simply specify a configuration file and whether
to use CPU or GPU:
$ ./classy_train.py --device gpu --config configs/my_config.json
For distributed training, this can be invoked via
:func:`torch.distributed.launch`. For instance
$ python -m torch.distributed.launch \
--nnodes=1 \
--nproc_per_node=1 \
--master_addr=localhost \
--master_port=29500 \
--use_env \
classy_train.py \
--config=configs/resnet50_synthetic_image_classy_config.json \
--log_freq=100
For other use cases, try
$ ./classy_train.py --help
"""
import logging
import os
from datetime import datetime
from pathlib import Path
import torch
from classy_vision.generic.distributed_util import get_rank, get_world_size
from classy_vision.generic.opts import check_generic_args, parse_train_arguments
from classy_vision.generic.registry_utils import import_all_packages_from_directory
from classy_vision.generic.util import load_json
from classy_vision.hooks import (
CheckpointHook,
LossLrMeterLoggingHook,
ModelComplexityHook,
ProfilerHook,
ProgressBarHook,
TensorboardPlotHook,
VisdomHook,
)
from tasks.biasamp_classification_task import FineTuningTask, build_task
from classy_vision.trainer import DistributedTrainer, LocalTrainer
from torchvision import set_image_backend, set_video_backend
from torch.nn.modules.loss import CrossEntropyLoss
from torch.distributed.elastic.multiprocessing.errors import record
try:
import hydra
import omegaconf
hydra_available = True
except ImportError:
hydra_available = False
@record
def main(args, config):
# Global flags
torch.manual_seed(0)
set_image_backend(args.image_backend)
set_video_backend(args.video_backend)
task = build_task(config)
# Load checkpoint, if available.
if args.checkpoint_load_path:
task.set_checkpoint(args.checkpoint_load_path)
    # Load a checkpoint containing a pre-trained model. This is how we
# implement fine-tuning of existing models.
if args.pretrained_checkpoint_path:
assert isinstance(
task, FineTuningTask
), "Can only use a pretrained checkpoint for fine tuning tasks"
task.set_pretrained_checkpoint(args.pretrained_checkpoint_path)
# Configure hooks to do tensorboard logging, checkpoints and so on.
# `configure_hooks` adds default hooks, while extra hooks can be specified
# in config file and stored in `task.hooks`. Here, we merge them when we
# set the final hooks of the task.
task.set_hooks(configure_hooks(args, config) + task.hooks)
# LocalTrainer is used for a single replica. DistributedTrainer will setup
# training to use PyTorch's DistributedDataParallel.
trainer_class = {"none": LocalTrainer, "ddp": DistributedTrainer}[
args.distributed_backend
]
trainer = trainer_class()
logging.info(
f"Starting training on rank {get_rank()} worker. "
f"World size is {get_world_size()}"
)
# That's it! When this call returns, training is done.
trainer.train(task)
output_folder = Path(args.checkpoint_folder).resolve()
logging.info("Training successful!")
logging.info(f'Results of this training run are available at: "{output_folder}"')
def configure_hooks(args, config):
hooks = [LossLrMeterLoggingHook(args.log_freq),
ModelComplexityHook()]
# Make a folder to store checkpoints and tensorboard logging outputs
suffix = datetime.now().isoformat()
base_folder = Path(__file__).parent / f"hold_output/output_{suffix}"
print('checkpoint folder: ' + args.checkpoint_folder)
if args.checkpoint_folder != "":
base_folder = Path(args.checkpoint_folder)
args.checkpoint_folder = base_folder / "checkpoints"
os.makedirs(args.checkpoint_folder, exist_ok=True)
args.checkpoint_folder = str(args.checkpoint_folder)
logging.info(f"Logging outputs to {base_folder}")
logging.info(f"Logging checkpoints to {args.checkpoint_folder}")
if not args.skip_tensorboard:
try:
from torch.utils.tensorboard import SummaryWriter
os.makedirs(Path(base_folder) / "tensorboard", exist_ok=True)
tb_writer = SummaryWriter(log_dir=Path(base_folder) / "tensorboard")
hooks.append(TensorboardPlotHook(tb_writer))
except ImportError:
logging.warning("tensorboard not installed, skipping tensorboard hooks")
args_dict = vars(args)
args_dict["config"] = config
print('checkpoint folder: ' + args.checkpoint_folder)
hooks.append(
CheckpointHook(
args.checkpoint_folder, args_dict, checkpoint_period=args.checkpoint_period
)
)
if args.profiler:
hooks.append(ProfilerHook())
if args.show_progress:
hooks.append(ProgressBarHook())
if args.visdom_server != "":
hooks.append(VisdomHook(args.visdom_server, args.visdom_port))
return hooks
if hydra_available:
@hydra.main(config_path="hydra_configs", config_name="args")
def hydra_main(cfg):
args = cfg
check_generic_args(cfg)
config = omegaconf.OmegaConf.to_container(cfg.config)
main(args, config)
# run all the things:
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.info("Classy Vision's default training script.")
# This imports all modules in the same directory as classy_train.py
# Because of the way Classy Vision's registration decorators work,
# importing a module has a side effect of registering it with Classy
# Vision. This means you can give classy_train.py a config referencing your
# custom module (e.g. my_dataset) and it'll actually know how to
# instantiate it.
file_root = Path(__file__).parent
import_all_packages_from_directory(file_root)
if hydra_available:
hydra_main()
else:
args = parse_train_arguments()
config = load_json(args.config_file)
main(args, config)
|
cv_bias_amplification-main
|
my-project-release/my-project/classy_train.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the losses/ directory
import_all_modules(FILE_ROOT, "losses")
|
cv_bias_amplification-main
|
my-project-release/my-project/losses/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn.functional as F
from classy_vision.losses import ClassyLoss, register_loss
@register_loss("one_hot_binary_ce_loss")
class OneHotBinaryCELoss(ClassyLoss):
def forward(self, input, target):
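        # target (class indices) is converted to one-hot vectors; num_classes=10 is
        # hard-coded, presumably for a 10-class setup such as CIFAR-10. Note that
        # F.binary_cross_entropy expects `input` to already hold probabilities
        # (e.g. sigmoid outputs); it does not apply a sigmoid itself.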
labels = F.one_hot(target, num_classes=10).float()
return F.binary_cross_entropy(input, labels)
@classmethod
def from_config(cls, config):
# We don't need anything from the config
return cls()
|
cv_bias_amplification-main
|
my-project-release/my-project/losses/one_hot_binary_ce_loss.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import enum
import json
import logging
import math
import multiprocessing as mp
import time
from typing import Any, Dict, List, NamedTuple, Optional, Union
import os
import torch
import torch.nn as nn
from classy_vision.dataset import ClassyDataset, build_dataset
from classy_vision.dataset.transforms.mixup import MixupTransform
from classy_vision.generic.distributed_util import (
all_reduce_mean,
barrier,
init_distributed_data_parallel_model,
is_distributed_training_run,
)
from classy_vision.generic.util import (
Timer,
copy_model_to_gpu,
get_torch_version,
load_and_broadcast_checkpoint,
master_params,
recursive_copy_to_gpu,
split_batchnorm_params,
update_classy_state,
)
from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks
from classy_vision.losses import ClassyLoss, build_loss
from classy_vision.meters import ClassyMeter, build_meters
from classy_vision.models import ClassyModel, build_model
from classy_vision.optim import (
ClassyOptimizer,
build_optimizer,
build_optimizer_schedulers,
)
from classy_vision.optim.zero import ZeRO
from torch.distributed import broadcast
from . import register_task, build_task
from classy_vision.tasks.classy_task import ClassyTask
from classy_vision.tasks.fine_tuning_task import FineTuningTask
try:
import apex
apex_available = True
except ImportError:
apex_available = False
try:
from torch.cuda.amp import GradScaler as TorchGradScaler
except ImportError:
pass
try:
from fairscale.optim.grad_scaler import ShardedGradScaler
fairscale_available = True
except ImportError:
fairscale_available = False
class AmpType(enum.Enum):
# Automatic Mixed Precision supported types
APEX = enum.auto()
PYTORCH = enum.auto()
class BroadcastBuffersMode(enum.Enum):
DISABLED = enum.auto()
# Enable DistributedDataParallel's broadcast_buffers option, synchronizing
# model buffers every forward pass.
FORWARD_PASS = enum.auto()
# Similar to FORWARD_PASS, but only synchronizes model buffers once
# per epoch, between train and test phases. If your motivation for
# synchronizing buffers is for buffers to be consistent during eval, use
# this instead of FORWARD_PASS to reduce training overhead.
BEFORE_EVAL = enum.auto()
class BatchNormSyncMode(enum.Enum):
DISABLED = enum.auto() # No Synchronized Batch Normalization
PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm
APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed
class LastBatchInfo(NamedTuple):
loss: torch.Tensor
output: torch.Tensor
target: torch.Tensor
sample: Dict[str, Any]
step_data: Dict[str, Any]
@register_task("biasamp_classification_task")
class BiasAmpClassificationTask(ClassyTask):
"""Basic classification training task.
    This task encapsulates all of the components and steps needed to
train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.
Assumes a train / test phase per each epoch and that the datasets
have the same API as the map-style Dataset class in
`torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html
#torch.utils.data.Dataset>`_ (in particular, this task makes use of
the len). If you are using an `IterableDataset <https://pytorch.org/docs/
stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task
may be appropriate.
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used
for computing the loss in each forward pass
    :var datasets: Mapping from a ``phase_type`` in ["train", "test"]
to dataset used for training (or testing)
:var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)
to calculate during training
:var num_epochs: Number of epochs (passes over dataset) to train
:var test_only: Used to only run the test phase
:var base_model: Model to be trained, unwrapped in DDP or DP wrappers
:var optimizer: Optimizer used in train step
:var optimizer_schedulers: Dictionary. Key is the name of the optimizer
option (e.g. lr), value is a ClassyParamScheduler
:var checkpoint: Serializable dict which represents state in training
:var phases: List of phase specific information, e.g. if phase is
train / test.
:var hooks: List of hooks to apply during training
:var train: Phase type, if true it means we are training,
false means testing
:var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)
:var phase_idx: Current phase id, first phase is 0, if task has not started
training then returns -1
:var train_phase_idx: Only counts train phases
:var num_updates: Number of total parameter updates applied to model
by the optimizer
:var data_iterator: Iterator which can be used to obtain batches
:var losses: Loss curve
:var perf_log: list of training speed measurements, to be logged
:var clip_grad_norm: maximum gradient norm (default None)
:var simulated_global_batchsize: batch size simulated via gradient accumulation
:var optimizer_period: apply optimizer after this many steps; derived from
simulated_global_batchsize, default 1.
"""
def __init__(self):
"""Constructs a ClassificationTask"""
super().__init__()
self.base_loss = None
self.datasets = {}
self.meters = []
self.num_epochs = 1
self.test_phase_period = 1
self.train_phases_per_epoch = 0
self.test_only = False
self.base_model = None
self.optimizer = None
self.optimizer_schedulers = {}
self.checkpoint_dict = None
self.checkpoint_path = None
self.checkpoint_load_strict = True
self.phases = []
self.hooks = []
self.train = True
self.distributed_model = None
self.distributed_loss = None
self.phase_idx = -1
self.train_phase_idx = -1
self.num_updates = 0
self.dataloader = None
self.data_iterator = None
self.losses = []
self.broadcast_buffers_mode: BroadcastBuffersMode = (
BroadcastBuffersMode.BEFORE_EVAL
)
self.amp_args = None
self.amp_type = None
self.amp_grad_scaler = None
self.mixup_transform = None
self.perf_log = []
self.last_batch = None
self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED
self.find_unused_parameters = False
self.use_gpu = torch.cuda.is_available()
self.dataloader_mp_context = "fork"
self.bn_weight_decay = False
self._train_only = True
self.clip_grad_norm = None
self.simulated_global_batchsize = None
self.optimizer_period = 1
self.ddp_bucket_cap_mb = 25
self.use_sharded_ddp = False
self.fp16_grad_compress = False
def set_use_sharded_ddp(self, use_sharded_ddp: bool):
self.use_sharded_ddp = use_sharded_ddp
if self.use_sharded_ddp:
logging.info("Using Sharded DDP")
return self
def set_use_gpu(self, use_gpu: bool):
self.use_gpu = use_gpu
assert (
not self.use_gpu or torch.cuda.is_available()
), "CUDA required to train on GPUs"
return self
def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
"""Sets maximum gradient norm.
None means gradient clipping is disabled. Defaults to None."""
self.clip_grad_norm = clip_grad_norm
if clip_grad_norm is None:
logging.info("Disabled gradient norm clipping.")
else:
logging.info(
f"Enabled gradient norm clipping with threshold: {clip_grad_norm}"
)
return self
def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]):
"""Sets a simulated batch size by gradient accumulation.
Gradient accumulation adds up gradients from multiple minibatches and
steps the optimizer every N train_steps, where N is optimizer_period.
When enabled, the very last train_steps might end up not updating the
model, depending on the number of total steps. None means gradient
accumulation is disabled. Defaults to None."""
self.simulated_global_batchsize = simulated_global_batchsize
return self
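    # Illustrative note (not in the original source): the optimizer period is
    # later derived in prepare() as
    #     optimizer_period = simulated_global_batchsize // global_batchsize
    # e.g. a hypothetical simulated_global_batchsize of 1024 with a global batch
    # size of 256 steps the optimizer once every 4 train steps; any values where
    # the division is exact behave the same way.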
def set_checkpoint(self, checkpoint_path: str):
"""Sets checkpoint on task.
Args:
checkpoint_path: The path to load the checkpoint from. Can be a file or a
directory. See :func:`load_checkpoint` for more information.
"""
self.checkpoint_path = checkpoint_path
return self
def set_checkpoint_load_strict(self, checkpoint_load_strict: bool):
"""Sets checkpoint on task.
Args:
checkpoint_load_strict: Whether to use load_strict when copying model weights
"""
self.checkpoint_load_strict = checkpoint_load_strict
return self
def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]):
"""Sets the checkpoint dict in the task. Only used for testing.
Args:
checkpoint_dict: A serializable dict representing current task state
"""
self.checkpoint_dict = checkpoint_dict
return self
def set_num_epochs(self, num_epochs: Union[int, float]):
"""Set number of epochs to be run.
Args:
num_epochs: Number of epochs to run task
"""
self.num_epochs = num_epochs
return self
def set_test_phase_period(self, test_phase_period: int):
"""Set the period of test phase.
Args:
test_phase_period: The period of test phase
"""
self.test_phase_period = test_phase_period
return self
def set_dataset(self, dataset: ClassyDataset, phase_type: str):
"""Set dataset for phase type on task
Args:
dataset: ClassyDataset for returning samples.
phase_type: str must be one of "train" or "test"
"""
assert phase_type in [
"train",
"test",
], "phase_type must be in ['train', 'test']"
self.datasets[phase_type] = dataset
if phase_type == "train":
self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1)
else:
self._train_only = False
return self
def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]):
"""Set the multiprocessing context used by the dataloader.
The context can be either 'spawn', 'fork', 'forkserver' or None (uses the
default context). See
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
for more details."""
self.dataloader_mp_context = dataloader_mp_context
return self
def set_optimizer(self, optimizer: ClassyOptimizer):
"""Set optimizer for task
Args:
optimizer: optimizer for task
"""
self.optimizer = optimizer
return self
def set_loss(self, loss: ClassyLoss):
"""Set loss function for task
Args:
loss: loss for task
"""
self.base_loss = loss
return self
def set_meters(self, meters: List["ClassyMeter"]):
"""Set meters for task
Args:
meters: list of meters to compute during training
"""
self.meters = meters
return self
def set_distributed_options(
self,
broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL,
batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED,
batch_norm_sync_group_size: int = 0,
find_unused_parameters: bool = False,
bucket_cap_mb: int = 25,
fp16_grad_compress: bool = False,
):
"""Set distributed options.
Args:
broadcast_buffers_mode: Broadcast buffers mode. See
:class:`BroadcastBuffersMode` for options.
batch_norm_sync_mode: Batch normalization synchronization mode. See
:class:`BatchNormSyncMode` for options.
batch_norm_sync_group_size: Group size to use for synchronized batch norm.
0 means that the stats are synchronized across all replicas. For
efficient synchronization, set it to the number of GPUs in a node (
usually 8).
find_unused_parameters: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
bucket_cap_mb: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
Raises:
RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex
is not installed.
"""
self.broadcast_buffers_mode = broadcast_buffers_mode
if batch_norm_sync_group_size > 0:
if not batch_norm_sync_mode == BatchNormSyncMode.APEX:
# this should ideally work with PyTorch Sync BN as well, but it
# fails while initializing DDP for some reason.
raise ValueError(
"batch_norm_sync_group_size can be > 0 only when "
"Apex Synchronized Batch Normalization is being used."
)
self.batch_norm_sync_group_size = batch_norm_sync_group_size
if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:
logging.info("Synchronized Batch Normalization is disabled")
else:
if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:
raise RuntimeError("apex is not installed")
msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}"
if self.batch_norm_sync_group_size > 0:
msg += f" and group size {batch_norm_sync_group_size}"
logging.info(msg)
self.batch_norm_sync_mode = batch_norm_sync_mode
if find_unused_parameters:
logging.info("Enabling find_unused_parameters in DDP")
self.find_unused_parameters = find_unused_parameters
self.ddp_bucket_cap_mb = bucket_cap_mb
if fp16_grad_compress:
if get_torch_version() < [1, 8]:
raise RuntimeError(
"FP16 grad compression is only supported since PyTorch 1.8"
)
logging.info("Enabling FP16 grad compression")
self.fp16_grad_compress = fp16_grad_compress
return self
def set_hooks(self, hooks: List["ClassyHook"]):
"""Set hooks for task
Args:
hooks: List of hooks to apply during training
"""
from classy_vision.hooks import ClassyHook
assert isinstance(hooks, list)
assert all(isinstance(hook, ClassyHook) for hook in hooks)
assert len({hook.name() for hook in hooks}) == len(
hooks
), "Cannot have repeated hooks of the same class"
# TODO (zyan3): we move checkpoint hook to the end of the list because some hooks
# may change the state of the model, and we want to save changed state in the checkpoint.
        # This is a temporary fix.
non_checkpoint_hooks = [
hook for hook in hooks if not isinstance(hook, CheckpointHook)
]
checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
hooks = non_checkpoint_hooks + checkpoint_hooks
self.hooks = hooks
return self
def set_model(self, model: ClassyModel):
"""Set model for task
Args:
model: Model to be trained
"""
self.base_model = model
return self
def set_test_only(self, test_only: bool):
"""Set test only flag
Args:
test_only: If true, only test phases will be run
"""
self.test_only = test_only
return self
def set_bn_weight_decay(self, bn_weight_decay: bool):
assert type(bn_weight_decay) == bool
self.bn_weight_decay = bn_weight_decay
return self
def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
"""Disable / enable apex.amp and set the automatic mixed precision parameters.
apex.amp can be utilized for mixed / half precision training.
Args:
amp_args: Dictionary containing arguments to be passed to
amp.initialize. Set to None to disable amp. To enable mixed
precision training, pass amp_args={"opt_level": "O1"} here.
See https://nvidia.github.io/apex/amp.html for more info.
Raises:
RuntimeError: If opt_level is not None and apex is not installed.
Warning: apex needs to be installed to utilize this feature.
"""
self.amp_args = amp_args
if amp_args is None:
logging.info("AMP disabled")
else:
# Check that the requested AMP type is known
try:
self.amp_type = AmpType[self.amp_args["amp_type"].upper()]
except KeyError:
logging.info("AMP type not specified, defaulting to Apex")
self.amp_type = AmpType.APEX
# Check for CUDA availability, required for both Apex and Pytorch AMP
if not torch.cuda.is_available():
raise RuntimeError(
"AMP is required but CUDA is not supported, cannot enable AMP"
)
# Check for Apex availability
if self.amp_type == AmpType.APEX and not apex_available:
raise RuntimeError(
"Apex AMP is required but Apex is not installed, cannot enable AMP"
)
if self.use_sharded_ddp:
if self.amp_type == AmpType.APEX:
raise RuntimeError(
"ShardedDDP has been requested, which is incompatible with Apex AMP"
)
if not fairscale_available:
raise RuntimeError(
"ShardedDDP has been requested, but fairscale is not installed in the current environment"
)
            # Set the Torch AMP grad scaler, used to prevent gradient underflow.
            # Note: this must be a plain `if`, not `elif`, so the scaler is also
            # created when ShardedDDP is combined with PyTorch AMP.
            if self.amp_type == AmpType.PYTORCH:
if self.use_sharded_ddp:
logging.info("Using ShardedGradScaler to manage Pytorch AMP")
self.amp_grad_scaler = ShardedGradScaler()
else:
self.amp_grad_scaler = TorchGradScaler()
logging.info(f"AMP enabled with args {amp_args}")
return self
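    # Illustrative note (not in the original source): amp_args is a plain dict.
    # A hypothetical Apex configuration would look like
    #     {"amp_type": "apex", "opt_level": "O1"}
    # while PyTorch-native AMP only needs
    #     {"amp_type": "pytorch"}
    # When "amp_type" is omitted, the code above defaults to Apex.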
def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]):
"""Disable / enable mixup transform for data augmentation
        Args:
mixup_transform: a callable object which performs mixup data augmentation
"""
self.mixup_transform = mixup_transform
if mixup_transform is None:
logging.info("mixup disabled")
else:
logging.info("mixup enabled")
return self
def set_optimizer_schedulers(self, schedulers):
self.optimizer_schedulers = schedulers
return self
@classmethod
    def from_config(cls, config: Dict[str, Any]) -> "BiasAmpClassificationTask":
        """Instantiates a BiasAmpClassificationTask from a configuration.
        Args:
            config: A configuration for a BiasAmpClassificationTask.
                See :func:`__init__` for parameters expected in the config.
        Returns:
            A BiasAmpClassificationTask instance.
        """
test_only = config.get("test_only", False)
if not test_only:
# TODO Make distinction between epochs and phases in optimizer clear
train_phases_per_epoch = config["dataset"]["train"].get(
"phases_per_epoch", 1
)
optimizer_config = config["optimizer"]
optimizer_config["num_epochs"] = (
config["num_epochs"] * train_phases_per_epoch
)
optimizer = build_optimizer(optimizer_config)
param_schedulers = build_optimizer_schedulers(optimizer_config)
datasets = {}
phase_types = ["train", "test"]
for phase_type in phase_types:
if phase_type in config["dataset"]:
datasets[phase_type] = build_dataset(config["dataset"][phase_type])
loss = build_loss(config["loss"])
amp_args = config.get("amp_args")
meters = build_meters(config.get("meters", {}))
model = build_model(config["model"])
mixup_transform = None
if config.get("mixup") is not None:
assert "alpha" in config["mixup"], "key alpha is missing in mixup dict"
mixup_transform = MixupTransform(
config["mixup"]["alpha"],
num_classes=config["mixup"].get("num_classes"),
cutmix_alpha=config["mixup"].get("cutmix_alpha", 0),
cutmix_minmax=config["mixup"].get("cutmix_minmax"),
mix_prob=config["mixup"].get("mix_prob", 1.0),
switch_prob=config["mixup"].get("switch_prob", 0.5),
mode=config["mixup"].get("mode", "batch"),
label_smoothing=config["mixup"].get("label_smoothing", 0.0),
)
# hooks config is optional
hooks_config = config.get("hooks")
hooks = []
if hooks_config is not None:
hooks = build_hooks(hooks_config)
distributed_config = config.get("distributed", {})
distributed_options = {
"broadcast_buffers_mode": BroadcastBuffersMode[
distributed_config.get("broadcast_buffers", "before_eval").upper()
],
"batch_norm_sync_mode": BatchNormSyncMode[
distributed_config.get("batch_norm_sync_mode", "disabled").upper()
],
"batch_norm_sync_group_size": distributed_config.get(
"batch_norm_sync_group_size", 0
),
"find_unused_parameters": distributed_config.get(
"find_unused_parameters", False
),
"bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25),
"fp16_grad_compress": distributed_config.get("fp16_grad_compress", False),
}
task = (
cls()
.set_num_epochs(config["num_epochs"])
.set_test_phase_period(config.get("test_phase_period", 1))
.set_loss(loss)
.set_test_only(test_only)
.set_model(model)
.set_meters(meters)
.set_amp_args(amp_args)
.set_mixup_transform(mixup_transform)
.set_distributed_options(**distributed_options)
.set_hooks(hooks)
.set_bn_weight_decay(config.get("bn_weight_decay", False))
.set_clip_grad_norm(config.get("clip_grad_norm"))
.set_simulated_global_batchsize(config.get("simulated_global_batchsize"))
.set_use_sharded_ddp(config.get("use_sharded_ddp", False))
)
if not test_only:
task.set_optimizer(optimizer)
task.set_optimizer_schedulers(param_schedulers)
use_gpu = config.get("use_gpu")
if use_gpu is not None:
task.set_use_gpu(use_gpu)
for phase_type in datasets:
task.set_dataset(datasets[phase_type], phase_type)
# NOTE: this is a private member and only meant to be used for
# logging/debugging purposes. See __repr__ implementation
task._config = config
return task
@property
def num_batches_per_phase(self):
"""Returns number of batches in current phase iterator"""
return len(self.data_iterator)
@property
def model(self):
"""Returns model used in training (can be wrapped with DDP)"""
return (
self.distributed_model if is_distributed_training_run() else self.base_model
)
@property
def loss(self):
"""Returns loss used in training (can be wrapped with DDP)"""
return self.distributed_loss if self.distributed_loss else self.base_loss
@property
def phase_type(self):
"""Returns current phase type. String with value "train" or "test" """
return "train" if self.train else "test"
@property
def eval_phase_idx(self):
"""Returns current evaluation phase"""
return self.phase_idx - self.train_phase_idx - 1
def get_total_training_phases(self):
"""
Returns the total number of "train" phases in the task
"""
num_training_phases = 0
for phase in self.phases:
if phase["train"] is True:
num_training_phases += 1
return num_training_phases
def get_total_test_phases(self):
"""
Returns the total number of "test" phases in the task
"""
num_test_phases = 0
for phase in self.phases:
if phase["train"] is False:
num_test_phases += 1
return num_test_phases
def _build_phases(self):
"""Returns list of phases from config.
These phases will look like:
{
train: is this a train or test phase?
optimizer: optimizer settings
}
- If this is a test only run, then only test phases will be
generated
- If this is a training run with both train and test datasets, then x phases =
x train phases + x test phases, interleaved. If test_phase_period > 1, test
phases are only added after test_phase_period train phases. The last phase is
always a test phase.
- If this is a training run with only a train dataset, then x phases = x train
phases.
"""
if not self.test_only:
phases = [
{"train": True}
for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs))
]
if self._train_only:
return phases
final_phases = []
for i, phase in enumerate(phases):
final_phases.append(phase)
if (i + 1) % self.test_phase_period == 0:
final_phases.append({"train": False})
if final_phases[-1]["train"]:
final_phases.append({"train": False})
return final_phases
return [{"train": False} for _ in range(self.num_epochs)]
def build_dataloader_from_dataset(self, dataset, **kwargs):
"""Builds a dataloader from the provided dataset
Args:
dataset: A ClassyDataset
kwargs: Additional kwargs to pass during dataloader construction for
derived classes
"""
return dataset.iterator(
phase_type=self.phase_type,
current_phase_id=self.train_phase_idx if self.train else 0,
pin_memory=self.use_gpu and torch.cuda.device_count() > 1,
multiprocessing_context=mp.get_context(self.dataloader_mp_context),
**kwargs,
)
def build_dataloaders_for_current_phase(self):
"""Builds dataloader(s) for the current phase.
Deriving classes can override this method to support custom behavior, like
supporting multiple dataloaders in parallel.
"""
self.dataloader = self.build_dataloader_from_dataset(
self.datasets[self.phase_type]
)
def prepare_optimizer(self, optimizer, model, loss=None):
bn_params, other_params = split_batchnorm_params(model)
if loss is not None:
bn_params_loss, params_loss = split_batchnorm_params(loss)
bn_params = bn_params + bn_params_loss
other_params = other_params + params_loss
bn_schedulers = self.optimizer_schedulers.copy()
if not self.bn_weight_decay:
bn_schedulers["weight_decay"] = 0
param_groups = [{"params": other_params, **self.optimizer_schedulers}]
if len(bn_params) > 0:
param_groups.append({"params": bn_params, **bn_schedulers})
self.optimizer.set_param_groups(param_groups)
def prepare(self):
"""Prepares task for training, populates all derived attributes"""
self.phases = self._build_phases()
self.train = False if self.test_only else self.train
if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH:
self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)
elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX:
sync_bn_process_group = apex.parallel.create_syncbn_process_group(
self.batch_norm_sync_group_size
)
self.base_model = apex.parallel.convert_syncbn_model(
self.base_model, process_group=sync_bn_process_group
)
# move the model and loss to the right device
if self.use_gpu:
self.base_model, self.base_loss = copy_model_to_gpu(
self.base_model, self.base_loss
)
else:
self.base_loss.cpu()
self.base_model.cpu()
if self.optimizer is not None:
self.prepare_optimizer(
optimizer=self.optimizer, model=self.base_model, loss=self.base_loss
)
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
# Initialize apex.amp. This updates the model and the PyTorch optimizer (
# if training, which is wrapped by the ClassyOptimizer in self.optimizer).
                # Please note this must happen before loading the checkpoint,
                # because there is amp state to be restored.
if self.optimizer is None:
self.base_model = apex.amp.initialize(
self.base_model, optimizers=None, **self.amp_args
)
else:
self.base_model, self.optimizer.optimizer = apex.amp.initialize(
self.base_model, self.optimizer.optimizer, **self.amp_args
)
if self.simulated_global_batchsize is not None:
if self.simulated_global_batchsize % self.get_global_batchsize() != 0:
raise ValueError(
f"Global batch size ({self.get_global_batchsize()}) must divide "
f"simulated_global_batchsize ({self.simulated_global_batchsize})"
)
else:
self.simulated_global_batchsize = self.get_global_batchsize()
self.optimizer_period = (
self.simulated_global_batchsize // self.get_global_batchsize()
)
if self.optimizer_period > 1:
logging.info(
f"Using gradient accumulation with a period of {self.optimizer_period}"
)
if self.checkpoint_path:
self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)
classy_state_dict = (
None
if self.checkpoint_dict is None
else self.checkpoint_dict["classy_state_dict"]
)
if classy_state_dict is not None:
state_load_success = update_classy_state(self, classy_state_dict)
assert (
state_load_success
), "Update classy state from checkpoint was unsuccessful."
self.init_distributed_data_parallel_model()
def init_distributed_data_parallel_model(self):
"""
Initialize
`torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/
docs/stable/nn.html#distributeddataparallel>`_.
Needed for distributed training. This is where a model should be wrapped by DDP.
"""
if not is_distributed_training_run():
return
assert (
self.distributed_model is None
), "init_ddp_non_elastic must only be called once"
broadcast_buffers = (
self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS
)
if self.use_sharded_ddp:
if not isinstance(self.optimizer, ZeRO):
raise ValueError(
"ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer"
)
from fairscale.nn.data_parallel import ShardedDataParallel
# Replace the original DDP wrap by the shard-aware ShardedDDP
self.distributed_model = ShardedDataParallel(
module=self.base_model,
sharded_optimizer=self.optimizer.optimizer,
broadcast_buffers=broadcast_buffers,
)
else:
self.distributed_model = init_distributed_data_parallel_model(
self.base_model,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
if self.fp16_grad_compress:
from torch.distributed.algorithms import ddp_comm_hooks
# FP16 hook is stateless and only takes a process group as the state.
# We use the default process group so we set the state to None.
process_group = None
self.distributed_model.register_comm_hook(
process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook
)
if (
isinstance(self.base_loss, ClassyLoss)
and self.base_loss.has_learned_parameters()
):
logging.info("Initializing distributed loss")
self.distributed_loss = init_distributed_data_parallel_model(
self.base_loss,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
@property
def where(self):
"""Returns the proportion of training that has completed. If in test
only mode, returns proportion of testing completed
Returned value is a float in the range [0, 1)
"""
current_step = self.num_updates / self.get_global_batchsize()
num_phases = (
self.get_total_test_phases()
if self.test_only
else self.get_total_training_phases()
)
if self.num_batches_per_phase <= 0:
raise RuntimeError("No batches to read. Is the dataset empty?")
num_steps = num_phases * self.num_batches_per_phase
where = current_step / num_steps
return where
def get_classy_state(self, deep_copy: bool = False):
"""Returns serialiable state of task
Args:
deep_copy: If true, does a deep copy of state before returning.
"""
optimizer_state = {}
if self.optimizer is not None:
optimizer_state = self.optimizer.get_classy_state()
classy_state_dict = {
"train": self.train,
"base_model": self.base_model.get_classy_state(),
"meters": [meter.get_classy_state() for meter in self.meters],
"optimizer": optimizer_state,
"phase_idx": self.phase_idx,
"train_phase_idx": self.train_phase_idx,
"num_updates": self.num_updates,
"losses": self.losses,
"hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks},
"loss": {},
}
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
classy_state_dict["train_dataset_iterator"] = self.datasets[
"train"
].get_classy_state()
if isinstance(self.base_loss, ClassyLoss):
classy_state_dict["loss"] = self.base_loss.get_classy_state()
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
classy_state_dict["amp"] = apex.amp.state_dict()
elif self.amp_grad_scaler is not None:
classy_state_dict["amp"] = self.amp_grad_scaler.state_dict()
if deep_copy:
classy_state_dict = copy.deepcopy(classy_state_dict)
return classy_state_dict
def set_classy_state(self, state):
"""Set task state
Args:
state: Dict containing state of a task
"""
self.train = False if self.test_only else state["train"]
self.base_model.set_classy_state(state["base_model"])
if self.test_only:
# if we're only testing, just need the state of the model to be updated
return
self.phase_idx = state["phase_idx"]
self.num_updates = state["num_updates"]
self.train_phase_idx = state["train_phase_idx"]
self.losses = state["losses"]
for meter, meter_state in zip(self.meters, state["meters"]):
meter.set_classy_state(meter_state)
if self.optimizer is not None:
self.optimizer.set_classy_state(state["optimizer"])
if state.get("loss") and isinstance(self.base_loss, ClassyLoss):
self.base_loss.set_classy_state(state["loss"])
if "amp" in state:
if self.amp_type == AmpType.APEX:
apex.amp.load_state_dict(state["amp"])
else:
self.amp_grad_scaler.load_state_dict(state["amp"])
for hook in self.hooks:
# we still want to be able to run when new hooks are added or old
# hooks are removed
if hook.name() in state["hooks"]:
hook.set_classy_state(state["hooks"][hook.name()])
else:
logging.warning(f"No state found for hook: {hook.name()}")
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
self.datasets["train"].set_classy_state(state.get("train_dataset_iterator"))
@staticmethod
def _is_checkpointable_dataset(dataset):
return hasattr(dataset, "get_classy_state") and hasattr(
dataset, "set_classy_state"
)
def eval_step(self):
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
f"Returned sample [{sample}] is not a map with 'input' and"
+ "'target' keys"
)
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
with torch.no_grad(), torch_amp_context:
output = self.model(sample["input"])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append(loss.data.cpu().item())
self.update_meters(output, sample)
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def check_inf_nan(self, loss):
if loss == float("inf") or loss == float("-inf") or loss != loss:
raise FloatingPointError(f"Loss is infinity or NaN: {loss}")
def _should_do_step(self):
"""Tells if we will be performing an optimizer step.
Returns True always if there is no gradient accumulation. With gradient
accumulation returns True only when the gradients will be synchronized and we
will be performing an optimizer step.
"""
update_idx = self.num_updates // self.get_global_batchsize()
return (update_idx % self.optimizer_period) == self.optimizer_period - 1
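    # Illustrative note (not in the original source): num_updates grows by the
    # global batch size once per train step, so update_idx counts completed steps.
    # With a hypothetical optimizer_period of 4, _should_do_step() is True on
    # steps 3, 7, 11, ... (0-indexed), i.e. right before gradients are synced and
    # the optimizer is stepped in run_optimizer().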
def train_step(self):
"""Train step to be executed in train loop."""
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
f"Returned sample [{sample}] is not a map with 'input' and"
+ "'target' keys"
)
# Copy sample to GPU
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
if self.mixup_transform is not None:
sample = self.mixup_transform(sample)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
# only sync with DDP when we need to perform an optimizer step
# an optimizer step can be skipped if gradient accumulation is enabled
do_step = self._should_do_step()
ctx_mgr_model = (
self.distributed_model.no_sync()
if self.distributed_model is not None and not do_step
else contextlib.suppress()
)
ctx_mgr_loss = (
self.distributed_loss.no_sync()
if self.distributed_loss is not None and not do_step
else contextlib.suppress()
)
with ctx_mgr_model, ctx_mgr_loss:
# Forward pass
with torch.enable_grad(), torch_amp_context:
output = self.model(sample["input"])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append(loss.data.cpu().item())
self.update_meters(output, sample)
# Backwards pass + optimizer step
self.run_optimizer(local_loss)
self.num_updates += self.get_global_batchsize()
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def compute_loss(self, model_output, sample):
return self.loss(model_output, sample["target"])
def run_optimizer(self, loss):
"""Runs backwards pass and update the optimizer"""
self.check_inf_nan(loss)
# Gradient accumulation logic. We always set optimizer_period, even
# if gradient accumulation is disabled. Assumes all batches have the
# same size
update_idx = self.num_updates // self.get_global_batchsize()
do_zero_grad = (update_idx % self.optimizer_period) == 0
do_step = self._should_do_step()
if do_zero_grad:
self.optimizer.zero_grad()
if self.amp_type == AmpType.APEX:
with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.amp_type == AmpType.PYTORCH:
self.amp_grad_scaler.scale(loss).backward()
else:
loss.backward()
if do_step:
# Handle gradient accumulation related gradient rescaling
if self.optimizer_period != 1:
self._rescale_gradients(1 / self.optimizer_period)
# Clipping must happen after grad accumulation
if self.clip_grad_norm is not None:
self._clip_gradients(self.clip_grad_norm)
if self.amp_type == AmpType.PYTORCH:
# If using mixed precision, handle underflow-related scaling
# See https://pytorch.org/docs/stable/amp.html#gradient-scaling
# for context
self.amp_grad_scaler.step(self.optimizer, where=self.where)
self.amp_grad_scaler.update()
else:
self.optimizer.step(where=self.where)
def _rescale_gradients(self, scale):
for param in master_params(self.optimizer):
if param.grad is not None:
param.grad.data.mul_(scale)
def _clip_gradients(self, max_norm):
nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)
def update_meters(self, model_output, sample):
target = sample["target"].detach().cpu()
model_output = model_output.detach().cpu()
# Update meters
for meter in self.meters:
meter.update(model_output, target, is_train=self.train)
def synchronize_losses(self):
"""Average the losses across the different replicas"""
# Average losses across nodes
losses_tensor = torch.tensor(self.losses)
synchronized_losses_tensor = all_reduce_mean(losses_tensor)
self.losses = synchronized_losses_tensor.tolist()
def advance_phase(self):
"""Performs bookkeeping / task updates between phases
Increments phase idx, resets meters, resets loss history,
resets counters, shuffles dataset, rebuilds iterators, and
sets the train / test state for phase.
"""
logging.debug("Advancing phase")
# Reset meters for next phase / epoch
for meter in self.meters:
meter.reset()
# Reset loss history for next epoch
self.losses = []
# Setup new phase
self.phase_idx += 1
phase = self.phases[self.phase_idx]
self.train = True if phase["train"] else False
if self.train:
self.train_phase_idx += 1
# Re-build dataloader & re-create iterator anytime membership changes.
self.build_dataloaders_for_current_phase()
self.create_data_iterators()
# Set up pytorch module in train vs eval mode, update optimizer.
self._set_model_train_mode()
def done_training(self):
"""Stop condition for training"""
return self.phase_idx + 1 >= len(self.phases)
def create_data_iterators(self):
"""Creates data iterator(s) for the current phase."""
# Delete iterator explicitly so that all dataloader processes
# are cleaned up.
del self.data_iterator
self.data_iterator = iter(self.dataloader)
def _set_model_train_mode(self):
"""Set train mode for model"""
phase = self.phases[self.phase_idx]
self.base_model.train(phase["train"])
self.base_loss.train(phase["train"])
if (
self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL
and not self.train
):
self._broadcast_buffers()
def _broadcast_buffers(self):
"""Explicitly synchronize buffers across all devices."""
if self.distributed_model is None:
return
buffers = list(self.base_model.buffers())
if len(buffers) > 0:
logging.info("Synchronizing buffers before evaluation.")
for buffer in buffers:
broadcast(buffer, 0, group=self.distributed_model.process_group)
# TODO: Functions below should be better abstracted into the dataloader
# abstraction
def get_batchsize_per_replica(self):
"""Return local replica's batchsize for dataset (e.g. batchsize per GPU)"""
return self.datasets[self.phase_type].get_batchsize_per_replica()
def get_global_batchsize(self):
"""Return global batchsize across all trainers"""
return self.datasets[self.phase_type].get_global_batchsize()
def on_start(self):
for hook in self.hooks:
hook.on_start(self)
def on_phase_start(self):
self.phase_start_time_total = time.perf_counter()
self.advance_phase()
for hook in self.hooks:
hook.on_phase_start(self)
self.phase_start_time_train = time.perf_counter()
def on_phase_end(self):
self.log_phase_end(self.phase_type)
if self.train:
self.optimizer.on_epoch(where=self.where)
logging.debug("Syncing losses on phase end...")
self.synchronize_losses()
logging.debug("...losses synced")
logging.debug("Syncing meters on phase end...")
for meter in self.meters:
meter.sync_state()
logging.debug("...meters synced")
barrier()
for hook in self.hooks:
hook.on_phase_end(self)
self.perf_log = []
self.log_phase_end(f"{self.phase_type}_total")
if hasattr(self.datasets[self.phase_type], "on_phase_end"):
self.datasets[self.phase_type].on_phase_end()
def on_end(self):
for hook in self.hooks:
hook.on_end(self)
def log_phase_end(self, tag):
start_time = (
self.phase_start_time_train
if tag == self.phase_type
else self.phase_start_time_total
)
phase_duration = time.perf_counter() - start_time
im_per_sec = (
self.get_global_batchsize() * self.num_batches_per_phase
) / phase_duration
self.perf_log.append(
{"tag": tag, "phase_idx": self.train_phase_idx, "im_per_sec": im_per_sec}
)
def __repr__(self):
if hasattr(self, "_config"):
config = json.dumps(self._config, indent=4)
return f"{super().__repr__()} initialized with config:\n{config}"
return super().__repr__()
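# Usage sketch (not part of the original file): assuming a config dict with the
# keys consumed in from_config above ("name", "num_epochs", "dataset", "model",
# "loss", "optimizer", ...), the task would typically be built through the task
# registry and handed to a Classy Vision trainer, e.g.
#
#     from classy_vision.trainer import LocalTrainer
#     task = build_task(config)  # config["name"] == "biasamp_classification_task"
#     LocalTrainer().train(task)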
|
cv_bias_amplification-main
|
my-project-release/my-project/tasks/biasamp_classification_task.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from classy_vision.tasks.classy_task import ClassyTask
from classy_vision.tasks.fine_tuning_task import FineTuningTask
FILE_ROOT = Path(__file__).parent
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
TASK_REGISTRY_TB = {}
TASK_CLASS_NAMES_TB = {}
def build_task(config):
"""Builds a ClassyTask from a config.
This assumes a 'name' key in the config which is used to determine what
task class to instantiate. For instance, a config `{"name": "my_task",
"foo": "bar"}` will find a class that was registered as "my_task"
(see :func:`register_task`) and call .from_config on it."""
task = TASK_REGISTRY[config["name"]].from_config(config)
return task
def register_task(name):
"""Registers a ClassyTask subclass.
This decorator allows Classy Vision to instantiate a subclass of ClassyTask
from a configuration file, even if the class itself is not part of the
Classy Vision framework. To use it, apply this decorator to a ClassyTask
subclass, like this:
.. code-block:: python
@register_task('my_task')
class MyTask(ClassyTask):
...
To instantiate a task from a configuration file, see :func:`build_task`."""
def register_task_cls(cls):
if name in TASK_REGISTRY:
msg = "Cannot register duplicate task ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, TASK_REGISTRY_TB[name]))
if not issubclass(cls, ClassyTask):
raise ValueError(
"Task ({}: {}) must extend ClassyTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
msg = (
"Cannot register task with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, TASK_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
TASK_REGISTRY_TB[name] = tb
TASK_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_task_cls
from .biasamp_classification_task import BiasAmpClassificationTask # isort:skip
# from .fine_tuning_task import FineTuningTask # isort:skip
__all__ = [
"ClassyTask",
# "FineTuningTask",
"build_task",
"register_task",
"BiasAmpClassificationTask",
]
# automatically import any Python files in the tasks/ directory
import_all_modules(FILE_ROOT, "tasks")
|
cv_bias_amplification-main
|
my-project-release/my-project/tasks/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torchvision.datasets import FashionMNIST
import torch.utils.data
import torch
from torchvision import datasets, transforms
import classy_vision.generic.util as util
import torchvision
import math
import numpy as np
import json
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.dataset.transforms import build_transforms, ClassyTransform, register_transform
from PIL import Image
# Handle dataset so that we only get a subset of images (`task_classes`).
# Perform overlay transform (`attr_classes`) for a specific proportion of images (`epsilon`) with a specific strength (`eta`).
@register_dataset("cifar100_random_sample_train")
class CIFAR100RandomSampleTrain(ClassyDataset):
def __init__(self,
batchsize_per_replica,
shuffle, transform,
num_samples,
dataset_size,
p,
seed,
class_mapping):
# Grab original dataset
        dataset = torchvision.datasets.CIFAR100(root='./', download=True, train=True)
# Instantiate storage for task images
self.valid_cifar_idx_tasks = [] # will become a list of original idxs for the task-class subset.
self.mapped_classes = [] # will become a list of mapped classes for the task-class subset.
valid_cifar_idx_tasks_breakdown = {i:[] for i in range(0,len(class_mapping))} # key=task-class, value(to be)=original idx
# Store indices for task images
for i in range(len(dataset)):
valid_cifar_idx_tasks_breakdown[dataset[i][1]].append(i)
# Shuffle task images for selecting subset
with util.torch_seed(seed):
with util.numpy_seed(seed):
for key, _ in valid_cifar_idx_tasks_breakdown.items():
np.random.shuffle(valid_cifar_idx_tasks_breakdown[key])
class_size = int(np.rint(500*dataset_size))
# Collect task images and class mappings for CIFAR100 subset
for key, _ in valid_cifar_idx_tasks_breakdown.items():
self.valid_cifar_idx_tasks.extend(valid_cifar_idx_tasks_breakdown[key][0:class_size])
self.mapped_classes.extend([class_mapping[key]]*class_size)
# Assign attribute based on task-class probability
attr_breakdown = {} # key(to be)=task-class, value(to be)=attr-class
with util.torch_seed(seed+1):
with util.numpy_seed(seed+1):
for key, _ in valid_cifar_idx_tasks_breakdown.items():
hold = [1] * (int)(np.round(class_size * p[class_mapping[key]], 0)) + [0] * (int)(np.round(class_size * (1.0-p[class_mapping[key]]), 0))
np.random.shuffle(hold)
attr_breakdown[key] = hold
# Assign overlay image based on attribute-class assignment
self.valid_attrs = [None]*class_size*100 # will become a list of attr-classes, aligned with corresponding idxs in task_idx_list
for key, _ in valid_cifar_idx_tasks_breakdown.items():
for cifar_task_idx, attr in zip(valid_cifar_idx_tasks_breakdown[key], attr_breakdown[key]):
# this assumes that the dataset ordering does not change between iterations.
self.valid_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = 'b' if attr else 'a'
# Confirm there are the right number of samples
assert num_samples == len(self.valid_cifar_idx_tasks)
assert num_samples == len(self.mapped_classes)
assert num_samples == len(self.valid_attrs)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
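    # Illustrative note (not in the original source): the default CIFAR-100 train
    # split has 500 images per original class, so dataset_size=1.0 gives
    # class_size=500. A hypothetical p[class_mapping[key]] of 0.8 then marks
    # round(500 * 0.8) = 400 of those images with attribute 'b' (inverted in
    # __getitem__) and the remaining 100 with attribute 'a'.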
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.valid_cifar_idx_tasks
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[self.valid_cifar_idx_tasks[idx]])
img = sample[0]
mapped_label = self.mapped_classes[idx]
attribute = self.valid_attrs[idx]
# perform overlay transform
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
img = np.asarray(img)
img_dtype = img.dtype
img = 255 - img if attribute == 'b' else img
sample[0] = Image.fromarray(img.astype(img_dtype))
sample.append(idx)
sample.append(mapped_label)
sample.append(attribute)
sample = tuple(sample) # TODO: Update future transforms with this new ordering.
if self.transform is None:
return sample
return self.transform(sample)
def __len__(self):
return len(self.valid_cifar_idx_tasks)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=config["num_samples"],
dataset_size=config["dataset_size"],
p=config["p"],
seed=config["seed"],
class_mapping=config["class_mapping"]
)
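# Hypothetical config sketch (not part of the original file): from_config above
# expects a dict shaped roughly like the following; the transform list and the
# exact values are placeholders, not taken from the original experiments.
#
#     {
#         "name": "cifar100_random_sample_train",
#         "batchsize_per_replica": 128,
#         "shuffle": True,
#         "transforms": [...],
#         "num_samples": 50000,      # must equal 100 * class_size
#         "dataset_size": 1.0,
#         "p": {0: 0.8, 1: 0.2},     # keyed by the mapped (new) classes
#         "seed": 0,
#         "class_mapping": {...},     # original class -> new class
#     }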
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/cifar100_random_sample.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from PIL import Image
import numpy as np
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import json
from classy_vision.dataset.transforms import build_transforms, ClassyTransform, register_transform
from classy_vision.dataset import build_dataset
import classy_vision.generic.util as util
from collections.abc import Iterable
import cv2
import random
import time
import logging
@register_transform("invert")
class Invert(ClassyTransform):
"""With probablity p_class, invert the image.
Args:
p (dict <int: float>): Probabilities for each class.
seed (int): Seed used for replication.
"""
def __init__(self, p, seed):
self.p = p
self.seed = seed
def __call__(self, sample):
"""
Args:
sample (tuple): Image to be altered and its class
Returns:
tuple: (Altered image, class)
"""
img = sample[0]
original_label = sample[1]
sample_id = sample[2]
mapped_label = sample[3]
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
img = np.asarray(img)
img_dtype = img.dtype
if self.seed >= 0:
with util.torch_seed(self.seed + sample_id):
with util.numpy_seed(self.seed + sample_id):
attribute = 'b' if np.random.rand() < self.p[mapped_label] else 'a'
else:
attribute = 'b' if np.random.rand() < self.p[mapped_label] else 'a'
img = 255 - img if attribute == 'b' else img
img = img.astype(img_dtype)
return (Image.fromarray(img), original_label, sample_id, mapped_label, attribute)
def __repr__(self):
return self.__class__.__name__
@register_transform("invert_exact")
class InvertExact(ClassyTransform):
"""Invert the image according to the provided inversion list.
Args:
invert (list <int>): Whether or not the image at index i should be inverted
"""
def __init__(self, invert):
self.invert = invert
def __call__(self, sample):
"""
Args:
sample (tuple): Image to be altered and its class
Returns:
tuple: (Altered image, class)
"""
img = sample[0]
original_label = sample[1]
sample_id = sample[2]
mapped_label = sample[3]
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
img = np.asarray(img)
img_dtype = img.dtype
attribute = 'b' if self.invert[sample_id] else 'a'
img = 255 - img if attribute == 'b' else img
img = img.astype(img_dtype)
return (Image.fromarray(img), original_label, sample_id, mapped_label, attribute)
def __repr__(self):
return self.__class__.__name__
@register_transform("assign_class")
class AssignClass(ClassyTransform):
"""Re-assign each image class to a given class.
Args:
classes (dict <int: int>): New class assignments, with current class:new class
"""
def __init__(self, classes):
self.classes = classes
def __call__(self, sample):
"""
Args:
sample (tuple): Class to be altered and its image.
Returns:
tuple: (Altered image, class)
"""
img = sample[0]
label = sample[1]
sample_id = sample[2]
return (img, label, sample_id, self.classes[label])
def __repr__(self):
return self.__class__.__name__
@register_transform("swap_task_attr")
class SwapTaskAttr(ClassyTransform):
"""Switch the task and attribute.
Converts the original attribute to a numeric form.
"""
def __call__(self, sample):
img = sample[0]
original_label = sample[1]
sample_id = sample[2]
mapped_label = sample[3]
attribute = sample[4]
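        # 'a' -> 0, 'b' -> 1: ord('a') == 97, so subtracting 97 turns the
        # attribute letter into a numeric label usable as the new target.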
return (img, original_label, sample_id, ord(attribute)-97, mapped_label)
def __repr__(self):
return self.__class__.__name__
@register_transform("assign_class_str")
class AssignClassStr(ClassyTransform):
"""Re-assign the image to a given class.
Args:
classes (dict <int: int>): New class assignments, with current class:new class
"""
def __init__(self, classes):
self.classes = classes
def __call__(self, sample):
"""
Args:
sample (tuple): Class to be altered and its image.
Returns:
tuple: (Altered image, class)
"""
img = sample[0]
label = sample[1]
sample_id = sample[2]
attribute = sample[3]
return (img, label, sample_id, attribute, self.classes[str(label)])
def __repr__(self):
return self.__class__.__name__
@register_transform("rand_assign_class_rand_invert")
class RandAssignClassRandInvert(ClassyTransform):
"""Helper function to make configs easier to write. Warning: Requires
dataset to be registered before transform is called. Requires dataset
to be cheap to do one pass over to create maps when transform is
created
Randomly assigns the original class targets to a new, smaller, set
of class targets. The original class set will be evenly divided
among the new classes Then inverts images with probability p based
on the probability map provided.
Args:
num_new_classes (int): New set of classes
invert_probs (array[float]): Inversion probability for each class
dataset_name (string): Already registered dataset for retrieving class info
exact (bool): Exact number of inversions (i.e. class_1: 0.5 => exactly half of of class_1 images will be inverted vs flipping a coin for each image)
assignment_seed (optional int): This is the seed used for the random generation ... must be same if you want the class mapping to match for test set
inversion_seed (optional int): This is the seed for actually inverting each image. If None, uses time.
"""
def __init__(self, num_new_classes, invert_probs, dataset_config, exact=True, assignment_seed=0, inversion_seed=0):
# assertions
assert len(invert_probs) == num_new_classes, "inversion probabilities must match the number of final classes"
assert assignment_seed is not None, "Assignment seed cannot be None otherwise it will be impossible to track the mapping"
for i in range(0, num_new_classes):
assert invert_probs[i] >= 0.0 and invert_probs[i] <= 1.0, "Valid probabilities must be provided"
if inversion_seed is None:
inversion_seed = int(time.time())
# For most datasets, only the name is required, we set batchsize, shuffle, transforms, num_workers
dataset_config["batchsize_per_replica"] = 1
dataset_config["shuffle"] = False
dataset_config["transforms"] = []
dataset_config["num_workers"] = 0
# Get target mapping directly from dataset
dataset = build_dataset(dataset_config)
index_to_class_mapping = {}
target_counts = {}
for i in range(0, len(dataset)):
sample = dataset[i]
index_to_class_mapping[i] = {"original_target": sample[1]}
if sample[1] not in target_counts:
target_counts[sample[1]] = 0
target_counts[sample[1]] += 1
target_list = list(target_counts.keys())
target_list.sort()
new_target_list = []
quotient = len(target_list) // num_new_classes
remainder = len(target_list) % num_new_classes
# Create correct number of new class instances
for i in range(0, num_new_classes):
num = quotient
if i < remainder:
num += 1
new_target_list += [i for j in range(0, num)]
with util.numpy_seed(assignment_seed):
np.random.shuffle(new_target_list)
class_mapping = dict(zip(target_list, new_target_list))
logging.info("Classy mapping: {}".format(str(class_mapping)))
self.random_assign = AssignClass(class_mapping)
# Now that we have our random assignment, need our exact list
inversion_counts = {}
for i in range(0, len(target_counts)):
if class_mapping[i] not in inversion_counts:
inversion_counts[class_mapping[i]] = 0
inversion_counts[class_mapping[i]] += target_counts[i]
target_to_inversion_lists = {}
target_to_inversion_iterators = []
for i in range(0, len(invert_probs)):
prob = invert_probs[i]
count = inversion_counts[i]
target_to_inversion_lists[i] = [0] * round(count * (1 - prob)) + [1] * round(count * prob)
with util.numpy_seed(inversion_seed):
np.random.shuffle(target_to_inversion_lists[i])
target_to_inversion_iterators.append(iter(target_to_inversion_lists[i]))
inversions = [None] * len(dataset)
for i in range(0, len(dataset)):
it = target_to_inversion_iterators[class_mapping[index_to_class_mapping[i]["original_target"]]]
inversions[i] = next(it)
logging.info("Inversions: {}".format(str(inversions)))
self.exact_invert = InvertExact(inversions)
def __call__(self, sample):
new_sample = self.random_assign(sample)
new_sample = self.exact_invert(new_sample)
return new_sample
def __repr__(self):
return self.__class__.__name__
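# Illustrative note (not in the original source): with a hypothetical
# num_new_classes=10 over CIFAR-100, the 100 original targets are shuffled under
# assignment_seed and split 10 per new class, and invert_probs must then be a
# list of 10 probabilities, one inversion rate per new class.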
@register_transform("PadToSize")
class PadToSize(ClassyTransform):
"""
Pad the input PIL Image so that it has the specified size. The image is returned
unchanged if at least one dimension of the original image is larger than the
corresponding dimension in the requested size.
Args:
size (sequence): Output size (height, width)
border_type (string): The type cv2 border type to use.
pad_both_sides (bool): True: add padding to both sides to keep the image
in the centre; False: add padding to the right and/or bottom.
"""
def __init__(
self,
size,
border_type="BORDER_CONSTANT",
pad_both_sides=True,
):
self.size = size
self.pad_both_sides = pad_both_sides
self.border_type = self._getBorderType(border_type)
assert (
isinstance(size, Iterable) and len(size) == 2
), "Got inappropriate size arg: {}. Expected a sequence (h, w)".format(
type(size)
)
def _pad(self, img: Image.Image) -> Image.Image:
padding = self._get_padding(img)
assert len(padding) == 2
padding_tlbr = self._get_padding_tlbr(padding)
if (
padding_tlbr[0] > 0
or padding_tlbr[1] > 0
or padding_tlbr[2] > 0
or padding_tlbr[3] > 0
):
padded = cv2.copyMakeBorder(
cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR),
padding_tlbr[0],
padding_tlbr[2],
padding_tlbr[1],
padding_tlbr[3],
self.border_type,
value=[0, 0, 0], # black
)
            result_img = Image.fromarray(cv2.cvtColor(padded, cv2.COLOR_BGR2RGB))
            return result_img
        # No padding was needed; return the original image unchanged so the
        # function never falls through and implicitly returns None.
        return img
def _getBorderType(self, border_type: str) -> int:
if border_type == "BORDER_CONSTANT":
return cv2.BORDER_CONSTANT
elif border_type == "BORDER_REFLECT":
return cv2.BORDER_REFLECT
elif border_type == "BORDER_REFLECT_101":
return cv2.BORDER_REFLECT_101
elif border_type == "BORDER_REPLICATE":
return cv2.BORDER_REPLICATE
elif border_type == "BORDER_WRAP":
return cv2.BORDER_WRAP
        else:
            raise ValueError(f'unsupported border type "{border_type}"')
def _get_padding(self, img: Image.Image) -> Iterable:
img_width, img_height = img.size
return (self.size[0] - img_height, self.size[1] - img_width)
def _get_padding_tlbr(self, padding: Iterable) -> Iterable:
top_padding = padding[0] // 2 if self.pad_both_sides else 0
left_padding = padding[1] // 2 if self.pad_both_sides else 0
bottom_padding = padding[0] - top_padding
right_padding = padding[1] - left_padding
return [top_padding, left_padding, bottom_padding, right_padding]
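    # Illustrative note (not in the original source): padding a hypothetical
    # 30x28 (h x w) image to size=(32, 32) gives padding=(2, 4); with
    # pad_both_sides=True that becomes [top=1, left=2, bottom=1, right=2],
    # otherwise [0, 0, 2, 4].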
def __call__(self, img: Image.Image) -> Image.Image:
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image with dimensions (h, w).
"""
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
img_width, img_height = img.size
if img_height > self.size[0] or img_width > self.size[1]:
return img
else:
return self._pad(img)
def __repr__(self):
return (
self.__class__.__name__
+ "(size={0}, border_type={1}, pad_both_sides={2})".format(
self.size,
self.border_type,
repr(self.pad_both_sides),
)
)
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/inversion_transforms.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
from classy_vision.dataset import ClassyDataset, register_dataset
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
SampleType,
)
from classy_vision.dataset.transforms import ClassyTransform, build_transforms
from torchvision.datasets import CIFAR100
import torch.utils.data
import torch
import torchvision
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.dataset.transforms import build_transforms
@register_dataset("cifar100_train")
class CIFAR100Train(ClassyDataset):
def __init__(self, batchsize_per_replica, shuffle, transform, num_samples):
        dataset = torchvision.datasets.CIFAR100(root='./', download=True, train=True)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.dataset
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[idx])
sample.append(idx)
sample = tuple(sample)
if self.transform is None:
return sample
return self.transform(sample)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=None,
)
@register_dataset("cifar100_test")
class MyClassyDatasetCIFAR100Test(ClassyDataset):
def __init__(self, batchsize_per_replica, shuffle, transform, num_samples):
dataset = torchvision.datasets.CIFAR100(root='./', download=True, train=False)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.dataset
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[idx])
sample.append(idx)
sample = tuple(sample)
if self.transform is None:
return sample
return self.transform(sample)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=None,
)
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/cifar100.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the datasets/ directory
import_all_modules(FILE_ROOT, "datasets")
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torchvision
from classy_vision.dataset import ClassyDataset, register_dataset
from classy_vision.dataset.transforms import build_transforms
@register_dataset("fashionmnist_train")
class FashionMNISTTrain(ClassyDataset):
def __init__(self, batchsize_per_replica, shuffle, transform, num_samples):
        dataset = torchvision.datasets.FashionMNIST(root='./', download=True, train=True)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.dataset
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[idx])
sample.append(idx)
sample = tuple(sample)
if self.transform is None:
return sample
return self.transform(sample)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=None,
)
@register_dataset("fashionmnist_test")
class FashionMNISTTest(ClassyDataset):
def __init__(self, batchsize_per_replica, shuffle, transform, num_samples):
dataset = torchvision.datasets.FashionMNIST(root='./', download=True, train=False)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.dataset
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[idx])
sample.append(idx)
sample = tuple(sample)
if self.transform is None:
return sample
return self.transform(sample)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=None,
)
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/fashionmnist.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import torchvision
import classy_vision.generic.util as util
from classy_vision.dataset import ClassyDataset, register_dataset
from classy_vision.dataset.transforms import build_transforms
from PIL import Image
# Wrap CIFAR-10 so that only images from the `task_classes` subset are kept.
# Each kept image is overlayed with an image from one of the `attr_classes`; `epsilon` skews
# how often each task class is paired with each attribute class (p = 0.5 +/- epsilon/100),
# and `eta` sets the overlay strength (blend alpha = eta/100).
@register_dataset("cifar10_train_overlay")
class CIFAR10TrainOverlay(ClassyDataset):
def __init__(self,
batchsize_per_replica,
shuffle, transform,
num_samples,
task_classes,
attr_classes,
eta,
epsilon,
seed):
# Set up necessary variables
        assert len(task_classes) == 2 # assume exactly 2 task classes for now
        assert len(attr_classes) == 2 # assume exactly 2 attribute classes for now
p = [np.round(0.5 + (epsilon * 0.01), 2), np.round(0.5 - (epsilon * 0.01), 2)]
self.eta = eta
# Grab original dataset
        dataset = torchvision.datasets.CIFAR10(root='./', download=True, train=True)
# Instantiate storage for task and attribute images
self.valid_cifar_idx_tasks = [] # will become a list of original idxs for the task-class subset.
valid_cifar_idx_tasks_breakdown = {i:[] for i in task_classes} # key=task-class, value(to be)=original idx
valid_cifar_idx_attrs_breakdown = {i:[] for i in attr_classes} # key=attr-class, value(to be)=original idx
# Store indices for task and attribute images
for i in range(len(dataset)):
if dataset[i][1] in task_classes:
self.valid_cifar_idx_tasks.append(i)
valid_cifar_idx_tasks_breakdown[dataset[i][1]].append(i)
if dataset[i][1] in attr_classes:
valid_cifar_idx_attrs_breakdown[dataset[i][1]].append(i)
# Shuffle attribute images for random pairing
with util.torch_seed(seed):
with util.numpy_seed(seed):
for key, _ in valid_cifar_idx_attrs_breakdown.items():
np.random.shuffle(valid_cifar_idx_attrs_breakdown[key])
# Assign attribute-class based on task-class probability
attr_breakdown = {} # key(to be)=task-class, value(to be)=attr-class
for t, t_i in zip(task_classes, range(len(task_classes))):
hold = [attr_classes[0]] * (int)(np.round(len(valid_cifar_idx_tasks_breakdown[t]) * p[t_i], 0)) + [attr_classes[1]] * (int)(np.round(len(valid_cifar_idx_tasks_breakdown[t]) * (1.0-p[t_i]), 0))
with util.torch_seed(seed+1):
with util.numpy_seed(seed+1):
np.random.shuffle(hold)
attr_breakdown[t] = hold
# Assign overlay image based on attribute-class assignment
        self.valid_cifar_idx_attrs = [None]*num_samples # will become a list of original idxs for the attr-class subset, aligned with self.valid_cifar_idx_tasks
        self.valid_attrs = [None]*num_samples # will become a list of attr-classes, aligned with self.valid_cifar_idx_tasks
        attr_pointers = {attr:0 for attr in attr_classes} # used to walk valid_cifar_idx_attrs_breakdown for exact assignment
for key, _ in valid_cifar_idx_tasks_breakdown.items():
for cifar_task_idx, attr in zip(valid_cifar_idx_tasks_breakdown[key], attr_breakdown[key]):
                # images at a given `idx` in self.valid_cifar_idx_attrs and self.valid_cifar_idx_tasks are overlayed on each other.
# we use the pointers to ensure that a unique attr-class image is used for each task-class image.
# this assumes that the dataset ordering does not change between iterations.
self.valid_cifar_idx_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = valid_cifar_idx_attrs_breakdown[attr][attr_pointers[attr]]
self.valid_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = attr
attr_pointers[attr] += 1
# Confirm there are the right number of samples
assert num_samples == len(self.valid_cifar_idx_tasks)
assert num_samples == len(self.valid_cifar_idx_attrs)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.valid_cifar_idx_tasks
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[self.valid_cifar_idx_tasks[idx]])
overlay_sample = list(self.dataset[self.valid_cifar_idx_attrs[idx]])
attribute = self.valid_attrs[idx]
sample.append(idx)
# perform overlay transform
img = sample[0]
overlay_img = overlay_sample[0]
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
sample[0] = Image.blend(img, overlay_img, self.eta*0.01)
sample.append(attribute)
sample = tuple(sample) # TODO: Update future transforms with this new ordering.
if self.transform is None:
return sample
return self.transform(sample)
def __len__(self):
return len(self.valid_cifar_idx_tasks)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=config["num_samples"],
task_classes=config["task_classes"],
attr_classes = config["attr_classes"],
eta=config["eta"],
epsilon=config["epsilon"],
seed=config["seed"]
)
# Wrap CIFAR-10 so that only images from the `task_classes` subset are kept.
# Each kept image is overlayed with an image from one of the `attr_classes`; `epsilon` skews
# how often each task class is paired with each attribute class (p = 0.5 +/- epsilon/100),
# and `eta` sets the overlay strength (blend alpha = eta/100).
@register_dataset("cifar10_test_overlay")
class CIFAR10TestOverlay(ClassyDataset):
def __init__(self,
batchsize_per_replica,
shuffle, transform,
num_samples,
task_classes,
attr_classes,
eta,
epsilon,
seed):
# Set up necessary variables
        assert len(task_classes) == 2 # assume exactly 2 task classes for now
        assert len(attr_classes) == 2 # assume exactly 2 attribute classes for now
p = [np.round(0.5 + (epsilon * 0.01), 2), np.round(0.5 - (epsilon * 0.01), 2)]
self.eta = eta
# Grab original dataset
dataset = torchvision.datasets.CIFAR10(root='./', download=True, train=False)
# Instantiate storage for task and attribute images
self.valid_cifar_idx_tasks = [] # will become a list of original idxs for the task-class subset.
valid_cifar_idx_tasks_breakdown = {i:[] for i in task_classes} # key=task-class, value(to be)=original idx
valid_cifar_idx_attrs_breakdown = {i:[] for i in attr_classes} # key=attr-class, value(to be)=original idx
# Store indices for task and attribute images
for i in range(len(dataset)):
if dataset[i][1] in task_classes:
self.valid_cifar_idx_tasks.append(i)
valid_cifar_idx_tasks_breakdown[dataset[i][1]].append(i)
if dataset[i][1] in attr_classes:
valid_cifar_idx_attrs_breakdown[dataset[i][1]].append(i)
# Shuffle attribute images for random pairing
with util.torch_seed(seed):
with util.numpy_seed(seed):
for key, _ in valid_cifar_idx_attrs_breakdown.items():
np.random.shuffle(valid_cifar_idx_attrs_breakdown[key])
# Assign attribute-class based on task-class probability
attr_breakdown = {} # key(to be)=task-class, value(to be)=attr-class
for t, t_i in zip(task_classes, range(len(task_classes))):
hold = [attr_classes[0]] * (int)(np.round(len(valid_cifar_idx_tasks_breakdown[t]) * p[t_i], 0)) + [attr_classes[1]] * (int)(np.round(len(valid_cifar_idx_tasks_breakdown[t]) * (1.0-p[t_i]), 0))
with util.torch_seed(seed+1):
with util.numpy_seed(seed+1):
np.random.shuffle(hold)
attr_breakdown[t] = hold
# Assign overlay image based on attribute-class assignment
        self.valid_cifar_idx_attrs = [None]*num_samples # will become a list of original idxs for the attr-class subset, aligned with self.valid_cifar_idx_tasks
        self.valid_attrs = [None]*num_samples # will become a list of attr-classes, aligned with self.valid_cifar_idx_tasks
        attr_pointers = {attr:0 for attr in attr_classes} # used to walk valid_cifar_idx_attrs_breakdown for exact assignment
for key, _ in valid_cifar_idx_tasks_breakdown.items():
for cifar_task_idx, attr in zip(valid_cifar_idx_tasks_breakdown[key], attr_breakdown[key]):
                # images at a given `idx` in self.valid_cifar_idx_attrs and self.valid_cifar_idx_tasks are overlayed on each other.
# we use the pointers to ensure that a unique attr-class image is used for each task-class image.
# this assumes that the dataset ordering does not change between iterations.
self.valid_cifar_idx_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = valid_cifar_idx_attrs_breakdown[attr][attr_pointers[attr]]
self.valid_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = attr
attr_pointers[attr] += 1
# Confirm there are the right number of samples
assert num_samples == len(self.valid_cifar_idx_tasks)
assert num_samples == len(self.valid_cifar_idx_attrs)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.valid_cifar_idx_tasks
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[self.valid_cifar_idx_tasks[idx]])
overlay_sample = list(self.dataset[self.valid_cifar_idx_attrs[idx]])
attribute = self.valid_attrs[idx]
sample.append(idx)
# perform overlay transform
img = sample[0]
overlay_img = overlay_sample[0]
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
sample[0] = Image.blend(img, overlay_img, self.eta*0.01)
sample.append(attribute)
sample = tuple(sample) # TODO: Update future transforms with this new ordering.
if self.transform is None:
return sample
return self.transform(sample)
def __len__(self):
return len(self.valid_cifar_idx_tasks)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=config["num_samples"],
task_classes=config["task_classes"],
attr_classes = config["attr_classes"],
eta=config["eta"],
epsilon=config["epsilon"],
seed=config["seed"]
)
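# Illustrative usage sketch: a minimal, hypothetical config for the registered
# "cifar10_train_overlay" dataset. task_classes/attr_classes below are assumed example
# choices of CIFAR-10 classes; epsilon skews the task/attribute pairing away from 50/50
# (p = 0.5 +/- epsilon/100) and eta is the blend strength passed to Image.blend (alpha = eta/100).
if __name__ == "__main__":
    from classy_vision.dataset import build_dataset

    example_config = {
        "name": "cifar10_train_overlay",
        "batchsize_per_replica": 32,
        "shuffle": True,
        "transforms": [
            {"name": "tuple_to_map", "list_of_map_keys": ["input", "target", "sample_id", "attribute"]},
            {"name": "apply_transform_to_key", "transforms": [{"name": "ToTensor"}], "key": "input"},
        ],
        "num_samples": 10000,    # 2 task classes x 5000 CIFAR-10 train images each
        "task_classes": [0, 1],  # hypothetical task classes
        "attr_classes": [8, 9],  # hypothetical attribute classes
        "eta": 30,               # overlay strength of 30%
        "epsilon": 20,           # p = [0.70, 0.30]
        "seed": 0,
    }
    overlay_dataset = build_dataset(example_config)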
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/cifar10_overlay.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the models/ directory
import_all_modules(FILE_ROOT, "models")
|
cv_bias_amplification-main
|
my-project-release/my-project/models/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import math
from typing import Callable, List, Optional, Type, Union
import torch
import torch.nn as nn
from torch import Tensor
from torchvision.models.resnet import BasicBlock, Bottleneck
from classy_vision.models import ClassyModel, register_model
@register_model("custom_resnet")
class CustomResNet(ClassyModel):
def __init__(
self,
channels: int,
num_classes: int,
layers: List[int],
block: Optional[Type[Union[BasicBlock, Bottleneck]]] = None,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
        block = BasicBlock  # this custom variant always uses BasicBlock, regardless of the block argument
self.inplanes = 64
super(CustomResNet, self).__init__()
self.conv1 = nn.Conv2d(channels, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(256 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
@classmethod
def from_config(cls, config):
# This method takes a configuration dictionary
# and returns an instance of the class. In this case,
# we'll let the number of classes be configurable.
return cls(
channels=config["channels"],
num_classes=config["num_classes"],
layers=config["layers"]
)
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
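# Illustrative usage sketch: a minimal example of instantiating the registered "custom_resnet"
# from a config. The channel/class/layer values below are hypothetical; the experiment JSON
# configs supply the real ones.
if __name__ == "__main__":
    example_config = {
        "channels": 3,        # e.g. RGB input
        "num_classes": 2,     # e.g. a binary task
        "layers": [3, 3, 3],  # three stages of BasicBlocks (a ResNet-20-style depth)
    }
    model = CustomResNet.from_config(example_config)
    out = model(torch.randn(1, 3, 32, 32))
    print(out.shape)  # expected: torch.Size([1, 2])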
|
cv_bias_amplification-main
|
my-project-release/my-project/models/custom_resnet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import random
import sys
import numpy as np
import pandas as pd
import torch
import torch.utils.data
from classy_vision.dataset import build_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
sys.path.append('../../..')
# Imported so their Classy Vision registrations (custom transforms and model) are available.
from datasets.inversion_transforms import AssignClass, Invert
from models.custom_resnet import CustomResNet
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
root = './'
EXPERIMENT_NAME = "fashionmnist" # used to store experiment configs and results
IDS_TEST = "./test_ids.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_CHECKPOINTS = None
NUM_SAMPLES_TEST = 10_000
TRANSFORMS ={ # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.2860],
"std": [0.3530]
}
}
TEST_DATASET = "fashionmnist_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[1, 1, 0, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 1, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 1, 0, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 1, 1, 0]
]
sm = torch.nn.Softmax(dim=1)  # model output is (batch, num_classes)
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
    Args:
        model (ClassyModel): trained model to evaluate
        dataset (ClassyDataset): ClassyDataset, generally of test data
        attributes (List): list of distinct attributes
        tasks (List): list of distinct tasks
    Returns:
        results_in (ndarray): attributes x tasks counts of (input attribute, ground-truth task) pairs
        results_pred (ndarray): attributes x tasks counts of (input attribute, predicted task) pairs
        accuracy (float): overall top-1 accuracy
        predictions (ndarray): flattened per-sample predicted tasks
        targets (ndarray): flattened per-sample ground-truth tasks
        percents (ndarray): flattened per-sample maximum softmax confidences
        accuracy_breakdown (ndarray): per-(attribute, task) accuracy
    """
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
        p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t-hat = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
        res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
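# Worked example for calc_bias_amp_at: with attributes = {"a": 0, "b": 1}, tasks = [0, 1],
# test inputs res = [[300, 200], [200, 300]] and predictions res_h = [[350, 150], [150, 350]],
# the ("a", 0) cell gives P(A_a=1, T_0=1) = 0.30 > P(A_a=1) * P(T_0=1) = 0.25, so y_at = 1,
# and delta_at = 350/500 - 300/500 = 0.10. Each of the four cells contributes +0.10 here,
# so BiasAmp_{a->t} = (1 / (2 * 2)) * 0.40 = 0.10.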
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
    ece = 0
    for i in range(0, nbins):
        # skip empty bins, whose accuracy/confidence are NaN
        if not np.isnan(acc_bins[i]):
            ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i])
    return ece
def get_mce(acc_bins, conf_bins, nbins):
    mce = 0.0
    for i in range(0, nbins):
        # skip empty bins, whose accuracy/confidence are NaN
        if not np.isnan(acc_bins[i]):
            mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i]))
    return mce
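# Worked example for the calibration metrics: with nbins = 2, acc_bins = [0.6, 0.9],
# conf_bins = [0.7, 0.95] and count_bins = [40, 60],
#   ECE = 0.4 * |0.6 - 0.7| + 0.6 * |0.9 - 0.95| = 0.07
#   MCE = max(|0.6 - 0.7|, |0.9 - 0.95|) = 0.10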
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
        p_min = np.round(0.5 - (epsilon * 0.01), 2)
        for i in range(num_classes):
            p.append(np.round(p_min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
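# Example: with epsilon = 20, num_classes = 2 and a "fixed"/"custom" inversion config whose
# custom_selection is [0] (as in the companion generate_experiment_configs.py), get_p returns
# p = [0.7, 0.3]: 70% of the images assigned to task class 0 are inverted, versus 30% for class 1.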
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
    original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index_col=False)
except FileNotFoundError:
    # no previous results to append to
    original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/fashionmnist/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Run from within the /scripts folder.
import json
import numpy as np
import pandas as pd
import classy_vision.generic.util as util
import random
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[1, 1, 0, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 1, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 1, 0, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 1, 1, 0]
]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 55, 5)
EXPERIMENT_NAME = "fashionmnist" # used to store experiment configs and results
IDS_TEST = "./test_ids.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
    "type": "fixed", # used to designate how inversion probabilities should be calculated
    "assign": "custom", # "custom" uses "custom_selection"; "shuffle" randomly selects "num_inversion_classes" classes using "seed"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template.json"
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 60_000
SEED_BASE = 0
SEED_TEST = 100_000 # seed, or "None" to indicate get_test_seed() should be used
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
return p
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
    if json_type == "template":
        return template
    # fall back to JSON_BASE when JSON_TYPE is not "template" (e.g. "custom")
    return JSON_BASE
config_paths = []
output_directories = []
names = []
epsilons = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes= []
depths = []
counter_ids = []
for template in JSON_TEMPLATES:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, template, COUNTER)) as f:
data = json.load(f)
invert_exact_index_train = -1
invert_exact_index_test = -1
for i in range(len(data['dataset']['train']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TRAIN % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TRAIN / len(class_assignment)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
with util.torch_seed(train_seed):
with util.numpy_seed(train_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TRAIN, 'r') as f:
train_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TRAIN
for k in range(0, NUM_SAMPLES_TRAIN):
invert_mapped[k] = invert_original[int((train_ids[str(k)]['class'] * class_size) + train_ids[str(k)]['index'])]
data['dataset']['train']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_train = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class':
data['dataset']['train']['transforms'][i]['classes'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
data['model']['num_classes'] = num_classes
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(template)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
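# Example: a hypothetical first row of the manifest.txt written above (the file that
# training_measurements.py later reads), assuming the default constants in this script:
#   config_path,output_dir,name,epsilon,train_seed,test_seed,json_template,class_assignment_mapping,probabilities,n_classes,counter_id
#   <CONFIG_PATH>/models/config_fashionmnist_bias_test_1000.json,fashionmnist,config_fashionmnist_bias_test_1000.json,0,200000,100000,config_template.json,0,[0.5 0.5],2,1001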
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/fashionmnist/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import itertools
import json
import random
import sys
import numpy as np
import pandas as pd
import torch
import torch.utils.data
from classy_vision.dataset import build_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
sys.path.append('../../..')
# Imported so the custom transforms' Classy Vision registrations are available.
from datasets.inversion_transforms import AssignClass, Invert
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
root = './'
EXPERIMENT_NAME = "cifar100_width" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_SAMPLES_TEST = 10_000
TRANSFORMS ={ # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
NUM_CHECKPOINTS = range(18, 999, 20)
sm = torch.nn.Softmax(dim=1)  # model output is (batch, num_classes)
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
    Args:
        model (ClassyModel): trained model to evaluate
        dataset (ClassyDataset): ClassyDataset, generally of test data
        attributes (List): list of distinct attributes
        tasks (List): list of distinct tasks
    Returns:
        results_in (ndarray): attributes x tasks counts of (input attribute, ground-truth task) pairs
        results_pred (ndarray): attributes x tasks counts of (input attribute, predicted task) pairs
        accuracy (float): overall top-1 accuracy
        predictions (ndarray): flattened per-sample predicted tasks
        targets (ndarray): flattened per-sample ground-truth tasks
        percents (ndarray): flattened per-sample maximum softmax confidences
        accuracy_breakdown (ndarray): per-(attribute, task) accuracy
    """
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
        p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t-hat = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
        res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
    ece = 0
    for i in range(0, nbins):
        # skip empty bins, whose accuracy/confidence are NaN
        if not np.isnan(acc_bins[i]):
            ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i])
    return ece
def get_mce(acc_bins, conf_bins, nbins):
    mce = 0.0
    for i in range(0, nbins):
        # skip empty bins, whose accuracy/confidence are NaN
        if not np.isnan(acc_bins[i]):
            mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i]))
    return mce
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
        p_min = np.round(0.5 - (epsilon * 0.01), 2)
        for i in range(num_classes):
            p.append(np.round(p_min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
    if "invert" in TRANSFORMS:
        transforms.append({"name": "invert", "p": list(p), "seed": test_seed})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
    original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index_col=False)
except FileNotFoundError:
    # no previous results to append to
    original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_width/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 60, 10)
EXPERIMENT_NAME = "cifar100_width" # used to store experiment configs and results
WIDTHS = [4, 8, 16, 32, 64]
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar100.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
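# An illustrative alternative (not used in this experiment): a shuffled selection would look like
#   {"type": "fixed", "assign": "shuffle", "custom_selection": None,
#    "num_inversion_classes": 1, "seed": 0}
# so get_p() below seeds `random` and samples which classes receive the boosted inversion probability.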
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar100_resnet110_gpu1_lrmultistep.json",
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 50_000
SEED_BASE = 0
SEED_TEST = 100_000 # fixed test seed; set to None so get_test_seed() falls back to the per-model index
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
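# Worked example (illustrative): with the "fixed"/"custom" INVERSION_CONFIGS above,
#   get_p(epsilon=20, num_classes=2, inversion_config=INVERSION_CONFIGS[0]) -> [0.7, 0.3]
# i.e. the selected class 0 is inverted with probability 0.5 + 0.2 and class 1 with 0.5 - 0.2.
# An "evenly_spaced" config with epsilon=20 and num_classes=5 would give [0.3, 0.4, 0.5, 0.6, 0.7].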
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
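# Example values under the constants above: get_train_seed(0) -> 200_000, get_train_seed(1) -> 300_000,
# and get_test_seed(i) always returns 100_000 here because SEED_TEST is truthy.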
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
counter_ids = []
for width in WIDTHS:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, JSON_TEMPLATES[0], COUNTER)) as f:
data = json.load(f)
invert_exact_index_train = -1
invert_exact_index_test = -1
for i in range(len(data['dataset']['train']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TRAIN % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TRAIN / len(class_assignment)
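                            # e.g. with NUM_SAMPLES_TRAIN = 50_000 and the 100-entry CIFAR-100 class
                            # assignment, class_size = 500 inversion flags are drawn per original class
                            # before being remapped onto sample ids below.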
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
with util.torch_seed(train_seed):
with util.numpy_seed(train_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TRAIN, 'r') as f:
train_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TRAIN
for k in range(0, NUM_SAMPLES_TRAIN):
invert_mapped[k] = invert_original[int((train_ids[str(k)]['class'] * class_size) + train_ids[str(k)]['index'])]
data['dataset']['train']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_train = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class':
data['dataset']['train']['transforms'][i]['classes'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
data['model']['init_planes'] = width
data['model']['heads'][0]['in_plane'] = 4 * width
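                        # e.g. width (init_planes) 16 yields a head in_plane of 64; the WIDTHS sweep
                        # above therefore varies the head input from 16 to 256 channels.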
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(width)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
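# The manifest written above is what training_measurements.py later iterates over; an illustrative
# first row (values follow from the sweep defaults, shown here only as a sketch):
#   config_path=.../models/config_cifar100_width_bias_test_1000.json, output_dir=cifar100_width,
#   name=config_cifar100_width_bias_test_1000.json, epsilon=0, train_seed=200000, test_seed=100000,
#   json_template=4, class_assignment_mapping=0, probabilities=[0.5 0.5], n_classes=2, counter_id=1001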
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_width/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
root = './'
EXPERIMENT_NAME = "cifar100" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_SAMPLES_TEST = 10_000
TRANSFORMS = { # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
NUM_CHECKPOINTS = range(18, 999, 20)
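# e.g. range(18, 999, 20) evaluates the checkpoints saved at phases 18, 38, ..., 998; setting
# NUM_CHECKPOINTS = None instead evaluates only the final checkpoint directory (see the manifest
# loop at the bottom of this script).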
sm = torch.nn.Softmax(dim=1)  # softmax over the class dimension of the (batch, classes) model output
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
        model (ClassyModel): trained model to evaluate
        dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (List x List): nested list of size attributes x tasks with number of instances with that
input attribute and predicted task
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
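# Illustrative shapes (not actual results): with attributes {"a": 0, "b": 1} and tasks [0, 1],
# results_in and results_pred are 2x2 count matrices such as
#   results_in   = [[2500., 2500.], [2500., 2500.]]
#   results_pred = [[2600., 2400.], [2300., 2700.]]
# and accuracy_breakdown = results_correct / results_in holds per-(attribute, task) accuracy.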
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
resh (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
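# Illustrative check: if only three bins are non-empty with
#   acc_bins = [0.60, 0.75, 0.90], conf_bins = [0.70, 0.80, 0.92], count_bins = [10, 30, 60],
# then ece = 0.1*0.10 + 0.3*0.05 + 0.6*0.02 = 0.037 and mce = 0.10 (empty bins contribute nothing).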
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert" in TRANSFORMS:
transforms.append({"name": "invert", "p": list(p), "seed": SEED_TEST})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
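    # At this point `transforms` is, schematically (given the TRANSFORMS dict above):
    #   [{"name": "assign_class", "classes": [...]},
    #    {"name": "invert_exact", "invert": [...]},
    #    {"name": "tuple_to_map", "list_of_map_keys": [...]},
    #    {"name": "apply_transform_to_key", "transforms": [ToTensor, Normalize], "key": "input"}]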
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index_col=False)
except FileNotFoundError:  # assume a missing file simply means there are no prior results to append to
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
if counter == 1001:
continue
if NUM_CHECKPOINTS is None:
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
for checkpoint in NUM_CHECKPOINTS:
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/model_phase-' + str(checkpoint) + '_end.torch'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = checkpoint
)
if (checkpoint + 2) % 100 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100/scripts/training_measurements_checkpoints.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 60, 10)
EXPERIMENT_NAME = "cifar100" # used to store experiment configs and results
DEPTHS = {110: [[18, 18, 18], "block2-17"],
92: [[15, 15, 15], "block2-14"],
74: [[12, 12, 12], "block2-11"],
56: [[9, 9, 9], "block2-8"],
50: [[8, 8, 8], "block2-7"],
44: [[7, 7, 7], "block2-6"],
38: [[6, 6, 6], "block2-5"],
          32: [[5, 5, 5], "block2-4"],
26: [[4, 4, 4], "block2-3"],
20: [[3, 3, 3], "block2-2"],
14: [[2, 2, 2], "block2-1"],
8: [[1, 1, 1], "block2-0"],}
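# Each ResNet depth d = 6n + 2 maps to n residual blocks per stage and a head fork point of
# "block2-(n-1)", e.g. depth 56 -> [9, 9, 9] with fork_block "block2-8".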
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar100.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar100_resnetx_gpu1_lrmultistep.json",
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 50_000
SEED_BASE = 0
SEED_TEST = 100_000 # fixed test seed; set to None so get_test_seed() falls back to the per-model index
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
depths = []
counter_ids = []
for depth in DEPTHS:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, JSON_TEMPLATES[0], COUNTER)) as f:
data = json.load(f)
invert_exact_index_train = -1
invert_exact_index_test = -1
for i in range(len(data['dataset']['train']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TRAIN % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TRAIN / len(class_assignment)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
with util.torch_seed(train_seed):
with util.numpy_seed(train_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TRAIN, 'r') as f:
train_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TRAIN
for k in range(0, NUM_SAMPLES_TRAIN):
invert_mapped[k] = invert_original[int((train_ids[str(k)]['class'] * class_size) + train_ids[str(k)]['index'])]
data['dataset']['train']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_train = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class':
data['dataset']['train']['transforms'][i]['classes'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
data['model']['num_blocks'] = DEPTHS[depth][0]
data['model']['heads'][0]['fork_block'] = DEPTHS[depth][1]
data['model']['num_classes'] = num_classes
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid CONFIG_PATH")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(depth)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
root = './'
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
EXPERIMENT_NAME = "cifar10_overlay" # used to store experiment configs and results
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_CHECKPOINTS = None
NUM_SAMPLES_TEST = 2_000
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class_str": {},
"normalize": {
"mean": [
0.4914,
0.4822,
0.4465],
"std": [
0.247,
0.243,
0.261]
}
}
TEST_DATASET = "cifar10_test_overlay"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [{'task': [5, 9], 'attr': [2, 4]},
{'task': [8, 7], 'attr': [3, 2]},
{'task': [7, 1], 'attr': [5, 2]},
{'task': [4, 0], 'attr': [9, 6]},
{'task': [2, 4], 'attr': [6, 3]},
{'task': [6, 8], 'attr': [7, 4]},
{'task': [8, 5], 'attr': [4, 1]},
{'task': [3, 4], 'attr': [5, 6]},
{'task': [1, 8], 'attr': [0, 2]},
{'task': [3, 5], 'attr': [2, 6]},
{'task': [5, 9], 'attr': [3, 4]},
{'task': [3, 7], 'attr': [8, 1]},
{'task': [0, 6], 'attr': [8, 1]},
{'task': [3, 1], 'attr': [0, 4]},
{'task': [6, 7], 'attr': [2, 5]},
{'task': [6, 9], 'attr': [2, 0]},
{'task': [5, 3], 'attr': [6, 7]},
{'task': [9, 2], 'attr': [1, 8]},
{'task': [3, 8], 'attr': [9, 2]},
{'task': [8, 0], 'attr': [4, 5]}]
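# Each entry pairs two CIFAR-10 classes used as the binary task with two classes used as the
# attribute: e.g. {'task': [5, 9], 'attr': [2, 4]} relabels classes 5/9 as targets 0/1 (via
# assign_class_str below) while classes 2/4 define the attribute groups passed to get_model_results.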
sm = torch.nn.Softmax(dim=1)  # softmax over the class dimension of the (batch, classes) model output
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
        model (ClassyModel): trained model to evaluate
        dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (List x List): nested list of size attributes x tasks with number of instances with that
input attribute and predicted task
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
a = int(a)
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
resh (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
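# Illustrative sketch (not called anywhere in this script): how the binned accuracy/confidence
# values feed the ECE and MCE computations above. All numbers are invented; with two bins,
# (0, 0.5] is empty and (0.5, 1] holds every sample.
def _demo_calibration_metrics():
    percents = np.array([0.55, 0.65, 0.95, 0.85])  # max softmax score per sample
    predictions = np.array([0, 1, 1, 0])
    targets = np.array([0, 0, 1, 1])
    nbins = 2
    acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, nbins)
    # acc_bins ~ [nan, 0.5], conf_bins ~ [nan, 0.75], count_bins == [0, 4]
    ece = get_ece(acc_bins, conf_bins, count_bins, nbins)  # (4/4) * |0.5 - 0.75| = 0.25
    mce = get_mce(acc_bins, conf_bins, nbins)              # 0.25
    return ece, mce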
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
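# Illustrative sketch (not called anywhere in this script): how `epsilon` becomes per-class
# inversion probabilities. The config dicts below follow the inversion_config format that
# get_p expects but are invented for illustration.
def _demo_get_p():
    fixed = {"type": "fixed", "assign": "custom", "custom_selection": [0],
             "num_inversion_classes": None, "seed": None}
    p_fixed = get_p(20, 2, fixed)    # -> [0.7, 0.3]: the selected class gets 0.5 + 0.20
    spaced = {"type": "evenly_spaced"}
    p_spaced = get_p(20, 3, spaced)  # -> [0.3, 0.5, 0.7]: spread evenly across 0.5 +/- 0.20
    return p_fixed, p_spaced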
def run_measurements(name, json_template, class_assignment, epsilon, eta, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class_str" in TRANSFORMS:
transforms.append({"name": "assign_class_str", "classes": {str(class_assignment["task"][0]): 0, str(class_assignment["task"][1]): 1}})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": [
"input",
"original_target",
"sample_id",
"attribute",
"target"
]
},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8,
"num_samples": 2_000,
"task_classes": class_assignment["task"],
"attr_classes": class_assignment["attr"],
"eta": eta,
"epsilon": epsilon,
"seed": test_seed
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {class_assignment["attr"][0]: 0, class_assignment["attr"][1]: 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
etas.append(eta)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
etas = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index_col=False)
except Exception:
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
eta = row['eta']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
    if NUM_CHECKPOINTS is None:
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
            epsilon = epsilon,
            eta = eta,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"eta": etas,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"eta": etas,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar10_overlay/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
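# Each entry pairs two CIFAR-10 classes that define the binary task with two classes that
# define the binary attribute; the generated configs below read them as task_classes / attr_classes.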
CLASS_ASSIGNMENTS = [{'task': [5, 9], 'attr': [2, 4]},
{'task': [8, 7], 'attr': [3, 2]},
{'task': [7, 1], 'attr': [5, 2]},
{'task': [4, 0], 'attr': [9, 6]},
{'task': [2, 4], 'attr': [6, 3]},
{'task': [6, 8], 'attr': [7, 4]},
{'task': [8, 5], 'attr': [4, 1]},
{'task': [3, 4], 'attr': [5, 6]},
{'task': [1, 8], 'attr': [0, 2]},
{'task': [3, 5], 'attr': [2, 6]},
{'task': [5, 9], 'attr': [3, 4]},
{'task': [3, 7], 'attr': [8, 1]},
{'task': [0, 6], 'attr': [8, 1]},
{'task': [3, 1], 'attr': [0, 4]},
{'task': [6, 7], 'attr': [2, 5]},
{'task': [6, 9], 'attr': [2, 0]},
{'task': [5, 3], 'attr': [6, 7]},
{'task': [9, 2], 'attr': [1, 8]},
{'task': [3, 8], 'attr': [9, 2]},
{'task': [8, 0], 'attr': [4, 5]}]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(20, 50, 10)
ETAS = range(0, 101, 10)
EXPERIMENT_NAME = "cifar10_overlay" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar10.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar10.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar10_resnet110_gpu1_lrmultistep.json"
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 2_000
NUM_SAMPLES_TRAIN = 10_000
SEED_BASE = 0
SEED_TEST = 100_000 # seed, or "None" to indicate get_test_seed() should be used
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class_str": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
etas = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
depths = []
counter_ids = []
for template in JSON_TEMPLATES:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for eta in ETAS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, template, COUNTER)) as f:
data = json.load(f)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
test_seed = get_test_seed(model_i)
for i in range(len(data['dataset']['train']['transforms'])):
if "assign_class_str" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class_str':
data['dataset']['train']['transforms'][i]['classes'] = {class_assignment["task"][0]: 0, class_assignment["task"][1]: 1}
for i in range(len(data['dataset']['test']['transforms'])):
if "assign_class_str" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class_str':
data['dataset']['test']['transforms'][i]['classes'] = {class_assignment["task"][0]: 0, class_assignment["task"][1]: 1}
data['model']['num_classes'] = num_classes
data['dataset']['train']['num_samples'] = 10_000
data['dataset']['train']['task_classes'] = class_assignment["task"]
data['dataset']['train']['attr_classes'] = class_assignment["attr"]
data['dataset']['train']['eta'] = eta
data['dataset']['train']['epsilon'] = epsilon
data['dataset']['train']['seed'] = train_seed
data['dataset']['test']['num_samples'] = 2_000
data['dataset']['test']['task_classes'] = class_assignment["task"]
data['dataset']['test']['attr_classes'] = class_assignment["attr"]
data['dataset']['test']['eta'] = eta
data['dataset']['test']['epsilon'] = epsilon
data['dataset']['test']['seed'] = test_seed
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
etas.append(eta)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(template)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"eta": etas,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar10_overlay/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
root = './'
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
EXPERIMENT_NAME = "cifar100_trainingsize" # used to store experiment configs and results
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_CHECKPOINTS = None
NUM_SAMPLES_TEST = 10_000
TRANSFORMS = {  # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
VISUALIZE = False # designate if plots should be shown
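# Each row maps every CIFAR-100 class (by index) to one of the two binary task labels; the
# inversion probabilities p are indexed by that binary label when building the inversion mask.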
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
sm = torch.nn.Softmax(dim=1)  # softmax over the class dimension of the model output
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
    Args:
        model (ClassyModel): trained model to evaluate
        dataset (ClassyDataset): ClassyDataset, generally of test data
        attributes (List): list of distinct attributes
        tasks (List): list of distinct tasks
    Returns:
        results_in (List x List): counts of instances per (input attribute, target task)
        results_pred (List x List): counts of instances per (input attribute, predicted task)
        accuracy (float): overall top-1 accuracy
        predictions, targets, percents (np.array): flattened per-sample predictions, targets, and max softmax scores
        accuracy_breakdown (List x List): per (attribute, task) accuracy (correct counts / input counts)
    """
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
        p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t-hat = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
        res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, d_s, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert" in TRANSFORMS:
transforms.append({"name": "invert", "p": list(p), "seed": SEED_TEST})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
dataset_sizes.append(d_s)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
dataset_sizes = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index_col=False)
except Exception:
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
d_s = row['dataset_size']
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
d_s = d_s,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates,
"dataset_size": dataset_sizes
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates,
"dataset_size": dataset_sizes
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_trainingsize/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
DATASET_SIZES = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 60, 10)
EXPERIMENT_NAME = "cifar100_trainingsize" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar100.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar100subset_resnet110_gpu1_lrmultistep.json"
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 50_000
SEED_BASE = 0
SEED_TEST = 100_000 # seed, or "None" to indicate get_test_seed() should be used
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
dataset_sizes = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
depths = []
counter_ids = []
for template in JSON_TEMPLATES:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for d_s in DATASET_SIZES:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, template, COUNTER)) as f:
data = json.load(f)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
test_seed = get_test_seed(model_i)
for i in range(len(data['dataset']['train']['transforms'])):
if "assign_class_str" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class_str':
data['dataset']['train']['transforms'][i]['classes'] = {class_assignment["task"][0]: 0, class_assignment["task"][1]: 1}
for i in range(len(data['dataset']['test']['transforms'])):
if "assign_class_str" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class_str':
data['dataset']['test']['transforms'][i]['classes'] = {class_assignment["task"][0]: 0, class_assignment["task"][1]: 1}
data['model']['num_classes'] = 2
data['num_epochs'] = (int)(np.rint(500 * (1/d_s)))
data['optimizer']['param_schedulers']['lr']['milestones'] = [(int)(np.rint(1*(1/d_s))), (int)(np.rint(250*(1/d_s))), (int)(np.rint(375*(1/d_s)))]
data['dataset']['train']['num_samples'] = (int)(np.rint(NUM_SAMPLES_TRAIN * d_s))
data['dataset']['train']['dataset_size'] = d_s
data['dataset']['train']['p'] = p
data['dataset']['train']['seed'] = train_seed
data['dataset']['train']['class_mapping'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
dataset_sizes.append(d_s)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(template)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"dataset_size": dataset_sizes,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_trainingsize/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
root = './'
EXPERIMENT_NAME = "cifar100_regularization" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_SAMPLES_TEST = 10_000
TRANSFORMS = {  # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
NUM_CHECKPOINTS = range(18, 999, 20)
sm = torch.nn.Softmax(dim=1)  # softmax over the class dimension of the model output
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
    Args:
        model (ClassyModel): trained model to evaluate
        dataset (ClassyDataset): ClassyDataset, generally of test data
        attributes (List): list of distinct attributes
        tasks (List): list of distinct tasks
    Returns:
        results_in (List x List): counts of instances per (input attribute, target task)
        results_pred (List x List): counts of instances per (input attribute, predicted task)
        accuracy (float): overall top-1 accuracy
        predictions, targets, percents (np.array): flattened per-sample predictions, targets, and max softmax scores
        accuracy_breakdown (List x List): per (attribute, task) accuracy (correct counts / input counts)
    """
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t-hat = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
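# Illustrative sketch (not part of the original pipeline): a worked BiasAmp_{a->t}
# example on a hypothetical 2x2 count table, assuming binary attributes {0, 1} and
# tasks [0, 1]. The counts are invented purely for illustration.
def _toy_bias_amp_example():
    toy_attributes = {0: 0, 1: 1}
    toy_tasks = [0, 1]
    toy_res = [[40, 10], [10, 40]]   # test-input counts per (attribute, task)
    toy_res_h = [[45, 5], [5, 45]]   # test-prediction counts per (attribute, task)
    _, toy_biasamp = calc_bias_amp_at(toy_res, toy_res_h, toy_attributes, toy_tasks)
    # Every cell contributes +0.10 (the predictions are more attribute-aligned than
    # the inputs), so BiasAmp_{a->t} = 0.40 / (2 * 2) = 0.10.
    return toy_biasamp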
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
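# A confidence of exactly 0 would fall outside the left-open first bin (0, 1/nbins]
# below and be silently dropped, hence the sanity check above.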
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
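# Illustrative sketch (not part of the original pipeline): calibration metrics on a
# hypothetical batch of eight predictions, using the helpers above and the module's
# numpy import. All values are invented for illustration.
def _toy_calibration_example(nbins=4):
    toy_percents = np.array([0.30, 0.55, 0.60, 0.70, 0.80, 0.85, 0.90, 0.95])
    toy_predictions = np.array([0, 1, 0, 1, 1, 0, 1, 1])
    toy_targets = np.array([1, 1, 0, 0, 1, 0, 1, 1])
    acc_b, conf_b, count_b = get_binned_metrics(toy_percents, toy_predictions, toy_targets, nbins)
    # ECE weights each bin's |accuracy - confidence| gap by its share of samples;
    # MCE keeps only the single worst gap across the populated bins.
    return get_ece(acc_b, conf_b, count_b, nbins), get_mce(acc_b, conf_b, nbins)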
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
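# Illustrative sketch (not part of the original pipeline): inversion probabilities
# produced by get_p for a hypothetical binary setup with epsilon = 30.
def _toy_inversion_probabilities():
    fixed = get_p(30, 2, {"type": "fixed", "assign": "custom", "custom_selection": [0]})
    spaced = get_p(30, 2, {"type": "evenly_spaced"})
    # fixed  == [0.8, 0.2]: the selected class inverts with probability 0.5 + epsilon/100.
    # spaced == [0.2, 0.8]: probabilities spread linearly across the epsilon band.
    return fixed, spaced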
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert" in TRANSFORMS:
transforms.append({"name": "invert", "p": list(p), "seed": SEED_TEST})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index_col=False)
except Exception:  # no previous results file to append to
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_regularization/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 60, 10)
EXPERIMENT_NAME = "cifar100_regularization" # used to store experiment configs and results
WEIGHT_DECAYS = np.logspace(-5, -2, 8)
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar100.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar100_resnet110_gpu1_lrmultistep.json",
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 50_000
SEED_BASE = 0
SEED_TEST = 100_000 # seed, or "None" to indicate get_test_seed() should be used
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
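# The "custom" branch assumes JSON_BASE has been set to an integer offset into
# previously generated configs; with the default JSON_BASE = None above,
# counter + JSON_BASE would raise a TypeError, so this script is expected to run
# with JSON_TYPE = "template".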
config_paths = []
output_directories = []
names = []
epsilons = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
counter_ids = []
for weight_decay in WEIGHT_DECAYS:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, JSON_TEMPLATES[0], COUNTER)) as f:
data = json.load(f)
invert_exact_index_train = -1
invert_exact_index_test = -1
for i in range(len(data['dataset']['train']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TRAIN % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TRAIN / len(class_assignment)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
with util.torch_seed(train_seed):
with util.numpy_seed(train_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TRAIN, 'r') as f:
train_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TRAIN
for k in range(0, NUM_SAMPLES_TRAIN):
invert_mapped[k] = invert_original[int((train_ids[str(k)]['class'] * class_size) + train_ids[str(k)]['index'])]
data['dataset']['train']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_train = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class':
data['dataset']['train']['transforms'][i]['classes'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
data['optimizer']['weight_decay'] = weight_decay
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(weight_decay)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
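# Probabilities are serialized as e.g. "[0.8 0.2]" (commas stripped), matching how
# the measurement scripts later parse the manifest with
# row['probabilities'][1:-1].split(' ').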
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_regularization/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
root = './'
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
EXPERIMENT_NAME = "cifar100_swapped" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_SAMPLES_TEST = 10_000
TRANSFORMS = { # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"swap_task_attr": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1],
[1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1],
[1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1],
[1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1],
[1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0],
[1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0]]
NUM_CHECKPOINTS = 50
sm = torch.nn.Softmax(dim=1)  # softmax over the class dimension of the (batch, num_classes) logits
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
model (ClassyModel): trained model to evaluate
dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (Dict): mapping from each distinct attribute value to its row index
tasks (List): list of distinct tasks
Returns:
results_in (np.ndarray): attributes x tasks counts of instances with that input attribute and ground-truth task
results_pred (np.ndarray): attributes x tasks counts of instances with that input attribute and predicted task
accuracy (float): overall top-1 accuracy
predictions (np.ndarray): flattened per-sample predicted tasks
targets (np.ndarray): flattened per-sample ground-truth tasks
percents (np.ndarray): flattened per-sample maximum softmax scores
accuracy_breakdown (np.ndarray): per-(attribute, task) accuracy, i.e. correct counts divided by input counts
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
a = int(a)
t = int(t)
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t-hat = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert" in TRANSFORMS:
transforms.append({"name": "invert", "p": list(p), "seed": SEED_TEST})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {0: 0, 1: 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index_col=False)
except Exception:  # no previous results file to append to
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
for checkpoint in range(18, 999, 20):
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/model_phase-' + str(checkpoint) + '_end.torch'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = checkpoint
)
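# With checkpoints sampled every 20 phases starting at 18, (checkpoint + 2) % 100 == 0
# flushes intermediate results to CSV after every fifth evaluated checkpoint
# (i.e. at checkpoints 98, 198, ...).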
if (checkpoint + 2) % 100 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_swapped/scripts/training_measurements_checkpoints.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
[1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1],
[1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1],
[1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1],
[1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1],
[1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0],
[1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0]]
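# Note added for clarity (not in the original script): each row above is one
# candidate class assignment consumed by the loop below as CLASS_ASSIGNMENTS.
# A row maps each of the 100 original CIFAR-100 classes to a binary task label
# (0 or 1), so num_classes = len(set(class_assignment)) evaluates to 2.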
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 60, 10)
EXPERIMENT_NAME = "cifar100_swapped" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate the file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar100.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar100_resnet110_gpu1_lrmultistep.json"
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 50_000
SEED_BASE = 0
SEED_TEST = 100_000 # fixed test seed; set to None to fall back to the model index in get_test_seed()
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None,
"swap_task_attr": None
}
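# Note added for clarity (not in the original script): the config-rewriting
# loops below only test key membership, e.g. "invert_exact" in TRANSFORMS.keys(),
# so the None values are placeholders; dropping a key from TRANSFORMS disables
# the corresponding transform rewrite.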
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
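# Worked example (added for illustration, not part of the original script).
# With the "fixed"/"custom" inversion config above, epsilon = 30 and
# num_classes = 2, class 0 is in class_list and class 1 is not, so:
#   get_p(30, 2, {"type": "fixed", "assign": "custom", "custom_selection": [0]})
#   -> [0.8, 0.2]
# With type = "evenly_spaced", epsilon = 30 and num_classes = 3, probabilities
# are spread evenly between 0.5 - 0.3 and 0.5 + 0.3: [0.2, 0.5, 0.8].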
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
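# Example (added for illustration): with SEED_BASE = 0, get_train_seed(0)
# returns 200_000 and get_train_seed(1) returns 300_000; get_test_seed(model_i)
# falls back to the model index only when SEED_TEST is None.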
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
depths = []
counter_ids = []
for template in JSON_TEMPLATES:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, template, COUNTER)) as f:
data = json.load(f)
invert_exact_index_train = -1
invert_exact_index_test = -1
for i in range(len(data['dataset']['train']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TRAIN % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TRAIN / len(class_assignment)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
with util.torch_seed(train_seed):
with util.numpy_seed(train_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TRAIN, 'r') as f:
train_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TRAIN
for k in range(0, NUM_SAMPLES_TRAIN):
invert_mapped[k] = invert_original[int((train_ids[str(k)]['class'] * class_size) + train_ids[str(k)]['index'])]
data['dataset']['train']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_train = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class':
data['dataset']['train']['transforms'][i]['classes'] = class_assignment
if "swap_task_attr" in TRANSFORMS.keys():
assert "swap_binary_task" not in TRANSFORMS.keys()
data['dataset']['train']['transforms'].insert(invert_exact_index_train + 1, {"name": "swap_task_attr"})
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
if "swap_task_attr" in TRANSFORMS.keys():
assert "swap_binary_task" not in TRANSFORMS.keys()
data['dataset']['test']['transforms'].insert(invert_exact_index_test + 1, {"name": "swap_task_attr"})
data['model']['num_classes'] = num_classes
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(template)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_swapped/scripts/generate_experiment_configs.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the trainer/ directory
import_all_modules(FILE_ROOT, "trainer")
|
cv_bias_amplification-main
|
my-project-release/my-project/trainer/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import Optional
from classy_vision.generic.distributed_util import set_cpu_device, set_cuda_device_index
from classy_vision.trainer.classy_trainer import ClassyTrainer
class GPUTrainer(ClassyTrainer):
"""Trainer to be used if you want want use only a single training process."""
def __init__(self, rank):
self.rank = rank
def train(self, task):
if task.use_gpu:
logging.info("Using GPU, CUDA device index: {}".format(0))
set_cuda_device_index(self.rank)
else:
logging.info("Using CPU")
set_cpu_device()
super().train(task)
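# Usage sketch (added for illustration; not part of the original file).
# Assuming `task` is a classy_vision task with `use_gpu` set, built elsewhere
# from a config:
#
#     trainer = GPUTrainer(rank=0)
#     trainer.train(task)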
|
cv_bias_amplification-main
|
my-project-release/my-project/trainer/gpu_trainer.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
|
Clockwork-main
|
__init__.py
|
#!/usr/bin/env python3
# pyre-strict
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import asyncio
from planner.config import (
PlannerConfig,
get_algorithm,
get_task_fetcher,
)
from planner.plan_writer import PlanWriter
from planner.planner import Planner
"""
This file is not production code and is not guaranteed
to always run or be up to date. It is just a useful tool
for Varun to test the planner without modifying
any production code.
"""
def main() -> int:
config = PlannerConfig(
task_fetcher=get_task_fetcher("hard_coded"),
scheduling_algorithm=get_algorithm("right_based"),
plan_writer=PlanWriter(),
)
planner = Planner(config=config)
return asyncio.run(planner.run())
if __name__ == "__main__":
"""
Runs one iteration of the planner.
"""
exit(main())
|
Clockwork-main
|
main.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
import logging
import sys
import traceback
from common.data_types import UnixtimeAssignments
from planner.config import PlannerConfig, TaskPoolConfig
__ALL__ = ["Planner"]
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger: logging.Logger = logging.getLogger(__name__)
class Planner:
def __init__(self, config: PlannerConfig) -> None:
self.config: PlannerConfig = config
async def run(self) -> int:
ret_code = 0
try:
pool = self.config.task_pool
plan = await self.execute_task_pool(pool)
await self.config.plan_writer.overwrite_plan(plan)
except Exception:
etype, value, tb = sys.exc_info()
traceback.print_exception(etype, value, tb)
ret_code = 1
return ret_code
@staticmethod
async def execute_task_pool(pool: TaskPoolConfig) -> UnixtimeAssignments:
tasks = await pool.task_fetcher.fetch()
plan = await pool.scheduling_algorithm.run(tasks)
missing_from_plan = frozenset(tasks - set(plan.keys()))
logger.debug(
f"Planning Finished | In Plan: {len(plan)} | Missing from Plan: {len(missing_from_plan)}",
)
return plan
|
Clockwork-main
|
planner/planner.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from __future__ import annotations
from dataclasses import dataclass
from typing import Optional
from algorithm.algorithm import (
SchedulingAlgorithm,
NullAlgorithm,
ReturnZero,
RightBased,
)
from algorithm.task_fetchers import (
TaskFetcher,
HardCodedTaskFetcher,
)
from planner.plan_writer import PlanWriter
__all__ = [
"PlannerConfig",
"TaskPoolConfig",
"get_algorithm",
"get_task_fetcher",
]
def get_algorithm(short_name: str, *args, **kwargs) -> SchedulingAlgorithm:
"""
This is the official registry of all available planning algorithms
"""
registry = {
"do_nothing": NullAlgorithm,
"return_zero": ReturnZero,
"right_based": RightBased,
}
return registry[short_name](*args, **kwargs)
def get_task_fetcher(short_name: str, *args, **kwargs) -> TaskFetcher:
"""
This is the official registry of all available task fetchers
"""
registry = {
"hard_coded": HardCodedTaskFetcher,
}
return registry[short_name](*args, **kwargs)
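# Illustrative lookups (added for clarity; this mirrors how main.py wires the
# planner):
#
#     get_algorithm("right_based")    # -> RightBased()
#     get_task_fetcher("hard_coded")  # -> HardCodedTaskFetcher()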
@dataclass
class TaskPoolConfig:
task_fetcher: TaskFetcher
scheduling_algorithm: SchedulingAlgorithm
class PlannerConfig:
def __init__(
self,
task_fetcher: TaskFetcher,
scheduling_algorithm: SchedulingAlgorithm,
plan_writer: Optional[PlanWriter] = None,
) -> None:
self.task_pool: TaskPoolConfig = TaskPoolConfig(
task_fetcher, scheduling_algorithm
)
self.plan_writer: Optional[PlanWriter] = plan_writer
|
Clockwork-main
|
planner/config.py
|