python_code
| repo_name
| file_path
|
---|---|---|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn.functional as F
import numpy as np
from tqdm import tqdm
from scipy.spatial import KDTree
from libs.chamfer_distance import ChamferDistance
from utils.strandsutils import spline_strand, pad_strand, natural_cubic_spline_coeffs, NaturalCubicSpline
def uncompress_strand(strands_pc, strands_sep):
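    # split a flat (sum(strands_sep), 3) point array back into a list of per-strand arrays,
    # where strands_sep holds the number of points belonging to each strand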
sidx = 0
strands = []
for v in strands_sep:
strands.append(strands_pc[sidx:sidx+v])
sidx += v
return strands
def strands_kdtree_query(input_pc, target_kdtree, target_pc, k=10, radius=None):
if radius:
idx = target_kdtree.query_ball_point(input_pc, radius)
else:
k = np.arange(k) + 1
dis, idx = target_kdtree.query(input_pc, k)
idx = idx.reshape(-1)
idx = np.unique(idx)
knn_target_pc = target_pc[0, idx, :]
knn_target_pc = knn_target_pc[None, :]
return knn_target_pc, dis, idx
def densify_pc(input_pc, density, dup=4):
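    # duplicate each point density[i] // (256 // dup) extra times, so points with a higher
    # density value appear more often in the returned point cloud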
dup_sep = 256 // dup
dense_pc = []
if torch.is_tensor(input_pc):
for i in range(len(input_pc)):
dense_pc.append(input_pc[i].detach().cpu().numpy().tolist())
num_dup = density[i] // dup_sep
for j in range(int(num_dup)):
dense_pc.append(input_pc[i].detach().cpu().numpy().tolist())
dense_pc = torch.tensor(dense_pc)[None, :].cuda()
else:
print("Densifying point cloud...")
for i in tqdm(range(len(input_pc))):
dense_pc.append(input_pc[i])
num_dup = density[i] // dup_sep
for j in range(int(num_dup)):
dense_pc.append(input_pc[i])
dense_pc = np.array(dense_pc)
print("Number of origina points: %d, number of densified points: %d"%(input_pc.shape[0], dense_pc.shape[0]))
return dense_pc
def compute_len_loss(strands_pc, strands_pc_next, strands_sep, losstype="l2", **kwargs):
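    # regularizer that keeps each segment length (distance between consecutive points) of
    # strands_pc_next close to the corresponding segment length in strands_pc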
strands = []
loss = 0
strands = uncompress_strand(strands_pc, strands_sep)
strands_next = uncompress_strand(strands_pc_next, strands_sep)
for s, s_next in zip(strands, strands_next):
delta1 = s[:-1] - s[1:]
delta2 = s_next[:-1] - s_next[1:]
delta1 = torch.sqrt(torch.sum(delta1**2, dim=-1))
delta2 = torch.sqrt(torch.sum(delta2**2, dim=-1))
delta = delta1 - delta2
if losstype == "l2":
loss += torch.mean(delta**2)
elif losstype == "l1":
loss += torch.mean(torch.abs(delta))
else:
raise NotImplementedError(f"losstype {losstype} is not implemented for compute_len_loss")
loss = loss / len(strands)
return loss
def compute_len2_loss(strands_pc, strands_pc_next, strands_sep, losstype="max", max_ratio=0.1, **kwargs):
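    # same idea as compute_len_loss but over two-segment spans (points two apart); the "max"
    # variant only penalizes deviations larger than max_ratio of the original span length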
strands = uncompress_strand(strands_pc, strands_sep)
strands_next = uncompress_strand(strands_pc_next, strands_sep)
loss = 0
for s_ori, s_next in zip(strands, strands_next):
delta_ori = s_ori[:-2] - s_ori[2:]
delta_next = s_next[:-2] - s_next[2:]
delta_ori = torch.sqrt(torch.sum(delta_ori**2, dim=-1))
delta_next = torch.sqrt(torch.sum(delta_next**2, dim=-1))
if losstype == "l1":
loss += torch.mean(torch.abs(delta_next - delta_ori))
elif losstype == "l2":
loss += torch.mean((delta_next - delta_ori)**2)
elif losstype == "max":
dismat = torch.abs(delta_next - delta_ori)
thres = max_ratio * delta_ori
dismat = F.relu(dismat - thres)
loss += torch.mean(dismat)
else:
raise NotImplementedError(f"losstype {losstype} is not defined for compute_len2_loss")
loss = loss / len(strands)
return loss
def compute_tangential_loss(strands_pc, strands_pc_next, strands_sep, losstype="l2", cycle=False, **kwargs):
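    # penalizes differences between the tangential components of the per-point displacements
    # (s_next - s) at the two ends of every segment, discouraging stretching along the strand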
loss = 0
strands = uncompress_strand(strands_pc, strands_sep)
strands_next = uncompress_strand(strands_pc_next, strands_sep)
for s, s_next in zip(strands, strands_next):
delta = s_next - s
hair_dirs = s[1:] - s[:-1]
hair_dirs_normalized = F.normalize(hair_dirs, p=2, dim=-1)
dot_root = torch.sum(delta[:-1] * hair_dirs_normalized, dim=-1)
dot_child = torch.sum(delta[1:] * hair_dirs_normalized, dim=-1)
if cycle:
hair_dirs_next = s_next[1:] - s_next[:-1]
hair_dirs_next_normalized = F.normalize(hair_dirs_next, p=2, dim=-1)
dot_root_next = torch.sum(delta[:-1] * hair_dirs_next_normalized, dim=-1)
dot_child_next = torch.sum(delta[1:] * hair_dirs_next_normalized, dim=-1)
if losstype == "l2":
loss += torch.mean((dot_root - dot_child)**2)
if cycle:
loss += torch.mean((dot_root_next - dot_child_next)**2)
elif losstype == "l1":
loss += torch.mean(torch.abs(dot_root - dot_child))
if cycle:
loss += torch.mean(torch.abs(dot_root_next - dot_child_next))
else:
raise NotImplementedError(f"losstype {losstype} is not implemented for compute_tangential_loss")
loss = loss / len(strands)
return loss
class StrandsOptimizerNeuralCubic():
def __init__(self, input_strands, target_pc, target_density, num_strd_pts=128, num_strands_per_opt=1600):
self.target_pc = target_pc
self.target_density = target_density * 255
self.target_pc = densify_pc(self.target_pc, self.target_density)
print('Building KDTree for target point cloud...')
self.target_kdtree = KDTree(self.target_pc)
self.num_strands_per_opt = num_strands_per_opt
num_origi_strands = input_strands.shape[0]
filtered_strands = self.filtering_strands(input_strands)
self.num_strands = len(filtered_strands)
        print('Number of original strands: %d, filtered strands: %d'%(num_origi_strands, self.num_strands))
print('Pre-padding strands for neural cubic interpolation...')
self.num_strd_pts = num_strd_pts
self.input_strands = []
self.times = []
self.input_num_strds_pts = []
for i_strd in tqdm(range(self.num_strands)):
strand = filtered_strands[i_strd][:, :3].astype(np.float32)
if strand.shape[0] > self.num_strd_pts:
strand = spline_strand(strand, num_strand_points=self.num_strd_pts)
self.input_num_strds_pts.append(strand.shape[0])
strand, time = pad_strand(strand, num_strand_points=self.num_strd_pts)
self.input_strands.append(strand)
self.times.append(time)
self.input_strands = np.array(self.input_strands)
self.times = np.array(self.times)
if not torch.is_tensor(self.target_pc):
self.target_pc = torch.tensor(self.target_pc).float().cuda()[None, :]
self.epoch = 80
self.eps = 1e-1
self.chamfer_dis = ChamferDistance().cuda()
self.learning_rate = 1e-1
self.forward_weight = 1.0
self.backward_weight = 1.0
self.length_weight = 100.0
self.tangent_weight = 100.0
def filtering_strands(self, input_strands, eps=3.0):
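        # keep only strands whose mean distance to the target point cloud is below eps;
        # everything farther away is treated as an outlier and dropped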
print("Filtering strands outliers...")
num_strands = input_strands.shape[0]
filtered_strands = []
for i_strd in tqdm(range(num_strands)):
strand = np.array(input_strands[i_strd]).astype(np.float32)[:, :3]
_, dis, _ = strands_kdtree_query(strand, self.target_kdtree, self.target_pc[None, :])
if (np.mean(dis) < eps):
filtered_strands.append(strand)
return filtered_strands
def diff_spline(self, strands, times):
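        # fit a differentiable natural cubic spline through each padded strand and resample it
        # at num_strd_pts uniformly spaced time values in [0, 1]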
coeffs = natural_cubic_spline_coeffs(times, strands)
spline = NaturalCubicSpline(coeffs)
time_pts = torch.arange(self.num_strd_pts).to(strands.device) / (self.num_strd_pts - 1)
time_pts = time_pts.repeat(strands.shape[0], 1)
splined_points = spline.evaluate(time_pts)
return splined_points
def optimization(self, regularization=True):
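        # optimize the strand control points in chunks of num_strands_per_opt strands, matching
        # the splined points to the target point cloud with a chamfer loss plus (optionally) the
        # length and tangential regularizers defined above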
num_opts = self.num_strands // self.num_strands_per_opt + 1
ori_splined_points = []
opted_splined_points = []
opted_strands_pc = []
strands_seps = np.ones(self.num_strands).astype(np.int16) * self.num_strd_pts
print('Start optimization...')
for i_opt in tqdm(range(num_opts)):
i_start = i_opt * self.num_strands_per_opt
i_end = min((i_opt + 1) * self.num_strands_per_opt, self.num_strands)
num_strds_this_opt = i_end - i_start
strands = torch.tensor(self.input_strands[i_start:i_end]).cuda()
times = torch.tensor(self.times[i_start:i_end]).cuda()
strands_noroots = strands[:, 1:, :].clone().detach()
strands_roots = strands[:, 0:1, :].clone().detach()
strands_noroots = strands_noroots.requires_grad_(True)
strands_roots = strands_roots.requires_grad_(True)
self.optimizer = torch.optim.Adam([strands_noroots], lr=self.learning_rate)
# before optimization
strands = torch.concat((strands_roots, strands_noroots), dim=1)
splined_points = self.diff_spline(strands, times)
ori_splined_points.extend(splined_points.view(-1, 3).detach().cpu().numpy().tolist())
constraint_pc = splined_points.view(-1, 3).clone().detach()
strands_sep = np.ones(num_strds_this_opt).astype(np.int16) * self.num_strd_pts
for i_epoch in range(self.epoch):
strands = torch.concat((strands_roots, strands_noroots), dim=1)
splined_points = self.diff_spline(strands, times)
input_pc = splined_points.view(1, -1, 3)
input_pc_numpy = input_pc.clone().detach().cpu().numpy()[0]
knn_target_pc, _, knn_idx = strands_kdtree_query(input_pc_numpy, self.target_kdtree, self.target_pc)
dist1, dist2 = self.chamfer_dis(input_pc, knn_target_pc)
chamfer_loss = self.forward_weight * torch.mean(dist1) + self.backward_weight * torch.mean(dist2)
if regularization:
len_loss = compute_len_loss(constraint_pc, input_pc[0], strands_sep)
len2_loss = compute_len2_loss(constraint_pc, input_pc[0], strands_sep)
tangent_loss = compute_tangential_loss(constraint_pc, input_pc[0], strands_sep)
loss = chamfer_loss + \
self.length_weight * len_loss + self.length_weight * len2_loss + \
self.tangent_weight * tangent_loss
else:
loss = chamfer_loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
print('\topts: %d/%d, epochs: %d/%d, number of points: %d, current loss: %f'%(i_opt, num_opts, i_epoch, self.epoch, input_pc.shape[1], loss.data), end='\r')
# after optimization
strands = torch.concat((strands_roots, strands_noroots), dim=1)
splined_points = self.diff_spline(strands, times)
opted_splined_points.extend(splined_points.view(-1, 3).detach().cpu().numpy().tolist())
# original control points
num_strds_pts = self.input_num_strds_pts[i_start:i_end]
strands_pc = np.zeros((np.sum(num_strds_pts, keepdims=False), 3))
sidx = 0
for i_strd in range(num_strds_this_opt):
strands_pc[sidx:sidx + num_strds_pts[i_strd]] = strands.detach().cpu().numpy()[i_strd, :num_strds_pts[i_strd]]
sidx += num_strds_pts[i_strd]
opted_strands_pc.extend(strands_pc.tolist())
return np.array(ori_splined_points), np.array(opted_splined_points), strands_seps, np.array(opted_strands_pc), self.input_num_strds_pts
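# Hypothetical usage sketch (not part of the original file); assumes `strands` is a list of
# per-strand (N_i, >=3) arrays and `pc` / `density` are (M, 3) and (M,) numpy arrays:
#   opt = StrandsOptimizerNeuralCubic(strands, pc, density, num_strd_pts=128)
#   ori_pts, opted_pts, seps, ctrl_pts, num_ctrl_pts = opt.optimization(regularization=True)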
|
CT2Hair-main
|
CT2Hair/modules/strands_opt.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import csv
import torch
import torch.nn as nn
from modules.networks import *
from utils.utils import batched_index_select
from utils.strandsutils import natural_cubic_spline_coeffs, NaturalCubicSpline
class StrandEncoder1dCNN(nn.Module):
def __init__(self, do_vae, num_pts=100, out_channels=64):
super(StrandEncoder1dCNN, self).__init__()
self.do_vae = do_vae
self.num_pts = num_pts
self.training = False
        out_channels *= 2 # note that since we do vae the features are doubled so that we can get both mean and variance
in_channels = 0
in_channels += 3 # 3 for the xyz
in_channels += 3 # 3 for the direction
if num_pts == 100:
self.cnn_encoder = torch.nn.Sequential(
Conv1dWN(in_channels, 32, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU(),
Conv1dWN(32, 32, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU(),
Conv1dWN(32, 64, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU(),
Conv1dWN(64, 128, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU(),
Conv1dWN(128, 128, kernel_size=4, stride=2, padding=1, padding_mode='zeros'), torch.nn.SiLU())
            # after running the cnn we still end up with several elements per strand, and we want to pool over them with something better than an avg pool
self.final_cnn_aggregator=torch.nn.Sequential(
LinearWN(128*3, 128), torch.nn.SiLU(),
LinearWN(128, out_channels))
elif num_pts == 256:
self.cnn_encoder = torch.nn.Sequential(
Conv1dWN(in_channels, 32, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(32, 32, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(32, 64, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(64, 128, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(128, 256, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU(),
Conv1dWN(256, 256, kernel_size=3, stride=2, padding=1, padding_mode='replicate'), torch.nn.SiLU())
self.final_cnn_aggregator = torch.nn.Sequential(
LinearWN(256 * int(self.num_pts / 64), 256), torch.nn.SiLU(),
LinearWN(256, out_channels))
else:
print("Number of points %d is not supported."%(num_pts))
exit(0)
self.pred_mean = torch.nn.Sequential(
torch.nn.SiLU(),
LinearWN(out_channels, out_channels), torch.nn.SiLU(),
LinearWN(out_channels, int(out_channels / 2))
)
self.pred_logstd = torch.nn.Sequential(
torch.nn.SiLU(),
LinearWN(out_channels, out_channels), torch.nn.SiLU(),
LinearWN(out_channels, int(out_channels / 2))
)
self.apply(lambda x: swish_init(x, False))
swish_init(self.pred_mean, True)
swish_init(self.pred_logstd, True)
self.pe = LearnedPE(in_channels=1, num_encoding_functions=5, logsampling=True)
def forward(self, points):
points = points.view(-1, self.num_pts, 3) # nr_strands, points_per_strand, xyz
original_points = points
points = points.permute(0, 2, 1) ## nr_strands, xyz, 100
nr_strands = points.shape[0]
        ### get also the direction from each point to the next
cur_points = original_points[:, 0:self.num_pts - 1, : ]
next_points = original_points[:, 1:self.num_pts, :]
direction = next_points - cur_points
# pad_zero=torch.zeros(nr_strands,1,3).cuda()
# direction = torch.cat([direction,pad_zero],1) # make the direction nr_strands, 100, 3
last_dir = direction[:, self.num_pts - 2:self.num_pts - 1, :]
direction = torch.cat([direction, last_dir],1) # make the direction nr_strands, 100, 3
direction = direction.permute(0, 2, 1) # nr_strands, xyz, 100
# direction=direction * 100 # (we multiply by the nr of segments so that the value is not so small and is closer to our desired range)
per_point_features = torch.cat([points, direction] ,1)
        strand_features = self.cnn_encoder(per_point_features) # nr_strands, 128 (nr_features), 3 (elements per strand)
strand_features = strand_features.view(nr_strands, -1).contiguous()
strand_features = self.final_cnn_aggregator(strand_features) # outputs nr_strands x 128
s = self.pred_mean(strand_features)
s_mean_and_logstd_dict = {}
if self.do_vae:
s_mean = s
# print("s_mean has mean std ", s_mean.mean(), s_mean.std())
s_logstd = 0.1 * self.pred_logstd(strand_features)
s_mean_and_logstd_dict["mean"] = s_mean
s_mean_and_logstd_dict["logstd"] = s_logstd
# print("s_logstd has mean std ", s_logstd.mean(), s_logstd.std())
if self.training:
std = torch.exp(s_logstd)
eps = torch.empty_like(std).normal_()
s = s + std * eps
# print("strand std min max", std.min(), " ", std.max())
return s, s_mean_and_logstd_dict
class StrandGeneratorSiren(nn.Module):
    # a siren network which predicts various direction vectors along the strand, similar to FakeODE.
    # the idea is that siren works well when something periodic needs to be predicted, and the strand can be seen as some periodic direction vectors being repeated at certain points along the strand
    # the idea is similar to modulation siren https://arxiv.org/pdf/2104.03960.pdf
def __init__(self, in_channels, modulation_hidden_dim, siren_hidden_dim, scale_init, decode_direct_xyz, decode_random_verts, num_pts=100):
super(StrandGeneratorSiren, self).__init__()
self.num_pts = num_pts
self.decode_direct_xyz = decode_direct_xyz
self.decode_random_verts = decode_random_verts
self.swish = torch.nn.SiLU()
self.tanh = torch.nn.Tanh()
self.nr_layers = 3
cur_nr_channels = in_channels
# cur_nr_channels+=1 #+1 for the time t
self.modulation_layers = torch.nn.ModuleList([])
for i in range(self.nr_layers):
self.modulation_layers.append(LinearWN(cur_nr_channels, modulation_hidden_dim))
cur_nr_channels = modulation_hidden_dim+in_channels # at the end we concatenate the input z
self.decode_dir = LinearWN(siren_hidden_dim, 3)
self.apply(lambda x: swish_init(x, False))
swish_init(self.decode_dir, True)
self.siren_layers = torch.nn.ModuleList([])
self.siren_layers.append(BlockSiren(in_channels=1, out_channels=siren_hidden_dim, is_first_layer=True, scale_init=scale_init))
for i in range(self.nr_layers-1):
self.siren_layers.append(BlockSiren(in_channels=siren_hidden_dim, out_channels=siren_hidden_dim))
def forward(self, strand_features):
        nr_verts_to_create = self.num_pts - 1 # we create only num_pts - 1 because the first one is just the origin
if self.decode_random_verts:
nr_verts_to_create = 1
nr_strands = strand_features.shape[0]
strand_features = strand_features.view(nr_strands, 1, -1).repeat(1, nr_verts_to_create, 1) # nr_strands x 100 x nr_channels
# sampling t
t = torch.linspace(0, 1, self.num_pts).cuda()
t = t.view(self.num_pts, 1)
if self.decode_direct_xyz:
t = t[1:self.num_pts, :] # we don't create the root because it's already given
        else: # we are decoding directions, therefore the first direction should be computed but the last direction should be ignored because the tip doesn't need a direction
t = t[0:self.num_pts - 1, :]
        # repeat strand features to be nr_strands x nr_verts x nr_channels
# concat for each vertex the positional encoding
t = t.view(1, self.num_pts - 1, -1).repeat(nr_strands, 1, 1) #nrstrands, nr_verts, nr_channels
# strand_features_with_time=torch.cat([strand_features,t],2)
point_indices = None
if self.decode_random_verts:
# choose a random t for each strand
            # we can create any vertex up to the second-to-last one (not the tip), since we need to be able to sample the next vertex to get a direction vector
probability = torch.ones([nr_strands, self.num_pts - 2], dtype=torch.float32, device=torch.device("cuda"))
            point_indices = torch.multinomial(probability, nr_verts_to_create, replacement=False) # one sampled vertex index per strand
# add also the next vertex on the strand so that we can compute directions
point_indices = torch.cat([point_indices, point_indices + 1], 1)
t = batched_index_select(t, 1, point_indices)
# decode xyz
h_siren = t
        # z_scaling=0.001 # this has to be initialized so that h_modulation is something like 0.2. If it's lower,
        # then no gradient will flow into z and the network will not be optimized. You might need to do one run and check the gradients of the network to see that they don't vanish
z_scaling = 1.0
z = strand_features
z_initial = z * z_scaling
z = z * z_scaling
with_checkpointing = True
for i in range(self.nr_layers):
h_modulation = self.swish( self.modulation_layers[i](z))
s = self.siren_layers[i](h_siren)
h_siren = (1 - h_modulation) * s
# for next iter
z = torch.cat([z_initial, h_modulation], 2)
if self.decode_direct_xyz:
points_dir = self.decode_dir(h_siren) * 0.1
if self.decode_random_verts:
pred_strands = points_dir
else:
start_positions = torch.zeros(nr_strands, 1, 3).cuda()
pred_strands = torch.cat([start_positions, points_dir], 1)
else:
# divide by the nr of points on the strand otherwise the direction will have norm=1 and then when integrated you end up with a gigantic strand that has 100 units
hair_dir = self.decode_dir(h_siren) * 0.01
pred_strands = torch.cumsum(hair_dir, dim=1) # nr_strands, nr_verts-1, 3
# we know that the first vertex is 0,0,0 so we just concatenate that one
start_positions = torch.zeros(nr_strands, 1, 3).cuda()
pred_strands = torch.cat([start_positions, pred_strands], 1)
return pred_strands, point_indices
'''
Uses only one Z tensor and predicts the strands using SIREN. There is no normalization apart from moving the strands to the origin.
It is used to predict and regress only strand data, with no scalp.
'''
class StrandCodec(nn.Module):
def __init__(self, do_vae, decode_direct_xyz, decode_random_verts, train_params, is_train=True):
super(StrandCodec, self).__init__()
self.do_vae = do_vae
self.decode_direct_xyz = decode_direct_xyz
self.decode_random_verts = decode_random_verts
self.nr_verts_per_strand = train_params['num_pts']
if self.decode_random_verts:
self.nr_verts_per_strand = 2
self.cosine_embed_loss = nn.CosineEmbeddingLoss()
if is_train:
self.weight_pts = train_params['weight_pts']
self.weight_dir = train_params['weight_dir'] # 0.001
self.weight_kl= train_params['weight_kl'] # 0.0001
# encode
        self.strand_encoder_for_shape = StrandEncoder1dCNN(self.do_vae, self.nr_verts_per_strand, train_params['code_channels']) # predicts a 64-dim shape vector; gets the inputs after they have been normalized
# decoder
        self.strand_generator = StrandGeneratorSiren(in_channels=train_params['code_channels'], modulation_hidden_dim=32, siren_hidden_dim=32,
                                                     scale_init=5, decode_direct_xyz=decode_direct_xyz, decode_random_verts=decode_random_verts,
                                                     num_pts=self.nr_verts_per_strand) # generate a whole strand from a 64-dimensional shape vector
def save(self, root_folder, experiment_name, iter_nr):
models_path = os.path.join(root_folder, experiment_name, str(iter_nr), "models")
if not os.path.exists(models_path):
os.makedirs(models_path, exist_ok=True)
torch.save(self.state_dict(), os.path.join(models_path, "strand_codec.pt"))
# write csv with some info
out_info_path = os.path.join(models_path, "strand_codec_info.csv")
with open(out_info_path, "w") as f: #we need to put the writer in a block so that it closes the file automaticaly afterwards
w = csv.writer(f)
w.writerow(["do_vae", self.do_vae])
w.writerow(["decode_direct_xyz", self.decode_direct_xyz])
w.writerow(["decode_random_verts", self.decode_random_verts])
def diff_spline(self, hair_data_dict):
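        # resample every strand in the batch to nr_verts_per_strand points with a natural cubic
        # spline; the result is detached and used as the reconstruction target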
points = hair_data_dict["points"].cuda()
times = hair_data_dict["times"].cuda()
coeffs = natural_cubic_spline_coeffs(times, points)
spline = NaturalCubicSpline(coeffs)
time_pts = torch.arange(self.nr_verts_per_strand).cuda() / (self.nr_verts_per_strand - 1)
time_pts = time_pts.repeat(points.shape[0], 1)
self.splined_points = spline.evaluate(time_pts)
self.splined_points = self.splined_points.detach()
def encode(self):
s_shape, s_shape_mean_and_logstd_dict = self.strand_encoder_for_shape(self.splined_points)
encoded_dict = {}
encoded_dict["s_shape"] = s_shape
encoded_dict["s_shape_mean_and_logstd_dict"] = s_shape_mean_and_logstd_dict
return encoded_dict
def decode(self, encoded_dict):
s_shape = encoded_dict["s_shape"]
# generate the strand points
pred_points, point_indices = self.strand_generator(s_shape)
prediction_dict = {}
prediction_dict["pred_points"] = pred_points
prediction_dict["point_indices"] = point_indices
return prediction_dict
def compute_loss(self, prediction_dict, encoded_dict):
loss_l2 = self.compute_loss_l2(prediction_dict)
loss_dir = self.compute_loss_dir(prediction_dict)
loss_kl = self.compute_loss_kl(encoded_dict)
loss = self.weight_pts * loss_l2 + self.weight_dir * loss_dir + self.weight_kl * loss_kl
# loss = loss_l2 + loss_dir * 0.01 + loss_kl * 0.001
# loss = loss_l2 + loss_dir * 0.1 + loss_kl * 0.001 # this gives the lowest kl and the autodecoding looks nice
loss_dict = {}
loss_dict['loss'] = loss
loss_dict['loss_l2'] = loss_l2
loss_dict['loss_dir'] = loss_dir
loss_dict['loss_kl'] = loss_kl
return loss_dict
def compute_loss_l2(self, prediction_dict):
pred_points = prediction_dict["pred_points"].view(-1, self.nr_verts_per_strand, 3)
loss_l2 = ((pred_points - self.splined_points) ** 2).mean()
return loss_l2
def compute_loss_dir(self, prediction_dict):
pred_points = prediction_dict["pred_points"].view(-1, self.nr_verts_per_strand, 3)
        # also get a loss for the direction; we need to compute the direction vectors first
cur_points = pred_points[:, 0:self.nr_verts_per_strand - 1, : ]
next_points = pred_points[:, 1:self.nr_verts_per_strand, :]
pred_deltas = next_points - cur_points
pred_deltas = pred_deltas.view(-1, 3)
gt_cur_points = self.splined_points[:, 0:self.nr_verts_per_strand - 1, : ]
gt_next_points = self.splined_points[:, 1:self.nr_verts_per_strand, :]
gt_dir = gt_next_points - gt_cur_points
gt_dir = gt_dir.view(-1, 3)
loss_dir = self.cosine_embed_loss(pred_deltas, gt_dir, torch.ones(gt_dir.shape[0]).cuda())
return loss_dir
def compute_loss_kl(self, encoded_dict):
#get input data
kl_loss = 0
if self.do_vae:
#kl loss
s_shape_mean_and_logstd_dict = encoded_dict["s_shape_mean_and_logstd_dict"]
kl_shape = self.kl( s_shape_mean_and_logstd_dict["mean"], s_shape_mean_and_logstd_dict["logstd"])
            # free bits from IAF-VAE: if the KL drops below a certain value, we stop reducing it
kl_shape = torch.clamp(kl_shape, min=0.25)
kl_loss = kl_shape.mean()
return kl_loss
def kl(self, mean, logstd):
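        # closed-form KL divergence between N(mean, exp(logstd)) and the standard normal N(0, 1),
        # i.e. KL = -logstd - 0.5 + 0.5 * mean^2 + 0.5 * exp(logstd)^2, computed element-wise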
kl = (-0.5 - logstd + 0.5 * mean ** 2 + 0.5 * torch.exp(2 * logstd))
return kl
def forward(self, hair_data_dict):
self.diff_spline(hair_data_dict)
encoded_dict=self.encode()
prediction_dict=self.decode(encoded_dict)
return prediction_dict, encoded_dict
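# Hypothetical usage sketch (not part of the original file); assumes a batch dict with padded
# strand "points" (B, num_pts, 3) and per-point spline "times" (B, num_pts):
#   train_params = {'num_pts': 256, 'code_channels': 64,
#                   'weight_pts': 1.0, 'weight_dir': 0.001, 'weight_kl': 0.0001}
#   model = StrandCodec(do_vae=True, decode_direct_xyz=False, decode_random_verts=False,
#                       train_params=train_params).cuda()
#   prediction_dict, encoded_dict = model({'points': points, 'times': times})
#   loss_dict = model.compute_loss(prediction_dict, encoded_dict)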
|
CT2Hair-main
|
CT2Hair/modules/strands_codec.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import copy
import torch
import inspect
import numpy as np
from typing import Dict, List, Optional, Tuple
from torch.nn.utils.weight_norm import WeightNorm, remove_weight_norm
class LearnedPE(torch.nn.Module):
def __init__(self, in_channels, num_encoding_functions, logsampling):
super(LearnedPE, self).__init__()
self.in_channels = in_channels
self.num_encoding_functions = num_encoding_functions
self.logsampling = logsampling
out_channels = in_channels * self.num_encoding_functions * 2
        self.conv = torch.nn.Linear(in_channels, int(out_channels / 2), bias=True).cuda() # in case we set the weights ourselves
self.init_weights()
def init_weights(self):
with torch.no_grad():
num_input = self.in_channels
self.conv.weight.uniform_(-np.sqrt(6 / num_input) , np.sqrt(6 / num_input))
# print("weight is ", self.conv.weight.shape) #60x3
            # we do the same as the positional encoding, which is multiplying each coordinate by these linspaced frequencies
lin = 2.0 ** torch.linspace(0.0,
self.num_encoding_functions - 1,
self.num_encoding_functions,
dtype=torch.float32,
device=torch.device("cuda"))
lin_size = lin.shape[0]
weight = torch.zeros([self.in_channels, self.num_encoding_functions * self.in_channels], dtype=torch.float32, device=torch.device("cuda"))
for i in range(self.in_channels):
weight[i : i + 1, i * lin_size : i * lin_size + lin_size ] = lin
weight = weight.t().contiguous()
self.conv.weight = torch.nn.Parameter(weight)
def forward(self, x):
x_proj = self.conv(x)
return torch.cat([torch.sin(x_proj), torch.cos(x_proj), x], -1).contiguous()
class BlockSiren(torch.nn.Module):
def __init__(self, in_channels, out_channels, bias=True, activ=torch.sin, is_first_layer=False, scale_init=90):
super(BlockSiren, self).__init__()
self.bias = bias
self.activ = activ
self.is_first_layer = is_first_layer
self.scale_init = scale_init
self.conv = torch.nn.Linear(in_channels, out_channels, bias=self.bias).cuda()
# if self.activ==torch.sin or self.activ==None:
with torch.no_grad():
if self.activ == torch.sin:
num_input = in_channels
# See supplement Sec. 1.5 for discussion of factor 30
if self.is_first_layer:
self.conv.weight.uniform_(-np.sqrt(6 / num_input) , np.sqrt(6 / num_input))
else:
self.conv.weight.uniform_(-np.sqrt(6 / num_input) , np.sqrt(6 / num_input))
elif self.activ == None:
# self.conv.weight.normal_(0, 0.1)
swish_init(self.conv, True)
def forward(self, x):
x = self.conv(x)
if self.activ == torch.sin:
if self.is_first_layer:
x = self.scale_init * x
else:
x = x * 1
x = self.activ(x)
elif self.activ is not None:
x = self.activ(x)
return x
def check_args_shadowing(name, method, arg_names):
spec = inspect.getfullargspec(method)
init_args = {*spec.args, *spec.kwonlyargs}
for arg_name in arg_names:
if arg_name in init_args:
raise TypeError(f"{name} attempted to shadow a wrapped argument: {arg_name}")
# For backward compatibility.
class TensorMappingHook(object):
def __init__(
self,
name_mapping: List[Tuple[str, str]],
expected_shape: Optional[Dict[str, List[int]]] = None,
):
"""This hook is expected to be used with "_register_load_state_dict_pre_hook" to
modify names and tensor shapes in the loaded state dictionary.
Args:
name_mapping: list of string tuples
A list of tuples containing expected names from the state dict and names expected
by the module.
expected_shape: dict
A mapping from parameter names to expected tensor shapes.
"""
self.name_mapping = name_mapping
self.expected_shape = expected_shape if expected_shape is not None else {}
def __call__(
self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys, error_msgs
):
for old_name, new_name in self.name_mapping:
if prefix + old_name in state_dict:
tensor = state_dict.pop(prefix + old_name)
if new_name in self.expected_shape:
tensor = tensor.view(*self.expected_shape[new_name])
state_dict[prefix + new_name] = tensor
def weight_norm_wrapper(cls, name="weight", g_dim=0, v_dim=0):
"""Wraps a torch.nn.Module class to support weight normalization. The wrapped class
is compatible with the fuse/unfuse syntax and is able to load state dict from previous
implementations.
Args:
name: str
Name of the parameter to apply weight normalization.
g_dim: int
Learnable dimension of the magnitude tensor. Set to None or -1 for single scalar magnitude.
Default values for Linear and Conv2d layers are 0s and for ConvTranspose2d layers are 1s.
v_dim: int
            The dimension of the direction tensor over which the norm is calculated independently. Set to
            None or -1 to calculate the norm over the entire direction tensor (weight tensor). Default
            values for most of the WN layers are None to preserve the existing behavior.
"""
class Wrap(cls):
def __init__(self, *args, name=name, g_dim=g_dim, v_dim=v_dim, **kwargs):
# Check if the extra arguments are overwriting arguments for the wrapped class
check_args_shadowing(
"weight_norm_wrapper", super().__init__, ["name", "g_dim", "v_dim"]
)
super().__init__(*args, **kwargs)
# Sanitize v_dim since we are hacking the built-in utility to support
# a non-standard WeightNorm implementation.
if v_dim is None:
v_dim = -1
self.weight_norm_args = {"name": name, "g_dim": g_dim, "v_dim": v_dim}
self.is_fused = True
self.unfuse()
# For backward compatibility.
self._register_load_state_dict_pre_hook(
TensorMappingHook(
[(name, name + "_v"), ("g", name + "_g")],
{name + "_g": getattr(self, name + "_g").shape},
)
)
def fuse(self):
if self.is_fused:
return
# Check if the module is frozen.
param_name = self.weight_norm_args["name"] + "_g"
if hasattr(self, param_name) and param_name not in self._parameters:
raise ValueError("Trying to fuse frozen module.")
remove_weight_norm(self, self.weight_norm_args["name"])
self.is_fused = True
def unfuse(self):
if not self.is_fused:
return
# Check if the module is frozen.
param_name = self.weight_norm_args["name"]
if hasattr(self, param_name) and param_name not in self._parameters:
raise ValueError("Trying to unfuse frozen module.")
wn = WeightNorm.apply(
self, self.weight_norm_args["name"], self.weight_norm_args["g_dim"]
)
            # Overwrite the dim property to support mismatched norm calculation for the v and g tensors.
if wn.dim != self.weight_norm_args["v_dim"]:
wn.dim = self.weight_norm_args["v_dim"]
# Adjust the norm values.
weight = getattr(self, self.weight_norm_args["name"] + "_v")
norm = getattr(self, self.weight_norm_args["name"] + "_g")
norm.data[:] = torch.norm_except_dim(weight, 2, wn.dim)
self.is_fused = False
def __deepcopy__(self, memo):
# Delete derived tensor to avoid deepcopy error.
if not self.is_fused:
delattr(self, self.weight_norm_args["name"])
# Deepcopy.
cls = self.__class__
result = cls.__new__(cls)
memo[id(self)] = result
for k, v in self.__dict__.items():
setattr(result, k, copy.deepcopy(v, memo))
if not self.is_fused:
setattr(result, self.weight_norm_args["name"], None)
setattr(self, self.weight_norm_args["name"], None)
return result
return Wrap
def is_weight_norm_wrapped(module):
for hook in module._forward_pre_hooks.values():
if isinstance(hook, WeightNorm):
return True
return False
LinearWN = weight_norm_wrapper(torch.nn.Linear, g_dim=0, v_dim=None)
Conv1dWN = weight_norm_wrapper(torch.nn.Conv1d, g_dim=0, v_dim=None)
def swish_init(m, is_linear, scale=1):
# normally relu has a gain of sqrt(2)
# however swish has a gain of sqrt(2.952) as per the paper https://arxiv.org/pdf/1805.08266.pdf
gain=np.sqrt(2.952)
# gain=np.sqrt(2)
if is_linear:
gain = 1
# gain = np.sqrt(2.0 / (1.0 + 1 ** 2))
if isinstance(m, torch.nn.Conv1d):
ksize = m.kernel_size[0]
n1 = m.in_channels
n2 = m.out_channels
# std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
std = gain / np.sqrt(n1 * ksize)
elif isinstance(m, torch.nn.Conv2d):
ksize = m.kernel_size[0] * m.kernel_size[1]
n1 = m.in_channels
n2 = m.out_channels
# std = gain * np.sqrt(2.0 / ((n1 + n2) * ksize))
std = gain / np.sqrt(n1 * ksize)
elif isinstance(m, torch.nn.Linear):
n1 = m.in_features
n2 = m.out_features
std = gain / np.sqrt((n1))
else:
return
is_wnw = is_weight_norm_wrapped(m)
if is_wnw:
m.fuse()
m.weight.data.normal_(0, std*scale)
if m.bias is not None:
m.bias.data.zero_()
if is_wnw:
m.unfuse()
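# Hypothetical usage sketch (not part of the original file): the *WN wrappers start unfused
# (weight normalization active) and can be fused back into a plain weight tensor:
#   lin = LinearWN(128, 64)
#   swish_init(lin, is_linear=False)   # re-initialize with the swish gain
#   lin.fuse()                         # bake the norm into a single weight tensor
#   lin.unfuse()                       # re-apply weight normalization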
|
CT2Hair-main
|
CT2Hair/modules/networks.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import argparse
from pyhocon import ConfigFactory
from termcolor import colored
sys.path.append('CT2Hair/')
from interp import neural_interp
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='conf/data/Curly.conf')
parser.add_argument('--datapath', type=str, default='data')
parser.add_argument('--gpu', type=str, default='0')
k_args = parser.parse_args()
if __name__ == '__main__':
conf_text = open(k_args.conf).read()
conf_text = conf_text.replace('DATAPATH', k_args.datapath)
conf = ConfigFactory.parse_string(conf_text)
conf_text = conf_text.replace('CASENAME', conf['output']['name'])
conf = ConfigFactory.parse_string(conf_text)
strands_out_dir = os.path.join(conf['output']['dir'], conf['output']['name'])
strands_name = conf['output']['name'] \
+ '_guide.bin'
conf['strands']['guide_strds'] = os.path.join(strands_out_dir, strands_name)
if not os.path.exists(os.path.join(strands_out_dir, strands_name)):
print(colored("Guide hair strands not found, please run scripts/gen_guide_strands.py first.", "red"))
exit(1)
print(colored("Running interpolation:", "yellow"))
neural_interp(conf)
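# Example invocation (an assumption, mirroring the argparse defaults above):
#   python scripts/interpolation.py --conf conf/data/Curly.conf --datapath data --gpu 0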
|
CT2Hair-main
|
scripts/interpolation.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import argparse
from pyhocon import ConfigFactory
from termcolor import colored
sys.path.append('CT2Hair/')
from optim import strands_opt
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='conf/data/Curly.conf')
parser.add_argument('--datapath', type=str, default='data')
parser.add_argument('--gpu', type=str, default='0')
k_args = parser.parse_args()
if __name__ == '__main__':
conf_text = open(k_args.conf).read()
conf = ConfigFactory.parse_string(conf_text)
conf_text = conf_text.replace('DATAPATH', k_args.datapath)
conf_text = conf_text.replace('CASENAME', conf['output']['name'])
conf = ConfigFactory.parse_string(conf_text)
strands_out_dir = os.path.join(conf['output']['dir'], conf['output']['name'])
pc_name = conf['output']['name'] \
+ '_oriens' \
+ '_wd_' + str(conf['guide']['wignet_dis']) \
+ '.ply'
strands_name = conf['output']['name'] \
+ '_merged.bin'
conf['pc']['pc_path'] = os.path.join(strands_out_dir, pc_name)
conf['strands']['interp_strds'] = os.path.join(strands_out_dir, strands_name)
if not os.path.exists(os.path.join(strands_out_dir, strands_name)):
print(colored("Interpolated hair strands not found, please run scripts/interpolation.py first.", "red"))
exit(1)
print(colored("Running optimization:", "yellow"))
strands_opt(conf)
|
CT2Hair-main
|
scripts/optimization.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import platform
import argparse
from pyhocon import ConfigFactory
from termcolor import colored
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='conf/data/Curly.conf')
parser.add_argument('--datapath', type=str, default='data')
parser.add_argument('--gpu', type=str, default='0')
k_args = parser.parse_args()
if __name__ == '__main__':
conf_text = open(k_args.conf).read()
conf = ConfigFactory.parse_string(conf_text)
conf_text = conf_text.replace('DATAPATH', k_args.datapath)
conf_text = conf_text.replace('CASENAME', conf['output']['name'])
conf = ConfigFactory.parse_string(conf_text)
if not os.path.exists(str(conf['vdb']['path'])):
print("Input VDB file does not exists.")
exit(1)
oriens_out_dir = os.path.join(conf['output']['dir'], conf['output']['name'])
os.makedirs(oriens_out_dir, exist_ok=True)
oriens_out_name = conf['output']['name'] \
+ '_oriens' \
+ '_wd_' + str(conf['guide']['wignet_dis']) \
+ '.ply'
if platform.system() == 'Linux':
exe_path = 'CT2Hair/GuideHairStrands/GuideHairStrands'
elif platform.system() == 'Windows':
exe_path = 'CT2Hair\\GuideHairStrands\\Release\\GuideHairStrands.exe'
cmd = '{} 0 '.format(exe_path) \
+ str(conf['vdb']['path']) + ' ' \
+ str(conf['vdb']['voxel_size']) + ' ' \
+ conf['head']['roots_path'] + ' ' \
+ str(conf['guide']['wignet_dis']) + ' ' \
+ os.path.join(oriens_out_dir, oriens_out_name)
print(colored("Running command:", "yellow"), colored(cmd, "green"))
os.system(cmd)
|
CT2Hair-main
|
scripts/est_orientations.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
import os
import platform
import argparse
from shutil import copyfile
from pyhocon import ConfigFactory
from termcolor import colored
parser = argparse.ArgumentParser()
parser.add_argument('--conf', type=str, default='conf/data/Curly.conf')
parser.add_argument('--datapath', type=str, default='data')
parser.add_argument('--gpu', type=str, default='0')
k_args = parser.parse_args()
if __name__ == '__main__':
conf_text = open(k_args.conf).read()
conf = ConfigFactory.parse_string(conf_text)
conf_text = conf_text.replace('DATAPATH', k_args.datapath)
conf_text = conf_text.replace('CASENAME', conf['output']['name'])
conf = ConfigFactory.parse_string(conf_text)
strands_out_dir = os.path.join(conf['output']['dir'], conf['output']['name'])
oriens_name = conf['output']['name'] \
+ '_oriens' \
+ '_wd_' + str(conf['guide']['wignet_dis']) \
+ '.ply'
if not os.path.exists(os.path.join(strands_out_dir, oriens_name)):
print(colored("Orientations not found, please run scripts/est_orientations.py first.", "red"))
exit(1)
strands_out_name = conf['output']['name'] \
+ '_oriens' \
+ '_wd_' + str(conf['guide']['wignet_dis']) \
+ '_nr_' + str(conf['guide']['nei_radius']) \
+ '_se_' + str(conf['guide']['sigma_e']) \
+ '_so_' + str(conf['guide']['sigma_o']) \
+ '_ts_' + str(conf['guide']['thres_shift']) \
+ '_nrs_' + str(conf['guide']['nei_radius_seg']) \
+ '_to_' + str(conf['guide']['thres_orient']) \
+ '_tl_' + str(conf['guide']['thres_length']) \
+ '_tnrd_' + str(conf['guide']['thres_nn_roots_dis']) \
+ '_tlg_' + str(conf['guide']['thres_length_grow']) \
+ '.ply'
strands_out_name_simp = conf['output']['name'] + '_guide.bin'
if platform.system() == 'Linux':
exe_path = 'CT2Hair/GuideHairStrands/GuideHairStrands'
elif platform.system() == 'Windows':
exe_path = 'CT2Hair\\GuideHairStrands\\Release\\GuideHairStrands.exe'
cmd = '{} 1 '.format(exe_path) \
+ os.path.join(strands_out_dir, oriens_name) + ' ' \
+ os.path.join(strands_out_dir, strands_out_name) + ' ' \
+ str(conf['guide']['nei_radius']) + ' ' \
+ str(conf['guide']['sigma_e']) + ' ' \
+ str(conf['guide']['sigma_o']) + ' ' \
+ str(conf['guide']['thres_shift']) + ' ' \
+ str(conf['guide']['use_cuda']) + ' ' \
+ k_args.gpu + ' ' \
+ str(conf['guide']['nei_radius_seg']) + ' ' \
+ str(conf['guide']['thres_orient']) + ' ' \
+ str(conf['guide']['thres_length']) + ' ' \
+ conf['head']['roots_path'] + ' ' \
+ str(conf['guide']['thres_nn_roots_dis']) + ' ' \
+ str(conf['guide']['thres_length_grow'])
print(colored("Running command:", "yellow"), colored(cmd, "green"))
os.system(cmd)
copyfile(os.path.join(strands_out_dir, strands_out_name).replace('ply', 'bin'),
os.path.join(strands_out_dir, strands_out_name_simp))
|
CT2Hair-main
|
scripts/gen_guide_strands.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to decode and produce an answer.
Answer decoder for explicit visual coreference resolution model in visual
dialog using neural module networks, called CorefNMN.
Support two kinds of decoders:
(a) Generative: A recurrent neural network based language model that can
generate novel answers. At test time, all candidate answers are scored
based on loglikelihood of the language model.
(b) Discriminative: A discriminative classifier to identify the correct
answer from a pool of candidate options at train time.
At test time, options are ranked based on class probabilities.
Author: Satwik Kottur
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.nn import dropout
from tensorflow.contrib.layers import fully_connected as FC
from util import support
class AnswerDecoder:
def __init__(self, inputs, output_pool, params):
"""Initialize answer decoder.
Args:
inputs:
output_pool:
params:
"""
self.params = params
# keep track of inputs and outputs
used_inputs = []
outputs = {}
# alias for criterion
criterion = tf.nn.sparse_softmax_cross_entropy_with_logits
# begin decoding
with tf.variable_scope(self.params['embed_scope'], reuse=True):
size = [params['text_vocab_size'], params['text_embed_size']]
embed_mat = tf.get_variable('embed_mat')
output = tf.nn.embedding_lookup(embed_mat, inputs['ans_in'])
used_inputs.extend(['ans_in', 'ans_out', 'ans_len'])
# recurrent neural network cell
cell = tf.contrib.rnn.BasicLSTMCell(params['lstm_size'])
# decide the source based on train / evaluation
source = output_pool if params['train_mode'] else inputs
# concatenate question to both
concat_list = []
# add program context vector
concat_list.append(source['context'])
    # add the last hidden state
concat_list.append(source['enc_dec_h'][-1])
used_inputs.extend(['enc_dec_h', 'enc_dec_c'])
if not params['train_mode']:
used_inputs.append('context')
#--------------------------------------------------------------------------
# stack all the vectors
stack_vec = tf.concat(concat_list, axis=1)
stack_vec = FC(stack_vec, params['lstm_size'])
# construct encoder decoder H
enc_dec_h = [source['enc_dec_h'][ii]
for ii in range(params['num_layers'] - 1)]
enc_dec_h.append(stack_vec)
# construct encoder decoder C
enc_dec_c = [source['enc_dec_c'][ii] for ii in range(params['num_layers'])]
init_state = [tf.contrib.rnn.LSTMStateTuple(cc, hh)
for cc, hh in zip(enc_dec_c, enc_dec_h)]
if params['decoder'] == 'gen':
for ii in range(params['num_layers']):
# dynamic rnn
output, _ = tf.nn.dynamic_rnn(cell, output,
sequence_length=inputs['ans_len'],
initial_state=init_state[ii],
dtype=tf.float32, scope='layer_%d' % ii)
# predict the output words
output = FC(output, params['text_vocab_size'], activation_fn=None)
# create a mask
mask = tf.not_equal(inputs['ans_out'], params['pad_id'])
mask = tf.cast(mask, tf.float32)
# multiply by mask for variable length sequences
answer_loss = criterion(logits=output, labels=inputs['ans_out'])
masked_answer_loss = tf.multiply(answer_loss, mask)
token_likelihood = tf.reduce_sum(masked_answer_loss)
num_tokens = tf.maximum(tf.reduce_sum(mask), 1)
outputs['ans_token_loss'] = token_likelihood/num_tokens
outputs['per_sample_loss'] = tf.reduce_sum(masked_answer_loss, 1)
# extract the probabilities
out_softmax = tf.nn.log_softmax(output)
out_softmax_flat = tf.reshape(out_softmax, [-1, params['text_vocab_size']])
orig_shape = tf.shape(inputs['ans_out'])
ans_out_flat = tf.reshape(inputs['ans_out'], [-1])
inds = [tf.range(0, tf.shape(ans_out_flat)[0]), ans_out_flat]
inds = tf.stack(inds, axis=1)
prob_tokens = tf.gather_nd(out_softmax_flat, inds)
prob_tokens = tf.reshape(prob_tokens, orig_shape)
prob_tokens = tf.multiply(prob_tokens, mask)
# compute the loglikelihood
outputs['llh'] = tf.reduce_sum(prob_tokens, 1)
# compute mean instead of sum
num_tokens = tf.maximum(tf.reduce_sum(mask, 1), 1)
outputs['llh_mean'] = outputs['llh'] / num_tokens
elif params['decoder'] == 'disc':
# embed options and encode via lstm
with tf.variable_scope(self.params['embed_scope'], reuse=True):
size = [params['text_vocab_size'], params['text_embed_size']]
embed_mat = tf.get_variable('embed_mat')
opt_embed = tf.nn.embedding_lookup(embed_mat, inputs['opt'])
      # transpose and merge the batch and option dimensions
opt_embed = tf.transpose(opt_embed, [0, 2, 1, 3])
shape = opt_embed.shape.as_list()
opt_embed = tf.reshape(opt_embed, [-1, shape[2], shape[3]])
opt_len = tf.reshape(inputs['opt_len'], [-1])
output, _ = tf.nn.dynamic_rnn(cell, opt_embed,
sequence_length=opt_len,
dtype=tf.float32, scope='opt_layer_0')
for ii in range(1, params['num_layers']):
# dynamic rnn
output, _ = tf.nn.dynamic_rnn(cell, output, \
sequence_length=opt_len,
dtype=tf.float32,
scope='opt_layer_%d' % ii)
opt_encode = support.last_relevant(output, opt_len)
# reshape back
opt_encode = tf.reshape(opt_encode, [-1, shape[1], params['lstm_size']])
# score the options with context vector
score_vec = tf.matmul(opt_encode, tf.expand_dims(stack_vec, -1))
score_vec = tf.squeeze(score_vec, -1)
scores = criterion(logits=score_vec, labels=inputs['gt_ind'])
outputs['ans_token_loss'] = tf.reduce_mean(scores)
outputs['scores'] = score_vec
used_inputs.extend(['opt', 'opt_len', 'gt_ind'])
# setup the inputs and outputs
self.outputs = outputs
self.inputs = {ii: inputs[ii] for ii in used_inputs}
#----------------------------------------------------------------------------
# setters and getters
def get_outputs(self):
return self.outputs
def get_inputs(self):
return self.inputs
#----------------------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, output_pool=None):
"""Produces the feed dict for this subcomponent.
Args:
batch: Batch returned from dataloader
output_pool: Outputs from previous subcomponents, mostly when evaluating
Returns:
feed_dict: Returns the feed dictionary
"""
feed_dict = {}
for key in ['ans_in', 'ans_out', 'ans_len']:
feed_dict[self.inputs[key]] = batch[key]
# if not in train mode, use output_pool
if not self.params['train_mode']:
for key in ['context', 'enc_dec_h', 'enc_dec_c']:
feed_dict[self.inputs[key]] = output_pool[key]
# additional feeds for discriminative decoder
if self.params['decoder'] == 'disc':
feed_dict[self.inputs['opt']] = np.stack(batch['opt_out'], -1)
feed_dict[self.inputs['opt_len']] = np.stack(batch['opt_len'], -1)
feed_dict[self.inputs['gt_ind']] = batch['gt_ind']
return feed_dict
#----------------------------------------------------------------------------
|
corefnmn-main
|
models_vd/decoder.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
TODO(satwik): Add a reasonable description to the file.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import fc_layer as fc, conv_relu_layer as conv_relu
from tensorflow.contrib.layers import fully_connected as FC
from tensorflow.contrib.rnn import LSTMStateTuple
from util import support
def _get_valid_tokens(X, W, b):
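  # a token is valid iff every linear constraint X.W - b >= 0 is satisfied; gradients are
  # stopped because this mask only constrains decoding and is not meant to be learned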
constraints_validity = tf.greater_equal(tf.tensordot(X, W, axes=1) - b, 0)
token_validity = tf.reduce_all(constraints_validity, axis=2)
return tf.stop_gradient(token_validity)
#------------------------------------------------------------------------------
def _update_decoding_state(X, s, P):
X = X + tf.nn.embedding_lookup(P, s) # X = X + S P
return tf.stop_gradient(X)
#------------------------------------------------------------------------------
def _get_lstm_cell(num_layers, lstm_dim, apply_dropout):
if isinstance(lstm_dim, list): # Different layers have different dimensions
if not len(lstm_dim) == num_layers:
raise ValueError('the length of lstm_dim must be equal to num_layers')
cell_list = []
for l in range(num_layers):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim[l], state_is_tuple=True)
# Dropout is only applied on output of the 1st to second-last layer.
# The output of the last layer has no dropout
if apply_dropout and l < num_layers-1:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list.append(dropout_cell)
  else: # All layers have the same dimension.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim, state_is_tuple=True)
# Dropout is only applied on output of the 1st to second-last layer.
# The output of the last layer has no dropout
if apply_dropout:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list = [dropout_cell] * (num_layers-1) + [lstm_cell]
cell = tf.contrib.rnn.MultiRNNCell(cell_list, state_is_tuple=True)
return cell
#------------------------------------------------------------------------------
# Sequence to Sequence with attention
class AttSeq2Seq:
def __init__(self, holders, use_gt_prog, assembler, params, reuse=None):
self.T_decoder = params['max_dec_len']
self.encoder_num_vocab = params['text_vocab_size']
self.encoder_embed_dim = params['text_embed_size']
self.decoder_num_vocab = params['prog_vocab_size']
self.decoder_embed_dim = params['prog_embed_size']
self.lstm_dim = params['lstm_size']
self.num_layers = params['num_layers']
self.EOS_token = assembler.EOS_idx
self.embed_scope = params['embed_scope']
self.temperature = params.get('temperature', 1)
    # whether to use word vectors (instead of lstm outputs) for attention
params['use_word_vectors'] = 'wv-att' in params['model']
params['generator'] = params.get('generator', 'ques')
self.params = params
# decoding transition variables
self.P = to_T(assembler.P, dtype=tf.int32)
self.W = to_T(assembler.W, dtype=tf.int32)
self.b = to_T(assembler.b, dtype=tf.int32)
self.encoder_dropout = params['enc_dropout']
self.decoder_dropout = params['dec_dropout']
self.decoder_sampling = params['dec_sampling']
# detect fake inputs
if 'fake' in holders: scope = 'enc_dec_cap'
else: scope = 'enc_dec'
with tf.variable_scope(scope, reuse=reuse):
# build a special encoder, if needed
if 'fake' not in holders and params['generator'] == 'mem':
self._build_memory_encoder(holders)
else:
# build a normal encoder
self._build_encoder(holders['ques'], holders['ques_len'])
self._build_decoder(use_gt_prog, holders['prog_gt'])
# build a usual encoder, ques based
def _build_encoder(self, input_seq_batch, seq_len_batch, scope='encoder',
reuse=None):
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.encoder_dropout
with tf.variable_scope(scope, reuse=reuse):
#T = tf.shape(input_seq_batch)[0]
T = input_seq_batch.shape.as_list()[0]
N = tf.shape(input_seq_batch)[1]
self.T_encoder = T
self.N = N
with tf.variable_scope(self.embed_scope, reuse=True):
embedding_mat = tf.get_variable('embed_mat', [self.encoder_num_vocab,
self.encoder_embed_dim])
# text_seq has shape [T, N] and embedded_seq has shape [T, N, D].
embedded_seq = tf.nn.embedding_lookup(embedding_mat, input_seq_batch)
self.embedded_input_seq = embedded_seq
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
# encoder_outputs has shape [T, N, lstm_dim]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell, embedded_seq,
seq_len_batch,
dtype=tf.float32,
time_major=True,
scope='lstm')
self.encoder_outputs = encoder_outputs
self.encoder_states = encoder_states
# check if wv flag is set
if self.params['use_word_vectors']:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(embedded_seq, [-1, self.encoder_embed_dim]),
output_dim=lstm_dim)
else:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(encoder_outputs, [-1, lstm_dim]), output_dim=lstm_dim)
encoder_h_transformed = tf.reshape(encoder_h_transformed,
to_T([T, N, lstm_dim]))
self.encoder_h_transformed = encoder_h_transformed
# seq_not_finished has shape [T, N, 1], where seq_not_finished[t, n]
# is 1 iff sequence n is not finished at time t, and 0 otherwise
seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
seq_len_batch[:, tf.newaxis])
seq_not_finished = tf.cast(seq_not_finished, tf.float32)
self.seq_not_finished = seq_not_finished
# build a special encoder
def _build_memory_encoder(self, holders, scope='encoder', reuse=None):
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.encoder_dropout
input_seq = holders['ques']
input_seq_len = holders['ques_len']
# facts/memories
hist_size = holders['hist'].shape.as_list()
hist_flat = tf.reshape(holders['hist'], [-1, hist_size[2]])
hist_len_flat = tf.reshape(holders['hist_len'], [-1])
with tf.variable_scope(scope, reuse=reuse):
T = input_seq.shape.as_list()[0]
N = tf.shape(input_seq)[1]
self.T_encoder = T
self.N = N
with tf.variable_scope(self.embed_scope, reuse=True):
embed_mat = tf.get_variable('embed_mat', [self.encoder_num_vocab,
self.encoder_embed_dim])
# text_seq has shape [T, N] and embedded_seq has shape [T, N, D].
embed_seq = tf.nn.embedding_lookup(embed_mat, input_seq)
self.embedded_input_seq = embed_seq
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
# encoder_outputs has shape [T, N, lstm_dim]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell,
embed_seq, input_seq_len, dtype=tf.float32,
time_major=True, scope='lstm')
self.encoder_outputs = encoder_outputs
# batch first encoder outputs
batch_encoder_outputs = tf.transpose(encoder_outputs, [1, 0, 2])
ques_enc = support.last_relevant(batch_encoder_outputs, input_seq_len)
size = [-1, self.params['num_rounds'], self.params['lstm_size']]
ques_enc = tf.reshape(ques_enc, size)
self.encoder_states = encoder_states
# similarly encode history
hist_out = tf.nn.embedding_lookup(embed_mat, hist_flat)
# rnns to encode history
cell = tf.contrib.rnn.BasicLSTMCell(self.params['lstm_size'])
for ii in range(0, self.params['num_layers']):
# dynamic rnn
hist_out, states = tf.nn.dynamic_rnn(cell, hist_out, \
sequence_length=hist_len_flat, \
dtype=tf.float32, scope='hist_layer_%d' % ii)
# get output from last timestep
hist_enc = support.last_relevant(hist_out, hist_len_flat)
# reshape back
size = [-1, hist_size[1], self.params['lstm_size']]
hist_enc = tf.reshape(hist_enc, size)
# concatenate, mlp and tanh
num_r = self.params['num_rounds']
# dot product
attention = tf.matmul(ques_enc, hist_enc, transpose_b=True)
      # a very large negative number (to suppress attention to future rounds)
u_mat = np.full((num_r, num_r), -1e10)
suppress_mat = tf.constant(np.triu(u_mat, 1), dtype=tf.float32)
l_mat = np.full((num_r, num_r), 1)
mask_mat = tf.constant(np.tril(l_mat), dtype=tf.float32)
attention = tf.nn.softmax(tf.multiply(attention, mask_mat)
+ suppress_mat)
self.att_history = attention
att_hist_enc = tf.matmul(attention, hist_enc)
# flatten out
size = [-1, self.params['lstm_size']]
att_hist_flat = tf.reshape(att_hist_enc, size)
# concatenate attended history and encoder state for the last layer
concat = tf.concat([encoder_states[-1].h, att_hist_flat], -1)
new_state = LSTMStateTuple(encoder_states[-1].c,
FC(concat, self.params['lstm_size']))
# make it mutable
encoder_states = list(encoder_states)
encoder_states[-1] = new_state
self.encoder_states = tuple(encoder_states)
# check if wv flag is set
if self.params['use_word_vectors']:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(embed_seq, [-1, self.encoder_embed_dim]),
output_dim=lstm_dim)
else:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(encoder_outputs, [-1, lstm_dim]), output_dim=lstm_dim)
encoder_h_transformed = tf.reshape(encoder_h_transformed,
to_T([T, N, lstm_dim]))
self.encoder_h_transformed = encoder_h_transformed
# seq_not_finished is a shape [T, N, 1] tensor, where seq_not_finished[t, n]
# is 1 iff sequence n is not finished at time t, and 0 otherwise
seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
input_seq_len[:, tf.newaxis])
seq_not_finished = tf.cast(seq_not_finished, tf.float32)
self.seq_not_finished = seq_not_finished
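# Sketch (not part of the original corefnmn code) of the history attention
# masking used above: for num_rounds R, round r may only attend to rounds
# <= r, so strictly-upper entries of the score matrix are zeroed and pushed
# to -1e10 before the softmax.  In plain numpy (hypothetical ques_enc and
# hist_enc of shape [R, lstm_size]):
#   scores = ques_enc @ hist_enc.T                        # [R, R]
#   scores = scores * np.tril(np.ones((R, R)))            # keep rounds <= r
#   scores = scores + np.triu(np.full((R, R), -1e10), 1)  # suppress future
#   att = np.exp(scores) / np.exp(scores).sum(-1, keepdims=True)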
def _build_decoder(self, use_gt_layout, gt_layout_batch, scope='decoder',
reuse=None):
# The main difference from before is that the decoder now takes another
# input (the attention) when computing the next step.
# T_max is the maximum length of decoded sequence (including <eos>)
#
# This function is for decoding only. It performs greedy search or sampling.
# the first input is <go> (its embedding vector) and the subsequent inputs
# are the outputs from previous time step
# num_vocab does not include <go>
#
# use_gt_layout is None or a bool tensor, and gt_layout_batch is a tensor
# with shape [T_max, N].
# If use_gt_layout is not None, then when use_gt_layout is true, predict
# exactly the tokens in gt_layout_batch, regardless of actual probability.
# Otherwise, if sampling is True, sample from the token probability
# If sampling is False, do greedy decoding (beam size 1)
N = self.N
encoder_states = self.encoder_states
T_max = self.T_decoder
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.decoder_dropout
EOS_token = self.EOS_token
sampling = self.decoder_sampling
with tf.variable_scope(scope, reuse=reuse):
embedding_mat = tf.get_variable('embedding_mat',
[self.decoder_num_vocab, self.decoder_embed_dim])
# we use a separate embedding for <go>, as it is only used in the
# beginning of the sequence
go_embedding = tf.get_variable('go_embedding', [1, self.decoder_embed_dim])
with tf.variable_scope('att_prediction'):
v = tf.get_variable('v', [lstm_dim])
W_a = tf.get_variable('weights', [lstm_dim, lstm_dim],
initializer=tf.contrib.layers.xavier_initializer())
b_a = tf.get_variable('biases', lstm_dim,
initializer=tf.constant_initializer(0.))
# The parameters to predict the next token
with tf.variable_scope('token_prediction'):
W_y = tf.get_variable('weights', [lstm_dim*2, self.decoder_num_vocab],
initializer=tf.contrib.layers.xavier_initializer())
b_y = tf.get_variable('biases', self.decoder_num_vocab,
initializer=tf.constant_initializer(0.))
# Attentional decoding
# Loop function is called at time t BEFORE the cell execution at time t,
# and its next_input is used as the input at time t (not t+1)
# c.f. https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn
mask_range = tf.reshape(tf.range(self.decoder_num_vocab, dtype=tf.int32),
[1, -1])
if use_gt_layout is not None:
gt_layout_mult = tf.cast(use_gt_layout, tf.int32)
pred_layout_mult = 1 - gt_layout_mult
def loop_fn(time, cell_output, cell_state, loop_state):
if cell_output is None: # time == 0
next_cell_state = encoder_states
next_input = tf.tile(go_embedding, to_T([N, 1]))
else: # time > 0
next_cell_state = cell_state
# compute the attention map over the input sequence
# att_raw has shape [T, N, 1]
att_raw = tf.reduce_sum(
tf.tanh(tf.nn.xw_plus_b(cell_output, W_a, b_a) +
self.encoder_h_transformed) * v,
axis=2, keep_dims=True)
# softmax along the first dimension (T), restricted to unfinished timesteps
# att has shape [T, N, 1]
att = tf.nn.softmax(att_raw, dim=0)*self.seq_not_finished
att = att / tf.reduce_sum(att + 1e-10, axis=0, keep_dims=True)
# d2 has shape [N, lstm_dim]
d2 = tf.reduce_sum(att*self.encoder_outputs, axis=0)
# token_scores has shape [N, num_vocab]
token_scores = tf.nn.xw_plus_b(
tf.concat([cell_output, d2], axis=1),
W_y, b_y)
decoding_state = loop_state[2]
# token_validity has shape [N, num_vocab]
token_validity = _get_valid_tokens(decoding_state, self.W, self.b)
token_validity.set_shape([None, self.decoder_num_vocab])
if use_gt_layout is not None:
# when there's ground-truth layout, do not re-normalize prob
# and treat all tokens as valid
token_validity = tf.logical_or(token_validity, use_gt_layout)
validity_mult = tf.cast(token_validity, tf.float32)
# predict the next token (behavior depending on parameters)
if sampling:
token_scores_valid = token_scores - (1-validity_mult) * 50
# TODO:debug
sampled_token = tf.cast(tf.reshape(
tf.multinomial(token_scores_valid/self.temperature, 1), [-1]), tf.int32)
# make sure that the predictions are ALWAYS valid
# (it can be invalid with very small prob)
# If not, fall back to the highest-scoring valid token
# sampled_mask has shape [N, num_vocab]
sampled_mask = tf.equal(mask_range, tf.reshape(sampled_token, [-1, 1]))
is_sampled_valid = tf.reduce_any(
tf.logical_and(sampled_mask, token_validity),
axis=1)
# Fall back to max score (no sampling)
min_score = tf.reduce_min(token_scores)
token_scores_valid = tf.where(token_validity, token_scores,
tf.ones_like(token_scores)*(min_score-1))
max_score_token = tf.cast(tf.argmax(token_scores_valid, 1), tf.int32)
predicted_token = tf.where(is_sampled_valid, sampled_token, max_score_token)
else:
min_score = tf.reduce_min(token_scores)
token_scores_valid = tf.where(token_validity, token_scores,
tf.ones_like(token_scores)*(min_score-1))
# predicted_token has shape [N]
predicted_token = tf.cast(tf.argmax(token_scores_valid, 1), tf.int32)
if use_gt_layout is not None:
predicted_token = (gt_layout_batch[time-1] * gt_layout_mult
+ predicted_token * pred_layout_mult)
# a robust version of softmax
# all_token_probs has shape [N, num_vocab]
all_token_probs = tf.nn.softmax(token_scores) * validity_mult
# tf.check_numerics(all_token_probs, 'NaN/Inf before div')
all_token_probs = all_token_probs / tf.reduce_sum(all_token_probs + 1e-10, axis=1, keep_dims=True)
# tf.check_numerics(all_token_probs, 'NaN/Inf after div')
# mask has shape [N, num_vocab]
mask = tf.equal(mask_range, tf.reshape(predicted_token, [-1, 1]))
# token_prob has shape [N], the probability of the predicted token;
# it is not needed for predicting the next token, but it is needed in
# the output (for policy gradient training)
token_prob = tf.reduce_sum(all_token_probs * tf.cast(mask, tf.float32), axis=1)
# tf.assert_positive(token_prob)
neg_entropy = tf.reduce_sum(
all_token_probs * tf.log(all_token_probs + (1-validity_mult) + 1e-10),
axis=1)
# update states
updated_decoding_state = _update_decoding_state(
decoding_state, predicted_token, self.P)
# the prediction comes from the cell output of the previous timestep (t-1);
# feed it as the input to timestep t
next_input = tf.nn.embedding_lookup(embedding_mat, predicted_token)
elements_finished = tf.greater_equal(time, T_max)
# loop_state is a 5-tuple, representing
# 1) the predicted_tokens
# 2) the prob of predicted_tokens
# 3) the decoding state (used for validity)
# 4) the negative entropy of policy (accumulated across timesteps)
# 5) the attention
if loop_state is None: # time == 0
# Write the predicted token into the output
predicted_token_array = tf.TensorArray(dtype=tf.int32, size=T_max,
infer_shape=False)
token_prob_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
init_decoding_state = tf.tile(to_T([[0, 0, T_max]], dtype=tf.int32), to_T([N, 1]))
att_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
next_loop_state = (predicted_token_array,
token_prob_array,
init_decoding_state,
tf.zeros(to_T([N]), dtype=tf.float32),
att_array)
else: # time > 0
t_write = time-1
next_loop_state = (loop_state[0].write(t_write, predicted_token),
loop_state[1].write(t_write, token_prob),
updated_decoding_state,
loop_state[3] + neg_entropy,
loop_state[4].write(t_write, att))
return (elements_finished, next_input, next_cell_state, cell_output,
next_loop_state)
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
_, _, decodes_ta = tf.nn.raw_rnn(cell, loop_fn, scope='lstm')
predicted_tokens = decodes_ta[0].stack()
token_probs = decodes_ta[1].stack()
neg_entropy = decodes_ta[3]
# atts has shape [T_decoder, T_encoder, N, 1]
atts = decodes_ta[4].stack()
# static dimension recast
atts = tf.reshape(atts, [self.T_decoder, self.T_encoder, -1, 1])
self.atts = atts
# word_vecs has shape [T_decoder, N, encoder_embed_dim]
word_vecs = tf.reduce_sum(atts*self.embedded_input_seq, axis=1)
predicted_tokens.set_shape([None, None])
token_probs.set_shape([None, None])
neg_entropy.set_shape([None])
#word_vecs.set_shape([None, None, self.encoder_embed_dim])
# static shapes
word_vecs.set_shape([self.T_decoder, None, self.encoder_embed_dim])
self.predicted_tokens = predicted_tokens
self.token_probs = token_probs
self.neg_entropy = neg_entropy
self.word_vecs = word_vecs
#------------------------------------------------------------------------------
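def _masked_softmax_sketch(att_raw, seq_not_finished):
  """Minimal numpy sketch (not part of the original corefnmn code) of the
  attention normalization used in loop_fn above: softmax along the encoder
  time axis, zero out finished timesteps, then renormalize so the weights
  over the remaining timesteps sum to one."""
  import numpy as np
  att = np.exp(att_raw - att_raw.max(axis=0, keepdims=True))
  att = att / att.sum(axis=0, keepdims=True)     # softmax along T
  att = att * seq_not_finished                   # mask finished timesteps
  return att / (att + 1e-10).sum(axis=0, keepdims=True)
#------------------------------------------------------------------------------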
|
corefnmn-main
|
models_vd/generator_attnet.py
|
corefnmn-main
|
models_vd/__init__.py
|
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
TODO(satwik): Write a description about what this file contains and what
it does.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow import convert_to_tensor as to_T
from models_vd import modules as lm
# the number of attention input to each module
_module_input_num = {
'_Find': 0,
'_Refer': 0,
'_Exclude': 0,
'_Transform': 1,
'_And': 2,
'_Describe': 1
}
# output type of each module
_module_output_type = {
'_Find': 'att',
'_Refer': 'att',
'_Exclude': 'att',
'_Transform': 'att',
'_And': 'att',
'_Describe': 'ans'
}
INVALID_EXPR = 'INVALID_EXPR'
# decoding validity: maintaining a state x of [#att, #ans, T_remain]
# where T_remain equals T_decoder when decoding the first module token.
# a token s can be predicted iff all(<x, w_s> - b_s >= 0),
# i.e. the mask of valid tokens is given by
#     x W - b >= 0
# the state transition matrix is P, so the state update is X += S P,
# where S is the predicted tokens (one-hot vectors)
def _build_validity_mats(module_names):
state_size = 3
num_vocab_nmn = len(module_names)
num_constraints = 4
P = np.zeros((num_vocab_nmn, state_size), np.int32)
W = np.zeros((state_size, num_vocab_nmn, num_constraints), np.int32)
b = np.zeros((num_vocab_nmn, num_constraints), np.int32)
# collect the input and output numbers of each module
att_in_nums = np.zeros(num_vocab_nmn)
att_out_nums = np.zeros(num_vocab_nmn)
ans_out_nums = np.zeros(num_vocab_nmn)
for n_s, s in enumerate(module_names):
if s != '<eos>':
att_in_nums[n_s] = _module_input_num[s]
att_out_nums[n_s] = _module_output_type[s] == 'att'
ans_out_nums[n_s] = _module_output_type[s] == 'ans'
# construct the transition matrix P
for n_s, s in enumerate(module_names):
P[n_s, 0] = att_out_nums[n_s] - att_in_nums[n_s]
P[n_s, 1] = ans_out_nums[n_s]
P[n_s, 2] = -1
# construct the validity W and b
att_absorb_nums = (att_in_nums - att_out_nums)
max_att_absorb_nonans = np.max(att_absorb_nums * (ans_out_nums == 0))
max_att_absorb_ans = np.max(att_absorb_nums * (ans_out_nums != 0))
for n_s, s in enumerate(module_names):
if s != '<eos>':
# constraint: a non-<eos> module can be outputted iff all the following
# hold:
# * 0) there's enough att in the stack
# #att >= att_in_nums[n_s]
W[0, n_s, 0] = 1
b[n_s, 0] = att_in_nums[n_s]
# * 1) for answer modules, there's no extra att in the stack
# #att <= att_in_nums[n_s]
# -#att >= -att_in_nums[n_s]
# for non-answer modules, T_remain >= 3
# (the last two has to be AnswerType and <eos>)
if ans_out_nums[n_s] != 0:
W[0, n_s, 1] = -1
b[n_s, 1] = -att_in_nums[n_s]
else:
W[2, n_s, 1] = 1
b[n_s, 1] = 3
# * 2) there's no answer in the stack (otherwise <eos> only)
# #ans <= 0
# -#ans >= 0
W[1, n_s, 2] = -1
# * 3) there's enough time to consume all the attentions, output the answer
# plus <eos>
# 3.1) for non-answer modules, we already have T_remain>= 3 from
# constraint 2
# In maximum (T_remain-3) further steps
# (plus 3 steps for this, ans, <eos>) to consume atts
# (T_remain-3) * max_att_absorb_nonans + max_att_absorb_ans +
# att_absorb_nums[n_s] >= #att
# T_remain*MANA - #att >= 3*MANA - MAA - A[s]
# - #att + MANA * T_remain >= 3*MANA - MAA - A[s]
# 3.2) for answer modules, if it can be decoded then constraint 0&1
# ensures that there'll be no att left in stack after decoding
# this answer, hence no further constraints here
if ans_out_nums[n_s] == 0:
W[0, n_s, 3] = -1
W[2, n_s, 3] = max_att_absorb_nonans
b[n_s, 3] = (3 * max_att_absorb_nonans - max_att_absorb_ans -
att_absorb_nums[n_s])
else: # <eos>-case
# constraint: an <eos> token can be output iff the following holds
# * 0) there's ans in the stack
# #ans >= 1
W[1, n_s, 0] = 1
b[n_s, 0] = 1
return P, W, b
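def _validity_check_sketch():
  """Minimal sketch (not part of the original corefnmn code) of how the
  validity matrices above are used: for a decoding state
  x = [#att, #ans, T_remain], token s is valid iff all(x . W[:, s, :] - b[s] >= 0),
  and predicting the one-hot token S updates the state as x += S P.
  The module vocabulary below is illustrative."""
  module_names = ['_Find', '_Transform', '_Describe', '<eos>']
  P, W, b = _build_validity_mats(module_names)
  x = np.array([[0, 0, 4]], np.int32)           # empty stacks, 4 steps left
  scores = np.einsum('ns,svc->nvc', x, W) - b   # [N, num_vocab, num_constraints]
  valid = np.all(scores >= 0, axis=2)           # [N, num_vocab] boolean mask
  return valid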
#------------------------------------------------------------------------------
class Assembler:
def __init__(self, module_vocab_file):
# read the module list, and record the index of each module and <eos>
with open(module_vocab_file) as f:
self.module_names = [s.strip() for s in f.readlines()]
# find the index of <eos>
for n_s in range(len(self.module_names)):
if self.module_names[n_s] == '<eos>':
self.EOS_idx = n_s
break
# build a dictionary from module name to token index
self.name2idx_dict = {name: n_s for n_s, name in enumerate(self.module_names)}
self.num_vocab_nmn = len(self.module_names)
self.P, self.W, self.b = _build_validity_mats(self.module_names)
def module_list2tokens(self, module_list, T=None):
layout_tokens = [self.name2idx_dict[name] for name in module_list]
if T is not None:
if len(module_list) >= T:
raise ValueError('Not enough time steps to add <eos>')
layout_tokens += [self.EOS_idx]*(T-len(module_list))
return layout_tokens
def _layout_tokens2str(self, layout_tokens):
return ' '.join([self.module_names[idx] for idx in layout_tokens])
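# Illustrative usage (not part of the original code), assuming a hypothetical
# module vocab file listing '_Find', '_Describe' and '<eos>':
#   asm = Assembler('module_vocab.txt')
#   tokens = asm.module_list2tokens(['_Find', '_Describe'], T=5)
#   asm._layout_tokens2str(tokens)  # -> '_Find _Describe <eos> <eos> <eos>'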
def assemble_refer(self, text_att, round_id, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# compute the scores
logits = []
for find_arg in reuse_stack:
# compute the weights for each of the attention map
inputs = (text_att, find_arg[1], round_id, find_arg[2])
logits.append(weaver.align_text(*inputs))
# exponential each logit
weights = []
for ii in logits: weights.append(weaver.exp(ii))
# normalize the weights
if len(weights) < 2:
norm = weights[0]
else:
norm = weaver.add(weights[0], weights[1])
for ii in weights[2:]: norm = weaver.add(norm, ii)
for index, ii in enumerate(weights):
weights[index] = weaver.divide(ii, norm)
# multiply the attention with softmax weight
prev_att = []
for (att, _, _, _, _), weight in zip(reuse_stack, weights):
prev_att.append(weaver.weight_attention(att, weight))
# add all attentions to get the result
if len(prev_att) < 2: out = prev_att[0]
else:
out = weaver.add_attention(prev_att[0], prev_att[1])
for ii in prev_att[2:]:
out = weaver.add_attention(out, ii)
return out, weights, logits
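# Plain-numpy sketch (not part of the original code) of the weighting done
# above with weaver ops: the align_text logits are exponentiated, normalized
# into softmax weights, and used to form a convex combination of the stored
# attention maps from earlier _Find calls:
#   w = np.exp(logits) / np.exp(logits).sum()
#   out = sum(w_i * att_i for w_i, att_i in zip(w, past_atts))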
def assemble_exclude(self, text_att, round_id, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# compute the scores
weights = []
exclude_att = reuse_stack[0][0]
if len(reuse_stack) > 1:
for find_arg in reuse_stack:
exclude_att = weaver.max_attention(exclude_att, find_arg[0])
return weaver.normalize_exclude(exclude_att)
# code to check if the program makes sense
# typically contains all the checks from the _assemble_program method
def sanity_check_program(self, layout):
decode_stack = []
for t_id, cur_op_id in enumerate(layout):
cur_op_name = self.module_names[cur_op_id]
# <eos> would mean stop
if cur_op_id == self.EOS_idx: break
# insufficient number of inputs
num_inputs = _module_input_num[cur_op_name]
if len(decode_stack) < num_inputs:
return False, 'Insufficient inputs'
# read the inputs
inputs = []
for ii in range(num_inputs):
arg_type = decode_stack.pop()
# cannot consume anything but attention
if arg_type != 'att':
return False, 'Intermediate not attention'
decode_stack.append(_module_output_type[cur_op_name])
# Check if only one element is left
if len(decode_stack) != 1:
return False, 'Left with more than one output'
# final output is not answer type
elif decode_stack[0] != 'ans':
return False, 'Final output not an answer'
return True, 'Valid program'
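# Example (illustrative): a layout whose tokens map to
# ['_Find', '_Transform', '_Describe', '<eos>'] evolves the stack as
# att -> att -> ans, leaving exactly one 'ans', so this returns
# (True, 'Valid program'); a layout starting with '_And' fails with
# 'Insufficient inputs'.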
def assemble(self, layout_tokens, executor, visualize=False):
# layout_tokens is a dict of numpy arrays, each with shape [T, N],
# containing module tokens and <eos>, in Reverse Polish Notation.
# internalize executor and weaver
self.executor = executor
# build a weaver
if hasattr(self, 'weaver'): del self.weaver
weaver = executor.create_weaver()
self.weaver = weaver
# visualize flag
self.visualize = visualize
# get extent of layout tokens
max_time, batch_size = layout_tokens['ques'].shape
num_rounds = executor.params['num_rounds']
batch_size = batch_size // num_rounds
outputs = []
reuse = [[]] * batch_size
cap_invalid_prog = []
ques_invalid_prog = []
# program on questions and captions, if needed
cap_tokens = layout_tokens.get('caption', None)
ques_tokens = layout_tokens['ques']
for b_id in range(batch_size):
image = weaver.batch_input(executor._loom_types['image'], b_id)
if executor.params['use_fact']:
fact = weaver.batch_input(executor._loom_types['fact'], b_id)
else: fact = None
# run module networks on captions only if needed
if 'nmn-cap' in executor.params['model']:
# convert caption to text type
cap = weaver.batch_input(executor._loom_types['caption'], b_id)
cap_text = weaver.convert_cap_in(cap)
# convert cap feature to text feature for alignment
cap_feat = weaver.batch_input(executor._loom_types['cap_feat'], b_id)
cap_feat = weaver.convert_cap_feat(cap_feat)
# collect root node outputs for down the rounds
tokens = cap_tokens[:, num_rounds * b_id : num_rounds * (b_id + 1)]
inputs = (image, cap_text, None, cap_feat, tokens, [])
out, reuse[b_id], invalid_prog = self._assemble_program(*inputs)
cap_invalid_prog.extend(invalid_prog)
# convert context to align type
cap_out = [weaver.convert_cap_out(ii) for ii in out['comp']]
outputs.extend(cap_out)
# add the visualization outputs, if needed
if visualize:
outputs.extend([ii[0] for ii in out['vis']['att'] if ii[1]==0])
# Now run program on questions
text = weaver.batch_input(executor._loom_types['text'], b_id)
text_feat = weaver.batch_input(executor._loom_types['text_feat'], b_id)
# collect root node outputs for down the rounds
# tuples are immutable, recreate to ensure caption is round 0
round_zero = weaver.batch_input(executor._loom_types['round'], 0)
cur_reuse = [(ii[0], ii[1], round_zero, ii[3], ii[4])
for ii in reuse[b_id] if ii[3] == 0]
tokens = ques_tokens[:, num_rounds*b_id : num_rounds*(b_id+1)]
inputs = (image, text, fact, text_feat, tokens, cur_reuse)
out, _, invalid_prog = self._assemble_program(*inputs)
ques_invalid_prog.extend(invalid_prog)
outputs.extend(out['comp'])
if visualize:
outputs.extend([ii for ii, _ in out['vis']['att']])
outputs.extend(out['vis']['weights'])
invalid_prog = {'ques': ques_invalid_prog, 'cap': cap_invalid_prog}
return weaver, outputs, invalid_prog
def _assemble_program(self, image, text, fact, text_feat, tokens, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# get extent of layout tokens
max_time, batch_size = tokens.shape
num_rounds = executor.params['num_rounds']
outputs = []
validity = []
# for visualizing internal nodes
vis_outputs = {'att': [], 'weights': [], 'logits': []}
for r_id in range(num_rounds):
layout = tokens[:, r_id]
invalid_prog = False
round_id = weaver.batch_input(executor._loom_types['round'], r_id)
if fact is not None: fact_slice = weaver.slice_fact(fact, round_id)
# valid layout must contain <eos>. Assembly fails if it doesn't.
if not np.any(layout == self.EOS_idx): invalid_prog = True
decode_stack = []
penult_out = None # penultimate output
for t_id in range(len(layout)):
weights = None
time = weaver.batch_input(executor._loom_types['time'], t_id)
text_att = weaver.slice_text(text, round_id, time)
# slice the text feature
text_feat_slice = weaver.slice_text_feat(text_feat, round_id, time)
cur_op_id = layout[t_id]
cur_op_name = self.module_names[cur_op_id]
# <eos> would mean stop
if cur_op_id == self.EOS_idx: break
# insufficient number of inputs
num_inputs = _module_input_num[cur_op_name]
if len(decode_stack) < num_inputs:
invalid_prog = True
break
# read the inputs
inputs = []
for ii in range(num_inputs):
arg, arg_type = decode_stack.pop()
# cannot consume anything but attention
if arg_type != 'att':
invalid_prog = True
break
inputs.append(arg)
# switch cases
if cur_op_name == '_Find':
out = weaver.find(image, text_att)
# collect in reuse stack (always)
#if fact is None:
reuse_stack.append((out, text_feat_slice, round_id, r_id, t_id))
#reuse_stack.append((out, text_att, round_id, r_id, t_id))
if cur_op_name == '_Refer':
if len(reuse_stack) == 0:
print('Something wrong with Refer')
continue
# if baseline is in the model, take the last output
if 'baseline' in self.executor.params['model']:
out = reuse_stack[-1][0]
else:
inputs = (text_feat_slice, round_id, reuse_stack)
out, weights, logits = self.assemble_refer(*inputs)
if cur_op_name == '_Exclude':
# clean up reuse stack to avoid current finds
neat_stack = reuse_stack.copy()
for prev_time in range(t_id - 1, 0, -1):
if neat_stack[-1][-2] == prev_time: neat_stack.pop(-1)
inputs = (text_att, round_id, neat_stack)
out = self.assemble_exclude(*inputs)
# collect in reuse stack
#reuse_stack.append((out, text_att, round_id, r_id, t_id))
elif cur_op_name == '_Transform':
out = weaver.transform(inputs[0], image, text_att)
elif cur_op_name == '_Describe':
out = weaver.describe(inputs[0], image, text_att)
# TODO: Do this more carefully!
penult_out = arg
elif cur_op_name == '_And':
out = weaver.and_op(inputs[0], inputs[1])
# collect outputs from all modules (visualize)
if self.visualize:
if _module_output_type[cur_op_name] == 'att':
vis_outputs['att'].append((out, r_id))
if weights is not None:
vis_outputs['weights'].extend(weights)
#vis_outputs['logits'].extend(logits)
# also add weights to usual outputs
#if weights is not None: print(r_id, len(weights))
if weights is not None:
if executor.params['train_mode']: outputs.extend(logits)
decode_stack.append((out, _module_output_type[cur_op_name]))
# Check if only one element is left
if len(decode_stack) != 1: invalid_prog = True
# final output is not answer type
elif decode_stack[0][1] != 'ans': invalid_prog = True
# record program validity
validity.append(invalid_prog)
# if program is invalid, return zeros
if invalid_prog: outputs.append(weaver.invalid(image))
else:
outputs.append(decode_stack[-1][0])
if fact is not None:
# record fact embedding against penultimate output
reuse_stack.append((penult_out, fact_slice, round_id, r_id, -1))
return {'comp': outputs, 'vis': vis_outputs}, reuse_stack, validity
#------------------------------------------------------------------------------
|
corefnmn-main
|
models_vd/assembler.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main CorefNMN model class.
Explicit visual coreference resolution in visual dialog using neural module
networks. Takes parameters and assemblers as input.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from models_vd.generator import ProgramGenerator
from models_vd.executor import ProgramExecutor
from models_vd.decoder import AnswerDecoder
from util import support
class CorefNMN:
def __init__(self, params, assemblers, reuse=None):
# train mode
params['train_mode'] = 'test_split' not in params
print('Building model with train_model as: ' + str(params['train_mode']))
self.params = params
self.assemblers = assemblers
# module phases
self.phases = ['generate_program', 'execute_program', 'generate_answer']
# initializing input and output placeholders
self.inputs = {ii: {} for ii in self.phases}
self.outputs = self.inputs.copy()
# build place holders for inputs and outputs in the tensorflow graph
holders = self._build_placeholders(params)
self.holders = holders
with tf.variable_scope(params['model'], reuse=reuse):
# keep track of all outputs
output_pool = {}
# Part 1: Seq2seq RNN to generate module layout tokens
with tf.variable_scope('generate_program'):
self.generator = ProgramGenerator(holders, assemblers['ques'], params)
self.inputs['generate_program'] = self.generator.get_inputs()
self.outputs['generate_program'] = self.generator.get_outputs()
# add outputs to pool
output_pool.update(self.outputs['generate_program'])
# Part 2: Neural Module Network
with tf.variable_scope('execute_program'):
self.executor = ProgramExecutor(holders, output_pool,
assemblers['cap'], params)
self.inputs['execute_program'] = self.executor.get_inputs()
self.outputs['execute_program'] = self.executor.get_outputs()
# add outputs to pool
output_pool.update(self.outputs['execute_program'])
# Part 3: Seq2Seq decoding of the answer
with tf.variable_scope('generate_answer'):
self.decoder = AnswerDecoder(holders, output_pool, params)
self.inputs['generate_answer'] = self.decoder.get_inputs()
self.outputs['generate_answer'] = self.decoder.get_outputs()
# pool up all the outputs
pooled_dict = []
outputs = self.outputs.copy()
for ii in outputs:
pooled_dict += outputs[ii].items()
self.pooled_outputs = dict(pooled_dict)
#---------------------------------------------------------------------------
def _build_placeholders(self, params):
inputs = {}
# Phase 1 - program generation
size = [params['max_enc_len'], None]
inputs['ques'] = tf.placeholder(tf.int32, size, 'ques')
inputs['ques_len'] = tf.placeholder(tf.int32, [None], 'ques_len')
inputs['prog_gt'] = tf.placeholder(tf.int32, [None, None], 'prog')
size = [None, params['max_enc_len']]
inputs['cap'] = tf.placeholder(tf.int32, size, 'caption')
inputs['cap_len'] = tf.placeholder(tf.int32, [None], 'cap_len')
inputs['cap_prog_gt'] = tf.placeholder(tf.int32, [None, None],
'cap_prog_gt')
# mask for pairwise program token loss
inputs['prog_att_mask'] = tf.placeholder(tf.float32, [None, None, None],
'mask')
# for supervising placeholders
if params['supervise_attention']:
size = [params['max_dec_len'], params['max_enc_len'], None, 1]
inputs['prog_att_gt'] = tf.placeholder(tf.float32, size, 'gt_att')
inputs['cap_att_gt'] = tf.placeholder(tf.float32, size, 'cap_att')
# masking out relevant parts for complete supervision
inputs['ques_super_mask'] = tf.placeholder(tf.float32, size, 'q_mask')
inputs['cap_super_mask'] = tf.placeholder(tf.float32, size, 'c_mask')
inputs['supervise_switch'] = tf.placeholder(tf.bool, [],
'supervise_switch')
# tie encoder and decoder
size = [params['num_layers'], None, params['lstm_size']]
inputs['enc_dec_h'] = tf.placeholder(tf.float32, size, 'enc_dec_h')
inputs['enc_dec_c'] = tf.placeholder(tf.float32, size, 'enc_dec_c')
# Phase 2 - program execution
size = [None, params['h_feat'], params['w_feat'], params['d_feat']]
inputs['img_feat'] = tf.placeholder(tf.float32, size, 'img_feat')
inputs['prog_validity'] = tf.placeholder(tf.bool, [None])
# Phase 2.5 - caption execution
inputs['align_gt'] = tf.placeholder(tf.int32, [None], 'align_cap')
inputs['prog_validity_cap'] = tf.placeholder(tf.bool, [None])
# Phase 3 - answer generation
inputs['ans_in'] = tf.placeholder(tf.int32, [None, None], 'ans_in')
inputs['ans_out'] = tf.placeholder(tf.int32, [None, None], 'ans_out')
inputs['ans'] = tf.placeholder(tf.int32, [None, None], 'ans')
inputs['ans_len'] = tf.placeholder(tf.int32, [None], 'ans_len')
# if discriminative, encode options
# NOTE: num_options hard coded to 100
num_options = 100
size = [None, params['max_enc_len'], num_options]
inputs['opt'] = tf.placeholder(tf.int32, size, 'opt_out')
inputs['opt_len'] = tf.placeholder(tf.int32, [None, num_options], 'opt_len')
inputs['gt_ind'] = tf.placeholder(tf.int32, [None], 'gt_ind')
# history
size = [None, params['num_rounds'], 2 * params['max_enc_len']]
inputs['hist'] = tf.placeholder(tf.int32, size, 'history')
size = [None, params['num_rounds']]
inputs['hist_len'] = tf.placeholder(tf.int32, size, 'hist_len')
# place holders for fact
size = [None, params['max_enc_len']]
inputs['fact'] = tf.placeholder(tf.int32, size, 'fact')
inputs['fact_len'] = tf.placeholder(tf.int32, [None], 'fact_len')
if not self.params['train_mode']:
# additional placeholders during evaluation
size = [None, params['lstm_size']]
inputs['context'] = tf.placeholder(tf.float32, size, 'context')
size = [1, 1, None, params['lstm_size']]
inputs['cap_enc'] = tf.placeholder(tf.float32, size, 'cap_enc')
size = [None, None, None, params['lstm_size']]
inputs['ques_enc'] = tf.placeholder(tf.float32, size, 'ques_enc')
size = [None, params['lstm_size']]
inputs['hist_enc'] = tf.placeholder(tf.float32, size, 'hist_enc')
size = [params['max_dec_len'], None, params['text_embed_size']]
inputs['ques_attended'] = tf.placeholder(tf.float32, size, 'ques_att')
inputs['cap_attended'] = tf.placeholder(tf.float32, size, 'cap_att')
return inputs
#---------------------------------------------------------------------------
# method to initialize training related attributes
def setup_training(self):
# answer prediction loss
total_loss = self.outputs['generate_answer']['ans_token_loss']
# supervised sequence prediction loss
total_loss += self.outputs['generate_program']['prog_pred_loss']
if 'nmn-cap' in self.params['model'] and self.params['cap_alignment']:
total_loss += self.outputs['execute_program']['cap_align_loss']
# add the total loss to the list of outputs
self.pooled_outputs['total_loss'] = total_loss
# setters and getters
def get_total_loss(self):
return self.pooled_outputs['total_loss']
def add_solver_op(self, op):
self.pooled_outputs['solver'] = op
#---------------------------------------------------------------------------
def run_train_iteration(self, batch, sess):
iter_loss = {}
# collect feeds from all subcomponents
feeder = self.generator.produce_feed_dict(batch)
feeder.update(self.executor.produce_feed_dict(batch))
feeder.update(self.decoder.produce_feed_dict(batch))
# run all subcomponents together
output = sess.run(self.pooled_outputs, feed_dict=feeder)
# record all the loss values
iter_loss['prog'] = output['prog_pred_loss']
if 'nmn-cap' in self.params['model']:
iter_loss['align'] = output['cap_align_loss']
else:
iter_loss['align'] = 0.
iter_loss['ans'] = output['ans_token_loss']
iter_loss['total'] = output['total_loss']
return iter_loss, None
#---------------------------------------------------------------------------
def run_evaluate_iteration(self, batch, sess, eval_options=True):
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output = sess.run(self.outputs['generate_program'], feed_dict=feeder)
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['execute_program'], feed_dict=feeder))
if 'pred_tokens' in output:
output['matches'] = [batch['gt_layout'] == output['pred_tokens']]
# if options are not to be scored
if not eval_options: return None, output
# Part 3: Run the answer generation language model (disc | gen)
if self.params['decoder'] == 'gen':
option_batch = output.copy()
option_batch.update(batch)
phase_output = self.outputs['generate_answer']['llh']
num_options = len(batch['opt_len'])
batch_size = batch['opt_len'][0].shape[0]
option_scores = np.zeros((batch_size, num_options))
option_probs = np.zeros((batch_size, num_options))
for opt_id in range(num_options):
option_batch['ans_in'] = batch['opt_in'][opt_id]
option_batch['ans_out'] = batch['opt_out'][opt_id]
option_batch['ans_len'] = batch['opt_len'][opt_id]
feeder = self.decoder.produce_feed_dict(option_batch, output)
scores = sess.run(phase_output, feed_dict=feeder)
option_scores[:, opt_id] = scores
# Part 3: Run the decoder model
elif self.params['decoder'] == 'disc':
batch_size = batch['opt_len'][0].shape[0]
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['generate_answer'], feeder))
option_scores = output['scores']
# extract ground truth score, and get ranks
gt_scores = option_scores[(range(batch_size), batch['gt_ind'])]
ranks = np.sum(option_scores > gt_scores.reshape(-1, 1), axis=1) + 1
output['scores'] = option_scores
return ranks, output
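# Numpy sketch (not part of the original code) of the ranking above: the
# rank of the ground-truth option is one plus the number of options that
# score strictly higher than it.
#   gt = option_scores[np.arange(batch_size), batch['gt_ind']]
#   ranks = (option_scores > gt[:, None]).sum(axis=1) + 1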
#---------------------------------------------------------------------------
def run_visualize_iteration(self, batch, sess, eval_options=True):
output = batch.copy()
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output.update(sess.run(self.outputs['generate_program'], feeder))
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output, True)
output.update(sess.run(self.outputs['execute_program'], feeder))
# segregate weights and attention maps
output['intermediates'] = self.executor.segregrate_outputs(output)
if not eval_options: return None, output
# Part 3: Run the answer generation language model
if self.params['decoder'] == 'gen':
option_batch = output.copy()
option_batch.update(batch)
phase_output = self.outputs['generate_answer']['llh']
# Part 3: Run the answer generation language model for each option
num_options = len(batch['opt_len'])
batch_size = batch['opt_len'][0].shape[0]
option_scores = np.zeros((batch_size, num_options))
for opt_id in range(num_options):
option_batch['ans_in'] = batch['opt_in'][opt_id]
option_batch['ans_out'] = batch['opt_out'][opt_id]
option_batch['ans_len'] = batch['opt_len'][opt_id]
feeder = self.decoder.produce_feed_dict(option_batch, output)
scores = sess.run(phase_output, feed_dict=feeder)
option_scores[:, opt_id] = scores
# Part 3: Run the decoder model
elif self.params['decoder'] == 'disc':
batch_size = batch['opt_len'][0].shape[0]
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['generate_answer'], feeder))
option_scores = output['scores']
# extract ground truth score, and get ranks
gt_scores = option_scores[(range(batch_size), batch['gt_ind'])]
ranks = np.sum(option_scores > gt_scores.reshape(-1, 1), axis=1) + 1
output['scores'] = option_scores
return ranks, output
#-------------------------------------------------------------------------
|
corefnmn-main
|
models_vd/model.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to generate programs for questions and captions.
Program generator for explicit visual coreference resolution model in visual
dialog using neural module networks, called CorefNMN.
This subcomponent uses memory network augmentation to figure out if an entity
has been seen before and/or if it needs resolution using history.
Author: Satwik Kottur
"""
import numpy as np
import tensorflow as tf
from models_vd.generator_attnet import AttSeq2Seq
from util import support
# alias
linear = tf.contrib.layers.fully_connected
# behavior based on type of model
class ProgramGenerator:
def __init__(self, inputs, assembler, params):
"""Initialize program generator.
Args:
inputs: dict of placeholder tensors shared across model components
assembler: assembler used to validate/execute the predicted question programs
params: dict of model options and hyperparameters
"""
self.params = params
outputs = {}
used_inputs = []
# create embedding matrix
with tf.variable_scope('embed', reuse=None) as embed_scope:
size = [params['text_vocab_size'], params['text_embed_size']]
embed_mat = tf.get_variable('embed_mat', size)
# remember the scope for further use
params['embed_scope'] = embed_scope
cell = tf.contrib.rnn.BasicLSTMCell(params['lstm_size'])
#--------------------------------------------------------
# if program is to be predicted
if 'prog' in params['model']:
# define a constant for internal use
use_gt_prog = tf.constant(params['use_gt_prog'], dtype=tf.bool)
# use a low level model and construct internals
self.rnn = AttSeq2Seq(inputs, use_gt_prog, assembler, params)
# if memory based generator is used
if params['generator'] == 'mem':
used_inputs.extend(['hist', 'hist_len'])
outputs['encoder_output'] = self.rnn.encoder_outputs
outputs['pred_tokens'] = self.rnn.predicted_tokens
outputs['neg_entropy'] = tf.reduce_mean(self.rnn.neg_entropy)
# check if attHistory exists
if hasattr(self.rnn, 'att_history'):
outputs['att_history'] = self.rnn.att_history
# also add the encoder states (based on the flag)
concat_list = [ii.h for ii in self.rnn.encoder_states]
outputs['enc_dec_h'] = tf.stack(concat_list)
concat_list = [ii.c for ii in self.rnn.encoder_states]
outputs['enc_dec_c'] = tf.stack(concat_list)
# alias
attention = self.rnn.atts
# if attention is to be supervised
if params['supervise_attention']:
# get mask out of the program supervision
mask = tf.cast(inputs['prog_att_gt'] > 0, tf.float32)
used_inputs.append('prog_att_gt')
# binary supervision loss
sum_mask = tf.reduce_sum(mask, 1)
sum_mask = tf.expand_dims(sum_mask, 1)
sum_mask = tf.cast(sum_mask > 0, tf.float32)
tile_size = (1, self.params['max_enc_len'], 1, 1)
tile_mask = tf.tile(sum_mask, tile_size)
num_tokens = tf.maximum(tf.reduce_sum(tile_mask), 1)
# stop gradients
num_tokens = tf.stop_gradient(num_tokens)
tile_mask = tf.stop_gradient(tile_mask)
criterion = tf.nn.sigmoid_cross_entropy_with_logits
att_loss = criterion(labels=mask,logits=attention)
att_loss = tf.reduce_sum(tf.multiply(att_loss, tile_mask))
att_loss = att_loss / num_tokens
outputs['att_loss'] = att_loss
# compute attended questions here
# word_vecs has shape [T_decoder, N, text_embed_size]
word_vecs = tf.reduce_sum(attention * self.rnn.embedded_input_seq, axis=1)
size = [params['max_dec_len'], None, params['text_embed_size']]
word_vecs.set_shape(size)
outputs['attention'] = attention
outputs['ques_attended'] = word_vecs
#outputs['ques_attended'] = self.rnn.word_vecs
# log probability of each generated sequence
outputs['log_seq_prob'] = tf.reduce_sum(
tf.log(self.rnn.token_probs + 1e-10), axis=0)
outputs['ques_prog_loss'] = tf.reduce_mean(-outputs['log_seq_prob'])
q_output = tf.transpose(self.rnn.encoder_outputs, perm=[1, 0, 2])
q_output = support.last_relevant(q_output, inputs['ques_len'])
# bloat the first two dimensions
q_output = tf.expand_dims(q_output, axis=0)
outputs['ques_enc'] = tf.expand_dims(q_output, axis=0)
# keep track of inputs actually used
used_inputs.extend(['ques', 'ques_len', 'prog_gt'])
#------------------------------------------------------------------
# programs for captions
if 'nmn-cap' in params['model']:
# define a constant for internal use
use_gt_prog = tf.constant(params['use_gt_prog'], dtype=tf.bool)
# use a low level model and construct internals
# pretend captions to be questions for code reusability
fake_ins = {'ques': tf.transpose(inputs['cap'], perm=[1, 0]),
'ques_len': inputs['cap_len'],
'prog_gt': inputs['cap_prog_gt']}
function_ins = [fake_ins, use_gt_prog, assembler, params]
# if captions and questions share encoder
# default value for sharing encoding
self.params['share_encoder'] = self.params.get('share_encoder', False)
if not self.params['share_encoder']:
function_ins[0]['fake'] = True
else:
function_ins += [True]
self.rnn_cap = AttSeq2Seq(*function_ins)
used_inputs.extend(['cap', 'cap_len', 'cap_prog_gt'])
outputs['pred_tokens_cap'] = self.rnn_cap.predicted_tokens
outputs['neg_entropy_cap'] = tf.reduce_mean(self.rnn_cap.neg_entropy)
#------------------------------------------------------------------
# alias
attention = self.rnn_cap.atts
# if attention is to be supervised
if params['supervise_attention']:
# get mask out of the program supervision
mask = tf.cast(inputs['cap_att_gt'] > 0, tf.float32)
# binary supervision loss
sum_mask = tf.reduce_sum(mask, 1)
sum_mask = tf.expand_dims(sum_mask, 1)
sum_mask = tf.cast(sum_mask > 0, tf.float32)
tile_size = (1, self.params['max_enc_len'], 1, 1)
tile_mask = tf.tile(sum_mask, tile_size)
num_tokens = tf.maximum(tf.reduce_sum(tile_mask), 1)
# stop gradients
num_tokens = tf.stop_gradient(num_tokens)
tile_mask = tf.stop_gradient(tile_mask)
criterion = tf.nn.sigmoid_cross_entropy_with_logits
att_loss = criterion(labels=mask,logits=attention)
att_loss = tf.reduce_sum(tf.multiply(att_loss, tile_mask))
att_loss_cap = att_loss / num_tokens
# additionally add the multiplier
outputs['att_loss_cap'] = att_loss_cap
used_inputs.append('cap_att_gt')
# compute attended questions here
# word_vecs has shape [T_decoder, N, text_embed_size]
word_vecs = tf.reduce_sum(attention * self.rnn_cap.embedded_input_seq,
axis=1)
size = [params['max_dec_len'], None, params['text_embed_size']]
word_vecs.set_shape(size)
outputs['attention_cap'] = attention
outputs['cap_attended'] = word_vecs
#outputs['cap_attended'] = self.rnn_cap.word_vecs
#------------------------------------------------------------------
# log probability of each generated sequence
log_prob_cap_token = tf.log(self.rnn_cap.token_probs + 1e-10)
outputs['log_seq_prob_cap'] = tf.reduce_sum(log_prob_cap_token, axis=0)
outputs['cap_prog_loss'] = tf.reduce_mean(-outputs['log_seq_prob_cap'])
c_output = tf.transpose(self.rnn_cap.encoder_outputs, perm=[1, 0, 2])
c_output = support.last_relevant(c_output, inputs['cap_len'])
# bloat the first two dimensions
c_output = tf.expand_dims(c_output, axis=0)
outputs['cap_enc'] = tf.expand_dims(c_output, axis=0)
used_inputs.extend(['cap', 'cap_len'])
#------------------------------------------------------------------
# setup the inputs and outputs
# should have at least one loss
total_loss = (outputs.get('ques_prog_loss', tf.constant(0.0)) +
outputs.get('cap_prog_loss', tf.constant(0.0)) +
outputs.get('att_loss', tf.constant(0.0)) +
outputs.get('att_loss_cap', tf.constant(0.0)))
outputs['prog_pred_loss'] = total_loss
self.outputs = outputs
self.inputs = {ii: inputs[ii] for ii in used_inputs}
#------------------------------------------------------------
# setters and getters
def get_outputs(self):
return self.outputs
def get_inputs(self):
return self.inputs
#------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, prev_output=None):
feed_dict = {}
feed_dict[self.inputs['ques']] = batch['ques']
feed_dict[self.inputs['ques_len']] = batch['ques_len']
# add program
if 'prog' in self.params['model']:
feed_dict[self.inputs['prog_gt']] = batch['gt_layout']
# attention for program
if self.params['supervise_attention']:
feed_dict[self.inputs['prog_att_gt']] = batch['gt_att']
# add captions
if 'cap' in self.params['model']:
feed_dict[self.inputs['cap']] = batch['cap']
feed_dict[self.inputs['cap_len']] = batch['cap_len']
# add history
if self.params['generator'] == 'mem':
feed_dict[self.inputs['hist']] = batch['hist']
feed_dict[self.inputs['hist_len']] = batch['hist_len']
# nmn on captions
if 'nmn-cap' in self.params['model']:
feed_dict[self.inputs['cap']] = batch['sh_cap']
feed_dict[self.inputs['cap_len']] = batch['sh_cap_len']
feed_dict[self.inputs['cap_prog_gt']] = batch['sh_cap_prog']
if self.params['supervise_attention']:
feed_dict[self.inputs['cap_att_gt']] = batch['sh_cap_att']
return feed_dict
#------------------------------------------------------------
|
corefnmn-main
|
models_vd/generator.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Module definitions for Loom API.
Explicit visual coreference resolution in visual dialog using neural module
networks. Neural module definitions.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from tensorflow_fold.public import loom
from util.cnn import fc_layer as fc, conv_layer as conv
from util.empty_safe_conv import empty_safe_1x1_conv as _1x1_conv
from util.empty_safe_conv import empty_safe_conv as _conv
def add_spatial_coord_map(image_feat_grid):
image_feat_shape = tf.shape(image_feat_grid)
N = image_feat_shape[0]
# static dimensions
#H = image_feat_shape[1]
#W = image_feat_shape[2]
H, W = image_feat_grid.shape.as_list()[1:3]
x_map = tf.tile(
tf.reshape(tf.linspace(-1., 1., W), [1, 1, -1, 1]),
to_T([N, H, 1, 1]))
y_map = tf.tile(
tf.reshape(tf.linspace(-1., 1., H), [1, -1, 1, 1]),
to_T([N, 1, W, 1]))
# stop gradient on coords_map (needed to fix the tile grad error on TF 1.0.0)
coords_map = tf.stop_gradient(tf.concat([x_map, y_map], axis=3))
image_feat_with_coords = tf.concat([image_feat_grid, coords_map], axis=3)
# set shapes of the new feature maps
image_feat_static_shape = image_feat_grid.get_shape().as_list()
image_feat_static_shape[3] += 2
image_feat_with_coords.set_shape(image_feat_static_shape)
image_feat_static_shape[3] = 2
coords_map.set_shape(image_feat_static_shape)
return image_feat_with_coords, coords_map
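def _spatial_coord_sketch(H=3, W=4):
  """Minimal numpy sketch (not part of the original corefnmn code) of the
  coordinate channels produced by add_spatial_coord_map above: an x-map and
  a y-map, each linearly spaced in [-1, 1] over the H x W feature grid,
  appended to the image features as two extra channels."""
  x_map = np.tile(np.linspace(-1., 1., W).reshape(1, W), (H, 1))
  y_map = np.tile(np.linspace(-1., 1., H).reshape(H, 1), (1, W))
  return np.stack([x_map, y_map], axis=-1)  # [H, W, 2]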
#------------------------------------------------------------------------------
# Simple tensorflow ops as loom ops
class BinaryLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types, op):
self._op = op
super(BinaryLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
arg1, arg2 = inputs
return [self._op(arg1, arg2)]
#------------------------------------------------------------------------------
class UnaryLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types, op):
self._op = op
super(UnaryLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, arg):
return [self._op(arg[0])]
#------------------------------------------------------------------------------
# slice text attention
class SliceTextLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(SliceTextLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
text, round_id, time = inputs
round_squeeze = tf.squeeze(round_id, -1)
time_squeeze = tf.squeeze(time, -1)
# select the right round
shape = text.shape.as_list()
B = tf.shape(text)[0]
num_rounds, T, text_dim = shape[1], shape[2], shape[3]
indices = round_squeeze + num_rounds * tf.range(B)
# flatten
result = tf.gather(tf.reshape(text, [-1, T, text_dim]), indices)
# select the right time
indices = time_squeeze + T * tf.range(B)
# flatten
result = tf.gather(tf.reshape(result, [-1, text_dim]), indices)
return [result]
#------------------------------------------------------------------------------
# slice answer embedding
class SliceAnswerLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(SliceAnswerLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
answer, round_id = inputs
round_squeeze = tf.squeeze(round_id, -1)
# select the right round
shape = answer.shape.as_list()
B = tf.shape(answer)[0]
num_rounds, text_dim = shape[1], shape[2]
indices = round_squeeze + num_rounds * tf.range(B)
result = tf.gather(tf.reshape(answer, [-1, text_dim]), indices)
return [result]
#--------------------------------------------------------------------
# attention weighting
class AttentionWeightLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(AttentionWeightLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
vis_att, scalar = inputs
# simple weighting
scalar = tf.expand_dims(tf.expand_dims(scalar, -1), -1)
att_grid = tf.multiply(vis_att, scalar)
return [att_grid]
#--------------------------------------------------------------------
# identity op to convert types
class IdentityLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(IdentityLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
return inputs
#--------------------------------------------------------------------
# normalize and complementary attention
class NormalizeExcludeLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(NormalizeExcludeLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
att_grid = inputs[0]
# complement the attention
max_entry = tf.reduce_max(tf.reduce_max(att_grid, 1), 1)
max_entry = tf.expand_dims(tf.expand_dims(max_entry, 1), 1)
att_grid = att_grid / max_entry
att_grid = 1 - att_grid
# normalize
norms = tf.reduce_sum(tf.reduce_sum(att_grid, 1), 1)
norms = tf.expand_dims(tf.expand_dims(norms, 1), 1)
# cutoff
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
#-------------------------------------------------------------------
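def _normalize_exclude_sketch(att_grid):
  """Numpy sketch (not part of the original corefnmn code) of the op above:
  complement an attention map and renormalize it to sum to one.
  att_grid: numpy array of shape [N, H, W, 1]."""
  comp = 1.0 - att_grid / att_grid.max(axis=(1, 2), keepdims=True)
  norms = np.clip(comp.sum(axis=(1, 2), keepdims=True), 1e-6, 1e6)
  return comp / norms
#-------------------------------------------------------------------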
class AlignTextLoomOp(loom.LoomOp):
"""
Takes in two text attentions and computes the alignment between them.
Mapping: text_param x text_param -> scalar
Input:
text_param: [N, D_txt]
text_param: [N, D_txt]
Output:
scalar: [N, 1]
Implementation:
Parameters typically contain:
map_dim = 1024
module_scope = alignTextOp
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'alignTextOp')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(AlignTextLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
text attention for the current module
text attention for a previous (referred) module
round ids of the current and previous modules
"""
text_att1, text_att2, round_id1, round_id2 = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att1.shape.as_list()[-1]
map_dim = self._params['map_dim']
embed_dim = self._params['text_embed_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# concat both text attentions, along with round diff (if need be)
concat_list = [text_att1, text_att2]
# additional weight for the distance to the past
if self._params['amalgam_text_feats']:
round_diff = tf.cast(round_id1 - round_id2, tf.float32)
concat_list.append(round_diff)
concat = tf.concat(concat_list, axis=-1)
# deeper 2 layer align network
weights = tf.contrib.layers.fully_connected(concat, embed_dim)
weights = tf.contrib.layers.fully_connected(weights, 1,
activation_fn=None)
return [weights]
#--------------------------------------------------------------------
# Modules as Loom Ops
class FindLoomOp(loom.LoomOp):
"""
Mapping: image_feat_grid x text_param -> att_grid
Input:
image_feat_grid: [N, H, W, D_im]
text_param: [N, D_txt]
Output:
att_grid: [N, H, W, 1]
Implementation:
1. Elementwise multiplication between image_feat_grid and text_param
2. L2-normalization
3. Linear classification
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'find_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(FindLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
"""
img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# image_feat_mapped has shape [N, H, W, map_dim]
img_map = _1x1_conv('conv_image', img_feat, output_dim=map_dim)
# nonlinearity
img_map = tf.nn.relu(img_map)
text_map = fc('fc_text', text_att, output_dim=map_dim)
# nonlinearity
text_map = tf.nn.relu(text_map)
text_map = tf.reshape(text_map, [-1, 1, 1, map_dim])
# interact via element wise map
eltwise_mult = tf.nn.l2_normalize(img_map * text_map, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
# softmax
att_grid_soft = tf.nn.softmax(tf.reshape(att_grid, [-1, H*W]))
att_grid = tf.reshape(att_grid_soft, [-1, H, W, 1])
return [att_grid]
#------------------------------------------------------------------------------
class AndLoomOp(loom.LoomOp):
"""
Mapping: att_grid x att_grid -> att_grid
Input:
input_0: [N, H, W, 1]
input_1: [N, H, W, 1]
Output:
att_grid: [N, H, W, 1]
Implementation:
Take the elementwise-min
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'and_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(AndLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
visual attention outputs
time id for current module
"""
input1, input2 = inputs
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
att_grid = tf.minimum(input1, input2)
# now L1 normalize
norms = tf.einsum('ijkl->i', att_grid)
norms = tf.reshape(norms, [-1, 1, 1, 1])
#norms = tf.tile(tf.reshape(norms, [-1, 1, 1, 1]), [1, H, W, 1])
# NOTE: if norm is too low, then clip it
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
#------------------------------------------------------------------------------
class InvalidLoomOp(loom.LoomOp):
"""
Mapping: returns a context of zeros
Output:
context: [N, encodeSize] of zeros
Implementation:
Return an all-zero context vector (used when the decoded program is invalid)
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'invalid_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(InvalidLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example (used only to infer the batch size)
"""
img_feat = inputs
encode_size = self._params['encode_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
N = tf.shape(img_feat)[0]
context = tf.zeros([N, encode_size], tf.float32)
return [context]
#------------------------------------------------------------------------------
class DescribeLoomOp(loom.LoomOp):
"""
Mapping: att_grid -> context vector
Input:
input_0: [N, H, W, 1]
Output:
answer_scores: [N, outputSize]
Implementation:
1. Extract visual features using the input attention map, and
linear transform to map_dim
2. linear transform language features to map_dim
3. Element-wise multiplication of the two, l2_normalize, linear transform.
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'describe_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(DescribeLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
output from the previous modules
image feature for the example
text attention for all modules for the example
time id for current module
"""
vis_att, img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
encode_size = self._params['encode_size']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
text_map = fc('fc_text', text_att, output_dim=map_dim)
# nonlinearity
text_map = tf.nn.relu(text_map)
# att_feat, att_feat_1 has shape [N, D_vis]
att_feats = tf.reduce_sum(img_feat * vis_att, axis=[1, 2])
img_map = tf.reshape(fc('fc_att', att_feats, output_dim=map_dim),
[N, map_dim])
# nonlinearity
img_map = tf.nn.relu(img_map)
eltwise_mult = tf.nn.l2_normalize(img_map * text_map, 1)
context = fc('fc_eltwise', eltwise_mult, output_dim=encode_size)
return [context]
#------------------------------------------------------------------------------
class TransformLoomOp(loom.LoomOp):
"""
Mapping: att_grid x text_param -> att_grid
Input:
input_0: [N, H, W, 1]
text_param: [N, D_txt]
Output:
att_grid: [N, H, W, 1]
Implementation:
1. Extract visual features using the input attention map, and
linear transform to map_dim
2. linear transform language features to map_dim
3. Convolve image features to map_dim
4. Element-wise multiplication of the three, l2_normalize, linear transform.
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'transform_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(TransformLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
visual attention output from the previous module
image feature for the example
text attention for the current module
"""
vis_att, img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
encode_size = self._params['encode_size']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# image_feat_mapped has shape [N, H, W, map_dim]
img_map = _1x1_conv('conv_image', img_feat, output_dim=map_dim)
# nonlinearity
img_map = tf.nn.relu(img_map)
text_map = fc('fc_text', text_att, output_dim=map_dim)
text_map = tf.reshape(text_map, [-1, 1, 1, map_dim])
# nonlinearity
text_map = tf.nn.relu(text_map)
att_feats = tf.reduce_sum(img_feat * vis_att, axis=[1, 2])
att_map = tf.reshape(fc('fc_att', att_feats, output_dim=map_dim),
[N, 1, 1, map_dim])
# interact via element wise map
eltwise_mult = tf.nn.l2_normalize(img_map * text_map * att_map, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
# softmax
att_grid_soft = tf.nn.softmax(tf.reshape(att_grid, [-1, H*W]))
att_grid = tf.reshape(att_grid_soft, [-1, H, W, 1])
return [att_grid]
#------------------------------------------------------------------------------
|
corefnmn-main
|
models_vd/modules.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to execute programs using tensorflow fold loom API.
Program execution for explicit visual coreference resolution model in visual
dialog using neural module networks. Uses low-level loom API in tensorflow
fold:
https://github.com/tensorflow/fold/blob/master/tensorflow_fold/g3doc/py/loom.md
for dynamic creation and execution of computation graphs.
Author: Satwik Kottur
"""
import math
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow_fold.public import loom
import models_vd.modules as lm
from models_vd.assembler import INVALID_EXPR, _module_output_type
class ProgramExecutor:
def __init__(self, inputs, output_pool, assembler, params):
"""Initialize program execution subcomponent.
Args:
inputs: Dictionary of model input placeholders/tensors
output_pool: Outputs produced by other model components (e.g. the generator)
assembler: Assembler that converts predicted programs into loom graphs
params: Dictionary of model options and hyperparameters
"""
self.params = params
# assembler dynamically assembles the graph at run time
self._assembler = assembler
#--------------------------------------------------------------------------
# A. Create loom data inputs
loom_inputs, used_inputs = self._build_loom_inputs(inputs, output_pool)
# B. Create loom data types
types = self._build_loom_types()
self._loom_types = types
# C. Create loom operations
loom_ops_dict = self._build_loom_ops()
self._loom_ops = loom_ops_dict
# create a loom object
keys = ['text', 'image', 'answer', 'caption', 'time',
'fact', 'round', 'text_feat', 'cap_feat']
batch_ins = {types[k]: loom_inputs[k] for k in keys if k in loom_inputs}
self._loom = loom.Loom(batch_inputs=batch_ins, named_ops=loom_ops_dict)
# setup the inputs and outputs
self.outputs = {'context': self.get_loom_output(),
'att': self.get_loom_output(types['attention']),
'logits': self.get_loom_output(types['float'])}
# build alignment networks
if 'nmn-cap' in params['model']:
# binary classification over the alignment
align_context = self.get_loom_output(types['align'])
align_loss = self._build_align_network(align_context, inputs['align_gt'])
self.outputs['cap_align_loss'] = align_loss
used_inputs.append('align_gt')
# add invalid prog to used inputs
used_inputs.extend(['prog_validity', 'prog_validity_cap'])
self.inputs = {ii: inputs[ii] for ii in used_inputs}
# time/round place holder
self.inputs['time'] = loom_inputs['time']
self.inputs['round'] = loom_inputs['round']
def create_weaver(self):
"""Creates a weaver object within the current loom object.
"""
return self._loom.make_weaver()
def get_loom_output(self, type_shape=None):
"""Return the loom output given the type and shape.
"""
# default output is the context vector
if type_shape is None:
type_shape = self._loom_types['context']
return self._loom.output_tensor(type_shape)
#---------------------------------------------------------
def _adjust_text(self, text):
"""
takes text attention output from generator
modifies it to have certain dimensions
"""
params = self.params
# transpose text to have batch first dimension
text_mod = tf.transpose(text, [1, 0, 2])
# split across rounds
shape = text_mod.shape.as_list()
new_size = [-1, params['num_rounds'], shape[1], shape[2]]
return tf.reshape(text_mod, new_size)
def _build_fact_encoder(self, inputs):
"""
"""
# local alias
params = self.params
with tf.variable_scope(self.params['embed_scope'], reuse=True):
embed_mat = tf.get_variable('embed_mat')
# flatten
# embed the words
output = tf.nn.embedding_lookup(embed_mat, inputs['fact'])
# pass through encoder
cell = tf.contrib.rnn.BasicLSTMCell(params['text_embed_size'])
# begin decoding
for ii in range(0, params['num_layers']):
# dynamic rnn
output, states = tf.nn.dynamic_rnn(cell, output,
sequence_length=inputs['fact_len'],
dtype=tf.float32,
scope='fact_layer_%d' % ii)
# split roundwise
fact_embed = states[1]
text_dim = fact_embed.shape.as_list()[-1]
fact_embed = tf.reshape(fact_embed, [-1, params['num_rounds'], text_dim])
return fact_embed
def _build_align_network(self, align_vec, align_gt):
"""
Takes the caption alignment vector and computes a binary
alignment classification loss
"""
params = self.params
with tf.variable_scope('cap_align'):
# construct an mlp on top to a binary classification
align_vec = tf.contrib.layers.fully_connected(align_vec,
params['lstm_size']//2)
align_vec = tf.contrib.layers.fully_connected(align_vec, 2,
activation_fn=None)
# alias for criterion
criterion = tf.nn.sparse_softmax_cross_entropy_with_logits
align_loss = criterion(logits=align_vec, labels=align_gt)
align_loss = tf.reduce_mean(align_loss)
return align_loss
def _build_loom_inputs(self, inputs, output_pool):
'''
Sub routine to build the inputs to loom
'''
# --------- grab required inputs -------------
loom_inputs = {}
params = self.params
# A. image
loom_inputs['image'], _ = lm.add_spatial_coord_map(inputs['img_feat'])
#loom_inputs['image'] = inputs['img_feat']
used_inputs = ['img_feat']
# B. text -- both question and caption
key = 'ques_attended'
if params['train_mode']: text = output_pool[key]
else:
text = inputs[key]
used_inputs.append(key)
adjusted_text = self._adjust_text(text)
loom_inputs['text'] = adjusted_text
batch_size = tf.shape(adjusted_text)[0]
# C. Facts
if params['use_fact']:
loom_inputs['fact'] = self._build_fact_encoder(inputs)
used_inputs.extend(['fact', 'fact_len'])
concat_list = [adjusted_text]
loom_inputs['text_feat'] = tf.concat(concat_list, -1)
# C. Get captions, if needed
if 'nmn-cap' in params['model']:
key = 'cap_attended'
if params['train_mode']: text = output_pool[key]
else:
text = inputs[key]
used_inputs.append(key)
loom_inputs['caption'] = self._adjust_text(text)
loom_inputs['cap_feat'] = loom_inputs['caption']
# D. time steps (internal placeholder)
loom_inputs['time'] = tf.placeholder(tf.int32, (None, 1), 'time')
loom_inputs['round'] = tf.placeholder(tf.int32, (None, 1), 'round')
return loom_inputs, used_inputs
def _build_loom_types(self):
"""Method to build loom types for given setting.
"""
params = self.params
encode_size = params['lstm_size']
# create and save loom types
types = {}
types['time'] = loom.TypeShape('int32', (1,), 'time')
types['round'] = loom.TypeShape('int32', (1,), 'round')
types['float'] = loom.TypeShape('float32', (1,))
types['context'] = loom.TypeShape('float32', (encode_size,), 'context')
types['align'] = loom.TypeShape('float32', (encode_size,), 'align')
size = (params['num_rounds'], params['text_embed_size'])
types['fact'] = loom.TypeShape('float32', size, 'fact')
size = (params['num_rounds'], params['max_dec_len'],
params['text_embed_size'])
types['text'] = loom.TypeShape('float32', size, 'text')
types['caption'] = loom.TypeShape('float32', size, 'caption')
size = (params['text_embed_size'],)
types['text_slice'] = loom.TypeShape('float32', size, 'text_slice')
# this depends on whether we want all features
concat_dim = params['text_embed_size']
size = (params['num_rounds'], params['max_dec_len'], concat_dim)
types['text_feat'] = loom.TypeShape('float32', size, 'text_feat')
types['cap_feat'] = loom.TypeShape('float32', size, 'cap_feat')
size = (concat_dim,)
types['text_feat_slice'] = loom.TypeShape('float32', size, 'text_feat_slice')
# include spatial dimensions (x, y), add 2
size = (params['h_feat'], params['w_feat'], params['d_feat'] + 2)
types['image'] = loom.TypeShape('float32', size, 'image')
size = (params['h_feat'], params['w_feat'], 1)
types['attention'] = loom.TypeShape('float32', size, 'att')
return types
def _build_loom_ops(self):
"""TODO(satwik): Some helper text here
"""
params = self.params
types = self._loom_types
# create all modules under the same scope
wt = params.get('priority_weight', 1.0)
op_params = {'map_dim': 1024, 'priority_weight': wt}
with tf.variable_scope('loom_modules') as module_scope:
op_params['module_scope'] = module_scope
# creating ops
loom_ops_dict = {}
in_types = [types['float'], types['float']]
out_types = [types['float']]
loom_ops_dict['add'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
loom_ops_dict['divide'] = lm.BinaryLoomOp(in_types, out_types, tf.divide)
in_types = [types['float']]
loom_ops_dict['exp'] = lm.UnaryLoomOp(in_types, out_types, tf.exp)
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['add_attention'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['max_attention'] = lm.BinaryLoomOp(in_types, out_types,
tf.maximum)
# basic attention manipulation ops
in_types = [types['attention'], types['float']]
out_types = [types['attention']]
loom_ops_dict['weight_attention'] = lm.AttentionWeightLoomOp(in_types,
out_types)
in_types = [types['text_feat_slice'], types['text_feat_slice'],
types['round'], types['round']]
out_types = [types['float']]
op_params['amalgam_text_feats'] = params['amalgam_text_feats']
op_params['text_embed_size'] = params['text_embed_size']
loom_ops_dict['align_text'] = lm.AlignTextLoomOp(in_types, out_types, op_params)
# slicing ops
in_types = [types['text'], types['round'], types['time']]
out_types = [types['text_slice']]
loom_ops_dict['slice_text'] = lm.SliceTextLoomOp(in_types, out_types)
in_types = [types['text_feat'], types['round'], types['time']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_text_feat'] = lm.SliceTextLoomOp(in_types, out_types)
# slice_answer_embedding
in_types = [types['fact'], types['round']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_fact'] = lm.SliceAnswerLoomOp(in_types, out_types)
# normalize and complement
in_types = [types['attention']]
out_types = [types['attention']]
loom_ops_dict['normalize_exclude']= lm.NormalizeExcludeLoomOp(in_types,
out_types)
#------------------------------------------------------------------
# find module
in_types = [types['image'], types['text_slice']]
out_types = [types['attention']]
loom_ops_dict['find'] = lm.FindLoomOp(in_types, out_types, op_params)
# and module
in_types = [types['attention'], types['attention']]
loom_ops_dict['and_op'] = lm.AndLoomOp(in_types, out_types, op_params)
# transform module
in_types = [types['attention'], types['image'], types['text_slice']]
loom_ops_dict['transform'] = lm.TransformLoomOp(in_types, out_types, op_params)
# describe module
out_types = [types['context']]
op_params['encode_size'] = params['lstm_size']
loom_ops_dict['describe'] = lm.DescribeLoomOp(in_types, out_types, op_params)
# invalid Module
in_types = [types['image']]
loom_ops_dict['invalid'] = lm.InvalidLoomOp(in_types, out_types, op_params)
#------------------------------------------------------------------
# type converter ops
in_types, out_types = [types['caption']], [types['text']]
loom_ops_dict['convert_cap_in'] = lm.IdentityLoomOp(in_types, out_types)
in_types, out_types = [types['context']], [types['align']]
loom_ops_dict['convert_cap_out'] = lm.IdentityLoomOp(in_types, out_types)
in_types, out_types = [types['cap_feat']], [types['text_feat']]
loom_ops_dict['convert_cap_feat'] = lm.IdentityLoomOp(in_types, out_types)
return loom_ops_dict
#---------------------------------------------------------
# setters and getters
def get_outputs(self): return self.outputs
def get_inputs(self): return self.inputs
#------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, output_pool=None, visualize=False):
if 'prog' not in self.params['model']: return
# dynamically assemble the graph, based on predicted tokens
if self.params['train_mode']:
ques_programs = batch['gt_layout']
if 'nmn-cap' in self.params['model']:
cap_programs = batch['sh_cap_prog']
else:
ques_programs = output_pool['pred_tokens']
if 'nmn-cap' in self.params['model']:
cap_programs = output_pool['pred_tokens_cap']
tokens = {'ques': ques_programs}
if 'nmn-cap' in self.params['model']: tokens['caption'] = cap_programs
weaver, loom_outputs, invalid_prog \
= self._assembler.assemble(tokens, self, visualize)
# build feed dict from loom
feed_dict = weaver.build_feed_dict(loom_outputs)
# additional feeds
feed_dict.update(self._produce_add_feeds(batch, output_pool, invalid_prog))
return feed_dict
#------------------------------------------------------------
def _produce_add_feeds(self, batch, output_pool, invalid_prog):
feed_dict = {}
# feed invalid Prog
feed_dict[self.inputs['prog_validity']] = np.array(invalid_prog['ques'])
if 'nmn-cap' in self.params['model']:
feed_dict[self.inputs['prog_validity_cap']] = np.array(invalid_prog['cap'])
# additional feeds
feed_dict[self.inputs['img_feat']] = batch['img_feat']
if self.params['use_fact']:
feed_dict[self.inputs['fact']] = batch['fact']
feed_dict[self.inputs['fact_len']] = batch['fact_len']
if 'nmn-cap' in self.params['model']:
feed_dict[self.inputs['align_gt']] = batch['align_gt']
max_time = self.params['max_dec_len']
feed_dict[self.inputs['time']] = np.arange(max_time).reshape([-1, 1])
round_ranges = np.arange(self.params['num_rounds']).reshape([-1, 1])
feed_dict[self.inputs['round']] = round_ranges
if not self.params['train_mode']:
# list of labels to read from output pool conditionally
labels = ['ques_attended', 'cap_attended', 'ques_enc', 'cap_enc']
for label in labels:
if label in self.inputs:
feed_dict[self.inputs[label]] = output_pool[label]
feed_dict[self.inputs['img_feat']] = batch['img_feat']
return feed_dict
#------------------------------------------------------------
# segregating the outputs
def segregrate_outputs(self, output):
'''
Go over the outputs, cap tokens and ques tokens
'''
if 'nmn-cap' in self.params['model']:
cap_tokens = output['pred_tokens_cap'][:, 0]
ques_tokens = output['pred_tokens']
mod_out_type = _module_output_type
mod_dict = self._assembler.module_names
att = output['att']
# logits -> weights when visualizing
weights = output['logits']
# segregated outputs
sep_att = []
sep_wts = []
wt_labels = []
num_reuse = 0
att_ind = 0
weight_ind = 0
# go over caption
if 'nmn-cap' in self.params['model']:
for t_id in range(self.params['max_dec_len']):
cur_module = mod_dict[cap_tokens[t_id]]
if cur_module == '<eos>': break
if mod_out_type[cur_module] == 'att':
sep_att.append(('cap', t_id, 0, att[att_ind]))
att_ind += 1
if cur_module == '_Find':
wt_labels.append('C_%d' % t_id)
num_reuse += 1
# assume a batch size of 1
for r_id in range(self.params['num_rounds']):
for t_id in range(self.params['max_dec_len']):
cur_module = mod_dict[ques_tokens[t_id, r_id]]
if cur_module == '<eos>':
# even answer has a weight now
if self.params['use_fact']:
wt_labels.append('A%d' % r_id)
num_reuse += 1
break
if mod_out_type[cur_module] == 'att':
sep_att.append(('ques', t_id, r_id, att[att_ind]))
att_ind += 1
if cur_module == '_Refer':
st = weight_ind
end = weight_ind + num_reuse
sep_wts.append((r_id, weights[st:end], wt_labels))
weight_ind += num_reuse
if cur_module == '_Find':
wt_labels.append('Q%d_%d' % (r_id, t_id))
num_reuse += 1
# do not assert if baseline
if 'baseline' in self.params['model']:
return sep_att, sep_wts
for arg in sep_wts:
assert(abs(np.sum(arg[1]) - 1.0) < 1e-5)
# Sanity checks to ensure Refer is not doing anything weird.
assert(weight_ind == weights.shape[0])
assert(att_ind == att.shape[0])
return sep_att, sep_wts
|
corefnmn-main
|
models_vd/executor.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Methods to compute metrics given the list of ranks.
Author: Satwik Kottur
"""
import numpy as np
# static list of metrics
metric_list = ['r1', 'r5', 'r10', 'mean', 'mrr']
# +1 - greater the better
# -1 - lower the better
trends = [1, 1, 1, -1, 1]
def evaluate_metric(ranks, metric):
"""
Args:
ranks: List of ranks
metric: Name of the metric to be computed
Returns:
Appropriate evaluation of the metric
"""
if metric == 'r1':
ranks = ranks.reshape(-1)
return 100 * np.sum(ranks <= 1)/float(ranks.shape[0])
if metric == 'r5':
ranks = ranks.reshape(-1)
return 100 * np.sum(ranks <= 5)/float(ranks.shape[0])
if metric == 'r10':
ranks = ranks.reshape(-1)
return 100 * np.sum(ranks <= 10)/float(ranks.shape[0])
if metric == 'mean':
ranks = ranks.reshape(-1)
return np.mean(ranks)
if metric == 'mrr':
ranks = ranks.reshape(-1)
return np.mean(1/ranks)
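# Worked example (editor addition, not in the original file): for
# ranks = np.array([1, 3, 12]) the metrics are r1 = 33.333 (one of the three
# ranks is <= 1), r5 = r10 = 66.667, mean = 5.333, and
# mrr = (1/1 + 1/3 + 1/12) / 3 ~= 0.472.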
def compute_metrics(ranks, silent=False):
"""Compute standard metrics, given the ranks.
Args:
ranks: List of ranks
silent: To decide the verbosity
Returns:
results: Dictionary of metrics
"""
results = {metric: evaluate_metric(ranks, metric) for metric in metric_list}
# pretty print metrics
if not silent:
pretty_print_metrics(results)
return results
def pretty_print_metrics(results):
"""Pretty print the metrics given as a dictionary.
"""
# pretty print metrics
print('\n')
for metric in metric_list: print('\t%s : %.3f' % (metric, results[metric]))
class ExponentialSmoothing:
"""Class responsible to exponentially smooth and track losses.
"""
def __init__(self):
self.value = None
self.blur = 0.95
self.op = lambda x, y: self.blur * x + (1 - self.blur) * y
# add a new value
def report(self, new_val):
if self.value is None:
self.value = new_val
else:
self.value = {key: self.op(value, new_val[key])
for key, value in self.value.items()}
return self.value
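# Usage sketch (editor addition): the tracker keeps, for every key reported,
# a running value v <- 0.95 * v + 0.05 * new_val.
# smoother = ExponentialSmoothing()
# smoother.report({'loss': 2.0})   # -> {'loss': 2.0}
# smoother.report({'loss': 1.0})   # -> {'loss': 1.95}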
|
corefnmn-main
|
util/metrics.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script with supporting functions for the main train program.
"""
import os
import sys
import subprocess
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from skimage import transform, filters
from PIL import Image
def last_relevant(output, length):
batch_size = tf.shape(output)[0]
max_length = tf.shape(output)[1]
out_size = int(output.shape[2])
index = tf.range(0, batch_size) * max_length + (length - 1)
flat = tf.reshape(output, [-1, out_size])
relevant = tf.gather(flat, index)
return relevant
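# Example (editor addition, not in the original file): for an RNN output of
# shape [batch, max_len, out_size] and length = [2, 4], last_relevant returns
# output[0, 1, :] and output[1, 3, :] -- the state at each sequence's final
# valid step -- by flattening to [batch * max_len, out_size] and gathering
# rows at index row * max_len + (length - 1).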
# blending attention map with an image
# source:
# github.com/abhshkdz/neural-vqa-attention/blob/master/attention_visualization.ipynb
def get_blend_map(img, att_map, blur=True, overlap=True):
# normalize the attention map to [0, 1]
att_map -= att_map.min()
if att_map.max() != 0: att_map /= att_map.max()
image_size = img.shape[:2]
att_map = transform.resize(att_map, image_size, order = 3)
if blur:
att_map = filters.gaussian(att_map, 0.05 * max(img.shape))
#att_map -= att_map.min()
att_map /= att_map.max()
cmap = plt.get_cmap('jet')
att_map_v = cmap(att_map)
att_map_v = np.delete(att_map_v, 3, 2)
att_map_v *= 255
if overlap:
#vis_im = att_map_v * att_map + (1-att_reshaped)*all_white
#vis_im = att_map_v*im + (1-att_reshaped)*all_white
att_map = 1 * (1 - att_map**0.7).reshape(att_map.shape + (1,)) * img \
+ (att_map**0.7).reshape(image_size + (1,)) * att_map_v
return att_map
# pretty prints dictionary
def pretty_print_dict(parsed):
max_len = max([len(ii) for ii in parsed.keys()])
fmt_string = '\t%' + str(max_len) + 's : %s'
print('Arguments:')
#for key_pair in parsed.items(): print(fmt_string % key_pair)
# sort in alphabetical order
keys = [ii for ii in parsed.keys()]
keys.sort()
for key in keys: print(fmt_string % (key, parsed[key]))
# numerically stable softmax
def softmax(x):
"""Compute softmax values for each set of scores in x."""
e_x = np.exp(x - np.max(x))
return e_x / e_x.sum()
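# Example (editor addition): softmax(np.array([1.0, 2.0, 3.0])) first subtracts
# the max (3.0), giving exp([-2, -1, 0]) / sum ~= [0.090, 0.245, 0.665]; the
# shift only improves numerical stability and does not change the result.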
# interpolate attention
def interpolate_attention(im, att):
# steps:
# 1. reshape the attention to image size (with cubic)
#soft_att = softmax(att)
soft_att = att
att_reshaped = transform.resize(soft_att, im.shape[:2], order=3)
att_reshaped /= np.max(att_reshaped)
att_reshaped = att_reshaped[..., np.newaxis]
# heat map
#cmap = plt.get_cmap('jet')
#vis_im = cmap(att_reshaped)
#vis_im *= (255 if im.dtype == np.uint8 else 1)
# white + image
all_white = np.ones_like(im) * (255 if im.dtype == np.uint8 else 1)
vis_im = att_reshaped * im + (1 - att_reshaped) * all_white
vis_im = vis_im.astype(im.dtype)
return vis_im
# shuffling data for image - caption to train alignment
def shuffle(arg_list, batch_size):
# get the batch size
#batch_size = arg_list[0].shape[0] // 10
# first five remain the same
indices = np.random.randint(0, batch_size-1, 10*batch_size)
for ii in range(batch_size):
indices[10*ii:10*ii+5] = ii
diag = indices[10*ii+5:10*ii+10]
diag[diag >= ii] += 1
indices[10*ii+5:10*ii+10] = diag
shuffled = [None for args in arg_list]
for ii, args in enumerate(arg_list):
assert batch_size == args.shape[0]
shuffled[ii] = args[indices]
return shuffled
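# Example (editor addition): with batch_size = 4, the ten indices for example 0
# become [0, 0, 0, 0, 0, j1, ..., j5] where every j is drawn from {1, 2, 3};
# the first five keep the aligned caption and the last five sample mismatches.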
# loading an image and converting to numpy
def load_image(file_name) :
img = Image.open(file_name)
img.load()
data = np.asarray(img, dtype="int32")
return data
# temporary launching of evaluation job (slurm)
def launch_evaluation_job(output_path, checkpoint):
script_path = 'run_slurm_eval_mnist.sh'
# read and edit accordingly
with open(script_path, 'r') as file_id:
template = file_id.read()
# write a temporary script, run and remove
temp_path = script_path.replace('.sh', '_temp.sh')
with open(temp_path, 'w') as file_id:
file_id.write(template % (output_path, checkpoint))
subprocess.call('sbatch %s' % temp_path, shell=True)
def save_batch(batch, save_path, terminate=False):
"""Saves a batch to visualize or debug.
Args:
batch: List of intermediate outputs (see visualize_sl.py for example)
save_path: Path to save the batch
terminate: In debug mode, terminate the program
"""
print('Saved batch: {0}'.format(save_path))
np.save(save_path, batch)
assert not terminate, 'Program terminated!'
|
corefnmn-main
|
util/support.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
# Script that contains methods for processing answers and questions
# coding: utf-8
import re, pdb
from unidecode import unidecode
# Method used to clean up and convert non ascii to unicode
def clean_non_ascii(text):
try:
text = text.decode('ascii')
except:
# Contains non-ascii symbols
# Check if it needs to be converted to unicode
try: text = unicode(text, encoding = 'utf-8')
except: pass
text = unidecode(text)
return text
|
corefnmn-main
|
util/clean.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script that converts Stanford parser outputs to neural module
network layout outputs.
"""
import argparse
import copy
import json
import os
import pdb
import re
import sys
import sexpdata
import numpy as np
from models_vd.assembler import Assembler
from tqdm import tqdm as progressbar
def extract_parse(p):
"""Given string, extracts a parse.
"""
if isinstance(p, sexpdata.Symbol):
return p.value()
elif isinstance(p, int):
return str(p)
elif isinstance(p, bool):
return str(p).lower()
elif isinstance(p, float):
return str(p).lower()
return tuple(extract_parse(q) for q in p)
def parse_tree(p):
if "'" in p:
p = "none"
parsed = sexpdata.loads(p)
extracted = extract_parse(parsed)
return extracted
parse2module_dict = {'find': '_Find',
'relate': '_Transform',
'and': '_And',
'is': '_Describe', # All the top modules go to '_Describe'
'describe': '_Describe'
}
def flatten_layout(parse):
# Postorder traversal to generate Reverse Polish Notation (RPN)
if isinstance(parse, str):
return [parse2module_dict[parse]]
RPN = []
head = parse[0]
body = parse[1:]
module = parse2module_dict[head]
for m in body:
RPN += flatten_layout(m)
RPN += [module]
return RPN
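# Worked example (editor addition): the parse
# ('is', ('and', 'find', ('relate', 'find'))) flattens in postorder to
# ['_Find', '_Find', '_Transform', '_And', '_Describe'].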
def extract_set(params):
# assembler to look for incorrect programs
assembler = Assembler(params.prog_vocab_file)
# manual correction to layouts
layout_correct = {('_Find', '_Transform', '_And', '_Describe')
:['_Find', '_Transform', '_Describe'],
('_Transform', '_Describe')
:['_Find', '_Transform', '_Describe'],
('_Transform', '_Transform', '_And', '_Describe')
:['_Find', '_Transform', '_Transform', '_Describe'],
('_Describe',)
:['_Find', '_Describe'],
('_Transform', '_Find', '_And', '_Describe')
:['_Find', '_Transform', '_Describe']}
with open(params.nmn_file) as f:
# drop the spans
read_layouts = [re.sub(r'\[\d*,\d*\]', '', ll) for ll in f.readlines()]
layouts = [flatten_layout(parse_tree(ll)) for ll in read_layouts]
layouts = [layout_correct.get(tuple(ii), tuple(ii)) for ii in layouts]
with open(params.nmn_file) as f:
# extracting spans as well
lines = [ii for ii in f.readlines()]
attentions = []
for index, ii in enumerate(lines):
layout = layouts[index]
# extract the spans
matches = re.findall(r'(\w\w)\[(\d*),(\d*)\]', ii)
# match module with attention, if present
att = []
for token in layout:
candidates = []
if token == '_Find':
candidates = [jj for jj in matches if jj[0] == 'nd']
if token == '_Transform':
candidates = [jj for jj in matches if jj[0] == 'te']
if token == '_Describe':
candidates = [jj for jj in matches
if jj[0] != 'te' and jj[0] != 'nd']
if len(candidates) >= 1:
att.append((int(candidates[0][1]), int(candidates[0][2])))
matches.remove(candidates[0])
else:
att.append((0, 0))
# record attentions and layouts
attentions.append(att)
# correct the layouts according to the above dictionary
layouts = [layout_correct.get(tuple(ii), ii) for ii in layouts]
layout_set = {tuple(l) for l in layouts}
print('Found %d unique layouts' % len(layout_set))
for l in layout_set:
print(' ', ' '.join(list(l)))
# check whether the layout is valid
for l in layout_set:
batch = assembler.module_list2tokens(l, T=20)
validity, error = assembler.sanity_check_program(batch)
if not validity:
raise Exception('invalid expr:' + str(l) + ' ' + error)
# read the original data path
with open(params.visdial_file, 'r') as file_id:
vd_data = json.load(file_id)
# question id to layout dictionary
if params.question:
qid2layout_dict = {}
for datum in progressbar(vd_data['data']['dialogs']):
img_id = datum['image_id']
for r_id, round_datum in enumerate(datum['dialog']):
q_id = img_id * 10 + r_id
q_layout = layouts[round_datum['question']]
# record
qid2layout_dict[q_id] = q_layout
np.save(params.save_path, np.array(qid2layout_dict))
else:
np.save(params.save_path, np.array(layouts))
print('Saving to: ' + params.save_path)
save_file_att = params.save_path.replace('.layout', '.attention')
print('Saving (att) to: ' + save_file_att)
np.save(save_file_att, np.array(attentions))
set_layout_length = [len(l) for l in layouts]
return set_layout_length
def main(FLAGS):
# check if it is question or caption
FLAGS.question = 'ques' in FLAGS.nmn_file
FLAGS.save_path = FLAGS.nmn_file.replace('pgm', 'layout')
print('Saving at: %s' % FLAGS.save_path)
layout_length = extract_set(FLAGS)
print('Program length distribution:')
print(np.unique(layout_length, return_counts=True))
if __name__ == '__main__':
title = 'Converting parser outputs to neural module network programs'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--nmn_file', required=True,
help='Neural Module file path')
parser.add_argument('--visdial_file', required=True,
help='Path to the original visdial file')
parser.add_argument('--prog_vocab_file', required=True,
help='Path to program vocabulary file for the assembler')
FLAGS = parser.parse_args()
main(FLAGS)
|
corefnmn-main
|
util/convert_nmn_layouts.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Extracts coreference supervision for visdial dataset using off-the-shelf system.
"""
import argparse
import json
import sys
import neuralcoref
import spacy
from tqdm import tqdm as progressbar
def get_question_answer(data, i_dialog, i_question):
"""Extracts question + answer for a dialog turn.
Args:
data: Visdial data
i_dialog: Index for the dialog
i_question: Index for the turn
"""
dialog = data["data"]["dialogs"][i_dialog]["dialog"][i_question]
return (
data["data"]["questions"][dialog["question"]],
data["data"]["answers"][dialog["answer"]],
)
def get_coref_cluster_sentence(utterance_cluster):
"""Visualize the co-reference clusters as string using, e.g [1 ].
Args:
utterance_cluster: Cluster corresponding to the utterance
"""
sentence = ""
for utterance in utterance_cluster:
if not sentence:
# print(utterance["sentence"])
sentence = list(" " * (len(utterance["sentence"]) + 2))
s = utterance["start_char"]
sentence[s] = "["
sentence[utterance["end_char"]] = "]"
s += 1
if not sentence[s] == " ":
s += 2
id_str = str(utterance["cluster_id"])
sentence[s : (s + len(id_str))] = id_str
# print("".join(sentence))
return "".join(sentence)
def get_coref_cluster_list(utterance_cluster_map, ui):
if ui in utterance_cluster_map:
return utterance_cluster_map[ui]
else:
return []
def extract_corefs(data_file_name, out_file_name):
print("Reading: {}".format(data_file_name))
with open(data_file_name) as data_file:
data = json.load(data_file)
n_dialogs = len(data["data"]["dialogs"])
coref = neuralcoref.Coref()
# NOTE: neuralcoref gets stuck if there are numbers with an apostrophe.
# Replacing them with equally long strings as a temporary fix.
def remove_numbered_age(string):
REPLACE_STRINGS = {
"10's": "10ss",
"20's": "20ss",
"30's": "30ss",
"40's": "40ss",
"50's": "50ss",
"60's": "60ss",
"70's": "70ss",
"80's": "80ss",
"90's": "90ss",
"100's": "100ss",
}
final_string = string
for key, replacement in REPLACE_STRINGS.items():
final_string = final_string.replace(key, replacement)
return final_string
for i_dialog in progressbar(range(n_dialogs)):
dialog = data["data"]["dialogs"][i_dialog]
str_dialog = dialog["caption"] + ". "
list_dialog = [dialog["caption"] + "."]
for i_question in range(len(dialog["dialog"])):
q, a = get_question_answer(data, i_dialog, i_question)
str_dialog += q + "? " + a + ". "
list_dialog.append(q + "?")
list_dialog.append(a + ".")
list_dialog = [remove_numbered_age(ii) for ii in list_dialog]
clusters = coref.one_shot_coref(utterances=list_dialog)
mentions = coref.get_mentions()
cluster_keys = list(clusters.keys())
# match from utterance to cluster
utterance_cluster_map = {}
utterance_referrer_map = {}
utterance_reference_map = {}
for i_key in range(len(cluster_keys)):
# assume the reference is the first occurrence
reference = min(clusters[cluster_keys[i_key]])
cluster_dict_ref = {}
cluster_dict_ref["reference_sentence_id"] = mentions[
reference
].utterance_index
cluster_dict_ref["reference_start_word"] = mentions[reference].start
cluster_dict_ref["reference_end_word"] = mentions[reference].end
cluster_dict_ref["reference_start_char"] = mentions[reference].start_char
cluster_dict_ref["reference_end_char"] = mentions[reference].end_char
for i_mention in clusters[cluster_keys[i_key]]:
cluster_dict = {}
ui = mentions[i_mention].utterance_index
cluster_dict["cluster_id"] = i_key
cluster_dict["start_word"] = mentions[i_mention].start
cluster_dict["end_word"] = mentions[i_mention].end
cluster_dict["start_char"] = mentions[i_mention].start_char
cluster_dict["end_char"] = mentions[i_mention].end_char
cluster_dict["sentence"] = list_dialog[ui]
if ui not in utterance_cluster_map:
utterance_cluster_map[ui] = []
utterance_referrer_map[ui] = []
utterance_reference_map[ui] = []
utterance_cluster_map[ui].append(cluster_dict)
if i_mention == reference:
utterance_reference_map[ui].append(cluster_dict)
else:
cluster_dict.update(cluster_dict_ref)
utterance_referrer_map[ui].append(cluster_dict)
cluster_list = get_coref_cluster_list(utterance_cluster_map, 0)
data["data"]["dialogs"][i_dialog]["caption_coref_clusters"] = cluster_list
data["data"]["dialogs"][i_dialog][
"caption_coref_visualized"
] = get_coref_cluster_sentence(cluster_list)
data["data"]["dialogs"][i_dialog][
"caption_reference_clusters"
] = get_coref_cluster_list(utterance_reference_map, 0)
for i_question in range(len(dialog["dialog"])):
# set which utterance it came from
cluster_list = get_coref_cluster_list(
utterance_cluster_map, (i_question + 1) * 2
)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"answer_coref_clusters"
] = cluster_list
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"answer_coref_visualized"
] = get_coref_cluster_sentence(cluster_list)
cluster_list = get_coref_cluster_list(
utterance_cluster_map, (i_question) * 2 + 1
)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"question_coref_clusters"
] = cluster_list
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"question_coref_visualized"
] = get_coref_cluster_sentence(cluster_list)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"answer_referrer_clusters"
] = get_coref_cluster_list(utterance_referrer_map, (i_question + 1) * 2)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"question_referrer_clusters"
] = get_coref_cluster_list(utterance_referrer_map, (i_question) * 2 + 1)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"answer_reference_clusters"
] = get_coref_cluster_list(utterance_reference_map, (i_question + 1) * 2)
data["data"]["dialogs"][i_dialog]["dialog"][i_question][
"question_reference_clusters"
] = get_coref_cluster_list(utterance_reference_map, (i_question) * 2 + 1)
print("Saving: {}".format(out_file_name))
with open(out_file_name, "w") as outfile:
json.dump(data, outfile)
return clusters, coref, data
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
"--input_data_path", required=True, help="Path to VisDial JSON files"
)
parser.add_argument(
"--output_save_path", default="-", help="Path to save the coreferences"
)
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
extract_corefs(parsed_args["input_data_path"], parsed_args["output_save_path"])
|
corefnmn-main
|
util/extract_coreference_supervision.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
import re
SENTENCE_SPLIT_REGEX = re.compile(r'(\W+)')
def tokenize(sentence):
tokens = SENTENCE_SPLIT_REGEX.split(sentence.lower())
tokens = [t.strip() for t in tokens if len(t.strip()) > 0]
return tokens
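# Example (editor addition): tokenize('Is the cat black?') returns
# ['is', 'the', 'cat', 'black', '?'] -- the regex splits on non-word runs and
# keeps the punctuation as separate tokens.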
def load_str_list(fname):
with open(fname) as f:
lines = f.readlines()
lines = [l.strip() for l in lines]
return lines
class VocabDict:
def __init__(self, vocab_file):
self.word_list = load_str_list(vocab_file)
self.word2idx_dict = {w:n_w for n_w, w in enumerate(self.word_list)}
self.num_vocab = len(self.word_list)
self.UNK_idx = self.word2idx_dict['<unk>'] \
if '<unk>' in self.word2idx_dict else None
def idx2word(self, n_w):
return self.word_list[n_w]
def word2idx(self, w):
if w in self.word2idx_dict:
return self.word2idx_dict[w]
elif self.UNK_idx is not None:
return self.UNK_idx
else:
raise ValueError(('word %s not in dictionary '
'(while dictionary does not contain <unk>)') % w)
def tokenize_and_index(self, sentence):
inds = [self.word2idx(w) for w in tokenize(sentence)]
return inds
# add new tokens for decoding
def add_new_tokens(self, new_token_list):
for new_token in new_token_list:
if new_token in self.word_list:
print('%s already exists in vocabulary!' % new_token)
continue
print('Adding %s to vocabulary' % new_token)
self.word2idx_dict[new_token] = self.num_vocab
self.word_list.append(new_token)
self.num_vocab += 1
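# Usage sketch (editor addition; 'vocab.txt' is a hypothetical one-token-per-
# line file): out-of-vocabulary words fall back to the <unk> index if present.
# vocab = VocabDict('vocab.txt')
# inds = vocab.tokenize_and_index('is the cat black ?')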
|
corefnmn-main
|
util/text_processing.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Given the data file, create a vocabulary file and extract the glove features
for embedding initializations.
"""
import argparse
from collections import defaultdict
import json
import re
import sys
from unidecode import unidecode
import matplotlib.pyplot as plt
from nltk.tokenize import word_tokenize
import numpy as np
import spacy
def main(args):
# initialize vocab from file
print('Reading vocabulary from: %s' % args.vocab_file)
with open(args.vocab_file, 'r') as fileId:
vocab_dict = json.load(fileId)
vocab_set = set(vocab_dict['word2ind'].keys())
# Though we have collected all the words from source vocabulary, add <UNK>
# and add other tokens for answer decoding
# <start> <end> <pad>
vocab_set.add('<unk>')
vocab_set.add('<start>')
vocab_set.add('<end>')
vocab_set.add('<pad>')
print('Vocabulary size: %d, keeping all of them ..' % len(vocab_set))
vocab_list = list(vocab_set)
vocab_list.sort()
print('Saving vocabulary: ' + args.save_path)
with open(args.save_path, 'w') as file_id:
file_id.writelines([w.replace('\u2019', '') + '\n' for w in vocab_list])
# Collect glove vectors for the words, and save.
glove_dim = 300
glove_mat = np.zeros((len(vocab_list), glove_dim), np.float32)
nlp = spacy.load('en_vectors_web_lg')
for index, word in enumerate(vocab_list):
glove_mat[index] = nlp(word).vector
glove_mat_file = args.save_path.replace('.txt', '_glove.npy')
print('Saving glove vectors: ' + glove_mat_file)
np.save(glove_mat_file, glove_mat)
if __name__ == '__main__':
title = 'Collect vocabulary and GloVe embeddings for corefnmn'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--vocab_file', required=True,
help='Vocabulary file from original visdial code')
parser.add_argument('--save_path', required=True,
help=('Path to save the vocabulary text file and '
'glove embeddings for corefnmn code'))
args = parser.parse_args()
main(args)
|
corefnmn-main
|
util/collect_glove_features.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Final preprocessing script to create the image dialog database that
can be used to serve batches by the batch loader while training and evaluation
for MNIST experiments.
"""
import argparse
from collections import defaultdict
import copy
import json
import os
import pdb
import sys
import numpy as np
from nltk.tokenize import word_tokenize
from tqdm import tqdm as progressbar
from util import text_processing, clean, support
# program supervision
# question types vs layouts (manually done)
prog_ques_type = {
'Qa': '_Find _Exist',
'Qb': '_Find _Count',
'Qc': '_Find _Describe',
'Qd': '_Refer _Transform _Describe',
'Qe': '_Refer _Not _Find _And _Exist'
}
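# Example (editor addition): a type 'Qa' question keeps the layout
# '_Find _Exist' in the first round; for later rounds build_imdb below rewrites
# it to '_Refer _Find _And _Exist' so the program can reuse earlier references.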
def build_imdb(data, split, vocab, ans_list, FLAGS):
"""Function to build the image dialog dataset, given the data split.
Args:
data: MNIST Dialog dataset json
split: Data split -- train | valid | test
vocab: Vocabulary object created from question vocabulary (train only)
ans_list: List of answers, created from train set
FLAGS: Command line arguments
Returns:
imdb: Image dialog database to train corefnmn
"""
print('Building imdb for %s' % split)
source = data['%sExamples' % split]
ans_dict = {word: ii for ii, word in enumerate(ans_list)}
# process and tokenize all questions and answers
tokenizer = lambda x: [vocab.word2idx(ii) for ii in
word_tokenize(clean.clean_non_ascii(x))]
print('Collecting and tokenizing questions')
ques_dict = {}
ques_list = []
for datum in progressbar(source):
for round_datum in datum['qa']:
ques = round_datum['question']
if ques in ques_dict: continue
else:
ques_list.append(ques)
ques_dict[ques] = len(ques_dict)
clean_ques = [tokenizer(ques.lower()) for ques in progressbar(ques_list)]
max_ques_len = max([len(ii) for ii in clean_ques])
ques_tokens = np.zeros((len(clean_ques), max_ques_len)).astype('int32')
ques_tokens.fill(vocab.word2idx('<pad>'))
ques_lens = np.zeros(len(clean_ques)).astype('int32')
for q_id, tokens in progressbar(enumerate(clean_ques)):
ques_lens[q_id] = len(tokens)
ques_tokens[q_id, :ques_lens[q_id]] = np.array(tokens)
#--------------------------------------------------------------------------
imdb = {}
# number of entries in the database
num_dialogs = len(source)
imdb['data'] = [None] * num_dialogs
imdb['ans_inds'] = ans_list
imdb['ques'], imdb['ques_len'] = ques_tokens, ques_lens
#--------------------------------------------------------------------------
for dialog_id, datum in progressbar(enumerate(source)):
img_id = datum['img']
img_path = os.path.join(FLAGS.image_root, split, '%05d.jpg' % img_id)
# compact bundle with all the information
bundle = {'image_name': img_id, 'image_path': img_path,
'question_id': [], 'question_ind': [], 'answer_ind': [],
'gt_layout_tokens': []}
# bundle as questions in a conversation together
for r_id, round_data in enumerate(datum['qa']):
q_id = img_id * 10 + r_id
bundle['question_id'].append(q_id)
ques_ind = ques_dict[round_data['question']]
bundle['question_ind'].append(ques_ind)
answer = ans_dict.get(round_data['answer'], '<unk>')
bundle['answer_ind'].append(answer)
# sanity check
if answer == '<unk>':
print(answer)
# layout
layout = prog_ques_type[round_data['metaInfo'][0]]
# replace find with refer
if r_id > 0 and round_data['metaInfo'][0] in ['Qa', 'Qb']:
layout = layout.replace('_Find', '_Refer _Find _And')
if r_id > 0 and round_data['metaInfo'][0] == 'Qc':
layout = layout.replace('_Find', '_Refer')
"""Layout modifications for NMN version (baseline)
if round_data['metaInfo'][0] == 'Qd':
layout = layout.replace('Refer', 'Find')
if round_data['metaInfo'][0] == 'Qe':
layout = '_Find _Exist'
"""
# layout for independent questions
bundle['gt_layout_tokens'].append(layout)
# record
imdb['data'][dialog_id] = bundle
return imdb
def save_vocabularies(train_examples, FLAGS):
"""Extract and save vocabularies for questions and answers.
Args:
train_examples: Training examples
Returns:
words: Vocabulary (dictionary) extracted from the questions
ans_list: List of possible answers, extracted from train set
"""
words = {}
ans_list = {}
for datum in progressbar(train_examples):
for ques_datum in datum['qa']:
token = ques_datum['answer'].lower()
words[token] = words.get(token, 0) + 1
ans_list[token] = 1
for token in word_tokenize(ques_datum['question']):
token = token.lower()
words[token] = words.get(token, 0) + 1
# additional tokens
words['<pad>'] = 1
words['<start>'] = 1
words['<end>'] = 1
words['<unk>'] = 1
print('Saving to: ' + FLAGS.vocab_save_path)
with open(FLAGS.vocab_save_path, 'w') as file_id:
file_id.write('\n'.join(sorted(words.keys())))
# answer lists
ans_list = list(ans_list.keys())
ans_list.append('<unk>')
print('Saving to: ' + FLAGS.answers_save_path)
with open(FLAGS.answers_save_path, 'w') as file_id:
file_id.write('\n'.join(ans_list))
def save_mean_std_image(FLAGS):
"""Compute and save mean and std image from train images.
Args:
FLAGS: Commandline arguments
"""
import pdb
image_list = os.listdir(os.path.join(FLAGS.image_root, 'train'))
# compute the mean of the train images and save
mean_img = None
std_img = None
for image_name in progressbar(image_list):
image_path = os.path.join(FLAGS.image_root, 'train', image_name)
image = support.load_image(image_path)
if mean_img is None:
mean_img = image
std_img = image ** 2
else:
mean_img += image
std_img += image ** 2
mean_img = mean_img / len(image_list)
std_img = std_img / len(image_list)
mean_img = np.mean(np.mean(mean_img, 0), 0)
std_img = np.mean(np.mean(std_img, 0), 0)
std_img = np.sqrt(std_img - mean_img ** 2)
print('Saving mean and std at: %s' % FLAGS.mean_save_path)
np.save(FLAGS.mean_save_path, {'mean_img': mean_img, 'std_img': std_img})
def main(FLAGS):
"""Main function.
1. Extracts vocabularies from questions and answers.
2. Creates and saves image dialog databases for train | valid | test splits.
Args:
FLAGS: Command-line options.
"""
# Read the dataset.
with open(FLAGS.json_path) as file_id:
data = json.load(file_id)
# Extract vocabulary and answer list.
save_vocabularies(data['trainExamples'], FLAGS)
# Extract mean and std of train images.
save_mean_std_image(FLAGS)
# Read the vocabulary files (questions | answers) and create objects
vocab = text_processing.VocabDict(FLAGS.vocab_save_path)
with open(FLAGS.answers_save_path, 'r') as file_id:
ans_list = [ii.strip('\n') for ii in file_id.readlines()]
# data splits
for split in ['train', 'valid', 'test']:
imdb_split = build_imdb(data, split, vocab, ans_list, FLAGS)
save_path = os.path.join(FLAGS.imdb_save_path, 'imdb_%s.npy' % split)
print('Saving imdb build: %s' % save_path)
np.save(save_path, np.array(imdb_split))
if __name__ == '__main__':
title = 'Process all the information into a database for easier access'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--json_path', required=True,
help='Path to MNIST Dialog dataset json file')
parser.add_argument('--image_root', required=True,
help='Path to root folder of all the images')
parser.add_argument('--vocab_save_path', required=True,
help='Path to save the vocabulary from training set')
parser.add_argument('--answers_save_path', required=True,
help='Path to save the answers file from training set')
parser.add_argument('--imdb_save_path', required=True,
help='Path to save the image dialog dataset')
parser.add_argument('--mean_save_path', required=True,
help='Path to save the mean and std of train images')
FLAGS = parser.parse_args()
main(FLAGS)
|
corefnmn-main
|
util/build_imdb_mnist.py
|
corefnmn-main
|
util/__init__.py
|
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
def conv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
# input has shape [batch, in_height, in_width, in_channels]
input_dim = bottom.get_shape().as_list()[-1]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# filter has shape [filter_height, filter_width, in_channels, out_channels]
weights = tf.get_variable("weights",
[kernel_size, kernel_size, input_dim, output_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
conv = tf.nn.conv2d(bottom, filter=weights,
strides=[1, stride, stride, 1], padding=padding)
if bias_term:
conv = tf.nn.bias_add(conv, biases)
return conv
def conv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
conv = conv_layer(name, bottom, kernel_size, stride, output_dim, padding,
bias_term, weights_initializer, biases_initializer, reuse=reuse)
relu = tf.nn.relu(conv)
return relu
def deconv_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
# input_shape is [batch, in_height, in_width, in_channels]
input_shape = bottom.get_shape().as_list()
batch_size, input_height, input_width, input_dim = input_shape
output_shape = [batch_size, input_height*stride, input_width*stride, output_dim]
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer_conv2d()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# filter has shape [filter_height, filter_width, out_channels, in_channels]
weights = tf.get_variable("weights",
[kernel_size, kernel_size, output_dim, input_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
deconv = tf.nn.conv2d_transpose(bottom, filter=weights,
output_shape=output_shape, strides=[1, stride, stride, 1],
padding=padding)
if bias_term:
deconv = tf.nn.bias_add(deconv, biases)
return deconv
def deconv_relu_layer(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None, biases_initializer=None, reuse=None):
deconv = deconv_layer(name, bottom, kernel_size, stride, output_dim, padding,
bias_term, weights_initializer, biases_initializer, reuse=reuse)
relu = tf.nn.relu(deconv)
return relu
def pooling_layer(name, bottom, kernel_size, stride):
pool = tf.nn.max_pool(bottom, ksize=[1, kernel_size, kernel_size, 1],
strides=[1, stride, stride, 1], padding='SAME', name=name)
return pool
def fc_layer(name, bottom, output_dim, bias_term=True, weights_initializer=None,
biases_initializer=None, reuse=None):
# flatten bottom input
# input has shape [batch, in_height, in_width, in_channels]
shape = bottom.get_shape().as_list()
input_dim = 1
for d in shape[1:]:
input_dim *= d
flat_bottom = tf.reshape(bottom, [-1, input_dim])
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
if weights_initializer is None:
weights_initializer = tf.contrib.layers.xavier_initializer()
if bias_term and biases_initializer is None:
biases_initializer = tf.constant_initializer(0.)
# weights has shape [input_dim, output_dim]
weights = tf.get_variable("weights", [input_dim, output_dim],
initializer=weights_initializer)
if bias_term:
biases = tf.get_variable("biases", output_dim,
initializer=biases_initializer)
if not reuse:
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
tf.nn.l2_loss(weights))
if bias_term:
fc = tf.nn.xw_plus_b(flat_bottom, weights, biases)
else:
fc = tf.matmul(flat_bottom, weights)
return fc
def fc_relu_layer(name, bottom, output_dim, bias_term=True,
weights_initializer=None, biases_initializer=None, reuse=None):
fc = fc_layer(name, bottom, output_dim, bias_term, weights_initializer,
biases_initializer, reuse=reuse)
relu = tf.nn.relu(fc)
return relu
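# Usage sketch (editor addition; shapes are illustrative): fc_layer flattens
# everything after the batch dimension, so a [N, 7, 7, 512] input passed to
# fc_layer('fc1', bottom, output_dim=1024) is reshaped to [N, 25088] and
# multiplied by a [25088, 1024] weight matrix (plus bias when bias_term=True).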
|
corefnmn-main
|
util/cnn.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to flatten the dataset for Stanford parser.
"""
import argparse
import json
import sys
from unidecode import unidecode
from tqdm import tqdm as progressbar
def clean_non_ascii(text):
"""Method to clean up and convert non-ascii to unicode.
"""
try:
text = text.decode('ascii')
except:
# Contains non-ascii symbols
# Check if it needs to be converted to unicode
try:
text = unicode(text, encoding = 'utf-8')
except:
pass
text = unidecode(text)
return text
def main(args):
# reading data
print('Reading from: ' + args.data_file)
with open(args.data_file, 'r') as file_id:
data = json.load(file_id)
# open a text file to write the questions
save_path = args.data_file.replace('.json', '_ques_flat.txt')
print('Saving to: ' + save_path)
with open(save_path, 'w') as file_id:
for ques in progressbar(data['data']['questions']):
file_id.write(clean_non_ascii(ques) + ' ?\n')
# open a text file to write the captions
save_path = args.data_file.replace('.json', '_cap_flat.txt')
print('Saving to: ' + save_path)
with open(save_path, 'w') as file_id:
captions = [ii['caption'] for ii in data['data']['dialogs']]
for cap in captions:
file_id.write(clean_non_ascii(cap) + ' .\n')
if __name__ == '__main__':
title = 'Flattening the dataset to a text file'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--data_file', required=True,
help='Data file path')
args = parser.parse_args()
main(args)
|
corefnmn-main
|
util/dataset_to_text.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to read the data files and emit sentences as a file.
"""
import argparse
import sys
def main(args):
print('Reading : ' + args.parser_file)
with open(args.parser_file, 'r') as file_id:
lines = [ii.strip('\n') for ii in file_id.readlines()]
# compress trees from multiple lines -> single line
trees = []
cur_tree = ''
for line in lines:
if line == '':
trees.append(cur_tree)
cur_tree = ''
else:
cur_tree += line
# write back to another file
save_path = args.parser_file.replace('.sps', '_compress.sps')
print('Saving to: ' + save_path)
with open(save_path, 'w') as file_id:
file_id.write('\n'.join(trees))
if __name__ == '__main__':
title = 'Restructure Stanford Parser to a single line'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--parser_file', required=True,
help='Stanford parser output file')
args = parser.parse_args()
main(args)
|
corefnmn-main
|
util/compress_parser_trees.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Final preprocessing script to create the image dialog database that
can be used to serve batches by the batch loader while training and evaluation.
"""
import argparse
from collections import defaultdict
import copy
import json
import os
import pdb
import sys
import numpy as np
from nltk.tokenize import word_tokenize
from tqdm import tqdm as progressbar
from util import text_processing, clean
stop_words = ['the', 'a', 'an', 'you', 'was', 'and', 'are']
def build_imdb(FLAGS):
"""Method to construct and save the image-database for the dataset
"""
print('Building imdb for visdial split: %s' % FLAGS.visdial_file)
qid2layout_dict = np.load(FLAGS.ques_prog_file)[()]
ques_att_file = FLAGS.ques_prog_file.replace('.layout', '.attention')
ques_prog_att = np.load(ques_att_file)[()]
cap_progs = np.load(FLAGS.cap_prog_file)[()]
cap_att_file = FLAGS.cap_prog_file.replace('.layout', '.attention')
cap_prog_att = np.load(cap_att_file)[()]
vocab = text_processing.VocabDict(FLAGS.vocab_file)
# load the data
with open(FLAGS.visdial_file, 'r') as file_id:
vd_data = json.load(file_id)
# load the reference data
with open(FLAGS.coreference_file, 'r') as file_id:
references = json.load(file_id)
references = references['data']['dialogs']
# coco_name = img_split + '2014'
# img_root = os.path.abspath(image_dir % coco_name)
# feat_root = os.path.abspath(feature_dir % coco_name)
# img_name_format = 'COCO_' + coco_name + '_%012d'
# process and tokenize all questions and answers
tokenizer = lambda x, suff: [vocab.word2idx(ii) for ii in
word_tokenize(clean.clean_non_ascii(x + suff))]
print('Tokenizing captions')
caption_list = [ii['caption'] for ii in vd_data['data']['dialogs']]
clean_cap = [tokenizer(cap, '') for cap in progressbar(caption_list)]
max_cap_len = max([len(ii) for ii in clean_cap])
cap_tokens = np.zeros((len(clean_cap), max_cap_len)).astype('int32')
cap_tokens.fill(vocab.word2idx('<pad>'))
cap_lens = np.zeros(len(clean_cap)).astype('int32')
for q_id, tokens in progressbar(enumerate(clean_cap)):
cap_lens[q_id] = len(tokens)
cap_tokens[q_id, :cap_lens[q_id]] = np.array(tokens)
print('Tokenizing questions')
question_list = vd_data['data']['questions']
clean_ques = [tokenizer(ques, '?') for ques in progressbar(question_list)]
max_ques_len = max([len(ii) for ii in clean_ques])
ques_tokens = np.zeros((len(clean_ques), max_ques_len)).astype('int32')
ques_tokens.fill(vocab.word2idx('<pad>'))
ques_lens = np.zeros(len(clean_ques)).astype('int32')
for q_id, tokens in progressbar(enumerate(clean_ques)):
ques_lens[q_id] = len(tokens)
ques_tokens[q_id, :ques_lens[q_id]] = np.array(tokens)
print('Tokenizing answers')
answer_list = vd_data['data']['answers']
clean_ans = [tokenizer(ans, '') for ans in progressbar(answer_list)]
max_ans_len = max([len(ii) for ii in clean_ans])
ans_tokens = np.zeros((len(clean_ans), max_ans_len)).astype('int32')
ans_tokens.fill(vocab.word2idx('<pad>'))
ans_lens = np.zeros(len(clean_ans)).astype('int32')
ans_in = np.zeros((len(clean_ans), max_ans_len + 1)).astype('int32')
ans_out = np.zeros((len(clean_ans), max_ans_len + 1)).astype('int32')
ans_in.fill(vocab.word2idx('<pad>'))
ans_out.fill(vocab.word2idx('<pad>'))
start_token_id = vocab.word2idx('<start>')
end_token_id = vocab.word2idx('<end>')
ans_in[:, 0] = start_token_id
for a_id, tokens in progressbar(enumerate(clean_ans)):
ans_lens[a_id] = len(tokens)
answer = np.array(tokens)
ans_tokens[a_id, :ans_lens[a_id]] = answer
ans_in[a_id, 1:ans_lens[a_id]+1] = answer
ans_out[a_id, :ans_lens[a_id]] = answer
ans_out[a_id, ans_lens[a_id]] = end_token_id
ans_lens += 1
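  # Illustrative sketch (hypothetical answer "a red bus", shown as words instead
  # of token ids): the decoder input/output pair becomes
  #   ans_in  = [<start>, a, red, bus, <pad>, ...]
  #   ans_out = [a, red, bus, <end>, <pad>, ...]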
imdb = {}
# number of entries in the database
num_dialogs = len(vd_data['data']['dialogs'])
imdb['data'] = [None] * num_dialogs
imdb['ans'], imdb['ans_len'] = ans_tokens, ans_lens
imdb['ans_in'], imdb['ans_out'] = ans_in, ans_out
imdb['ques'], imdb['ques_len'] = ques_tokens, ques_lens
imdb['cap'], imdb['cap_len'] = cap_tokens, cap_lens
imdb['cap_prog'], imdb['cap_prog_att'] = cap_progs, np.array(cap_prog_att)
for dialog_id, datum in progressbar(enumerate(vd_data['data']['dialogs'])):
img_id = datum['image_id']
img_path = FLAGS.image_path_format % img_id
feat_path = FLAGS.feature_path % img_id
# compact bundle with all the information
bundle = {'image_name': img_id, 'image_path': img_path,
'feature_path': feat_path, 'caption_ind': dialog_id,
'question_id': [], 'question_ind': [], 'answer_ind': [],
'option_ind': [], 'gt_ind' : [], 'gt_layout_tokens': [],
'gt_layout_att': []}
# reference datum
refer_datum = references[dialog_id]
assert(refer_datum['image_id'] == img_id)
# for each cluster, get the first mention
clusters = {}
caption_clusters = (refer_datum['caption_reference_clusters'] +
refer_datum['caption_coref_clusters'])
for ii in caption_clusters:
c_id = ii['cluster_id']
clusters[c_id] = clusters.get(c_id, 'c')
# each round
for r_id in range(10): # assuming 10 rounds for now
referrer = refer_datum['dialog'][r_id]
for ii in referrer['question_reference_clusters']:
c_id = ii['cluster_id']
clusters[c_id] = clusters.get(c_id, 'q%d' % r_id)
for ii in referrer['answer_reference_clusters']:
c_id = ii['cluster_id']
# to distinguish answer
clusters[c_id] = clusters.get(c_id, 'a%d' % r_id)
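    # Illustrative sketch (hypothetical cluster ids): after these loops,
    #   clusters = {3: 'c', 7: 'q2', 11: 'a5'}
    # would mean cluster 3 is first mentioned in the caption, cluster 7 in the
    # question of round 2, and cluster 11 in the answer of round 5.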
# bundle as questions in a conversation together
num_refers = 0
for r_id, round_data in enumerate(datum['dialog']):
q_id = img_id * 10 + r_id
bundle['question_id'].append(q_id)
bundle['question_ind'].append(round_data['question'])
bundle['answer_ind'].append(round_data['answer'])
bundle['option_ind'].append(round_data['answer_options'])
bundle['gt_ind'].append(round_data['gt_index'])
# gt attention for parsed layout
attention = np.array(ques_prog_att[round_data['question']])
# check if references is non-empty and replace with _Refer
layout = copy.deepcopy(list(qid2layout_dict[q_id]))
referrer = refer_datum['dialog'][r_id]['question_referrer_clusters']
if len(referrer) > 0:
refer = referrer[0]
# pick _Find module with max attention overlap
max_overlap = (0, 0)
for pos, token in enumerate(layout):
if token == '_Find':
start = max(attention[pos][0], refer['start_word'])
end = min(attention[pos][1], refer['end_word'])
            overlap = max(0, end - start)  # overlap length, clamped at zero
if max_overlap[1] < overlap: max_overlap = (pos, overlap)
# reset it to _Refer
pos, _ = max_overlap
layout[pos] = '_Refer'
attention[pos] = [refer['start_word'], refer['end_word']]
# get that cluster id, and corresponding history attention
num_refers += 1
bundle['gt_layout_tokens'].append(layout)
# check for the words attending to
ques_tokens = imdb['ques'][round_data['question']]
ques_words = [vocab.idx2word(ii) for ii in ques_tokens]
for index, pos in enumerate(attention):
# if single word, 'the', 'a', 'of', 'you'
try:
if (pos[1] - pos[0]) == 1 and ques_words[pos[0]] in stop_words:
attention[index] = [0, 0]
except: pdb.set_trace()
bundle['gt_layout_att'].append(attention)
# record
imdb['data'][dialog_id] = bundle
return imdb
if __name__ == '__main__':
title = 'Process all the information into a database for easier access'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--ques_prog_file', required=True,
help='Path to question ground truth programs')
parser.add_argument('--cap_prog_file', required=True,
help='Path to caption ground truth programs')
parser.add_argument('--image_path_format', required=True,
help='Path to find the image given the COCO id')
parser.add_argument('--feature_path', required=True,
help='Path to find the features given the COCO id')
parser.add_argument('--coreference_file', required=True,
help='Visdial file infused with coreference supervision')
parser.add_argument('--visdial_file', required=True,
help='Original visdial file')
parser.add_argument('--vocab_file', required=True,
help='Visual Dialog vocabulary file')
parser.add_argument('--save_path', required=True,
help='Path to save the image dialog dataset')
FLAGS = parser.parse_args()
imdb_data = build_imdb(FLAGS)
print('Saving imdb build: %s' % FLAGS.save_path)
np.save(FLAGS.save_path, np.array(imdb_data))
|
corefnmn-main
|
util/build_imdb.py
|
#!/usr/bin/env python2
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Parse the stanford output into NMN programs.
Adapted from: https://github.com/ronghanghu/n2nmn
"""
from nltk.tree import Tree, ParentedTree
import sys
import re, pdb
from tqdm import tqdm as progressbar
KEEP = [
("WHNP", "WH"),
("WHADVP", "WH"),
(r"NP", "NP"),
("VP", "VP"),
("PP", "PP"),
("ADVP", "AP"),
("ADJP", "AP"),
("this", "null"),
("these", "null"),
("it", "null"),
("EX", "null"),
("PRP$", "null"),
]
KEEP = [(re.compile(k), v) for k, v in KEEP]
def flatten(tree):
if not isinstance(tree, list):
return [tree]
return sum([flatten(s) for s in tree], [])
def collect_span(term):
parts = flatten(term)
lo = 1000
hi = -1000
for part in parts:
assert isinstance(part, tuple) and len(part) == 2
lo = min(lo, part[1][0])
hi = max(hi, part[1][1])
assert lo < 1000
assert hi > -1000
return (lo, hi)
def finalize(col, top=True):
dcol = despan(col)
is_wh = isinstance(dcol, list) and len(dcol) > 1 and flatten(dcol[0])[0] == "WH"
out = []
if not top:
rest = col
elif is_wh:
whspan = flatten(col[0])[0][1]
#out.append("describe")
out.append("describe[%s,%s]" % (whspan))
rest = col[1:]
else:
out.append("is")
rest = col
if len(rest) == 0:
return out
elif len(rest) == 1:
body = out
else:
body = ["and"]
out.append(body)
for term in rest:
if term[0][0] == "PP":
span_below = collect_span(term[1:])
span_full = term[0][1]
span_here = (span_full[0], span_below[0])
#body.append(["relate"])
body.append(["relate[%s,%s]" % span_here, finalize(term[1:], top=False)])
elif isinstance(term, tuple) and isinstance(term[0], str):
#body.append("find")
body.append("find[%s,%s]" % term[1])
else:
# TODO more structure here
#body.append("find")
body.append("find[%s,%s]" % collect_span(term))
if len(body) > 3:
del body[3:]
if isinstance(out, list) and len(out) == 1:
out = out[0]
return out
def strip(tree):
if not isinstance(tree, Tree):
label = tree
flat_children = []
span = ()
else:
label = tree.label()
# children = [strip(child) for child in tree.subtrees().next()]
children = [strip(child) for child in next(tree.subtrees())]
flat_children = sum(children, [])
leaves = tree.leaves()
span = (int(leaves[0]), int(leaves[-1]) + 1)
proj_label = [v for m, v in KEEP if m.match(label)]
if len(proj_label) == 0:
return flat_children
else:
return [[(proj_label[0], span)] + flat_children]
def despan(rr):
out = []
for r in rr:
if isinstance(r, tuple) and len(r) == 2 and isinstance(r[1], tuple):
out.append(r[0])
elif isinstance(r, list):
out.append(despan(r))
else:
out.append(r)
return out
def collapse(tree):
if not isinstance(tree, list):
return tree
rr = [collapse(st) for st in tree]
rr = [r for r in rr if r != []]
drr = despan(rr)
if drr == ["NP", ["null"]]:
return []
if drr == ["null"]:
return []
if drr == ["PP"]:
return []
members = set(flatten(rr))
if len(members) == 1:
return list(members)
if len(drr) == 2 and drr[0] == "VP" and isinstance(drr[1], list):
if len(drr[1]) == 0:
return []
elif drr[1][0] == "VP" and len(drr[1]) == 2:
return [rr[1][0], rr[1][1]]
return rr
def pp(lol):
if isinstance(lol, str):
return lol
return "(%s)" % " ".join([pp(l) for l in lol])
with open(sys.argv[1]) as ptb_f:
for line in progressbar(ptb_f):
tree = ParentedTree.fromstring(line)
# record the list of substitutions
lookup = {};
index = 0
for st in tree.subtrees():
if len(list(st.subtrees())) == 1:
lookup[index] = st[0];
st[0] = str(index)
index += 1
colparse = collapse(strip(tree))
final = finalize(colparse)
print(pp(final))
#print(lookup)
#print('')
#pdb.set_trace();
#print pp(final)
#print " ".join(tree.leaves())
#print colparse
#print finalize(colparse)
#print
|
corefnmn-main
|
util/parse.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
from __future__ import absolute_import, division, print_function
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import conv_layer as conv
def empty_safe_1x1_conv(name, bottom, output_dim, reuse=None):
# TensorFlow Fold can generate zero-size batch for conv layer
# which will crash cuDNN on backward pass. So use this
# for 1x1 convolution in modules to avoid the crash.
bottom_shape = tf.shape(bottom)
N = bottom_shape[0]
# NOTE: these are now static shapes
H, W = bottom.shape.as_list()[1:3]
#H = bottom_shape[1]
#W = bottom_shape[2]
input_dim = bottom.get_shape().as_list()[-1]
bottom_flat = tf.reshape(bottom, [-1, input_dim])
# weights and biases variables
with tf.variable_scope(name, reuse=reuse):
# initialize the variables
weights_initializer = tf.contrib.layers.xavier_initializer()
biases_initializer = tf.constant_initializer(0.)
weights = tf.get_variable('weights', [input_dim, output_dim],
initializer=weights_initializer)
biases = tf.get_variable('biases', output_dim,
initializer=biases_initializer)
conv_flat = tf.nn.xw_plus_b(bottom_flat, weights, biases)
conv = tf.reshape(conv_flat, to_T([N, H, W, output_dim]))
return conv
# TensorFlow Fold can generate zero-size batch for conv layer
# which will crash cuDNN on backward pass. So use this
# for arbitrary convolution in modules to avoid the crash.
def empty_safe_conv(name, bottom, kernel_size, stride, output_dim, padding='SAME',
bias_term=True, weights_initializer=None,
biases_initializer=None, reuse=None):
g = tf.get_default_graph()
with g.gradient_override_map({'Conv2D': 'Conv2D_handle_empty_batch'}):
return conv(name, bottom, kernel_size, stride, output_dim,
padding, bias_term, weights_initializer,
biases_initializer, reuse=reuse)
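# Minimal usage sketch (assumed call site; same signature as util.cnn.conv_layer):
#   feat = empty_safe_conv('conv1', image_batch, kernel_size=3, stride=1,
#                          output_dim=64)
# It behaves like a regular conv layer, but its gradient is re-routed through the
# override registered below so a zero-size batch from TensorFlow Fold does not
# crash cuDNN.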
@tf.RegisterGradient('Conv2D_handle_empty_batch')
def _Conv2DGrad(op, grad):
with tf.device('/cpu:0'):
return [tf.nn.conv2d_backprop_input(
tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),
op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'),
op.get_attr('data_format')),
tf.nn.conv2d_backprop_filter(op.inputs[0],
tf.shape(op.inputs[1]), grad,
op.get_attr('strides'),
op.get_attr('padding'),
op.get_attr('use_cudnn_on_gpu'),
op.get_attr('data_format'))]
# @tf.RegisterGradient('Conv2D_handle_empty_batch')
# def _Conv2DGrad(op, grad):
# def _input_nonempty():
# return tf.nn.conv2d_backprop_input(
# tf.shape(op.inputs[0]), op.inputs[1], grad, op.get_attr('strides'),
# op.get_attr('padding'), op.get_attr('use_cudnn_on_gpu'),
# op.get_attr('data_format'))
# def _filter_nonempty():
# return tf.nn.conv2d_backprop_filter(op.inputs[0],
# tf.shape(op.inputs[1]), grad,
# op.get_attr('strides'),
# op.get_attr('padding'),
# op.get_attr('use_cudnn_on_gpu'),
# op.get_attr('data_format'))
# def _input_empty():
# return tf.zeros_like(op.inputs[0])
# def _filter_empty():
# return tf.zeros_like(op.inputs[1])
# is_nonempty = tf.greater(tf.size(op.inputs[0]), 0)
# return [tf.cond(is_nonempty, _input_nonempty, _input_empty),
# tf.cond(is_nonempty, _filter_nonempty, _filter_empty)]
|
corefnmn-main
|
util/empty_safe_conv.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to read command line flags.
Uses argparse library to read command line flags.
Author: Satwik Kottur
"""
import argparse
import os
import pdb
from util import support
# read command line arguments
def read_command_line():
title = 'Train explicit coreference resolution visual dialog model'
parser = argparse.ArgumentParser(description=title)
#-------------------------------------------------------------------------
# data input settings
parser.add_argument('--dataset', default='mnist', help='Visdial dataset type')
parser.add_argument('--input_img', default='data/resnet_res5c/',\
help='Path with image features')
parser.add_argument('--data_root', default='data/',\
help='HDF5 file with preprocessed questions')
parser.add_argument('--text_vocab_path', default='',
help='Path to the vocabulary for text')
parser.add_argument('--prog_vocab_path', default='',
help='Path to the vocabulary for programs')
parser.add_argument('--snapshot_path', default='checkpoints/',
help='Path to save checkpoints')
#--------------------------------------------------------------------------
# specify encoder/decoder
parser.add_argument('--model', default='nmn', help='Name of the model')
parser.add_argument('--generator', default='ques',
help='Name of the generator to use (ques | memory)')
parser.add_argument('--img_norm', default=1, type=int,
help='Normalize the image feature. 1=yes, 0=no')
#-------------------------------------------------------------------------
# model hyperparameters
parser.add_argument('--h_feat', default=7, type=int,
help='Height of visual conv feature')
parser.add_argument('--w_feat', default=7, type=int,
help='Width of visual conv feature')
parser.add_argument('--d_feat', default=64, type=int,
help='Size of visual conv feature')
parser.add_argument('--text_embed_size', default=32, type=int,
help='Size of embedding for text')
parser.add_argument('--map_size', default=128, type=int,
help='Size of the final mapping')
parser.add_argument('--prog_embed_size', default=32, type=int,
help='Size of embedding for program tokens')
parser.add_argument('--lstm_size', default=64, type=int,
help='Size of hidden state in LSTM')
parser.add_argument('--enc_dropout', default=True, type=bool,
help='Dropout in encoder')
parser.add_argument('--dec_dropout', default=True, type=bool,
help='Dropout in decoder')
parser.add_argument('--num_layers', default=1, type=int,
help='Number of layers in LSTM')
parser.add_argument('--max_enc_len', default=14, type=int,
help='Maximum encoding length for sentences (ques|cap)')
parser.add_argument('--max_dec_len', default=8, type=int,
help='Maximum decoding length for programs (ques|cap)')
parser.add_argument('--dec_sampling', default=False, type=bool,
help='Sample while decoding program')
parser.add_argument('--use_refer', dest='use_refer',
action='store_true', help='Flag for Refer Module')
parser.set_defaults(use_refer=False)
parser.add_argument('--remove_aux_find', dest='remove_aux_find',
action='store_true',
help='Flag to remove auxilliary find modules')
parser.set_defaults(remove_aux_find=False)
parser.add_argument('--use_fact', dest='use_fact',
action='store_true', help='Flag to use Q+A as fact')
parser.set_defaults(use_fact=False)
parser.add_argument('--amalgam_text_feats', dest='amalgam_text_feats',
action='store_true',
help='Flag to amalgamate text features')
parser.set_defaults(amalgam_text_feats=False)
#-------------------------------------------------------------------------
# optimization params
parser.add_argument('--batch_size', default=30, type=int,
help='Training batch size (adjust based on GPU memory)')
parser.add_argument('--learning_rate', default=1e-3, type=float,
help='Learning rate for training')
parser.add_argument('--dropout', default=0.5, type=float, help='Dropout')
parser.add_argument('--num_epochs', default=200, type=int,
help='Maximum number of epochs to run training')
parser.add_argument('--gpu_id', type=int, default=0,
help='GPU id to use for training, -1 for CPU')
#-------------------------------------------------------------------------
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# set the cuda environment variable for the gpu to use
gpu_id = '' if parsed_args['gpu_id'] < 0 else str(parsed_args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# pretty print arguments and return
support.pretty_print_dict(parsed_args)
return parsed_args
|
corefnmn-main
|
exp_mnist/options.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
# script to visualize intermediate outputs from a trained checkpoint
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import pdb, sys, argparse, os, json
from time import gmtime, strftime
from tqdm import tqdm as progressbar
from exp_mnist import options
from util import support
# read command line options
parser = argparse.ArgumentParser();
parser.add_argument('-checkpoint', required=True, \
help='Checkpoint to load the models');
parser.add_argument('-batchSize', type=int, default=10, \
help='Batch size for evaluation / visualization');
parser.add_argument('-testSplit', default='valid', \
help='Which split to run evaluation on');
parser.add_argument('-gpuID', type=int, default=0)
try: args = vars(parser.parse_args());
except (IOError) as msg: parser.error(str(msg));
# set the cuda environment variable for the gpu to use
if args['gpuID'] >= 0:
os.environ['CUDA_VISIBLE_DEVICES'] = str(args['gpuID']);
print(os.environ['CUDA_VISIBLE_DEVICES'])
else: os.environ['CUDA_VISIBLE_DEVICES'] = '';
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
sess = tf.Session(config=tf.ConfigProto(
gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False, log_device_placement=False))
from models_mnist.assembler import Assembler
from models_mnist.model import NMN3Model
from util.mnist_train.data_reader import DataReader
from util.metrics import computeMetrics, ExpSmoothing
# setting random seeds
np.random.seed(1234);
tf.set_random_seed(1234);
# read the train args from checkpoint
paramPath = args['checkpoint'].replace('.tmodel', '_params.json');
with open(paramPath, 'r') as fileId: savedArgs = json.load(fileId);
savedArgs.update(args);
args = savedArgs;
args['preloadFeats'] = False;
args['superviseAttention'] = False;
args['useFact'] = args.get('useFact', False);
print('Current model: ' + args['model'])
# Data files
imdbPathVal = os.path.join(args['dataRoot'],'imdb/imdb_%s.npy'%args['testSplit']);
imdbPathVal = imdbPathVal.replace('.npy', '_%s.npy' % args['dataLabel']);
# assembler
assembler = Assembler(args['progVocabPath']);
# dataloader for val
inputDict = {'path':imdbPathVal, 'shuffle':False, 'onePass':True, 'args':args,\
'assembler': assembler, 'useCount': False, 'fetchOptions': True};
valLoader = DataReader(inputDict);
# The model for training
evalParams = args.copy();
evalParams['useGTProg'] = False; # for training
evalParams['encDropout'] = False;
evalParams['decDropout'] = False;
evalParams['decSampling'] = False; # do not sample, take argmax
# for models trained later
if 'numRounds' not in evalParams:
evalParams['numRounds'] = valLoader.batchLoader.numRounds;
# model for evaluation
# create another assembler of caption
assemblers = {'ques': assembler, 'cap': Assembler(args['progVocabPath'])};
model = NMN3Model(evalParams, assemblers);
# Load snapshot
print('Loading checkpoint from: %s' % args['checkpoint'])
snapshot_saver = tf.train.Saver(max_to_keep=None); # keep all snapshots
snapshot_saver.restore(sess, args['checkpoint']);
print('Evaluating on %s' % args['testSplit'])
ansMatches = []; progMatches = [];
totalIter = int(valLoader.batchLoader.numInst / args['batchSize']);
maxIters = 100; curIter = 0;
toSave = {'output': [], 'batch': []};
for batch in progressbar(valLoader.batches(), total=totalIter):
_, outputs = model.runVisualizeIteration(batch, sess);
toSave['output'].append(outputs);
toSave['batch'].append(batch);
# debug -- also compute the ranks during visualization
#ranks.append(batchRanks);
curIter += 1;
if curIter >= maxIters: break;
# save the output + batch
batchPath = args['checkpoint'] + '.100_batches.npy';
print('Printing the batches: ' + batchPath)
support.saveBatch(toSave, batchPath);
# debug evaluate
#metrics = computeMetrics(np.hstack(ranks));
|
corefnmn-main
|
exp_mnist/visualize_sl.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to evaluate the MNIST Dialog model trained using supervised learning.
Evaluates the visual dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
Usage:
python -u exp_mnist/eval_sl.py --gpu_id=0 --test_split='valid' \
--checkpoint='checkpoints/model_epoch_005.tmodel'
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_mnist import options
# read command line options
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', required=True)
parser.add_argument('--test_split', default='valid', \
help='Which split to run evaluation on')
parser.add_argument('--gpu_id', type=int, default=0)
try:
args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# set the cuda environment variable for the gpu to use
gpu_id = '' if args['gpu_id'] < 0 else str(args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_mnist.assembler import Assembler
from models_mnist.model import CorefNMN
from loader_mnist.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# read the train args from checkpoint
param_path = args['checkpoint'].replace('.tmodel', '_params.json')
with open(param_path, 'r') as file_id:
saved_args = json.load(file_id)
saved_args.update(args)
args = saved_args
support.pretty_print_dict(args)
# Data files
root = args['data_root']
imdb_path_val = os.path.join(root, 'imdb_%s.npy' % args['test_split'])
# assembler
question_assembler = Assembler(args['prog_vocab_path'])
copy_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'copy': copy_assembler}
# dataloader for val
input_dict = {'path': imdb_path_val, 'shuffle': False, 'one_pass': True,
'args': args, 'assembler': question_assembler}
val_loader = DataReader(input_dict)
# model for training
eval_params = args.copy()
eval_params['use_gt_prog'] = False # for training
eval_params['enc_dropout'] = False
eval_params['dec_dropout'] = False
eval_params['dec_sampling'] = False # do not sample, take argmax
# model for evaluation
model = CorefNMN(eval_params, assemblers)
# Load snapshot
print('Loading checkpoint from: %s' % args['checkpoint'])
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
snapshot_saver.restore(sess, args['checkpoint'])
print('Evaluating on %s' % args['test_split'])
ans_matches = []
prog_matches = []
total_iter = int(val_loader.batch_loader.num_inst / args['batch_size'])
num_iters = 0
for batch in progressbar(val_loader.batches(), total=total_iter):
batch_matches, outputs = model.run_evaluate_iteration(batch, sess)
# batch['ans_ind'] = np.argmax(outputs['ans_logits'], 1)
# np.save('batch_model.npy', batch)
# sys.exit(1)
ans_matches.append(batch_matches)
if 'matches' in outputs:
prog_matches.append(outputs['matches'])
if len(prog_matches) > 0:
prog_matches = np.concatenate(prog_matches)
percent = 100 * np.sum(prog_matches) / prog_matches.size
print('Program accuracy: %f percent\n' % percent)
ans_matches = np.concatenate(ans_matches)
percent = 100 * np.sum(ans_matches) / ans_matches.size
print('Answer accuracy: %f percent\n' % percent)
|
corefnmn-main
|
exp_mnist/eval_sl.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to train MNIST Dialog model using supervised learning.
Trains mnist dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_mnist import options
# read command line options
args = options.read_command_line()
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_mnist.assembler import Assembler
from models_mnist.model import CorefNMN
from loader_mnist.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# Data files
args['data_root'] = os.path.join(args['data_root'], args['dataset'])
args['text_vocab_path'] = os.path.join(args['data_root'], 'vocabulary_mnist.txt')
root = args['data_root']
args['prog_vocab_path'] = os.path.join(root, 'vocabulary_layout_mnist.txt')
args['answer_list_path'] = os.path.join(root, 'answers_mnist.txt')
imdb_path_train = os.path.join(root, 'imdb_train.npy')
# assemblers for question and caption programs
question_assembler = Assembler(args['prog_vocab_path'])
copy_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'copy': copy_assembler}
# Dataloader for train
input_dict = {'path': imdb_path_train, 'shuffle': True, 'one_pass': False,
'assembler': question_assembler, 'use_count': False,
'args': args}
train_loader = DataReader(input_dict)
# model params for training
train_params = args.copy()
# use the ground truth program for training
train_params['use_gt_prog'] = True
train_params['text_vocab_size'] = train_loader.batch_loader.vocab_dict.num_vocab
train_params['prog_vocab_size'] = len(question_assembler.module_names)
train_params['pad_id'] = train_loader.batch_loader.vocab_dict.word2idx('<pad>')
train_params['num_rounds'] = train_loader.batch_loader.num_rounds
train_params['num_choices'] = train_loader.num_choices
print('Using a vocab size: %d' % train_params['text_vocab_size'])
# model for training
model = CorefNMN(train_params, assemblers)
model.setup_training()
# train with Adam, optimization ops
solver = tf.train.AdamOptimizer(learning_rate=train_params['learning_rate'])
gradients = solver.compute_gradients(model.get_total_loss())
# clip gradients based on value
gradients = [(tf.clip_by_value(g, -2.0, 2.0), v) if g is not None else (g, v)
for g, v in gradients]
solver_op = solver.apply_gradients(gradients)
# Training operation
# Partial-run can't fetch training operations
# some workaround to make partial-run work
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies([solver_op]):
  model.set_train_step(tf.constant(0))
with tf.control_dependencies(update_ops):
  model.set_train_step(tf.constant(0))
# add it to the output
# model.add_solver_op(solver_op)
# adjust snapshot to have a time stamp folder
cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())
args['snapshot_path'] = os.path.join(args['snapshot_path'], cur_time)
os.makedirs(args['snapshot_path'], exist_ok=True)
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
print('Saving checkpoints at: %s' % args['snapshot_path'])
# initialize all variables
sess.run(tf.global_variables_initializer())
# forget about embed and module scopes
del train_params['embed_scope']
if 'module_scope' in train_params:
del train_params['module_scope']
#-------------------------------------------------------------------------
print('Running training iteration..')
num_iter_per_epoch = int(train_loader.batch_loader.num_inst/args['batch_size'])
print('Number of iterations per epoch: %d' % num_iter_per_epoch)
# exponential smoothing for loss
smoother = metrics.ExponentialSmoothing()
for n_iter, batch in enumerate(train_loader.batches()):
# add epoch and iteration
epoch = float(n_iter) / num_iter_per_epoch
batch['epoch'] = epoch
batch['n_iter'] = n_iter
if n_iter >= args['num_epochs'] * num_iter_per_epoch:
break
# perform training iteration
losses, _ = model.run_train_iteration(batch, sess)
losses = smoother.report(losses)
# printing log
if n_iter % 10 == 0:
cur_time = time.strftime('%a %d%b%y %X', time.gmtime())
print_format = ('[%s][It: %d][Ep: %.2f][Loss: %.3f Prog: %.3f Ans: %.3f]')
print_info = (cur_time, n_iter, epoch, losses['total'], losses['prog'],
losses['ans'])
print(print_format % print_info)
# save snapshot after every epoch
if n_iter % num_iter_per_epoch == 0:
epoch = float(n_iter) / num_iter_per_epoch
# Save snapshot at every epoch
file_name = 'model_epoch_%03d.tmodel' % epoch
snapshot_path = os.path.join(args['snapshot_path'], file_name)
snapshot_saver.save(sess, snapshot_path, write_meta_graph=False)
# also save the arguments
params_path = snapshot_path.replace('.tmodel', '_params.json')
with open(params_path, 'w') as file_id:
json.dump(train_params, file_id)
print('Snapshot saved to: ' + snapshot_path)
print('Launching evaluation job')
log_path = snapshot_path.replace('.tmodel', '_eval.log')
support.launch_evaluation_job(log_path, snapshot_path)
|
corefnmn-main
|
exp_mnist/train_sl.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Dataloader file for Visual Dialog experiments.
Explicit visual coreference resolution in visual dialog using neural module
networks.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import h5py
import json
import os
import threading
import queue
import numpy as np
from tqdm import tqdm as progressbar
from util import text_processing, support
class BatchLoaderMNIST:
"""Subclass to DataReader that serves batches during training.
"""
def __init__(self, imdb, params):
"""Initialize by reading the data and pre-processing it.
"""
self.imdb = imdb
self.params = params
self.num_inst = len(self.imdb['data'])
self.num_rounds = len(self.imdb['data'][0]['question_ind'])
# load vocabulary
vocab_path = params['text_vocab_path']
self.vocab_dict = text_processing.VocabDict(vocab_path)
self.T_encoder = params['max_enc_len']
# record special token ids
self.start_token_id = self.vocab_dict.word2idx('<start>')
self.end_token_id = self.vocab_dict.word2idx('<end>')
self.pad_token_id = self.vocab_dict.word2idx('<pad>')
# Load answers
with open(params['args']['answer_list_path'], 'r') as file_id:
choices = [ii.strip('\n') for ii in file_id.readlines()]
self.num_choices = len(choices)
self.choices2ind = {ii: index for index, ii in enumerate(choices)}
self.ind2choices = {index: ii for index, ii in enumerate(choices)}
# peek one example to see whether answer and gt_layout are in the data
test_data = self.imdb['data'][0]
self.load_gt_layout = test_data.get('gt_layout_tokens', False)
if 'load_gt_layout' in params:
self.load_gt_layout = params['load_gt_layout']
if self.load_gt_layout:
self.T_decoder = params['max_dec_len']
self.assembler = params['assembler']
# load the mean of the images
load_path = params['path'].split('/')[:-1] + ['train_image_mean.npy']
load_path = '/'.join(load_path)
print('Loading training image stats from: ' + load_path)
img_stats = np.load(load_path)[()]
mean_img = img_stats['mean_img'].reshape([1, 1, -1])
std_img = img_stats['std_img'].reshape([1, 1, -1])
# read all the images
images = {}
print('Reading images..')
    for datum in progressbar(self.imdb['data']):
img_path = datum['image_path']
if img_path not in images:
cur_img = support.load_image(img_path)
cur_img = (cur_img - mean_img) / std_img
images[img_path] = cur_img
self.images = images
# get the shape from random image
for _, sample in self.images.items():
self.img_size = sample.shape
break
# convert to tokens
self.digitizer = lambda x: [self.vocab_dict.word2idx(w) for w in x]
# use history if needed by the program generator
self.use_history = self.params['generator'] == 'mem'
if self.use_history:
self._construct_history()
# if fact is to be used
if self.params['use_fact']:
self._construct_fact()
#--------------------------------------------------------------------------
def _construct_fact(self):
"""Method to construct facts.
Facts are previous question and answers strings concatenated as one. These
serve as memory units that the model can refer back to.
For example, 'Q: What is the man wearing? A: Sweater.' will have a fact
'What is the man wearing? Sweater.' so that the model can address follow-up
questions like 'What color is it?' by referring to this fact.
"""
print('Constructing facts..')
num_diags = len(self.imdb['data'])
max_len = self.T_encoder + 1 # question + answer appended
num_rounds = len(self.imdb['data'][0]['question_ind'])
fact = np.zeros((num_diags, num_rounds, max_len))
fact_len = np.zeros((num_diags, num_rounds))
fact.fill(self.pad_token_id)
for diag_id, datum in enumerate(self.imdb['data']):
for r_id in range(num_rounds - 1):
q_id = datum['question_ind'][r_id]
a_id = datum['answer_ind'][r_id]
ques, q_len = self.imdb['ques'][q_id], self.imdb['ques_len'][q_id]
ans = self.vocab_dict.word2idx(self.ind2choices[a_id])
# handle overflow
bound = min(q_len, max_len)
fact[diag_id, r_id, :bound] = ques[:bound]
if bound < max_len:
fact[diag_id, r_id, bound] = ans
fact_len[diag_id, r_id] = bound + 1
# flatten
self.imdb['fact'] = fact
self.imdb['fact_len'] = fact_len
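    # Shape sketch (illustrative): with N dialogs, 10 rounds and max_enc_len=14,
    #   self.imdb['fact']     has shape (N, 10, 15)
    #   self.imdb['fact_len'] has shape (N, 10)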
#--------------------------------------------------------------------------
def _construct_history(self):
"""Method to construct history, which concatenates entire dialogs so far.
"""
print('Constructing history..')
num_diags = len(self.imdb['data'])
max_len = self.T_encoder + 1 # question + answer appended
num_rounds = len(self.imdb['data'][0]['question_ind'])
history = np.zeros((num_diags, num_rounds, max_len))
hist_len = np.zeros((num_diags, num_rounds))
history.fill(self.pad_token_id)
for diag_id, datum in enumerate(self.imdb['data']):
for r_id in range(num_rounds - 1):
q_id = datum['question_ind'][r_id]
a_id = datum['answer_ind'][r_id]
ques, q_len = self.imdb['ques'][q_id], self.imdb['ques_len'][q_id]
ans = self.vocab_dict.word2idx(self.ind2choices[a_id])
# handle overflow
bound = min(q_len, max_len)
history[diag_id, r_id, :bound] = ques[:bound]
if bound < max_len:
history[diag_id, r_id, bound] = ans
hist_len[diag_id, r_id] = bound + 1
self.imdb['hist'] = history
self.imdb['hist_len'] = hist_len
#--------------------------------------------------------------------------
def load_one_batch(self, sample_ids):
"""Load data given the sample ids.
"""
actual_batch_size = len(sample_ids)
batch = {}
eos_token = self.assembler.name2idx_dict['<eos>']
num_rounds = self.num_rounds
# questions
ques_inds = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['question_ind']]
ques_batch = self.imdb['ques'][ques_inds][:, :self.T_encoder].transpose()
ques_len = self.imdb['ques_len'][ques_inds]
ques_ids = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['question_id']]
# answers
ans_inds_batch = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['answer_ind']]
image_path = [None] * actual_batch_size
# load fact
if self.params['use_fact']:
fact = self.imdb['fact'][sample_ids]
fact_len = self.imdb['fact_len'][sample_ids]
# flatten
fact = np.reshape(fact, [-1, fact.shape[-1]])
fact_len = np.reshape(fact_len, [-1])
else:
fact, fact_len = None, None
# programs
if self.load_gt_layout:
gt_layout_batch = np.zeros((self.T_decoder,
num_rounds * actual_batch_size), np.int32)
gt_layout_batch.fill(eos_token)
    # if features are needed, load images; otherwise keep a placeholder
    image_feats = None
    if 'prog' in self.params['model']:
      image_feats = np.zeros((actual_batch_size,) + self.img_size, np.float32)
for n in range(len(sample_ids)):
iminfo = self.imdb['data'][sample_ids[n]]
image_path[n] = iminfo['image_path']
image_feats[n] = self.images[iminfo['image_path']]
# programs
if self.load_gt_layout:
# go over all the questions
for r_id, layout in enumerate(iminfo['gt_layout_tokens']):
split_layout = layout.split(' ')
gt_layout_batch[:, num_rounds * n + r_id] = \
self.assembler.module_list2tokens(split_layout,
self.T_decoder)
# if history is needed
if self.use_history:
history = self.imdb['hist'][sample_ids]
hist_len = self.imdb['hist_len'][sample_ids]
else:
history, hist_len = None, None
batch = {'ques': ques_batch, 'ques_len': ques_len,
'fact': fact, 'fact_len': fact_len,
'hist': history, 'hist_len': hist_len,
'ans_ind': ans_inds_batch,
'img_path': image_path, 'imgs': image_feats,
'ques_id': ques_ids, 'gt_layout': gt_layout_batch}
return batch
class DataReader:
"""Main dataloader class for experiments on Visual Dialog.
"""
def __init__(self, params):
imdb_path = params['path']
print('Loading imdb from: %s' % params['path'])
if imdb_path.endswith('.npy'): imdb = np.load(imdb_path)
else: raise TypeError('unknown imdb format.')
self.imdb = imdb[()]
self.shuffle = params.get('shuffle', True)
self.one_pass = params.get('one_pass', False)
self.prefetch_num = params.get('num_prefetch', 8)
self.params = params
copy_args = {'max_enc_len', 'max_dec_len', 'text_vocab_path', 'model',
'batch_size', 'use_fact', 'answer_list_path', 'generator'}
self.params.update({ii: params['args'][ii] for ii in copy_args
if ii in params['args'] and
params['args'][ii] is not None})
# MNIST data loader
self.batch_loader = BatchLoaderMNIST(self.imdb, self.params)
self.num_choices = self.batch_loader.num_choices
# Start prefetching thread
self.prefetch_queue = queue.Queue(maxsize=self.prefetch_num)
self.prefetch_thread = threading.Thread(target=_run_prefetch,
args=(self.prefetch_queue, self.batch_loader, self.imdb,
self.shuffle, self.one_pass, self.params))
self.prefetch_thread.daemon = True
self.prefetch_thread.start()
def batches(self):
while True:
# Get a batch from the prefetching queue
if self.prefetch_queue.empty(): pass
#print('data reader: waiting for data loading (IO is slow)...')
batch = self.prefetch_queue.get(block=True)
if batch is None:
assert(self.one_pass)
print('data reader: one pass finished')
        return  # end the generator; raising StopIteration errors under PEP 479
yield batch
def _run_prefetch(prefetch_queue, batch_loader, imdb, shuffle,
one_pass, params):
num_samples = len(imdb['data'])
batch_size = params['batch_size']
n_sample = 0
fetch_order = np.arange(num_samples)
while True:
# Shuffle the sample order for every epoch
if n_sample == 0 and shuffle:
fetch_order = np.random.permutation(num_samples)
# Load batch from file
# note that len(sample_ids) <= batch_size, not necessarily equal
sample_ids = fetch_order[n_sample:n_sample+batch_size]
batch = batch_loader.load_one_batch(sample_ids)
prefetch_queue.put(batch, block=True)
n_sample += len(sample_ids)
if n_sample >= num_samples:
# Put in a None batch to indicate a whole pass is over
if one_pass:
prefetch_queue.put(None, block=True)
n_sample = 0
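# Minimal usage sketch (assumed paths and args; mirrors how exp_mnist/train_sl.py
# builds the loader):
#   loader = DataReader({'path': 'data/mnist/imdb_train.npy', 'shuffle': True,
#                        'one_pass': False, 'args': args,
#                        'assembler': question_assembler})
#   for batch in loader.batches():
#     run_one_training_step(batch)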
|
corefnmn-main
|
loader_mnist/data_reader.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
"""
import pdb
import sys
import numpy as np
class HTML():
def __init__(self, cols, header_file='vis/jquery_header.html'):
self.template = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Strict//EN"+\
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd">' +\
'<html xmlns="http://www.w3.org/1999/xhtml"><head>'
self.template += '<style>'+\
'table#t01{width:100%; background-color:#fff}'\
+'table#t01 tr:nth-child(odd){background-color:#ddd;}'\
+'table#t01 tr:nth-child(even){background-color:#fff;}'\
+'table#t01 tr td tr:nth-child(odd){background-color:#ddd;}'\
+'table#t01 tr td tr:nth-child(even){background-color:#fff;}'\
+'table#t01 th{background-color:black;color:white}'+\
'</style>'
self.colors = ['maroon', 'red', 'purple', 'fuchsia',
'green', 'lime', 'olive', 'yellow',
'navy', 'blue', 'teal', 'aqua', 'orange']
with open(header_file, 'r') as file_id: self.template += file_id.read()
self.template += '</head><body><table id ="t01">'
self.end = '</table></body></html>'
self.content = ''
self.row_content = '<tr>'+'<td valign="top">%s</td>'*cols+'</tr>'
self.span_first_content = '<tr>'+'<td valign="top" rowspan="%s">%s</td>'+\
'<td valign="top">%s</td>' * (cols-1)+'</tr>'
self.span_other_content = '<tr>'+ '<td valign="top">%s</td>'*(cols-1)\
+'</tr>'
self.att_template = '<mark style="background-color:rgba(255,0,0,%f)"> %s </mark>|'
self.img_template = '<img src="%s" height="%d" width="%d"></img>'
# creating table
self.num_rows = None
self.num_cols = cols
# Add a new row
def add_spanning_row(self, mega_row, *entries):
# if first element is list, take it
if type(entries[0]) == list: entries = entries[0]
for index, ii in enumerate(entries):
if len(ii) != self.num_cols - 1:
print('Warning: Incompatible entries.\n_taking needed!')
        if len(ii) < self.num_cols - 1: # pad this entry with 'NULL'
          for jj in range(self.num_cols - 1 - len(ii)):
            entries[index].append('NULL')
num_rows = len(entries)
content = (num_rows, mega_row)+tuple(entries[0])
new_row = self.span_first_content % content
for ii in range(1, num_rows):
new_row += self.span_other_content % tuple(entries[ii])
# Add new_row to content
self.content += new_row
# Add a new row
def add_row(self, *entries):
# if first element is list, take it
if type(entries[0]) == list: entries = entries[0]
if len(entries) != self.num_cols:
print('Warning: Incompatible number of entries.\n_taking needed!')
if len(entries) < self.num_cols: # add 'null'
for ii in range(self.num_cols - len(entries)):
entries.append('NULL')
new_row = self.row_content % tuple(entries)
# Add new_row to content
self.content += new_row
# setting the title
def set_title(self, titles):
new_titles = []
for ii in titles: new_titles.append('<strong>%s</strong>' % ii)
self.add_row(new_titles)
# coloring text
def get_colored_text(self, text, group_id=None):
''' If group id is None, pick a random color '''
if group_id is None: color = self.colors[1]
else: color = self.colors[group_id % len(self.colors)]
return '<b><font color="%s">%s</font></b>' % (color, text)
# render and save page
def save_page(self, file_path):
# allow new page and tab space
self.content = self.content.replace('\n', '</br>')
self.content = self.content.replace('\t', ' '*10)
page_content = self.template + self.content + self.end
with open(file_path, 'w') as file_id: file_id.write(page_content)
print('Written page to: %s' % file_path)
# Return the string for an image
def link_image(self, img_path, caption=None, height=100):
# No caption provided
    if caption is None: return self.img_template % (img_path, height, height)
string = 'Caption: %s</br>' % caption
return string + (self.img_template % (img_path, height, height))
# add table with question encoding
def add_question_attention(self, question, program, att):
table = '<table class="heat-map" id="heat-map-3"><thead><tr><th></th>'
row = ''.join(['<th>%s</th>' % ii for ii in program])
row += '</tr></thead><tbody>'
table += row
for ii in range(len(question)):
table += '<tr class="stats-row"><td class="stats-title">%s</td>'\
% question[ii]
table += ''.join(['<td>%2d</td>' % att[ii, jj] \
for jj in range(len(program))])
table += '</tr>'
table += '</tbody></table>'
return table
# add history attention
def add_history_attention(self, att_wt, att_labels = None):
num_ques = att_wt.size
if att_labels is None:
titles = ['Cap']
titles.extend(['%02d' % ii for ii in range(1, num_ques)])
else: titles = att_labels
max_att = np.max(att_wt)
string = ''
for ii in range(0, num_ques):
if ii % 6 == 0: string += '\n'
string += self.att_template % (att_wt[ii]/max_att, titles[ii])
return string
|
corefnmn-main
|
vis/html.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Visualizing the dialog output from the model.
Explicit visual coreference resolution in visual dialog using neural module
networks.
Author: Satwik Kottur
"""
import argparse
import numpy as np
import sys
import h5py
import json
import os
from vis import html
from util import support
# PIL
from PIL import Image
import requests
from io import BytesIO
from util import support
from tqdm import tqdm as progressbar
from skimage import io, transform
def main(args):
titles = ['Image', 'Answers', 'Predictions', 'Modules', 'Attention']
# load the batch
data = np.load(args.batch_path)[()]
batch, outputs = data['batch'], data['output']
# load dictionary
with open(args.text_vocab_path, 'r') as file_id:
word2ind = {word.strip('\n'): ind
for ind, word in enumerate(file_id.readlines())}
ind2word = {ind: word for word, ind in word2ind.items()}
# get the program dictionary
with open(args.prog_vocab_path, 'r') as file_id:
word2ind_prog = {word.strip('\n'): ind
for ind, word in enumerate(file_id.readlines())}
ind2word_prog = {ind: word for word, ind in word2ind_prog.items()}
stringify = lambda vector: ' '.join([ind2word[w] for w in vector])
stringify_prog = lambda vector: ' '.join([ind2word_prog[w] for w in vector])
# Get html related info
page = html.HTML(len(titles))
page.set_title(titles)
template = 'Q%d: %s\nA [GT]: %s\nP [GT]: %s\nP: %s'
pred_template = 'GT Rank: %d\n_top-5: \n%s'
# saving intermediate outputs
end_prog_token = word2ind_prog['<eos>']
server_save = './attention/%d_%d_%d_%d.png'
local_save = os.path.join(args.image_save_root, 'attention/%d_%d_%d_%d.png')
# Create folders.
os.makedirs(args.image_save_root, exist_ok=True)
os.makedirs(os.path.join(args.image_save_root, 'attention'), exist_ok=True)
for ii in progressbar(range(args.num_examples)):
# Read image.
img_name = '/'.join(batch[ii]['img_path'][0].split('/')[-2:])
image = io.imread(os.path.join(args.image_load_root, img_name))
# Deal with black and white images.
if len(image.shape) < 3:
image = np.expand_dims(image, -1)
image = np.tile(image, [1, 1, 3])
    # Caption.
    cap_len = batch[ii]['cap_len'][0]
    cap_string = stringify(batch[ii]['cap'][0, :cap_len])
span_content = page.link_image('coco_images/' + img_name, cap_string, 400)
    # decide program length based on the first appearance of the <eos> token
if 'pred_tokens_cap' in outputs[ii]:
caption_prog = outputs[ii]['pred_tokens_cap']
prog_len = np.where(caption_prog[:, 0] == end_prog_token)[0][0]
cap_tokens = [ind2word[w] for w in batch[ii]['cap'][0, :cap_len]]
prog_tokens = [ind2word_prog[w] for w in caption_prog[:prog_len, 0]]
att = 100 * outputs[ii]['attention_cap'][:, :, 0, 0].transpose()
word_att_str = page.add_question_attention(cap_tokens, prog_tokens, att)
# caption module outputs
stack = outputs[ii]['intermediates'][0]
cap_stack = [datum for datum in stack if datum[0] == 'cap']
string = {'c_1':'', 'c_2':''}
for _, step, _, attention in cap_stack:
# reshape and renormalize
att = attention[:, :, 0]
att_image = support.get_blend_map(image, att)
att_image = Image.fromarray(np.uint8(att_image))
att_image = att_image.resize((200, 200))
att_image.save(local_save % (2, ii, 0, step), 'png')
# caption first row
string['c_1'] += page.link_image(server_save % (2, ii, 0, step))
att = attention[:, :, 0]
att_image = support.interpolate_attention(image, att)
#att_image = support.get_blend_map(image, att)
att_image = Image.fromarray(np.uint8(att_image))
att_image = att_image.resize((200, 200))
att_image.save(local_save % (3, ii, 0, step), 'png')
# caption second row
string['c_2'] += page.link_image(server_save % (3, ii, 0, step))
# add the neural module visualization for captions
span_content += '\n'.join(['', string['c_1'], string['c_2'], word_att_str])
ques_content = []
for jj in range(10):
row_content = []
# question
ques_len = batch[ii]['ques_len'][jj]
ques_string = stringify(batch[ii]['ques'][:ques_len, jj])
# answer
ans_len = batch[ii]['ans_len'][jj]
ans_in = stringify(batch[ii]['ans_in'][jj, :ans_len])
ans_out = stringify(batch[ii]['ans_out'][jj, :ans_len])
# program
gt_prog_str = stringify_prog(batch[ii]['gt_layout'][:, jj])
cur_prog = outputs[ii]['pred_tokens'][:, jj]
prog_pred = stringify_prog(outputs[ii]['pred_tokens'][:, jj])
print_slot = (jj, ques_string, ans_in, gt_prog_str, prog_pred)
row_content.append(template % print_slot)
# get predictions
sort_arg = np.argsort(outputs[ii]['scores'][jj])[::-1][:args.top_options]
gt_score = outputs[ii]['scores'][jj][batch[ii]['gt_ind'][jj]]
gt_rank = np.sum(outputs[ii]['scores'][jj] > gt_score) + 1
options = [stringify(batch[ii]['opt_in'][kk][jj]) for kk in sort_arg]
row_content.append(pred_template % (gt_rank, '\n'.join(options)))
# visualizing intermediate outputs for each question
stack = outputs[ii]['intermediates'][0]
ques_stack = [datum for datum in stack
if (datum[0] == 'ques') and (datum[2] == jj)]
string = {'q_1':'', 'q_2':''}
for _, step, _, attention in ques_stack:
# reshape and renormalize
att = attention[:, :, 0]
#att_image = support.interpolate_attention(image, att)
att_image = support.get_blend_map(image, att)
att_image = Image.fromarray(np.uint8(att_image))
att_image = att_image.resize((200, 200))
att_image.save(local_save % (0, ii, jj, step), 'png')
# string for first row
string['q_1'] += page.link_image(server_save % (0, ii, jj, step))
att = attention[:, :, 0]
att_image = support.interpolate_attention(image, att)
#att_image = support.get_blend_map(image, att)
att_image = Image.fromarray(np.uint8(att_image))
att_image = att_image.resize((200, 200))
att_image.save(local_save % (1, ii, jj, step), 'png')
# string for second row
string['q_2'] += page.link_image(server_save % (1, ii, jj, step))
# if refer module, add weights
if ind2word_prog[cur_prog[step]] == '_Refer':
wt_stack = outputs[ii]['intermediates'][1]
cur_wt = [datum for datum in wt_stack if datum[0] == jj]
            assert (len(cur_wt) == 1), 'Expected exactly one history-weight entry for this round'
wts = cur_wt[0][1]
wt_labels = cur_wt[0][2]
if len(wts) > 0:
hist_att_str = page.add_history_attention(wts, wt_labels)
string['q_1'] = hist_att_str + '\n' + string['q_1']
row_content.append('\n'.join(['', string['q_1'], string['q_2']]))
# decide program length based on the first appearance of the <eos> token
ques_prog = outputs[ii]['pred_tokens'][:, jj]
prog_len = np.where(ques_prog == end_prog_token)[0][0]
ques_tokens = [ind2word[w] for w in batch[ii]['ques'][:ques_len, jj]]
prog_tokens = [ind2word_prog[w] for w in ques_prog[:prog_len]]
att = 100 * outputs[ii]['attention'][:, :, jj, 0].transpose()
string = page.add_question_attention(ques_tokens, prog_tokens, att)
row_content.append(string)
ques_content.append(row_content)
# Add the span row
page.add_spanning_row(span_content, ques_content)
# render page and save
page.save_page(args.save_path)
if __name__ == '__main__':
# read command line arguments
title = 'Visualizing dialog by creating an HTML page.'
parser = argparse.ArgumentParser(description=title)
parser.add_argument('--batch_path', default='logs/sample_run_batches.npy',
help='Path to batches saved by visualize_sl.py')
parser.add_argument('--text_vocab_path', default='data/vocab_vd.txt',
help='Text vocabulary to decode sentence outputs')
parser.add_argument('--prog_vocab_path', default='data/vocab_layout.txt',
help='Program vocabulary to decode program outputs')
parser.add_argument('--save_path', default='vis/sample_run_examples.html',
help='Save the HTML file that visualizes examples')
parser.add_argument('--image_load_root', default='vis/coco_images/',
help='Path to the COCO images')
parser.add_argument('--image_save_root', default='vis/images/',
help='Path to the images to load in HTML')
parser.add_argument('--num_examples', default=50, type=int,
help='Number of examples to visualize')
parser.add_argument('--top_options', default=5, type=int,
help='Number of top ranked options to show')
args = parser.parse_args()
main(args)
|
corefnmn-main
|
vis/visualize_dialogs.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to decode and produce an answer.
Answer decoder for explicit visual coreference resolution model in visual
dialog using neural module networks, called CorefNMN.
Author: Satwik Kottur
"""
import numpy as np
import tensorflow as tf
from tensorflow.python.ops.nn import dropout
from tensorflow.contrib.layers import fully_connected as FC
from util import support
class AnswerDecoder:
def __init__(self, inputs, output_pool, params):
"""Initialize answer decoder.
Args:
inputs:
output_pool:
params:
"""
self.params = params
# keep track of inputs and outputs
used_inputs = []
outputs = {}
# alias for criterion
criterion = tf.nn.sparse_softmax_cross_entropy_with_logits
# decide the source based on train / evaluation
source = output_pool if params['train_mode'] else inputs
# a linear to number of choices
logits = FC(source['context'], params['num_choices'], activation_fn=None)
outputs['ans_logits'] = logits
# add program context vector, if not training
if not self.params['train_mode']:
used_inputs.append('context')
# softmax over the choices
answer_loss = criterion(logits=logits, labels=inputs['ans_ind'])
used_inputs.append('ans_ind')
outputs['ans_token_loss'] = tf.reduce_mean(answer_loss)
# setup the inputs and outputs
self.outputs = outputs
self.inputs = {ii: inputs[ii] for ii in used_inputs}
#----------------------------------------------------------------------------
# setters and getters
def get_outputs(self):
return self.outputs
def get_inputs(self):
return self.inputs
#----------------------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, output_pool=None):
"""Produces the feed dict for this subcomponent.
Args:
batch: Batch returned from dataloader
output_pool: Outputs from previous subcomponents, mostly when evaluating
Returns:
feed_dict: Returns the feed dictionary
"""
feed_dict = {}
feed_dict[self.inputs['ans_ind']] = batch['ans_ind']
# if not training, use previous outputs, else inputs
if not self.params['train_mode']:
feed_dict[self.inputs['context']] = output_pool['context']
return feed_dict
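# Illustrative usage sketch (added note; `holders`, `output_pool`, `batch`,
# `prev_outputs` and `sess` are assumptions based on models_mnist/model.py,
# not part of this file):
#   decoder = AnswerDecoder(holders, output_pool, params)
#   feed = decoder.produce_feed_dict(batch, output_pool=prev_outputs)
#   loss = sess.run(decoder.get_outputs()['ans_token_loss'], feed_dict=feed)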
|
corefnmn-main
|
models_mnist/decoder.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
TODO(satwik): Add a reasonable description to the file.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from util.cnn import fc_layer as fc, conv_relu_layer as conv_relu
from tensorflow.contrib.layers import fully_connected as FC
from tensorflow.contrib.rnn import LSTMStateTuple
from util import support
def _get_valid_tokens(X, W, b):
constraints_validity = tf.greater_equal(tf.tensordot(X, W, axes=1) - b, 0)
token_validity = tf.reduce_all(constraints_validity, axis=2)
return tf.stop_gradient(token_validity)
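# Shape note (added; assumes the P/W/b matrices built in
# models_mnist/assembler.py): X is the decoding state [N, 3], W is
# [3, num_vocab, 4] and b is [num_vocab, 4]; tf.tensordot(X, W, axes=1)
# yields [N, num_vocab, 4], and reduce_all over the constraint axis gives a
# [N, num_vocab] boolean mask of tokens for which every <x, w> - b >= 0 holds.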
#------------------------------------------------------------------------------
def _update_decoding_state(X, s, P):
X = X + tf.nn.embedding_lookup(P, s) # X = X + S P
return tf.stop_gradient(X)
#------------------------------------------------------------------------------
def _get_lstm_cell(num_layers, lstm_dim, apply_dropout):
if isinstance(lstm_dim, list): # Different layers have different dimensions
if not len(lstm_dim) == num_layers:
raise ValueError('the length of lstm_dim must be equal to num_layers')
cell_list = []
for l in range(num_layers):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim[l], state_is_tuple=True)
# Dropout is only applied to the outputs of the first through
# second-to-last layers; the output of the last layer has no dropout.
if apply_dropout and l < num_layers-1:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list.append(dropout_cell)
else: # All layers have the same dimension.
lstm_cell = tf.contrib.rnn.BasicLSTMCell(lstm_dim, state_is_tuple=True)
# Dropout is only applied to the outputs of the first through
# second-to-last layers; the output of the last layer has no dropout.
if apply_dropout:
dropout_cell = tf.contrib.rnn.DropoutWrapper(lstm_cell,
output_keep_prob=0.5)
else:
dropout_cell = lstm_cell
cell_list = [dropout_cell] * (num_layers-1) + [lstm_cell]
cell = tf.contrib.rnn.MultiRNNCell(cell_list, state_is_tuple=True)
return cell
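# Illustrative usage (added sketch; the concrete sizes are assumptions, not
# taken from this file): a two-layer encoder cell with per-layer dimensions
# and dropout on all but the last layer's output:
#   cell = _get_lstm_cell(2, [512, 256], apply_dropout=True)
#   outputs, states = tf.nn.dynamic_rnn(cell, embedded_seq, seq_lens,
#                                       dtype=tf.float32, time_major=True)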
#------------------------------------------------------------------------------
# Sequence to Sequence with attention
class AttSeq2Seq:
def __init__(self, holders, use_gt_prog, assembler, params, reuse=None):
self.T_decoder = params['max_dec_len']
self.encoder_num_vocab = params['text_vocab_size']
self.encoder_embed_dim = params['text_embed_size']
self.decoder_num_vocab = params['prog_vocab_size']
self.decoder_embed_dim = params['prog_embed_size']
self.lstm_dim = params['lstm_size']
self.num_layers = params['num_layers']
self.EOS_token = assembler.EOS_idx
self.embed_scope = params['embed_scope']
self.temperature = params.get('temperature', 1)
# if word vectors need to be used or lstm outputs for attention
params['use_word_vectors'] = 'wv-att' in params['model']
params['generator'] = params.get('generator', 'ques')
self.params = params
# decoding transition variables
self.P = to_T(assembler.P, dtype=tf.int32)
self.W = to_T(assembler.W, dtype=tf.int32)
self.b = to_T(assembler.b, dtype=tf.int32)
self.encoder_dropout = params['enc_dropout']
self.decoder_dropout = params['dec_dropout']
self.decoder_sampling = params['dec_sampling']
# detect fake inputs
if 'fake' in holders: scope = 'enc_dec_cap'
else: scope = 'enc_dec'
with tf.variable_scope(scope, reuse=reuse):
# build a special encoder, if needed
if 'fake' not in holders and params['generator'] == 'mem':
self._build_memory_encoder(holders)
else:
# build a normal encoder
self._build_encoder(holders['ques'], holders['ques_len'])
self._build_decoder(use_gt_prog, holders['prog_gt'])
# build a usual encoder, ques based
def _build_encoder(self, input_seq_batch, seq_len_batch, scope='encoder',
reuse=None):
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.encoder_dropout
with tf.variable_scope(scope, reuse=reuse):
#T = tf.shape(input_seq_batch)[0]
T = input_seq_batch.shape.as_list()[0]
N = tf.shape(input_seq_batch)[1]
self.T_encoder = T
self.N = N
with tf.variable_scope(self.embed_scope, reuse=True):
embedding_mat = tf.get_variable('embed_mat', [self.encoder_num_vocab,
self.encoder_embed_dim])
# text_seq has shape [T, N] and embedded_seq has shape [T, N, D].
embedded_seq = tf.nn.embedding_lookup(embedding_mat, input_seq_batch)
self.embedded_input_seq = embedded_seq
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
# encoder_outputs has shape [T, N, lstm_dim]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell, embedded_seq,
seq_len_batch,
dtype=tf.float32,
time_major=True,
scope='lstm')
self.encoder_outputs = encoder_outputs
self.encoder_states = encoder_states
# check if wv flag is set
if self.params['use_word_vectors']:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(embedded_seq, [-1, self.encoder_embed_dim]),
output_dim=lstm_dim)
else:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(encoder_outputs, [-1, lstm_dim]), output_dim=lstm_dim)
encoder_h_transformed = tf.reshape(encoder_h_transformed,
to_T([T, N, lstm_dim]))
self.encoder_h_transformed = encoder_h_transformed
# seq_not_finished has shape [T, N, 1], where seq_not_finished[t, n]
# is 1 iff sequence n is not finished at time t, and 0 otherwise
seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
seq_len_batch[:, tf.newaxis])
seq_not_finished = tf.cast(seq_not_finished, tf.float32)
self.seq_not_finished = seq_not_finished
# build a special encoder
def _build_memory_encoder(self, holders, scope='encoder', reuse=None):
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.encoder_dropout
input_seq = holders['ques']
input_seq_len = holders['ques_len']
# facts/memories
hist_size = holders['hist'].shape.as_list()
hist_flat = tf.reshape(holders['hist'], [-1, hist_size[2]])
hist_len_flat = tf.reshape(holders['hist_len'], [-1])
with tf.variable_scope(scope, reuse=reuse):
T = input_seq.shape.as_list()[0]
N = tf.shape(input_seq)[1]
self.T_encoder = T
self.N = N
with tf.variable_scope(self.embed_scope, reuse=True):
embed_mat = tf.get_variable('embed_mat', [self.encoder_num_vocab,
self.encoder_embed_dim])
# text_seq has shape [T, N] and embedded_seq has shape [T, N, D].
embed_seq = tf.nn.embedding_lookup(embed_mat, input_seq)
self.embedded_input_seq = embed_seq
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
# encoder_outputs has shape [T, N, lstm_dim]
encoder_outputs, encoder_states = tf.nn.dynamic_rnn(cell,
embed_seq, input_seq_len, dtype=tf.float32,
time_major=True, scope='lstm')
self.encoder_outputs = encoder_outputs
# batch first encoder outputs
batch_encoder_outputs = tf.transpose(encoder_outputs, [1, 0, 2])
ques_enc = support.last_relevant(batch_encoder_outputs, input_seq_len)
size = [-1, self.params['num_rounds'], self.params['lstm_size']]
ques_enc = tf.reshape(ques_enc, size)
self.encoder_states = encoder_states
# similarly encode history
hist_out = tf.nn.embedding_lookup(embed_mat, hist_flat)
# rnns to encode history
cell = tf.contrib.rnn.BasicLSTMCell(self.params['lstm_size'])
for ii in range(0, self.params['num_layers']):
# dynamic rnn
hist_out, states = tf.nn.dynamic_rnn(cell, hist_out, \
sequence_length=hist_len_flat, \
dtype=tf.float32, scope='hist_layer_%d' % ii)
# get output from last timestep
hist_enc = support.last_relevant(hist_out, hist_len_flat)
# reshape back
size = [-1, hist_size[1], self.params['lstm_size']]
hist_enc = tf.reshape(hist_enc, size)
# compute attention of the question over past history rounds
num_r = self.params['num_rounds']
# dot product
attention = tf.matmul(ques_enc, hist_enc, transpose_b=True)
# a very large negative number used to mask out future rounds
u_mat = np.full((num_r, num_r), -1e10)
suppress_mat = tf.constant(np.triu(u_mat, 1), dtype=tf.float32)
l_mat = np.full((num_r, num_r), 1)
mask_mat = tf.constant(np.tril(l_mat), dtype=tf.float32)
attention = tf.nn.softmax(tf.multiply(attention, mask_mat)
+ suppress_mat)
self.att_history = attention
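# (Added explanatory note) The two constant matrices implement causal masking
# over dialog rounds: suppress_mat puts -1e10 on the strictly upper triangle
# so round r cannot attend to later rounds, while mask_mat keeps the lower
# triangle (including the diagonal) before the softmax.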
att_hist_enc = tf.matmul(attention, hist_enc)
# flatten out
size = [-1, self.params['lstm_size']]
att_hist_flat = tf.reshape(att_hist_enc, size)
# concatenate attended history and encoder state for the last layer
concat = tf.concat([encoder_states[-1].h, att_hist_flat], -1)
new_state = LSTMStateTuple(encoder_states[-1].c,
FC(concat, self.params['lstm_size']))
# make it mutable
encoder_states = list(encoder_states)
encoder_states[-1] = new_state
self.encoder_states = tuple(encoder_states)
# check if wv flag is set
if self.params['use_word_vectors']:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(embed_seq, [-1, self.encoder_embed_dim]),
output_dim=lstm_dim)
else:
# transform the encoder outputs for further attention alignments
# encoder_outputs_flat has shape [T, N, lstm_dim]
encoder_h_transformed = fc('encoder_h_transform',
tf.reshape(encoder_outputs, [-1, lstm_dim]), output_dim=lstm_dim)
encoder_h_transformed = tf.reshape(encoder_h_transformed,
to_T([T, N, lstm_dim]))
self.encoder_h_transformed = encoder_h_transformed
# seq_not_finished is a shape [T, N, 1] tensor, where seq_not_finished[t, n]
# is 1 iff sequence n is not finished at time t, and 0 otherwise
seq_not_finished = tf.less(tf.range(T)[:, tf.newaxis, tf.newaxis],
input_seq_len[:, tf.newaxis])
seq_not_finished = tf.cast(seq_not_finished, tf.float32)
self.seq_not_finished = seq_not_finished
def _build_decoder(self, use_gt_layout, gt_layout_batch, scope='decoder',
reuse=None):
# The main difference from before is that the decoders now takes another
# input (the attention) when computing the next step
# T_max is the maximum length of decoded sequence (including <eos>)
#
# This function is for decoding only. It performs greedy search or sampling.
# the first input is <go> (its embedding vector) and the subsequent inputs
# are the outputs from previous time step
# num_vocab does not include <go>
#
# use_gt_layout is None or a bool tensor, and gt_layout_batch is a tensor
# with shape [T_max, N].
# If use_gt_layout is not None, then when use_gt_layout is true, predict
# exactly the tokens in gt_layout_batch, regardless of actual probability.
# Otherwise, if sampling is True, sample from the token probability
# If sampling is False, do greedy decoding (beam size 1)
N = self.N
encoder_states = self.encoder_states
T_max = self.T_decoder
lstm_dim = self.lstm_dim
num_layers = self.num_layers
apply_dropout = self.decoder_dropout
EOS_token = self.EOS_token
sampling = self.decoder_sampling
with tf.variable_scope(scope, reuse=reuse):
embedding_mat = tf.get_variable('embedding_mat',
[self.decoder_num_vocab, self.decoder_embed_dim])
# we use a separate embedding for <go>, as it is only used in the
# beginning of the sequence
go_embedding = tf.get_variable('go_embedding', [1, self.decoder_embed_dim])
with tf.variable_scope('att_prediction'):
v = tf.get_variable('v', [lstm_dim])
W_a = tf.get_variable('weights', [lstm_dim, lstm_dim],
initializer=tf.contrib.layers.xavier_initializer())
b_a = tf.get_variable('biases', lstm_dim,
initializer=tf.constant_initializer(0.))
# The parameters to predict the next token
with tf.variable_scope('token_prediction'):
W_y = tf.get_variable('weights', [lstm_dim*2, self.decoder_num_vocab],
initializer=tf.contrib.layers.xavier_initializer())
b_y = tf.get_variable('biases', self.decoder_num_vocab,
initializer=tf.constant_initializer(0.))
# Attentional decoding
# Loop function is called at time t BEFORE the cell execution at time t,
# and its next_input is used as the input at time t (not t+1)
# c.f. https://www.tensorflow.org/api_docs/python/tf/nn/raw_rnn
mask_range = tf.reshape(tf.range(self.decoder_num_vocab, dtype=tf.int32),
[1, -1])
if use_gt_layout is not None:
gt_layout_mult = tf.cast(use_gt_layout, tf.int32)
pred_layout_mult = 1 - gt_layout_mult
def loop_fn(time, cell_output, cell_state, loop_state):
if cell_output is None: # time == 0
next_cell_state = encoder_states
next_input = tf.tile(go_embedding, to_T([N, 1]))
else: # time > 0
next_cell_state = cell_state
# compute the attention map over the input sequence
# att_raw has shape [T, N, 1]
att_raw = tf.reduce_sum(
tf.tanh(tf.nn.xw_plus_b(cell_output, W_a, b_a) +
self.encoder_h_transformed) * v,
axis=2, keep_dims=True)
# softmax along the first dimension (T) over not finished examples
# att has shape [T, N, 1]
att = tf.nn.softmax(att_raw, dim=0)*self.seq_not_finished
att = att / tf.reduce_sum(att + 1e-10, axis=0, keep_dims=True)
# d has shape [N, lstm_dim]
d2 = tf.reduce_sum(att*self.encoder_outputs, axis=0)
# token_scores has shape [N, num_vocab]
token_scores = tf.nn.xw_plus_b(
tf.concat([cell_output, d2], axis=1),
W_y, b_y)
decoding_state = loop_state[2]
# token_validity has shape [N, num_vocab]
token_validity = _get_valid_tokens(decoding_state, self.W, self.b)
token_validity.set_shape([None, self.decoder_num_vocab])
if use_gt_layout is not None:
# when there's ground-truth layout, do not re-normalize prob
# and treat all tokens as valid
token_validity = tf.logical_or(token_validity, use_gt_layout)
validity_mult = tf.cast(token_validity, tf.float32)
# predict the next token (behavior depending on parameters)
if sampling:
token_scores_valid = token_scores - (1-validity_mult) * 50
# TODO:debug
sampled_token = tf.cast(tf.reshape(
tf.multinomial(token_scores_valid/self.temperature, 1), [-1]), tf.int32)
# make sure that the predictions are ALWAYS valid
# (a sampled token can be invalid with very small probability)
# If not, fall back to the highest-scoring valid token
# pred_mask has shape [N, num_vocab]
sampled_mask = tf.equal(mask_range, tf.reshape(sampled_token, [-1, 1]))
is_sampled_valid = tf.reduce_any(
tf.logical_and(sampled_mask, token_validity),
axis=1)
# Fall back to max score (no sampling)
min_score = tf.reduce_min(token_scores)
token_scores_valid = tf.where(token_validity, token_scores,
tf.ones_like(token_scores)*(min_score-1))
max_score_token = tf.cast(tf.argmax(token_scores_valid, 1), tf.int32)
predicted_token = tf.where(is_sampled_valid, sampled_token, max_score_token)
else:
min_score = tf.reduce_min(token_scores)
token_scores_valid = tf.where(token_validity, token_scores,
tf.ones_like(token_scores)*(min_score-1))
# predicted_token has shape [N]
predicted_token = tf.cast(tf.argmax(token_scores_valid, 1), tf.int32)
if use_gt_layout is not None:
predicted_token = (gt_layout_batch[time-1] * gt_layout_mult
+ predicted_token * pred_layout_mult)
# a robust version of softmax
# all_token_probs has shape [N, num_vocab]
all_token_probs = tf.nn.softmax(token_scores) * validity_mult
# tf.check_numerics(all_token_probs, 'NaN/Inf before div')
all_token_probs = all_token_probs / tf.reduce_sum(all_token_probs + 1e-10, axis=1, keep_dims=True)
# tf.check_numerics(all_token_probs, 'NaN/Inf after div')
# mask has shape [N, num_vocab]
mask = tf.equal(mask_range, tf.reshape(predicted_token, [-1, 1]))
# token_prob has shape [N], the probability of the predicted token
# although token_prob is not needed for predicting the next token
# it is needed in output (for policy gradient training)
# [N, num_vocab]
token_prob = tf.reduce_sum(all_token_probs * tf.cast(mask, tf.float32), axis=1)
# tf.assert_positive(token_prob)
neg_entropy = tf.reduce_sum(
all_token_probs * tf.log(all_token_probs + (1-validity_mult) + 1e-10),
axis=1)
# update states
updated_decoding_state = _update_decoding_state(
decoding_state, predicted_token, self.P)
# the prediction comes from the cell output of the previous
# timestep (t-1); feed it as the input at timestep t
next_input = tf.nn.embedding_lookup(embedding_mat, predicted_token)
elements_finished = tf.greater_equal(time, T_max)
# loop_state is a 5-tuple, representing
# 1) the predicted_tokens
# 2) the prob of predicted_tokens
# 3) the decoding state (used for validity)
# 4) the negative entropy of policy (accumulated across timesteps)
# 5) the attention
if loop_state is None: # time == 0
# Write the predicted token into the output
predicted_token_array = tf.TensorArray(dtype=tf.int32, size=T_max,
infer_shape=False)
token_prob_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
init_decoding_state = tf.tile(to_T([[0, 0, T_max]], dtype=tf.int32), to_T([N, 1]))
att_array = tf.TensorArray(dtype=tf.float32, size=T_max,
infer_shape=False)
next_loop_state = (predicted_token_array,
token_prob_array,
init_decoding_state,
tf.zeros(to_T([N]), dtype=tf.float32),
att_array)
else: # time > 0
t_write = time-1
next_loop_state = (loop_state[0].write(t_write, predicted_token),
loop_state[1].write(t_write, token_prob),
updated_decoding_state,
loop_state[3] + neg_entropy,
loop_state[4].write(t_write, att))
return (elements_finished, next_input, next_cell_state, cell_output,
next_loop_state)
# The RNN
cell = _get_lstm_cell(num_layers, lstm_dim, apply_dropout)
_, _, decodes_ta = tf.nn.raw_rnn(cell, loop_fn, scope='lstm')
predicted_tokens = decodes_ta[0].stack()
token_probs = decodes_ta[1].stack()
neg_entropy = decodes_ta[3]
# atts has shape [T_decoder, T_encoder, N, 1]
atts = decodes_ta[4].stack()
# static dimension recast
atts = tf.reshape(atts, [self.T_decoder, self.T_encoder, -1, 1])
self.atts = atts
# word_vecs has shape [T_decoder, N, encoder_embed_dim]
word_vecs = tf.reduce_sum(atts*self.embedded_input_seq, axis=1)
predicted_tokens.set_shape([None, None])
token_probs.set_shape([None, None])
neg_entropy.set_shape([None])
#word_vecs.set_shape([None, None, self.encoder_embed_dim])
# static shapes
word_vecs.set_shape([self.T_decoder, None, self.encoder_embed_dim])
self.predicted_tokens = predicted_tokens
self.token_probs = token_probs
self.neg_entropy = neg_entropy
self.word_vecs = word_vecs
#------------------------------------------------------------------------------
|
corefnmn-main
|
models_mnist/generator_attnet.py
|
corefnmn-main
|
models_mnist/__init__.py
|
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
TODO(satwik): Write a description about what this file contains and what
it does.
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow import convert_to_tensor as to_T
from models_mnist import modules as lm
# the number of attention input to each module
_module_input_num = {
'_Find': 0,
'_Refer': 0,
'_Exclude': 0,
'_Transform': 1,
'_Exist': 1,
'_Count': 1,
'_And': 2,
'_Diff': 2,
'_Not': 1,
'_Describe': 1
}
# output type of each module
_module_output_type = {
'_Find': 'att',
'_Refer': 'att',
'_Exclude': 'att',
'_Exist': 'ans',
'_Count': 'ans',
'_Transform': 'att',
'_And': 'att',
'_Diff': 'att',
'_Not': 'att',
'_Describe': 'ans'
}
INVALID_EXPR = 'INVALID_EXPR'
# decoding validity: maintaining a state x of [#att, #ans, T_remain]
# when T_remain is T_decoder when decoding the first module token
# a token s can be predicted iff all(<x, w_s> - b_s >= 0)
# the validity token list is
# XW - b >= 0
# the state transition matrix is P, so the state update is X += S P,
# where S is the predicted tokens (one-hot vectors)
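# Worked example (added illustration based on the tables above, not part of
# the original file): with state x = [#att, #ans, T_remain],
#   '_Find'     (0 att in, att out) has P row [+1, 0, -1]
#   '_Describe' (1 att in, ans out) has P row [-1, +1, -1]
# so decoding '_Find', '_Describe', '<eos>' from x = [0, 0, 3] evolves as
#   [0, 0, 3] -> [1, 0, 2] -> [0, 1, 1],
# after which only <eos> is valid (it requires #ans >= 1). Predicting
# '_Describe' first would violate constraint 0, since #att = 0 < 1.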
def _build_validity_mats(module_names):
state_size = 3
num_vocab_nmn = len(module_names)
num_constraints = 4
P = np.zeros((num_vocab_nmn, state_size), np.int32)
W = np.zeros((state_size, num_vocab_nmn, num_constraints), np.int32)
b = np.zeros((num_vocab_nmn, num_constraints), np.int32)
# collect the input and output numbers of each module
att_in_nums = np.zeros(num_vocab_nmn)
att_out_nums = np.zeros(num_vocab_nmn)
ans_out_nums = np.zeros(num_vocab_nmn)
for n_s, s in enumerate(module_names):
if s != '<eos>':
att_in_nums[n_s] = _module_input_num[s]
att_out_nums[n_s] = _module_output_type[s] == 'att'
ans_out_nums[n_s] = _module_output_type[s] == 'ans'
# construct the transition matrix P
for n_s, s in enumerate(module_names):
P[n_s, 0] = att_out_nums[n_s] - att_in_nums[n_s]
P[n_s, 1] = ans_out_nums[n_s]
P[n_s, 2] = -1
# construct the validity W and b
att_absorb_nums = (att_in_nums - att_out_nums)
max_att_absorb_nonans = np.max(att_absorb_nums * (ans_out_nums == 0))
max_att_absorb_ans = np.max(att_absorb_nums * (ans_out_nums != 0))
for n_s, s in enumerate(module_names):
if s != '<eos>':
# constraint: a non-<eos> module can be outputted iff all the following
# hold:
# * 0) there's enough att in the stack
# #att >= att_in_nums[n_s]
W[0, n_s, 0] = 1
b[n_s, 0] = att_in_nums[n_s]
# * 1) for answer modules, there's no extra att in the stack
# #att <= att_in_nums[n_s]
# -#att >= -att_in_nums[n_s]
# for non-answer modules, T_remain >= 3
# (the last two has to be AnswerType and <eos>)
if ans_out_nums[n_s] != 0:
W[0, n_s, 1] = -1
b[n_s, 1] = -att_in_nums[n_s]
else:
W[2, n_s, 1] = 1
b[n_s, 1] = 3
# * 2) there's no answer in the stack (otherwise <eos> only)
# #ans <= 0
# -#ans >= 0
W[1, n_s, 2] = -1
# * 3) there's enough time to consume the all attentions, output answer
# plus <eos>
# 3.1) for non-answer modules, we already have T_remain >= 3 from
# constraint 1
# In maximum (T_remain-3) further steps
# (plus 3 steps for this, ans, <eos>) to consume atts
# (T_remain-3) * max_att_absorb_nonans + max_att_absorb_ans +
# att_absorb_nums[n_s] >= #att
# T_remain*MANA - #att >= 3*MANA - MAA - A[s]
# - #att + MANA * T_remain >= 3*MANA - MAA - A[s]
# 3.2) for answer modules, if it can be decoded then constraint 0&1
# ensures that there'll be no att left in stack after decoding
# this answer, hence no further constraints here
if ans_out_nums[n_s] == 0:
W[0, n_s, 3] = -1
W[2, n_s, 3] = max_att_absorb_nonans
b[n_s, 3] = (3 * max_att_absorb_nonans - max_att_absorb_ans -
att_absorb_nums[n_s])
else: # <eos>-case
# constraint: a <eos> token can be outputted iff all the following holds
# * 0) there's ans in the stack
# #ans >= 1
W[1, n_s, 0] = 1
b[n_s, 0] = 1
return P, W, b
#------------------------------------------------------------------------------
class Assembler:
def __init__(self, module_vocab_file):
# read the module list, and record the index of each module and <eos>
with open(module_vocab_file) as f:
self.module_names = [s.strip() for s in f.readlines()]
# find the index of <eos>
for n_s in range(len(self.module_names)):
if self.module_names[n_s] == '<eos>':
self.EOS_idx = n_s
break
# build a dictionary from module name to token index
self.name2idx_dict = {name: n_s for n_s, name in enumerate(self.module_names)}
self.num_vocab_nmn = len(self.module_names)
self.P, self.W, self.b = _build_validity_mats(self.module_names)
def module_list2tokens(self, module_list, T=None):
layout_tokens = [self.name2idx_dict[name] for name in module_list]
if T is not None:
if len(module_list) >= T:
raise ValueError('Not enough time steps to add <eos>')
layout_tokens += [self.EOS_idx]*(T-len(module_list))
return layout_tokens
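# Example (added illustration; assumes '_Find' and '_Describe' appear in the
# module vocabulary file):
#   assembler.module_list2tokens(['_Find', '_Describe'], T=4)
# returns the two module indices followed by two <eos> indices (a layout
# padded to length T); T <= len(module_list) raises ValueError since there is
# no room left for <eos>.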
def _layout_tokens2str(self, layout_tokens):
return ' '.join([self.module_names[idx] for idx in layout_tokens])
def assemble_refer(self, text_att, round_id, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# compute the scores
logits = []
for find_arg in reuse_stack:
# compute the weights for each of the attention map
inputs = (text_att, find_arg[1], round_id, find_arg[2])
logits.append(weaver.align_text(*inputs))
# exponential each logit
weights = []
for ii in logits: weights.append(weaver.exp(ii))
# normalize the weights
if len(weights) < 2:
norm = weights[0]
else:
norm = weaver.add(weights[0], weights[1])
for ii in weights[2:]: norm = weaver.add(norm, ii)
for index, ii in enumerate(weights):
weights[index] = weaver.divide(ii, norm)
# multiply the attention with softmax weight
prev_att = []
for (att, _, _, _, _), weight in zip(reuse_stack, weights):
prev_att.append(weaver.weight_attention(att, weight))
# add all attentions to get the result
if len(prev_att) < 2: out = prev_att[0]
else:
out = weaver.add_attention(prev_att[0], prev_att[1])
for ii in prev_att[2:]:
out = weaver.add_attention(out, ii)
return out, weights, logits
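# In effect (added note), the block above computes a softmax over the
# alignment logits and blends the stored attention maps:
#   w_k = exp(logit_k) / sum_j exp(logit_j),   out = sum_k w_k * att_k
# using explicit weaver ops (exp / add / divide) rather than tf.nn.softmax,
# presumably so the computation stays inside the loom graph.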
def assemble_exclude(self, text_att, round_id, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# compute the scores
weights = []
exclude_att = reuse_stack[0][0]
if len(reuse_stack) > 1:
for find_arg in reuse_stack:
exclude_att = weaver.max_attention(exclude_att, find_arg[0])
return weaver.normalize_exclude(exclude_att)
# code to check if the program makes sense
# typically contains all the checks from the _assemble_program method
def sanity_check_program(self, layout):
decode_stack = []
for t_id, cur_op_id in enumerate(layout):
cur_op_name = self.module_names[cur_op_id]
# <eos> would mean stop
if cur_op_id == self.EOS_idx: break
# insufficient number of inputs
num_inputs = _module_input_num[cur_op_name]
if len(decode_stack) < num_inputs:
return False, 'Insufficient inputs'
# read the inputs
inputs = []
for ii in range(num_inputs):
arg_type = decode_stack.pop()
# cannot consume anything but attention
if arg_type != 'att':
return False, 'Intermediate not attention'
decode_stack.append(_module_output_type[cur_op_name])
# Check if only one element is left
if len(decode_stack) != 1:
return False, 'Left with more than one output'
# final output is not answer type
elif decode_stack[0] != 'ans':
return False, 'Final output not an answer'
return True, 'Valid program'
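# Example (added illustration): the layout for ['_Find', '_Describe', '<eos>']
# leaves exactly one 'ans' on the stack and is reported as a valid program,
# whereas ['_Describe', '<eos>'] fails immediately with 'Insufficient inputs'
# because _Describe consumes one attention input.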
def assemble(self, layout_tokens, executor, visualize=False):
# layout_tokens_batch is a numpy array with shape [T, N],
# containing module tokens and <eos>, in Reverse Polish Notation.
# internalize executor and weaver
self.executor = executor
# build a weaver
weaver = executor.create_weaver()
self.weaver = weaver
# visualize flag
self.visualize = visualize
# get extent of layout tokens
max_time, batch_size = layout_tokens['ques'].shape
num_rounds = executor.params['num_rounds']
batch_size = batch_size // num_rounds
outputs = []
reuse = [None] * batch_size
ques_invalid_prog = []
# program on questions and captions, if needed
ques_tokens = layout_tokens['ques']
for b_id in range(batch_size):
image = weaver.batch_input(executor._loom_types['image'], b_id)
if executor.params['use_fact']:
fact = weaver.batch_input(executor._loom_types['fact'], b_id)
else: fact = None
# Now run program on questions
text = weaver.batch_input(executor._loom_types['text'], b_id)
text_feat = weaver.batch_input(executor._loom_types['text_feat'], b_id)
# collect root node outputs for down the rounds
# tuples are immutable, recreate to ensure caption is round 0
round_zero = weaver.batch_input(executor._loom_types['round'], 0)
tokens = ques_tokens[:, num_rounds*b_id : num_rounds*(b_id+1)]
inputs = (image, text, fact, text_feat, tokens, [])
out, _, invalid_prog = self._assemble_program(*inputs)
ques_invalid_prog.extend(invalid_prog)
outputs.extend(out['comp'])
if visualize:
outputs.extend([ii for ii, _ in out['vis']['att']])
outputs.extend(out['vis']['weights'])
invalid_prog = {'ques': ques_invalid_prog}
return weaver, outputs, invalid_prog
def _assemble_program(self, image, text, fact, text_feat, tokens, reuse_stack):
# aliases
weaver = self.weaver
executor = self.executor
# get extent of layout tokens
max_time, batch_size = tokens.shape
num_rounds = executor.params['num_rounds']
outputs = []
validity = []
# for visualizing internal nodes
vis_outputs = {'att': [], 'weights': []}
for r_id in range(num_rounds):
layout = tokens[:, r_id]
invalid_prog = False
round_id = weaver.batch_input(executor._loom_types['round'], r_id)
if fact is not None: fact_slice = weaver.slice_fact(fact, round_id)
# valid layout must contain <eos>. Assembly fails if it doesn't.
if not np.any(layout == self.EOS_idx): invalid_prog = True
decode_stack = []
penult_out = None # penultimate output
for t_id in range(len(layout)):
weights = None
time = weaver.batch_input(executor._loom_types['time'], t_id)
text_att = weaver.slice_text(text, round_id, time)
# slice the text feature
text_feat_slice = weaver.slice_text_feat(text_feat, round_id, time)
cur_op_id = layout[t_id]
cur_op_name = self.module_names[cur_op_id]
# <eos> would mean stop
if cur_op_id == self.EOS_idx: break
# insufficient number of inputs
num_inputs = _module_input_num[cur_op_name]
if len(decode_stack) < num_inputs:
invalid_prog = True
break
# read the inputs
inputs = []
for ii in range(num_inputs):
arg, arg_type = decode_stack.pop()
# cannot consume anything but attention
if arg_type != 'att':
invalid_prog = True
break
inputs.append(arg)
# switch cases
if cur_op_name == '_Find':
out = weaver.find(image, text_att)
elif cur_op_name == '_Refer':
# nothing to refer to, wrong program
if len(reuse_stack) == 0:
invalid_prog = True
break
# if baseline is in the model, take the last output
if 'baseline' in self.executor.params['model']:
out = reuse_stack[-1][0]
else:
inputs = (text_feat_slice, round_id, reuse_stack)
out, weights, logits = self.assemble_refer(*inputs)
elif cur_op_name == '_Exclude':
# clean up reuse stack to avoid current finds
neat_stack = reuse_stack.copy()
for prev_time in range(t_id - 1, 0, -1):
if neat_stack[-1][-2] == prev_time: neat_stack.pop(-1)
# nothing to exclude to, wrong program
if len(neat_stack) == 0:
invalid_prog = True
break
inputs = (text_att, round_id, neat_stack)
out = self.assemble_exclude(*inputs)
# collect in reuse stack
#reuse_stack.append((out, text_att, round_id, r_id, t_id))
elif cur_op_name == '_Transform':
out = weaver.transform(inputs[0], image, text_att)
elif cur_op_name == '_Describe':
out = weaver.describe(inputs[0], image, text_att)
# TODO: Do this more carefully!
penult_out = arg
elif cur_op_name == '_Exist':
out = weaver.exist(inputs[0], image, text_att)
# TODO: Do this more carefully!
penult_out = arg
elif cur_op_name == '_Count':
out = weaver.count(inputs[0], image, text_att)
# TODO: Do this more carefully!
penult_out = arg
elif cur_op_name == '_And':
out = weaver.and_op(inputs[0], inputs[1])
elif cur_op_name == '_Diff':
out = weaver.diff_op(inputs[0], inputs[1])
# just invert the attention
elif cur_op_name == '_Not':
out = weaver.normalize_exclude(inputs[0])
else:
print('Current operand not defined: ' + cur_op_name)
invalid_prog = True
# collect outputs from all modules (visualize)
if self.visualize:
if _module_output_type[cur_op_name] == 'att':
vis_outputs['att'].append((out, r_id))
if weights is not None:
vis_outputs['weights'].extend(weights)
decode_stack.append((out, _module_output_type[cur_op_name]))
# Check if only one element is left
if len(decode_stack) != 1: invalid_prog = True
# final output is not answer type
elif decode_stack[0][1] != 'ans': invalid_prog = True
# record program validity
validity.append(invalid_prog)
# if program is invalid, return zeros
if invalid_prog: outputs.append(weaver.invalid(image))
else:
outputs.append(decode_stack[-1][0])
# if fact is to be used, take the penultimate output
if executor.params['use_fact']:
reuse_stack.append((penult_out, fact_slice, round_id, r_id, -1))
return {'comp': outputs, 'vis': vis_outputs}, reuse_stack, validity
|
corefnmn-main
|
models_mnist/assembler.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main CorefNMN model class.
Explicit visual coreference resolution in visual dialog using neural module
networks. Takes parameters and assemblers as input.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from models_mnist.generator import ProgramGenerator
from models_mnist.executor import ProgramExecutor
from models_mnist.decoder import AnswerDecoder
from util import support
class CorefNMN:
def __init__(self, params, assemblers, reuse=None):
# train mode
params['train_mode'] = 'test_split' not in params
print('Building model with train_mode as: ' + str(params['train_mode']))
self.params = params
self.assemblers = assemblers
# module phases
self.phases = ['generate_program', 'execute_program', 'generate_answer']
# initializing input and output placeholders
self.inputs = {ii: {} for ii in self.phases}
self.outputs = self.inputs.copy()
# build place holders for inputs and outputs in the tensorflow graph
holders = self._build_placeholders(params)
self.holders = holders
with tf.variable_scope(params['model'], reuse=reuse):
# keep track of all outputs
output_pool = {}
# Part 1: Seq2seq RNN to generate module layout tokens
with tf.variable_scope('generate_program'):
self.generator = ProgramGenerator(holders, assemblers['ques'], params)
self.inputs['generate_program'] = self.generator.get_inputs()
self.outputs['generate_program'] = self.generator.get_outputs()
# add outputs to pool
output_pool.update(self.outputs['generate_program'])
# Part 2: Neural Module Network
with tf.variable_scope('execute_program'):
self.executor = ProgramExecutor(holders, output_pool,
assemblers['copy'], params)
self.inputs['execute_program'] = self.executor.get_inputs()
self.outputs['execute_program'] = self.executor.get_outputs()
# add outputs to pool
output_pool.update(self.outputs['execute_program'])
# Part 3: Seq2Seq decoding of the answer
with tf.variable_scope('generate_answer'):
self.decoder = AnswerDecoder(holders, output_pool, params)
self.inputs['generate_answer'] = self.decoder.get_inputs()
self.outputs['generate_answer'] = self.decoder.get_outputs()
# pool up all the outputs
pooled_dict = []
outputs = self.outputs.copy()
for ii in outputs:
pooled_dict += outputs[ii].items()
self.pooled_outputs = dict(pooled_dict)
self.run_outputs = [ii for _, jj in self.outputs.items()
for _, ii in jj.items()]
self.run_inputs = list(set([ii for _, jj in self.inputs.items()
for _, ii in jj.items()]))
# add the additional loom input tensor required by tensorflow fold
if 'prog' in params['model']:
self.run_inputs.append(self.executor._loom._loom_input_tensor)
#---------------------------------------------------------------------------
def _build_placeholders(self, params):
inputs = {}
# Phase 1 - program generation
size = [params['max_enc_len'], None]
inputs['ques'] = tf.placeholder(tf.int32, size, 'ques')
inputs['ques_len'] = tf.placeholder(tf.int32, [None], 'ques_len')
inputs['prog_gt'] = tf.placeholder(tf.int32, [None, None], 'prog')
# place holders for fact
size = [None, params['max_enc_len'] + 1]
inputs['fact'] = tf.placeholder(tf.int32, size, 'fact')
inputs['fact_len'] = tf.placeholder(tf.int32, [None], 'fact_len')
# tie encoder and decoder
size = [params['num_layers'], None, params['lstm_size']]
inputs['enc_dec_h'] = tf.placeholder(tf.float32, size, 'enc_dec_h')
inputs['enc_dec_c'] = tf.placeholder(tf.float32, size, 'enc_dec_c')
# Phase 2 - program execution
size = [None, 112, 112, 3]
inputs['image'] = tf.placeholder(tf.float32, size, 'image')
inputs['prog_validity'] = tf.placeholder(tf.bool, [None])
# for the answer indices
inputs['ans_ind'] = tf.placeholder(tf.int32, [None], 'ans_ind')
# history
size = [None, params['num_rounds'], params['max_enc_len'] + 1]
inputs['hist'] = tf.placeholder(tf.int32, size, 'history')
size = [None, params['num_rounds']]
inputs['hist_len'] = tf.placeholder(tf.int32, size, 'hist_len')
if not self.params['train_mode']:
# additional placeholders during evaluation
size = [None, params['lstm_size']]
inputs['context'] = tf.placeholder(tf.float32, size, 'context')
size = [None, None, None, params['lstm_size']]
inputs['ques_enc'] = tf.placeholder(tf.float32, size, 'ques_enc')
size = [None, params['lstm_size']]
inputs['hist_enc'] = tf.placeholder(tf.float32, size, 'hist_enc')
size = [params['max_dec_len'], None, params['text_embed_size']]
inputs['ques_attended'] = tf.placeholder(tf.float32, size, 'ques_att')
return inputs
#---------------------------------------------------------------------------
# method to initialize training related attributes
def setup_training(self):
# answer prediction loss
total_loss = self.outputs['generate_answer']['ans_token_loss']
# supervised sequence prediction loss
total_loss += self.outputs['generate_program']['prog_pred_loss']
# add the total loss to the list of outputs
self.pooled_outputs['total_loss'] = total_loss
self.total_loss = total_loss
self.run_outputs.append(self.total_loss)
# setters and getters
def get_total_loss(self):
return self.total_loss
# return self.pooled_outputs['total_loss']
def set_train_step(self, step):
if hasattr(self, 'train_step'):
self.train_step.append(step)
else:
self.train_step = [step]
self.run_outputs.append(step)
def add_solver_op(self, op):
self.pooled_outputs['solver'] = op
#---------------------------------------------------------------------------
def run_train_iteration(self, batch, sess):
iter_loss = {}
h = sess.partial_run_setup(self.run_outputs, self.run_inputs)
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output = sess.partial_run(h, self.outputs['generate_program'], feeder)
iter_loss['prog'] = output['prog_pred_loss'] # record loss
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output)
output.update(sess.partial_run(h, self.outputs['execute_program'], feeder))
# Part 3: Run the answer generation language model
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.partial_run(h, self.outputs['generate_answer'], feeder))
iter_loss['ans'] = output['ans_token_loss'] # record loss
# End: perform the gradient steps
output = sess.partial_run(h, self.train_step + [self.total_loss])
iter_loss['total'] = output[-1] # record loss
return iter_loss, None
#---------------------------------------------------------------------------
def run_train_iteration_legacy(self, batch, sess):
iter_loss = {}
# collect feeds from all subcomponents
feeder = self.generator.produce_feed_dict(batch)
feeder.update(self.executor.produce_feed_dict(batch))
feeder.update(self.decoder.produce_feed_dict(batch))
# run all subcomponents together
output = sess.run(self.pooled_outputs, feed_dict=feeder)
# record all the loss values
iter_loss['prog'] = output['prog_pred_loss']
iter_loss['ans'] = output['ans_token_loss']
iter_loss['total'] = output['total_loss']
return iter_loss, None
#---------------------------------------------------------------------------
def run_evaluate_iteration(self, batch, sess, eval_options=True):
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output = sess.run(self.outputs['generate_program'], feed_dict=feeder)
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['execute_program'], feed_dict=feeder))
if 'pred_tokens' in output:
prog_matches = []
prog_matches.append(batch['gt_layout'] == output['pred_tokens'])
output['matches'] = prog_matches
# Part 3: Run the answer generation language model
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['generate_answer'], feeder))
# use the logits and get the prediction
matches = np.argmax(output['ans_logits'], 1) == batch['ans_ind']
return matches, output
#---------------------------------------------------------------------------
def run_visualize_iteration(self, batch, sess, eval_options=True):
output = batch.copy()
# Part 0 & 1: Run Convnet and generate module layout
feeder = self.generator.produce_feed_dict(batch)
output.update(sess.run(self.outputs['generate_program'], feeder))
# Part 2: Run NMN and learning steps
feeder = self.executor.produce_feed_dict(batch, output, True)
output.update(sess.run(self.outputs['execute_program'], feeder))
# Part 3: Run the answer generation language model
feeder = self.decoder.produce_feed_dict(batch, output)
output.update(sess.run(self.outputs['generate_answer'], feeder))
# segregate weights and attention maps
output['intermediates'] = self.executor.segregrate_outputs(output)
return None, output
#-------------------------------------------------------------------------
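# Illustrative training-loop sketch (added; `params`, `assemblers`, `optimizer`,
# `batch` and `sess` are assumptions, not defined in this file):
#   model = CorefNMN(params, assemblers)
#   model.setup_training()
#   model.add_solver_op(optimizer.minimize(model.get_total_loss()))
#   iter_loss, _ = model.run_train_iteration_legacy(batch, sess)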
|
corefnmn-main
|
models_mnist/model.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to generate programs for questions and captions.
Program generator for explicit visual coreference resolution model in visual
dialog using neural module networks, called CorefNMN.
This subcomponent uses memory network augmentation to figure out if an entity
has been seen before and/or if it needs resolution using history.
Author: Satwik Kottur
"""
import numpy as np
import tensorflow as tf
from models_mnist.generator_attnet import AttSeq2Seq
from util import support
# alias
linear = tf.contrib.layers.fully_connected
# behavior based on type of model
class ProgramGenerator:
def __init__(self, inputs, assembler, params):
"""Initialize program generator.
Args:
inputs:
assembler:
params:
"""
self.params = params
outputs = {}
used_inputs = []
# create embedding matrix
with tf.variable_scope('embed', reuse=None) as embed_scope:
size = [params['text_vocab_size'], params['text_embed_size']]
embed_mat = tf.get_variable('embed_mat', size)
# remember the scope for further use
params['embed_scope'] = embed_scope
cell = tf.contrib.rnn.BasicLSTMCell(params['lstm_size'])
#--------------------------------------------------------
# if program is to be predicted
if 'prog' in params['model']:
# define a constant for internal use
use_gt_prog = tf.constant(params['use_gt_prog'], dtype=tf.bool)
# use a low level model and construct internals
self.rnn = AttSeq2Seq(inputs, use_gt_prog, assembler, params)
# if memory based generator is used
if params['generator'] == 'mem':
used_inputs.extend(['hist', 'hist_len'])
outputs['encoder_output'] = self.rnn.encoder_outputs
outputs['pred_tokens'] = self.rnn.predicted_tokens
outputs['neg_entropy'] = tf.reduce_mean(self.rnn.neg_entropy)
# check if attHistory exists
if hasattr(self.rnn, 'att_history'):
outputs['att_history'] = self.rnn.att_history
# also add the encoder states (based on the flag)
concat_list = [ii.h for ii in self.rnn.encoder_states]
outputs['enc_dec_h'] = tf.stack(concat_list)
concat_list = [ii.c for ii in self.rnn.encoder_states]
outputs['enc_dec_c'] = tf.stack(concat_list)
# alias
attention = self.rnn.atts
# compute attended questions here
# word_vecs has shape [T_decoder, N, text_embed_size]
word_vecs = tf.reduce_sum(attention * self.rnn.embedded_input_seq, axis=1)
size = [params['max_dec_len'], None, params['text_embed_size']]
word_vecs.set_shape(size)
outputs['attention'] = attention
outputs['ques_attended'] = word_vecs
#outputs['ques_attended'] = self.rnn.word_vecs
# log probability of each generated sequence
outputs['log_seq_prob'] = tf.reduce_sum(
tf.log(self.rnn.token_probs + 1e-10), axis=0)
outputs['ques_prog_loss'] = tf.reduce_mean(-outputs['log_seq_prob'])
q_output = tf.transpose(self.rnn.encoder_outputs, perm=[1, 0, 2])
q_output = support.last_relevant(q_output, inputs['ques_len'])
# add two leading singleton dimensions
q_output = tf.expand_dims(q_output, axis=0)
outputs['ques_enc'] = tf.expand_dims(q_output, axis=0)
# keep track of inputs actually used
used_inputs.extend(['ques', 'ques_len', 'prog_gt'])
#------------------------------------------------------------------
#------------------------------------------------------------------
# setup the inputs and outputs
# should have at least one loss; fall back to zero if program loss is absent
total_loss = outputs.get('ques_prog_loss', tf.constant(0.0))
outputs['prog_pred_loss'] = total_loss
self.outputs = outputs
self.inputs = {ii: inputs[ii] for ii in used_inputs}
#------------------------------------------------------------
# setters and getters
def get_outputs(self):
return self.outputs
def get_inputs(self):
return self.inputs
#------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, prev_output=None):
feed_dict = {}
feed_dict[self.inputs['ques']] = batch['ques']
feed_dict[self.inputs['ques_len']] = batch['ques_len']
# add program
if 'prog' in self.params['model']:
feed_dict[self.inputs['prog_gt']] = batch['gt_layout']
# add history
if self.params['generator'] == 'mem':
feed_dict[self.inputs['hist']] = batch['hist']
feed_dict[self.inputs['hist_len']] = batch['hist_len']
return feed_dict
|
corefnmn-main
|
models_mnist/generator.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project which
notice below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Module definitions for Loom API.
Explicit visual coreference resolution in visual dialog using neural module
networks. Neural module definitions.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import numpy as np
import tensorflow as tf
from tensorflow import convert_to_tensor as to_T
from tensorflow_fold.public import loom
from util.cnn import fc_layer as fc, conv_layer as conv
from util.empty_safe_conv import empty_safe_1x1_conv as _1x1_conv
from util.empty_safe_conv import empty_safe_conv as _conv
def add_spatial_coord_map(image_feat_grid):
image_feat_shape = tf.shape(image_feat_grid)
N = image_feat_shape[0]
# static dimensions
#H = image_feat_shape[1]
#W = image_feat_shape[2]
H, W = image_feat_grid.shape.as_list()[1:3]
x_map = tf.tile(
tf.reshape(tf.linspace(-1., 1., W), [1, 1, -1, 1]),
to_T([N, H, 1, 1]))
y_map = tf.tile(
tf.reshape(tf.linspace(-1., 1., H), [1, -1, 1, 1]),
to_T([N, 1, W, 1]))
# stop gradient on coords_map (needed to fix the tile grad error on TF 1.0.0)
coords_map = tf.stop_gradient(tf.concat([x_map, y_map], axis=3))
image_feat_with_coords = tf.concat([image_feat_grid, coords_map], axis=3)
# set shapes of the new feature maps
image_feat_static_shape = image_feat_grid.get_shape().as_list()
image_feat_static_shape[3] += 2
image_feat_with_coords.set_shape(image_feat_static_shape)
image_feat_static_shape[3] = 2
coords_map.set_shape(image_feat_static_shape)
return image_feat_with_coords, coords_map
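# Shape note (added): for an input grid of shape [N, H, W, D] this returns a
# [N, H, W, D + 2] tensor whose last two channels are x and y coordinates
# linearly spaced in [-1, 1], along with the [N, H, W, 2] coords_map itself.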
#------------------------------------------------------------------------------
# Simple tensorflow ops as loom ops
class BinaryLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types, op):
self._op = op
super(BinaryLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
arg1, arg2 = inputs
return [self._op(arg1, arg2)]
#------------------------------------------------------------------------------
class UnaryLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types, op):
self._op = op
super(UnaryLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, arg):
return [self._op(arg[0])]
#------------------------------------------------------------------------------
# slice text attention
class SliceTextLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(SliceTextLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
text, round_id, time = inputs
round_squeeze = tf.squeeze(round_id, -1)
time_squeeze = tf.squeeze(time, -1)
# select the right round
shape = text.shape.as_list()
B = tf.shape(text)[0]
num_rounds, T, text_dim = shape[1], shape[2], shape[3]
indices = round_squeeze + num_rounds * tf.range(B)
# flatten
result = tf.gather(tf.reshape(text, [-1, T, text_dim]), indices)
# select the right time
indices = time_squeeze + T * tf.range(B)
# flatten
result = tf.gather(tf.reshape(result, [-1, text_dim]), indices)
return [result]
#------------------------------------------------------------------------------
# slice answer embeddding
class SliceAnswerLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(SliceAnswerLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
answer, round_id = inputs
round_squeeze = tf.squeeze(round_id, -1)
# select the right round
shape = answer.shape.as_list()
B = tf.shape(answer)[0]
num_rounds, text_dim = shape[1], shape[2]
indices = round_squeeze + num_rounds * tf.range(B)
result = tf.gather(tf.reshape(answer, [-1, text_dim]), indices)
return [result]
#--------------------------------------------------------------------
# attention weighting
class AttentionWeightLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(AttentionWeightLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
vis_att, scalar = inputs
# simple weighting
scalar = tf.expand_dims(tf.expand_dims(scalar, -1), -1)
att_grid = tf.multiply(vis_att, scalar)
return [att_grid]
#--------------------------------------------------------------------
# identity op to convert types
class IdentityLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(IdentityLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
return inputs
#--------------------------------------------------------------------
# normalize and complementary attention
class NormalizeExcludeLoomOp(loom.LoomOp):
def __init__(self, in_types, out_types):
super(NormalizeExcludeLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
att_grid = inputs[0]
# complement the attention
max_entry = tf.reduce_max(tf.reduce_max(att_grid, 1), 1)
max_entry = tf.expand_dims(tf.expand_dims(max_entry, 1), 1)
att_grid = att_grid / max_entry
att_grid = 1 - att_grid
# normalize
norms = tf.reduce_sum(tf.reduce_sum(att_grid, 1), 1)
norms = tf.expand_dims(tf.expand_dims(norms, 1), 1)
# cutoff
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
#-------------------------------------------------------------------
class AlignTextLoomOp(loom.LoomOp):
"""
Takes in two text attentions and computes the alignment between them
Mapping: text_param x text_param -> scalar
Input:
text_param: [N, D_txt]
text_param: [N, D_txt]
Output:
scalar: [N, 1]
Implementation:
Parameters typically contain:
map_dim = 1024
module_scope = alignTextOp
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'alignTextOp')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(AlignTextLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
"""
text_att1, text_att2, round_id1, round_id2 = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att1.shape.as_list()[-1]
map_dim = self._params['map_dim']
embed_dim = self._params['text_embed_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# concat both text attentions, along with round diff (if need be)
concat_list = [text_att1, text_att2]
# additional weight for the distance to the past
if self._params['amalgam_text_feats']:
round_diff = tf.cast(round_id1 - round_id2, tf.float32)
concat_list.append(round_diff)
concat = tf.concat(concat_list, axis=-1)
# deeper 2 layer align network
weights = tf.contrib.layers.fully_connected(concat, embed_dim)
weights = tf.contrib.layers.fully_connected(weights, 1,
activation_fn=None)
return [weights]
#--------------------------------------------------------------------
# Modules as Loom Ops
class FindLoomOp(loom.LoomOp):
"""
Mapping: image_feat_grid x text_param -> att_grid
Input:
image_feat_grid: [N, H, W, D_im]
text_param: [N, D_txt]
Output:
att_grid: [N, H, W, 1]
Implementation:
1. Elementwise multiplication between image_feat_grid and text_param
2. L2-normalization
3. Linear classification
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'find_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(FindLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature grid for the example
text attention (parameter) for the current module
"""
img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# image_feat_mapped has shape [N, H, W, map_dim]
img_map = _1x1_conv('conv_image', img_feat, output_dim=map_dim)
# nonlinearity
img_map = tf.nn.relu(img_map)
text_map = fc('fc_text', text_att, output_dim=map_dim)
# nonlinearity
text_map = tf.nn.relu(text_map)
text_map = tf.reshape(text_map, [-1, 1, 1, map_dim])
# interact via element wise map
eltwise_mult = tf.nn.l2_normalize(img_map * text_map, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
# softmax
att_grid_soft = tf.nn.softmax(tf.reshape(att_grid, [-1, H*W]))
att_grid = tf.reshape(att_grid_soft, [-1, H, W, 1])
return [att_grid]
#------------------------------------------------------------------------------
class AndLoomOp(loom.LoomOp):
"""
Mapping: att_grid x att_grid -> att_grid
Input:
input_0: [N, H, W, 1]
input_1: [N, H, W, 1]
Output:
att_grid: [N, H, W, 1]
Implementation:
Take the elementwise-min
Parameters typically contain:
map_dim = 1024
module_scope = findModule
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'and_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(AndLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
visual attention outputs
time id for current module
"""
input1, input2 = inputs
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
att_grid = tf.minimum(input1, input2)
# now L1 normalize
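# einsum 'ijkl->i' sums over H, W and the channel dim, giving each example's L1 mass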
norms = tf.einsum('ijkl->i', att_grid)
norms = tf.reshape(norms, [-1, 1, 1, 1])
#norms = tf.tile(tf.reshape(norms, [-1, 1, 1, 1]), [1, H, W, 1])
# NOTE: if norm is too low, then clip it
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
#------------------------------------------------------------------------------
class CountLoomOp(loom.LoomOp):
"""
Mapping: att_grid -> answer probs
Input:
input_0: [N, H, W, 1]
Output:
context: [N, encode_size]
Implementation:
1. linear transform of the attention map (also including max and min)
Parameters typically contain:
map_dim = 1024
module_scope = count_module
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'count_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(CountLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
"""
vis_att, img_feat, _ = inputs
encode_size = self._params['encode_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
H, W = img_feat.shape.as_list()[1:3]
att_all = tf.reshape(vis_att, to_T([-1, H * W]))
att_min = tf.reduce_min(vis_att, axis=[1, 2])
att_max = tf.reduce_max(vis_att, axis=[1, 2])
# att_concat has shape [N, H*W + 2]: the flattened map plus its min and max
att_concat = tf.concat([att_all, att_min, att_max], axis=1)
context = fc('fc_scores', att_concat, output_dim=encode_size)
return [context]
#------------------------------------------------------------------------------
class ExistLoomOp(loom.LoomOp):
'''
Mapping: att_grid -> answer probs
Input:
att_grid: [N, H, W, 1]
Output:
context: [N, encode_size]
Implementation:
1. Min, mean, and max pooling over att_grid, concatenated
2. A linear mapping layer (without ReLU)
Parameters typically contain:
map_dim = 1024
module_scope = find_module
reuse = True
scope
'''
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'exist_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(ExistLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
'''
Inputs:
image feature for the example
text attention for all modules for the example
time id for current module
'''
vis_att, _, _ = inputs
encode_size = self._params['encode_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
att_min = tf.reduce_min(vis_att, axis=[1, 2])
att_avg = tf.reduce_mean(vis_att, axis=[1, 2])
att_max = tf.reduce_max(vis_att, axis=[1, 2])
# att_reduced has shape [N, 3]
att_reduced = tf.concat([att_min, att_avg, att_max], axis=1)
context = fc('fc_scores', att_reduced, output_dim=encode_size)
return [context]
#------------------------------------------------------------------------------
class DiffLoomOp(loom.LoomOp):
'''
Mapping: att_grid x att_grid -> att_grid
Input:
input_0: [N, H, W, 1]
input_1: [N, H, W, 1]
Output:
att_grid: [N, H, W, 1]
Implementation:
Take the elementwise diff and lower caps it to zero
Parameters typically contain:
map_dim = 1024
module_scope = find_module
reuse = True
scope
'''
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'diff_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(DiffLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
'''
Inputs:
visual attention outputs
time id for current module
'''
input1, input2 = inputs
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
att_grid = tf.maximum(input1 - input2, 0.)
# now L1 normalize
norms = tf.einsum('ijkl->i', att_grid)
norms = tf.reshape(norms, [-1, 1, 1, 1])
#norms = tf.tile(tf.reshape(norms, [-1, 1, 1, 1]), [1, H, W, 1])
# NOTE: if norm is too low, then clip it
norms = tf.clip_by_value(norms, 1e-6, 1e6)
att_grid = att_grid / norms
return [att_grid]
#------------------------------------------------------------------------------
class InvalidLoomOp(loom.LoomOp):
"""
Mapping: returns a context of zeros
Output:
context: [N, encode_size] of zeros
Implementation:
Returns an all-zero context so invalid programs contribute nothing downstream
Parameters typically contain:
map_dim = 1024
module_scope = find_module
reuse = True
scope
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'invalid_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(InvalidLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
visual attention outputs
time id for current module
"""
img_feat = inputs
encode_size = self._params['encode_size']
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
N = tf.shape(img_feat)[0]
context = tf.zeros([N, encode_size], tf.float32)
return [context]
#------------------------------------------------------------------------------
class DescribeLoomOp(loom.LoomOp):
"""
Mapping: att_grid -> context vector
Input:
input_0: [N, H, W, 1]
Output:
context: [N, encode_size]
Implementation:
1. Extract visual features using the input attention map, and
linear transform to map_dim
2. linear transform language features to map_dim
3. Element-wise multiplication of the two, l2_normalize, linear transform.
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'describe_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(DescribeLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
output from the previous modules
image feature for the example
text attention for all modules for the example
time id for current module
"""
vis_att, img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
encode_size = self._params['encode_size']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
text_map = fc('fc_text', text_att, output_dim=map_dim)
# nonlinearity
text_map = tf.nn.relu(text_map)
# att_feats: attention-weighted sum of image features, shape [N, D_vis]
att_feats = tf.reduce_sum(img_feat * vis_att, axis=[1, 2])
img_map = tf.reshape(fc('fc_att', att_feats, output_dim=map_dim),
[N, map_dim])
# nonlinearity
img_map = tf.nn.relu(img_map)
eltwise_mult = tf.nn.l2_normalize(img_map * text_map, 1)
context = fc('fc_eltwise', eltwise_mult, output_dim=encode_size)
return [context]
#------------------------------------------------------------------------------
class TransformLoomOp(loom.LoomOp):
"""
Mapping: att_grid x text_param -> att_grid
Input:
input_0: [N, H, W, 1]
text_param: [N, D_txt]
Output:
att_grid: [N, H, W, 1]
Implementation:
1. Extract visual features using the input attention map, and
linear transform to map_dim
2. linear transform language features to map_dim
3. Convolve image features to map_dim
4. Element-wise multiplication of the three, l2_normalize, linear transform.
"""
def __init__(self, in_types, out_types, params):
self._params = params
self._scope = params.get('scope', 'transform_module')
self._module_scope = params['module_scope']
self._reuse = params.get('reuse', None)
super(TransformLoomOp, self).__init__(in_types, out_types)
def instantiate_batch(self, inputs):
"""
Inputs:
output from the previous modules
image feature for the example
text attention for all modules for the example
time id for current module
"""
vis_att, img_feat, text_att = inputs
# text feature dimension, intermediate mapping dimension
# batch size, image feature height and width
text_dim = text_att.shape.as_list()[-1]
map_dim = self._params['map_dim']
encode_size = self._params['encode_size']
N = tf.shape(img_feat)[0]
H, W = img_feat.shape.as_list()[1:3]
with tf.variable_scope(self._module_scope):
with tf.variable_scope(self._scope, reuse=self._reuse):
# image_feat_mapped has shape [N, H, W, map_dim]
img_map = _1x1_conv('conv_image', img_feat, output_dim=map_dim)
# nonlinearity
img_map = tf.nn.relu(img_map)
text_map = fc('fc_text', text_att, output_dim=map_dim)
text_map = tf.reshape(text_map, [-1, 1, 1, map_dim])
# nonlinearity
text_map = tf.nn.relu(text_map)
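# pool image features under the incoming attention, then project back to map_dim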
att_feats = tf.reduce_sum(img_feat * vis_att, axis=[1, 2])
att_map = tf.reshape(fc('fc_att', att_feats, output_dim=map_dim),
[N, 1, 1, map_dim])
# interact via element wise map
eltwise_mult = tf.nn.l2_normalize(img_map * text_map * att_map, 3)
att_grid = _1x1_conv('conv_eltwise', eltwise_mult, output_dim=1)
# softmax
att_grid_soft = tf.nn.softmax(tf.reshape(att_grid, [-1, H*W]))
att_grid = tf.reshape(att_grid_soft, [-1, H, W, 1])
return [att_grid]
#------------------------------------------------------------------------------
|
corefnmn-main
|
models_mnist/modules.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, with the license
notice below and in LICENSE.n2nmn in the root directory of this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Main class to execute programs using tensorflow fold loom API.
Program execution for explicit visual coreference resolution model in visual
dialog using neural module networks. Uses low-level loom API in tensorflow
fold:
https://github.com/tensorflow/fold/blob/master/tensorflow_fold/g3doc/py/loom.md
for dynamic creation and execution of computation graphs.
Author: Satwik Kottur
"""
import math
import numpy as np
import tensorflow as tf
import tensorflow_fold as td
from tensorflow_fold.public import loom
import models_mnist.modules as lm
from models_mnist.assembler import INVALID_EXPR, _module_output_type
class ProgramExecutor:
def __init__(self, inputs, output_pool, assembler, params) :
"""Initialize program execution subcomponent.
Args:
inputs: dict of input placeholders/tensors for the model
output_pool: outputs from the program generator (attended questions, etc.)
assembler: assembler that converts predicted tokens into executable programs
params: dict of model hyperparameters and flags
"""
self.params = params
# assembler dynamically assembles the graph at run time
self._assembler = assembler
#--------------------------------------------------------------------------
# A. Create loom data inputs
loom_inputs, used_inputs = self._build_loom_inputs(inputs, output_pool)
# B. Create loom data types
types = self._build_loom_types()
self._loom_types = types
# C. Create loom operations
loom_ops_dict = self._build_loom_ops()
self._loom_ops = loom_ops_dict
# create a loom object
keys = ['text', 'image', 'fact', 'time', 'round', 'text_feat']
batch_ins = {types[k]: loom_inputs[k] for k in keys if k in loom_inputs}
self._loom = loom.Loom(batch_inputs=batch_ins, named_ops=loom_ops_dict)
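# a weaver created per batch (see create_weaver) wires these named ops into a
# dynamically assembled computation graph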
# setup the inputs and outputs
self.outputs = {'context': self.get_loom_output(),
'att': self.get_loom_output(types['attention']),
'logits': self.get_loom_output(types['float'])}
# add invalid prog to used inputs
used_inputs.extend(['prog_validity'])
self.inputs = {ii: inputs[ii] for ii in used_inputs}
# time/round place holder
self.inputs['time'] = loom_inputs['time']
self.inputs['round'] = loom_inputs['round']
def create_weaver(self):
"""Creates a weaver object within the current loom object.
"""
self._weaver = self._loom.make_weaver()
return self._weaver
def get_loom_output(self, type_shape=None):
"""Return the loom output given the type and shape.
"""
# default output is the context vector
if type_shape is None:
type_shape = self._loom_types['context']
return self._loom.output_tensor(type_shape)
#---------------------------------------------------------
def _adjust_text(self, text):
"""
Takes the text attention output from the program generator and reshapes it
to [batch, num_rounds, time, dim] (batch first, split across rounds).
"""
params = self.params
# transpose text to have batch first dimension
text_mod = tf.transpose(text, [1, 0, 2])
# split across rounds
shape = text_mod.shape.as_list()
new_size = [-1, params['num_rounds'], shape[1], shape[2]]
return tf.reshape(text_mod, new_size)
def _build_image_feature_network(self, image):
"""
Takes in images and build features for the program
"""
output = image
# local aliases
BN = tf.contrib.layers.batch_norm
max_pool = tf.layers.max_pooling2d
# Four convolution layers (two with 32 filters, two with 64), each followed by
# batch norm, ReLU, and 2x2 max pooling
for ii in range(2):
# Convolutional Layer
output = tf.layers.conv2d(inputs=output, filters=32,
kernel_size=[3, 3], padding="same",
activation=None)
# if batch norm is to be used
output = BN(output, center=True, scale=True,
is_training=self.params['train_mode'])
# ReLU
output = tf.nn.relu(output, 'relu')
# Pooling Layer
output = max_pool(output, pool_size=[2, 2], strides=2)
for ii in range(2):
# Convolutional Layer
output = tf.layers.conv2d(inputs=output, filters=64,
kernel_size=[3, 3], padding="same",
activation=None)
# if batch norm is to be used
output = BN(output, center=True, scale=True,
is_training=self.params['train_mode'])
# ReLU
output = tf.nn.relu(output, 'relu')
# Pooling Layer
output = max_pool(output, pool_size=[2, 2], strides=2)
return output
def _build_fact_encoder(self, inputs):
"""
"""
# local alias
params = self.params
with tf.variable_scope(self.params['embed_scope'], reuse=True):
embed_mat = tf.get_variable('embed_mat')
# flatten
# embed the words
output = tf.nn.embedding_lookup(embed_mat, inputs['fact'])
# pass through encoder
cell = tf.contrib.rnn.BasicLSTMCell(params['text_embed_size'])
# begin decoding
for ii in range(0, params['num_layers']):
# dynamic rnn
output, states = tf.nn.dynamic_rnn(cell, output,
sequence_length=inputs['fact_len'],
dtype=tf.float32,
scope='fact_layer_%d' % ii)
# split roundwise
fact_embed = states[1]
text_dim = fact_embed.shape.as_list()[-1]
fact_embed = tf.reshape(fact_embed, [-1, params['num_rounds'], text_dim])
return fact_embed
def _build_loom_inputs(self, inputs, output_pool):
'''
Sub routine to build the inputs to loom
'''
# --------- grab required inputs -------------
loom_inputs = {}
params = self.params
# A. image
# build image feature network
image_feat = self._build_image_feature_network(inputs['image'])
loom_inputs['image'], _ = lm.add_spatial_coord_map(image_feat)
used_inputs = ['image']
# B. text -- both question and caption
key = 'ques_attended'
if params['train_mode']:
text = output_pool[key]
else:
text = inputs[key]
used_inputs.append(key)
adjusted_text = self._adjust_text(text)
loom_inputs['text'] = adjusted_text
batch_size = tf.shape(adjusted_text)[0]
# C. Facts
if params['use_fact']:
loom_inputs['fact'] = self._build_fact_encoder(inputs)
used_inputs.extend(['fact', 'fact_len'])
concat_list = [adjusted_text]
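# only the adjusted text for now; additional per-token features could be concatenated here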
loom_inputs['text_feat'] = tf.concat(concat_list, -1)
# D. time steps (internal placeholder)
loom_inputs['time'] = tf.placeholder(tf.int32, (None, 1), 'time')
loom_inputs['round'] = tf.placeholder(tf.int32, (None, 1), 'round')
return loom_inputs, used_inputs
def _build_loom_types(self):
"""Method to build loom types for given setting.
"""
params = self.params
encode_size = params['lstm_size']
# create and save loom types
types = {}
types['time'] = loom.TypeShape('int32', (1,), 'time')
types['round'] = loom.TypeShape('int32', (1,), 'round')
types['float'] = loom.TypeShape('float32', (1,))
types['context'] = loom.TypeShape('float32', (encode_size,), 'context')
types['align'] = loom.TypeShape('float32', (encode_size,), 'align')
size = (params['num_rounds'], params['text_embed_size'])
types['fact'] = loom.TypeShape('float32', size, 'fact')
size = (params['num_rounds'], params['max_dec_len'],
params['text_embed_size'])
types['text'] = loom.TypeShape('float32', size, 'text')
size = (params['text_embed_size'],)
types['text_slice'] = loom.TypeShape('float32', size, 'text_slice')
# this depends on whether we want all features
concat_dim = params['text_embed_size']
size = (params['num_rounds'], params['max_dec_len'], concat_dim)
types['text_feat'] = loom.TypeShape('float32', size, 'text_feat')
size = (concat_dim,)
types['text_feat_slice'] = loom.TypeShape('float32', size, 'text_feat_slice')
# TODO: cleaner way to include spatial dimensions for img_feat
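# the +2 channels are the x/y coordinate maps appended by add_spatial_coord_map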
size = (params['h_feat'], params['w_feat'], params['d_feat'] + 2)
types['image'] = loom.TypeShape('float32', size, 'image')
size = (params['h_feat'], params['w_feat'], 1)
types['attention'] = loom.TypeShape('float32', size, 'att')
return types
def _build_loom_ops(self):
"""TODO(satwik): Some helper text here
"""
params = self.params
types = self._loom_types
# create all modules under the same scope
op_params = {'map_dim': params['map_size']}
with tf.variable_scope('loom_modules') as module_scope:
op_params['module_scope'] = module_scope
# creating ops
loom_ops_dict = {}
in_types = [types['float'], types['float']]
out_types = [types['float']]
loom_ops_dict['add'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
loom_ops_dict['divide'] = lm.BinaryLoomOp(in_types, out_types, tf.divide)
in_types = [types['float']]
loom_ops_dict['exp'] = lm.UnaryLoomOp(in_types, out_types, tf.exp)
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['add_attention'] = lm.BinaryLoomOp(in_types, out_types, tf.add)
in_types = [types['attention'], types['attention']]
out_types = [types['attention']]
loom_ops_dict['max_attention'] = lm.BinaryLoomOp(in_types, out_types,
tf.maximum)
# basic attention manipulation ops
in_types = [types['attention'], types['float']]
out_types = [types['attention']]
loom_ops_dict['weight_attention'] = lm.AttentionWeightLoomOp(in_types,
out_types)
in_types = [types['text_feat_slice'], types['text_feat_slice'],
types['round'], types['round']]
out_types = [types['float']]
op_params['amalgam_text_feats'] = params['amalgam_text_feats']
op_params['text_embed_size'] = params['text_embed_size']
loom_ops_dict['align_text'] = lm.AlignTextLoomOp(in_types, out_types, op_params)
# slicing ops
in_types = [types['text'], types['round'], types['time']]
out_types = [types['text_slice']]
loom_ops_dict['slice_text'] = lm.SliceTextLoomOp(in_types, out_types)
in_types = [types['text_feat'], types['round'], types['time']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_text_feat'] = lm.SliceTextLoomOp(in_types, out_types)
# slice_answer_embedding
in_types = [types['fact'], types['round']]
out_types = [types['text_feat_slice']]
loom_ops_dict['slice_fact'] = lm.SliceAnswerLoomOp(in_types, out_types)
# normalize and complement
in_types = [types['attention']]
out_types = [types['attention']]
loom_ops_dict['normalize_exclude']= lm.NormalizeExcludeLoomOp(in_types,
out_types)
#------------------------------------------------------------------
# find module
in_types = [types['image'], types['text_slice']]
out_types = [types['attention']]
loom_ops_dict['find'] = lm.FindLoomOp(in_types, out_types, op_params)
# and module
in_types = [types['attention'], types['attention']]
loom_ops_dict['and_op'] = lm.AndLoomOp(in_types, out_types, op_params)
# diff module
loom_ops_dict['diff_op'] = lm.DiffLoomOp(in_types, out_types, op_params)
# transform module
in_types = [types['attention'], types['image'], types['text_slice']]
loom_ops_dict['transform'] = lm.TransformLoomOp(in_types, out_types, op_params)
# describe module
out_types = [types['context']]
op_params['encode_size'] = params['lstm_size']
loom_ops_dict['describe'] = lm.DescribeLoomOp(in_types, out_types, op_params)
# exist module
loom_ops_dict['exist'] = lm.ExistLoomOp(in_types, out_types, op_params)
# count module
loom_ops_dict['count'] = lm.CountLoomOp(in_types, out_types, op_params)
# invalid Module
in_types = [types['image']]
loom_ops_dict['invalid'] = lm.InvalidLoomOp(in_types, out_types, op_params)
return loom_ops_dict
#---------------------------------------------------------
# setters and getters
def get_outputs(self): return self.outputs
def get_inputs(self): return self.inputs
#------------------------------------------------------------
# produce feed dict
def produce_feed_dict(self, batch, output_pool=None, visualize=False):
if 'prog' not in self.params['model']: return
# dynamically assemble the graph, based on predicted tokens
if self.params['train_mode']:
ques_programs = batch['gt_layout']
else:
ques_programs = output_pool['pred_tokens']
tokens = {'ques': ques_programs}
weaver, loom_outputs, invalid_prog \
= self._assembler.assemble(tokens, self, visualize)
# build feed dict from loom
feed_dict = weaver.build_feed_dict(loom_outputs)
# feed invalid Prog
feed_dict[self.inputs['prog_validity']] = np.array(invalid_prog['ques'])
# additional feeds
feed_dict[self.inputs['image']] = batch['imgs']
max_time = self.params['max_dec_len']
feed_dict[self.inputs['time']] = np.arange(max_time).reshape([-1, 1])
round_ranges = np.arange(self.params['num_rounds']).reshape([-1, 1])
feed_dict[self.inputs['round']] = round_ranges
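# time/round placeholders are plain aranges; the loom slice ops index into them
# to pick the text or fact entry for the current module's round and time step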
# fact is needed
if self.params['use_fact']:
feed_dict[self.inputs['fact']] = batch['fact']
feed_dict[self.inputs['fact_len']] = batch['fact_len']
if not self.params['train_mode']:
# list of labels to read from output pool conditionally
labels = ['ques_attended', 'ques_enc']
for label in labels:
if label in self.inputs:
feed_dict[self.inputs[label]] = output_pool[label]
return feed_dict
#------------------------------------------------------------
# segregating the outputs
def segregrate_outputs(self, output):
'''
Go over the outputs, cap tokens and ques tokens
'''
ques_tokens = output['pred_tokens']
mod_out_type = _module_output_type
mod_dict = self._assembler.module_names
att = output['att']
weights = output['weight']
# segregated outputs
sep_att = []
sep_wts = []
wt_labels = []
num_reuse = 0
att_ind = 0
weight_ind = 0
# assume a batch size of 1
for r_id in range(self.params['num_rounds']):
#refer_seen = False
for t_id in range(self.params['max_dec_len']):
cur_module = mod_dict[ques_tokens[t_id, r_id]]
if cur_module == '<eos>':
# even answer has a weight now
if self.params['use_answer'] or self.params['use_fact']:
wt_labels.append('A%d' % r_id)
num_reuse += 1
break
if mod_out_type[cur_module] == 'att':
sep_att.append(('ques', t_id, r_id, att[att_ind]))
att_ind += 1
if cur_module == '_Refer':
refer_seen = True
st = weight_ind
end = weight_ind + num_reuse
sep_wts.append((r_id, weights[st:end], wt_labels))
weight_ind += num_reuse
'''
if self.params['reuse_refer'] and cur_module == '_Refer':
wt_labels.append('Q%d_%d' % (r_id, t_id))
num_reuse += 1
if cur_module == '_Find':
if refer_seen and self.params['remove_aux_find']: continue
wt_labels.append('Q%d_%d' % (r_id, t_id))
num_reuse += 1
'''
for arg in sep_wts: assert(abs(np.sum(arg[1]) - 1.0) < 1e-5)
assert(weight_ind == weights.shape[0])
#assert(att_ind == att.shape[0])
return sep_att, sep_wts
|
corefnmn-main
|
models_mnist/executor.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, with the license
notice below and in LICENSE.n2nmn in the root directory of this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Dataloader file for Visual Dialog experiments.
Explicit visual coreference resolution in visual dialog using neural module
networks.
Author: Satwik Kottur
"""
from __future__ import absolute_import, division, print_function
import h5py
import json
import os
import threading
import queue
import numpy as np
from tqdm import tqdm as progressbar
from util import text_processing, support
class BatchLoaderVD:
"""Subclass to DataReader that serves batches during training.
"""
# adjust for current directory
def _adjust_image_dir(self, path):
# split before data, and append with pwd
return os.path.join(os.getcwd(), 'data', path.split('data/')[-1])
def __init__(self, imdb, params):
"""Initialize by reading the data and pre-processing it.
"""
self.imdb = imdb
self.params = params
self.fetch_options = self.params.get('fetch_options', False)
self.preload_features = params['preload_features']
self.num_inst = len(self.imdb['data'])
self.num_rounds = len(self.imdb['data'][0]['question_ind'])
# check if vgg features are to be used
self.use_vgg = 'vgg' in self.params['feature_path']
# load vocabulary
vocab_path = params['text_vocab_path']
self.vocab_dict = text_processing.VocabDict(vocab_path)
self.T_encoder = params['max_enc_len']
# record special token ids
self.start_token_id = self.vocab_dict.word2idx('<start>')
self.end_token_id = self.vocab_dict.word2idx('<end>')
self.pad_token_id = self.vocab_dict.word2idx('<pad>')
# peek one example to see whether answer and gt_layout are in the data
test_data = self.imdb['data'][0]
self.load_gt_layout = test_data.get('gt_layout_tokens', False)
if 'load_gt_layout' in params:
self.load_gt_layout = params['load_gt_layout']
# decide whether or not to load gt textatt
self.supervise_attention = params['supervise_attention']
self.T_decoder = params['max_dec_len']
self.assembler = params['assembler']
# load one feature map to peek its size
feats = np.load(self._adjust_image_dir(test_data['feature_path']))
self.feat_H, self.feat_W, self.feat_D = feats.shape[1:]
# convert to tokens
self.digitizer = lambda x: [self.vocab_dict.word2idx(w) for w in x]
if 'prog' in self.params['model']:
# preload features
if self.preload_features:
img_paths = set([ii['feature_path'] for ii in self.imdb['data']])
self.img_feats = {ii:np.load(ii) for ii in progressbar(img_paths)}
# if VGG is to be used
if self.use_vgg:
# inform the dataloader to use self.img_feats
self.preload_features = True
img_paths = set([ii['feature_path'] for ii in self.imdb['data']])
# first read the index file
index_file = os.path.join(self.params['input_img'], 'img_id.json')
with open(index_file, 'r') as file_id:
index_data = json.load(file_id)
# get the split -- either train / val
# peek at one feature path to determine the split (train/val)
ii = next(iter(img_paths))
split = ii.split('/')[-2][:-4]
# read the features for that particular split
self.img_index = {img_id: index for index, img_id
in enumerate(index_data[split])}
feature_file = os.path.join(self.params['input_img'],
'data_img_%s.h5' % split)
key = 'images_test' if split == 'val' else 'images_train'
self.img_feats = h5py.File(feature_file)[key]
# check if all the images in img_paths are in img_index
count = 0
for ii in img_paths:
img_id = '/'.join(ii.split('/')[-2:])
if img_id.replace('npy', 'jpg') not in self.img_index:
count += 1
print('Missing: %d image features' % count)
# adjust the feature sizes
self.feat_H, self.feat_W, self.feat_D = self.img_feats.shape[1:]
self.zero_feature = np.zeros((1,) + self.img_feats.shape[1:])
# use history if needed by the program generator
self.use_history = self.params['generator'] == 'mem'
if self.use_history:
self._construct_history()
# if fact is to be used
if self.params['use_fact']:
self._construct_fact()
#--------------------------------------------------------------------------
def _construct_fact(self):
"""Method to construct facts.
Facts are previous question and answers strings concatenated as one. These
serve as memory units that the model can refer back to.
For example, 'Q: What is the man wearing? A: Sweater.' will have a fact
'What is the man wearing? Sweater.' so that the model can address follow-up
questions like 'What color is it?' by referring to this fact.
"""
print('Constructing facts..')
num_diags = len(self.imdb['data'])
max_len = self.T_encoder # question + answer appended
num_rounds = len(self.imdb['data'][0]['question_ind'])
fact = np.zeros((num_diags, num_rounds, max_len))
fact_len = np.zeros((num_diags, num_rounds))
fact.fill(self.pad_token_id)
for diag_id, datum in enumerate(self.imdb['data']):
for r_id in range(num_rounds - 1):
q_id = datum['question_ind'][r_id]
a_id = datum['answer_ind'][r_id]
ques, q_len = self.imdb['ques'][q_id], self.imdb['ques_len'][q_id]
ans, a_len = self.imdb['ans'][a_id], self.imdb['ans_len'][a_id]
# handle overflow
bound = min(q_len, max_len)
fact[diag_id, r_id, :bound] = ques[:bound]
if bound < max_len:
bound = min(q_len + a_len, max_len)
fact[diag_id, r_id, q_len:bound] = ans[:bound-q_len]
fact_len[diag_id, r_id] = bound
# flatten
self.imdb['fact'] = fact
self.imdb['fact_len'] = fact_len
#--------------------------------------------------------------------------
def _construct_history(self):
"""Method to construct history, which concatenates entire dialogs so far.
"""
print('Constructing history..')
num_diags = len(self.imdb['data'])
max_len = self.T_encoder * 2 # question + answer appended
num_rounds = len(self.imdb['data'][0]['question_ind'])
history = np.zeros((num_diags, num_rounds, max_len))
hist_len = np.zeros((num_diags, num_rounds))
history.fill(self.pad_token_id)
for diag_id, datum in enumerate(self.imdb['data']):
# history for first round is caption
c_id = datum['caption_ind']
cap_len = self.imdb['cap_len'][c_id]
caption = self.imdb['cap'][c_id]
# handle overflow
bound = min(cap_len, max_len)
hist_len[diag_id, 0] = bound
history[diag_id, 0, :bound] = caption[:bound]
for r_id in range(num_rounds - 1):
q_id = datum['question_ind'][r_id]
a_id = datum['answer_ind'][r_id]
ques, q_len = self.imdb['ques'][q_id], self.imdb['ques_len'][q_id]
ans, a_len = self.imdb['ans'][a_id], self.imdb['ans_len'][a_id]
# handle overflow
bound = min(q_len, max_len)
history[diag_id, r_id + 1, :bound] = ques[:bound]
if bound < max_len:
bound = min(q_len + a_len, max_len)
history[diag_id, r_id + 1, q_len:bound] = ans[:bound-q_len]
hist_len[diag_id, r_id + 1] = bound
self.imdb['hist'] = history
self.imdb['hist_len'] = hist_len
#--------------------------------------------------------------------------
def load_one_batch(self, sample_ids):
"""Load data given the sample ids.
"""
actual_batch_size = len(sample_ids)
batch = {}
# replace question _Find with _Refer
find_module_token = self.assembler.name2idx_dict['_Find']
#refer_module_token = self.assembler.name2idx_dict['_Refer']
eos_token = self.assembler.name2idx_dict['<eos>']
# whether to flatten or not
flatten = ('dial' not in self.params['model'] and
'nmn-cap' not in self.params['model'])
num_rounds = self.num_rounds
# captions
if flatten:
cap_inds = [self.imdb['data'][ii]['caption_ind'] for ii in sample_ids
for _ in range(num_rounds)]
else:
cap_inds = [self.imdb['data'][ii]['caption_ind'] for ii in sample_ids]
cap_batch = self.imdb['cap'][cap_inds][:, :self.T_encoder]
cap_len = self.imdb['cap_len'][cap_inds]
# get caption programs
cap_prog = None
cap_gt_att = None
if 'nmn-cap' in self.params['model']:
cap_prog = np.zeros((self.T_decoder, len(cap_inds)), np.int32)
cap_prog.fill(eos_token)
for spot, ii in enumerate(cap_inds):
layout = self.imdb['cap_prog'][ii]
cap_prog[:, spot] = \
self.assembler.module_list2tokens(layout, self.T_decoder)
# also get attention for supervision
if self.supervise_attention:
cap_gt_att = np.zeros((self.T_decoder, self.T_encoder, \
actual_batch_size, 1), np.float32)
for spot, ii in enumerate(cap_inds):
for t_id, att in enumerate(self.imdb['cap_prog_att'][ii]):
span = att[1] - att[0]
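# att holds (start, end) token indices; mass is spread uniformly (1/span) over the span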
# NOTE: number of attention timesteps hardwired to be <= 4
if span > 4 or span == 0: continue
cap_gt_att[t_id, att[0]:att[1], spot] = 1/span
# questions
ques_inds = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['question_ind']]
ques_batch = self.imdb['ques'][ques_inds][:, :self.T_encoder].transpose()
ques_len = self.imdb['ques_len'][ques_inds]
ques_ids = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['question_id']]
gt_index = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['gt_ind']]
# answers
ans_inds = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['answer_ind']]
ans_batch_in = self.imdb['ans_in'][ans_inds][:, :self.T_encoder]
ans_batch_out = self.imdb['ans_out'][ans_inds][:, :self.T_encoder]
ans_batch = self.imdb['ans_in'][ans_inds][:, 1:self.T_encoder]
ans_len = self.imdb['ans_len'][ans_inds]
# getting history
if self.use_history:
history = self.imdb['hist'][sample_ids]
hist_len = self.imdb['hist_len'][sample_ids]
else:
history, hist_len = None, None
# image features
if 'prog' in self.params['model']:
# single copy per conversation
image_feats = np.zeros((actual_batch_size, self.feat_H,
self.feat_W, self.feat_D), np.float32)
else:
image_feats = None
image_path = [None] * actual_batch_size
# load fact
if self.params['use_fact']:
fact = self.imdb['fact'][sample_ids]
fact_len = self.imdb['fact_len'][sample_ids]
# flatten
fact = np.reshape(fact, [-1, fact.shape[-1]])
fact_len = np.reshape(fact_len, [-1])
else:
fact, fact_len = None, None
# programs
if self.load_gt_layout:
gt_layout_batch = np.zeros((self.T_decoder,
num_rounds * actual_batch_size), np.int32)
gt_layout_batch.fill(eos_token)
gt_attention = None
if self.supervise_attention:
gt_attention = np.zeros((self.T_decoder, self.T_encoder,
num_rounds * actual_batch_size, 1), np.float32)
# mask for weights, for history attention
weight_mask = []
for n in range(len(sample_ids)):
iminfo = self.imdb['data'][sample_ids[n]]
# image features
if 'prog' in self.params['model']:
# if VGG features are to be used
if self.use_vgg:
img_id = '/'.join(iminfo['feature_path'].split('/')[-2:])
img_id = img_id.replace('npy', 'jpg')
if img_id in self.img_index:
f_ind = self.img_index[img_id]
cur_feat = self.img_feats[f_ind]
else:
cur_feat = self.zero_feature
else:
# use preloaded image features
feat_path = self._adjust_image_dir(iminfo['feature_path'])
if not self.preload_features: cur_feat = np.load(feat_path)
else: cur_feat = self.img_feats[feat_path]
# single copy per conversation
image_feats[n] = cur_feat
image_path[n] = iminfo['image_path']
# programs
if self.load_gt_layout:
# go over all the questions
for r_id, layout in enumerate(iminfo['gt_layout_tokens']):
gt_layout_batch[:, num_rounds * n + r_id] = \
self.assembler.module_list2tokens(layout, self.T_decoder)
if self.supervise_attention:
num_refers = 0
for r_id, att in enumerate(iminfo['gt_layout_att']):
for t_id in range(att.shape[0]):
index = num_rounds * n + r_id
span = att[t_id, 1] - att[t_id, 0]
# NOTE: number of attention timesteps hardwired to be <= 4
if span > 4 or span == 0: continue
gt_attention[t_id, att[t_id,0]:att[t_id,1], index] = 1/span
# if options are not needed, continue
if not self.fetch_options: continue
#------------------------------------------------------------------
# get options
opt_inds = [jj for ii in sample_ids
for jj in self.imdb['data'][ii]['option_ind']]
num_options = len(opt_inds[0])
opt_batch_in = [None] * num_options
opt_batch_out = [None] * num_options
opt_len = [None] * num_options
for ii in range(num_options):
cur_inds = [jj[ii] for jj in opt_inds]
opt_batch_in[ii] = self.imdb['ans_in'][cur_inds][:, :self.T_encoder]
opt_batch_out[ii] = self.imdb['ans_out'][cur_inds][:, :self.T_encoder]
opt_len[ii] = self.imdb['ans_len'][cur_inds]
#------------------------------------------------------------------
batch = {'ques': ques_batch, 'ques_len': ques_len,
'ques_id': ques_ids, 'gt_layout': gt_layout_batch,
'gt_att' : gt_attention,
'cap': cap_batch, 'cap_len': cap_len, 'cap_prog': cap_prog,
'cap_att': cap_gt_att,
'hist': history, 'hist_len': hist_len, 'ans_in': ans_batch_in,
'ans_out': ans_batch_out, 'ans_len':ans_len, 'ans': ans_batch,
'fact': fact, 'fact_len': fact_len,
'img_feat': image_feats, 'img_path': image_path}
#------------------------------------------------------------------
# further add options
if self.fetch_options:
options = {'opt_in': opt_batch_in, 'opt_out': opt_batch_out,\
'opt_len': opt_len, 'gt_ind': gt_index}
batch.update(options)
#------------------------------------------------------------------
if 'nmn-cap' not in self.params['model']:
return batch
# getting data for training alignment on caption
if actual_batch_size > 1:
info = [batch['cap'], batch['cap_len'],
batch['cap_prog'].transpose()]
if batch['cap_att'] is not None:
info.append(batch['cap_att'].transpose((2, 0, 1, 3)))
shuffled = support.shuffle(info, actual_batch_size)
batch['sh_cap'], batch['sh_cap_len'] = shuffled[:2]
batch['sh_cap_prog'] = shuffled[2].transpose()
batch['align_gt'] = np.ones(num_rounds*actual_batch_size).astype('int32')
if batch['cap_att'] is not None:
batch['sh_cap_att'] = shuffled[3].transpose((1, 2, 0, 3))
for ii in range(actual_batch_size):
start = num_rounds * ii + num_rounds // 2
end = num_rounds * (ii+1)
batch['align_gt'][start:end] = 0
else:
batch['sh_cap'] = np.tile(batch['cap'], [num_rounds, 1])
batch['sh_cap_len'] = np.tile(batch['cap_len'], [num_rounds])
batch['sh_cap_prog'] = np.tile(batch['cap_prog'], [1, num_rounds])
batch['sh_cap_att'] = np.tile(batch['cap_att'], [1, 1, num_rounds, 1])
batch['align_gt'] = np.ones(num_rounds*actual_batch_size).astype('int32')
return batch
class DataReader:
"""Main dataloader class for experiments on Visual Dialog.
"""
def __init__(self, params):
imdb_path = params['path']
print('Loading imdb from: %s' % imdb_path)
if imdb_path.endswith('.npy'): imdb = np.load(imdb_path)
else: raise TypeError('unknown imdb format.')
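# the imdb is stored as a 0-d object array; indexing with [()] unwraps the dict
# NOTE: newer NumPy versions (>= 1.16.3) may require allow_pickle=True in np.load above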
self.imdb = imdb[()]
self.shuffle = params.get('shuffle', True)
self.one_pass = params.get('one_pass', False)
self.prefetch_num = params.get('num_prefetch', 8)
self.params = params
copy_args = {'max_enc_len', 'max_dec_len', 'text_vocab_path', 'model',
'batch_size', 'use_fact', 'preload_features',
'supervise_attention','generator', 'feature_path'}
self.params.update({ii: params['args'][ii] for ii in copy_args
if ii in params['args'] and
params['args'][ii] is not None})
# VD data loader
self.batch_loader = BatchLoaderVD(self.imdb, self.params)
# Start prefetching thread
self.prefetch_queue = queue.Queue(maxsize=self.prefetch_num)
self.prefetch_thread = threading.Thread(target=_run_prefetch,
args=(self.prefetch_queue, self.batch_loader, self.imdb,
self.shuffle, self.one_pass, self.params))
self.prefetch_thread.daemon = True
self.prefetch_thread.start()
def batches(self):
while True:
# Get a batch from the prefetching queue
if self.prefetch_queue.empty(): pass
#print('data reader: waiting for data loading (IO is slow)...')
batch = self.prefetch_queue.get(block=True)
if batch is None:
assert(self.one_pass)
print('data reader: one pass finished')
return  # end the generator; raising StopIteration here is an error under PEP 479
yield batch
def _run_prefetch(prefetch_queue, batch_loader, imdb, shuffle,
one_pass, params):
num_samples = len(imdb['data'])
batch_size = params['batch_size']
n_sample = 0
fetch_order = np.arange(num_samples)
while True:
# Shuffle the sample order for every epoch
if n_sample == 0 and shuffle:
fetch_order = np.random.permutation(num_samples)
# Load batch from file
# note that len(sample_ids) <= batch_size, not necessarily equal
sample_ids = fetch_order[n_sample:n_sample+batch_size]
batch = batch_loader.load_one_batch(sample_ids)
prefetch_queue.put(batch, block=True)
n_sample += len(sample_ids)
if n_sample >= num_samples:
# Put in a None batch to indicate a whole pass is over
if one_pass:
prefetch_queue.put(None, block=True)
n_sample = 0
|
corefnmn-main
|
loader_vd/data_reader.py
|
"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, with the license
notice below and in LICENSE.n2nmn in the root directory of this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to read command line flags.
Uses argparse library to read command line flags.
Author: Satwik Kottur
"""
import argparse
import os
import pdb
from util import support
# read command line arguments
def read_command_line():
title = 'Train explicit coreference resolution visual dialog model'
parser = argparse.ArgumentParser(description=title)
#-------------------------------------------------------------------------
# data input settings
parser.add_argument('--dataset', default='visdial_v0.9_tiny',
help='Visdial dataset type')
parser.add_argument('--data_root', default='data/',
help='Root to the data')
parser.add_argument('--feature_path', default='data/resnet_res5c/',
help='Path to the image features')
parser.add_argument('--text_vocab_path', default='',
help='Path to the vocabulary for text')
parser.add_argument('--prog_vocab_path', default='',
help='Path to the vocabulary for programs')
parser.add_argument('--snapshot_path', default='checkpoints/',
help='Path to save checkpoints')
#--------------------------------------------------------------------------
# specify encoder/decoder
parser.add_argument('--model', default='nmn-cap-prog-only',
help='Name of the model, will be changed later')
parser.add_argument('--generator', default='ques',
help='Name of the generator to use (ques | memory)')
parser.add_argument('--decoder', default='gen',
help='Name of the decoder to use (gen | disc)')
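# NOTE: argparse's type=bool is truthy for any non-empty string; these boolean
# options effectively change only when given an empty value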
parser.add_argument('--preload_features', default=False, type=bool,
help='Preload visual features on RAM')
#-------------------------------------------------------------------------
# model hyperparameters
parser.add_argument('--h_feat', default=14, type=int,
help='Height of visual conv feature')
parser.add_argument('--w_feat', default=14, type=int,
help='Width of visual conv feature')
parser.add_argument('--d_feat', default=2048, type=int,
help='Size of visual conv feature')
parser.add_argument('--text_embed_size', default=300, type=int,
help='Size of embedding for text')
parser.add_argument('--map_size', default=1024, type=int,
help='Size of the final mapping')
parser.add_argument('--prog_embed_size', default=300, type=int,
help='Size of embedding for program tokens')
parser.add_argument('--lstm_size', default=1000, type=int,
help='Size of hidden state in LSTM')
parser.add_argument('--enc_dropout', default=True, type=bool,
help='Dropout in encoder')
parser.add_argument('--dec_dropout', default=True, type=bool,
help='Dropout in decoder')
parser.add_argument('--num_layers', default=2, type=int,
help='Number of layers in LSTM')
parser.add_argument('--max_enc_len', default=24, type=int,
help='Maximum encoding length for sentences (ques|cap)')
parser.add_argument('--max_dec_len', default=14, type=int,
help='Maximum decoding length for programs (ques|cap)')
parser.add_argument('--dec_sampling', default=False, type=bool,
help='Sample while decoding programs vs argmax')
#---------------------------------------------------------------------------
parser.add_argument('--use_refer', dest='use_refer', action='store_true',
help='Flag to use Refer for coreference resolution')
parser.set_defaults(use_refer=False)
parser.add_argument('--use_fact', dest='use_fact', action='store_true',
help='Flag to use the fact in coreference pool')
parser.set_defaults(use_fact=False)
parser.add_argument('--supervise_attention', dest='supervise_attention',
action='store_true',
help='Flag to supervise attention for the modules')
parser.set_defaults(supervise_attention=False)
parser.add_argument('--amalgam_text_feats', dest='amalgam_text_feats',
action='store_true',
help='Flag to amalgamate text features')
parser.set_defaults(amalgam_text_feats=False)
parser.add_argument('--no_cap_alignment', dest='cap_alignment',
action='store_false',
help='Use the auxiliary caption alignment loss')
parser.set_defaults(cap_alignment=True)
#-------------------------------------------------------------------------
# optimization params
parser.add_argument('--batch_size', default=20, type=int,
help='Training batch size (adjust based on GPU memory)')
parser.add_argument('--learning_rate', default=1e-3, type=float,
help='Learning rate for training')
parser.add_argument('--dropout', default=0.5, type=float, help='Dropout')
parser.add_argument('--num_epochs', default=20, type=int,
help='Maximum number of epochs to run training')
parser.add_argument('--gpu_id', type=int, default=0,
help='GPU id to use for training, -1 for CPU')
#-------------------------------------------------------------------------
try:
parsed_args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# set the cuda environment variable for the gpu to use
gpu_id = '' if parsed_args['gpu_id'] < 0 else str(parsed_args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# pretty print arguments and return
support.pretty_print_dict(parsed_args)
return parsed_args
|
corefnmn-main
|
exp_vd/options.py
|
r"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, with the license
notice below and in LICENSE.n2nmn in the root directory of this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to visualize trained Visual Dialog model using supervised learning.
Visualizes visual dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
Usage:
python -u exp_vd/visualize_sl.py --gpu_id=0 --test_split='val' \
--checkpoint='checkpoints/model_epoch_005.tmodel' --batch_size 1
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_vd import options
# Read command line options.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', required=True, help="Checkpoint to load")
parser.add_argument('--batch_size', type=int, default=10,
help='Batch size for visualization')
parser.add_argument('--test_split', default='val',
help='Split to run visualization')
parser.add_argument('--gpu_id', type=int, default=0)
parser.add_argument('--num_instances', type=int, default=50)
try:
args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# Set the cuda environment variable for the gpu to use.
gpu_id = '' if args['gpu_id'] < 0 else str(args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_vd.assembler import Assembler
from models_vd.model import CorefNMN
from loader_vd.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# read the train args from checkpoint
param_path = args['checkpoint'].replace('.tmodel', '_params.json')
with open(param_path, 'r') as file_id:
saved_args = json.load(file_id)
saved_args.update(args)
args = saved_args
args['preload_feats'] = False
args['supervise_attention'] = False
print('Current model: ' + args['model'])
support.pretty_print_dict(args)
# Data files
root = args['data_root']
imdb_path_val = os.path.join(root, 'imdb_%s.npy' % args['test_split'])
# assemblers for question and caption programs
question_assembler = Assembler(args['prog_vocab_path'])
caption_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'cap': caption_assembler}
# dataloader for val
input_dict = {'path': imdb_path_val, 'shuffle': False, 'one_pass': True,
'args': args, 'assembler': question_assembler,
'fetch_options': True}
val_loader = DataReader(input_dict)
# model params for evaluation
eval_params = args.copy()
eval_params['use_gt_prog'] = False # use predicted programs at evaluation
eval_params['enc_dropout'] = False
eval_params['dec_dropout'] = False
eval_params['dec_sampling'] = False # do not sample, take argmax
# for models trained later
if 'num_rounds' not in eval_params:
eval_params['num_rounds'] = val_loader.batch_loader.num_rounds
# model for evaluation
# create another assembler of caption
model = CorefNMN(eval_params, assemblers)
# Load snapshot
print('Loading checkpoint from: %s' % args['checkpoint'])
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
snapshot_saver.restore(sess, args['checkpoint'])
print('Evaluating on %s' % args['test_split'])
ranks = []
matches = []
cur_iter = 0
to_save = {'output': [], 'batch': []}
for batch in progressbar(val_loader.batches(), total=args['num_instances']):
_, outputs = model.run_visualize_iteration(batch, sess)
to_save['output'].append(outputs)
to_save['batch'].append(batch)
cur_iter += 1
if cur_iter >= args['num_instances']:
break
# Save the output + batch
batch_path = '{0}.{1}_batches.npy'.format(args['checkpoint'],
args['num_instances'])
support.save_batch(to_save, batch_path)
|
corefnmn-main
|
exp_vd/visualize_sl.py
|
r"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, with the license
notice below and in LICENSE.n2nmn in the root directory of this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to evaluate trained Visual Dialog model using supervised learning.
Evaluates visual dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
Usage:
python -u exp_vd/eval_sl.py --gpu_id=0 --test_split='val' \
--checkpoint='checkpoints/model_epoch_005.tmodel'
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_vd import options
# Read command line options.
parser = argparse.ArgumentParser()
parser.add_argument('--checkpoint', required=True, help="Checkpoint to load")
parser.add_argument('--test_split', default='val',
help='Split to run evaluation')
parser.add_argument('--gpu_id', type=int, default=0)
try:
args = vars(parser.parse_args())
except (IOError) as msg:
parser.error(str(msg))
# set the cuda environment variable for the gpu to use
gpu_id = '' if args['gpu_id'] < 0 else str(args['gpu_id'])
print('Using GPU id: %s' % gpu_id)
os.environ['CUDA_VISIBLE_DEVICES'] = gpu_id
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_vd.assembler import Assembler
from models_vd.model import CorefNMN
from loader_vd.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# read the train args from checkpoint
param_path = args['checkpoint'].replace('.tmodel', '_params.json')
with open(param_path, 'r') as file_id:
saved_args = json.load(file_id)
saved_args.update(args)
args = saved_args
args['preload_feats'] = False
# no supervision is needed
args['supervise_attention'] = False
# adjust for complex models
args['batch_size'] = min(args['batch_size'], 10)
support.pretty_print_dict(args)
# Data files
root = args['data_root']
imdb_path_val = os.path.join(root, 'imdb_%s.npy' % args['test_split'])
# assemblers for question and caption programs
question_assembler = Assembler(args['prog_vocab_path'])
caption_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'cap': caption_assembler}
# dataloader for val
input_dict = {'path': imdb_path_val, 'shuffle': False, 'one_pass': True,
'args': args, 'assembler': question_assembler,
'fetch_options': True}
val_loader = DataReader(input_dict)
# model params for evaluation
eval_params = args.copy()
eval_params['use_gt_prog'] = False # use predicted programs at evaluation
eval_params['enc_dropout'] = False
eval_params['dec_dropout'] = False
eval_params['dec_sampling'] = False # do not sample, take argmax
# for models trained later
if 'num_rounds' not in eval_params:
eval_params['num_rounds'] = val_loader.batch_loader.num_rounds
# model for evaluation
# create another assembler of caption
model = CorefNMN(eval_params, assemblers)
# Load snapshot
print('Loading checkpoint from: %s' % args['checkpoint'])
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
snapshot_saver.restore(sess, args['checkpoint'])
print('Evaluating on %s' % args['test_split'])
ranks = []
matches = []
total_iter = int(val_loader.batch_loader.num_inst / args['batch_size'])
num_iters = 0
# get confusion matrix only if using refer
confusion_mat = np.zeros((2, 2))
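# rows index the ground-truth module (0: _Find, 1: _Refer), columns the prediction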
if args['use_refer']:
refer_token = question_assembler.name2idx_dict['_Refer']
find_token = question_assembler.name2idx_dict['_Find']
for batch in progressbar(val_loader.batches(), total=total_iter):
batch_ranks, outputs = model.run_evaluate_iteration(batch, sess)
ranks.append(batch_ranks)
if 'matches' in outputs: matches.append(outputs['matches'])
# debug, get confusion between find/refer
if args['use_refer']:
find_gt = batch['gt_layout'] == find_token
refer_gt = batch['gt_layout'] == refer_token
find_pred = outputs['pred_tokens'] == find_token
refer_pred = outputs['pred_tokens'] == refer_token
confusion_mat[0, 0] += np.sum(find_pred & find_gt)
confusion_mat[0, 1] += np.sum(refer_pred & find_gt)
confusion_mat[1, 0] += np.sum(find_pred & refer_gt)
confusion_mat[1, 1] += np.sum(refer_pred & refer_gt)
try:
if len(matches) > 0:
matches = np.concatenate(matches)
percent = 100*np.sum(matches) / matches.size
print('Program accuracy: %f percent\n' % percent)
except:
pass
# print confusion matrix
print(confusion_mat)
# save the ranks
param_path = args['checkpoint'].replace('.tmodel', '_rankdump.npy')
np.save(param_path, np.hstack(ranks))
eval_metrics = metrics.compute_metrics(np.hstack(ranks))
|
corefnmn-main
|
exp_vd/eval_sl.py
|
r"""Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Portions of the source code are from the n2nmn project, whose license
notice appears below and in LICENSE.n2nmn in the root directory of
this source tree.
Copyright (c) 2017, Ronghang Hu
All rights reserved.
Script to train Visual Dialog model using supervised learning.
Trains visual dialog model that performs explicit visual coreference resolution
using neural module networks. Additional details are in the paper:
Visual Coreference Resolution in Visual Dialog using Neural Module Networks
Satwik Kottur, José M. F. Moura, Devi Parikh, Dhruv Batra, Marcus Rohrbach
European Conference on Computer Vision (ECCV), 2018
Usage (check scripts/run_train.sh):
python -u exp_vd/train_sl.py --gpu_id=0 --dataset='visdial_v0.9' \
--data_root='data/' --model='nmn-cap-prog-only' --batch_size=5 \
--use_refer --use_fact --generator='mem' --feature_path='data/' \
--learning_rate=0.0001 --amalgam_text_feats \
--decoder='disc' --lstm_size 512
"""
from __future__ import absolute_import, division, print_function
import argparse
import json
import os
import sys
import time
from tqdm import tqdm as progressbar
import numpy as np
import tensorflow as tf
from exp_vd import options
# read command line options
args = options.read_command_line()
# Start the session BEFORE importing tensorflow_fold
# to avoid taking up all GPU memory
tf_config = tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True),
allow_soft_placement=False,
log_device_placement=False)
sess = tf.Session(config=tf_config)
from models_vd.assembler import Assembler
from models_vd.model import CorefNMN
from loader_vd.data_reader import DataReader
from util import metrics
from util import support
# setting random seeds
np.random.seed(1234)
tf.set_random_seed(1234)
# Data files
glove_mat_path = args['data_root'] + 'vocabulary_vd_glove.npy'
args['data_root'] = os.path.join(args['data_root'], args['dataset'])
args['text_vocab_path'] = os.path.join(args['data_root'], 'vocabulary_vd.txt')
root = args['data_root']
if args['use_refer']:
# use refer module
args['prog_vocab_path'] = os.path.join(root, 'vocabulary_layout_5.txt')
else:
# no explicit refer module
args['prog_vocab_path'] = os.path.join(root, 'vocabulary_layout_4.txt')
imdb_path_train = os.path.join(root, 'imdb_train.npy')
# assemblers for question and caption programs
question_assembler = Assembler(args['prog_vocab_path'])
caption_assembler = Assembler(args['prog_vocab_path'])
assemblers = {'ques': question_assembler, 'cap': caption_assembler}
# Dataloader for train
input_dict = {'path': imdb_path_train, 'shuffle': True, 'one_pass': False,
'assembler': question_assembler, 'use_count': False,
'args': args}
if args['decoder'] == 'disc':
input_dict['fetch_options'] = True
train_loader = DataReader(input_dict)
# model params for training
train_params = args.copy()
# use the ground truth program for training
train_params['use_gt_prog'] = True
train_params['text_vocab_size'] = train_loader.batch_loader.vocab_dict.num_vocab
train_params['prog_vocab_size'] = len(question_assembler.module_names)
train_params['pad_id'] = train_loader.batch_loader.vocab_dict.word2idx('<pad>')
train_params['num_rounds'] = train_loader.batch_loader.num_rounds
print('Using a vocab size: %d' % train_params['text_vocab_size'])
# model for training
model = CorefNMN(train_params, assemblers)
model.setup_training()
# train with Adam, optimization ops
solver = tf.train.AdamOptimizer(learning_rate=train_params['learning_rate'])
gradients = solver.compute_gradients(model.get_total_loss())
# clip gradients based on value
gradients = [(tf.clip_by_value(g, -2.0, 2.0), v) if g is not None else (g, v)
for g, v in gradients]
solver_op = solver.apply_gradients(gradients)
# add it to the output
model.add_solver_op(solver_op)
# adjust snapshot to have a time stamp folder
cur_time = time.strftime('%a-%d%b%y-%X', time.gmtime())
args['snapshot_path'] = os.path.join(args['snapshot_path'], cur_time)
os.makedirs(args['snapshot_path'], exist_ok=True)
snapshot_saver = tf.train.Saver(max_to_keep=None) # keep all snapshots
print('Saving checkpoints at: %s' % args['snapshot_path'])
# initialize all variables
sess.run(tf.global_variables_initializer())
# load glove vectors for embedding
glove_mat_path = os.path.join(args['data_root'], 'vocabulary_vd_glove.npy')
glove_mat = np.load(glove_mat_path)
with tf.variable_scope(train_params['embed_scope'], reuse=True):
embed_mat = tf.get_variable('embed_mat')
sess.run(tf.assign(embed_mat, glove_mat))
#------------------------------------------------------------------------------
# forget about embed and module scopes
del train_params['embed_scope']
if 'module_scope' in train_params:
del train_params['module_scope']
#-------------------------------------------------------------------------
print('Running training iteration..')
num_iter_per_epoch = int(train_loader.batch_loader.num_inst/args['batch_size'])
print('Number of iterations per epoch: %d' % num_iter_per_epoch)
# exponential smoothing for loss
smoother = metrics.ExponentialSmoothing()
for n_iter, batch in enumerate(train_loader.batches()):
# add epoch and iteration
epoch = float(n_iter) / num_iter_per_epoch
batch['epoch'] = epoch
batch['n_iter'] = n_iter
if n_iter >= args['num_epochs'] * num_iter_per_epoch:
break
# perform training iteration
losses, _ = model.run_train_iteration(batch, sess)
losses = smoother.report(losses)
# printing log
if n_iter % 10 == 0:
cur_time = time.strftime('%a %d%b%y %X', time.gmtime())
print_format = ('[%s][It: %d][Ep: %.2f][Loss: %.3f ' +
'Prog: %.3f Ans: %.3f Align: %.3f]')
print_info = (cur_time, n_iter, epoch, losses['total'], losses['prog'],
losses['ans'], losses['align'])
print(print_format % print_info)
# save snapshot after every epoch
if n_iter % num_iter_per_epoch == 0:
epoch = float(n_iter) / num_iter_per_epoch
# Save snapshot at every epoch
file_name = 'model_epoch_%03d.tmodel' % epoch
snapshot_path = os.path.join(args['snapshot_path'], file_name)
snapshot_saver.save(sess, snapshot_path, write_meta_graph=False)
# also save the arguments
params_path = snapshot_path.replace('.tmodel', '_params.json')
with open(params_path, 'w') as file_id:
json.dump(train_params, file_id)
print('Snapshot saved to: ' + snapshot_path)
#-------------------------------------------------------------------------
|
corefnmn-main
|
exp_vd/train_sl.py
|
# -*- coding: utf-8 -*-
"""HyenaDNA training & inference example (Public)
This code is adapted from the original Colab tutorial on HyenaDNA; check that out for an easier entry point into the code.
We provide the code here as an example for those who want something outside Colab, with Hugging Face integration.
Original file is located at
https://colab.research.google.com/drive/1wyVEQd4R3HYLTUOXEEQmp_I8aNC_aLhL
"""
#@title Imports
# for HyenaDNA specifically
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial
from einops import rearrange
from typing import Optional
from torch import Tensor
from torchvision.ops import StochasticDepth
from collections import namedtuple
import numpy as np
import os
import json
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Union
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
"""# HyenaDNA
"""
#@title Hyena layer
def fftconv(u, k, D):
"""
    We apply the convolution in the Fourier domain (via the convolution theorem); a small reference check follows the function.
"""
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
if len(u.shape) > 3: k_f = k_f.unsqueeze(1)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
return out.to(dtype=u.dtype)
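# A minimal reference check (not part of the original notebook) comparing fftconv
# against an explicit time-domain causal convolution; the helper name below is
# illustrative only and the function is not called anywhere.
def _fftconv_reference_check():
    torch.manual_seed(0)
    b, d, l = 2, 4, 8                       # batch, channels, sequence length
    u = torch.randn(b, d, l)
    k = torch.randn(d, l)
    D = torch.randn(d)
    # direct causal convolution: y[t] = sum_{s <= t} k[s] * u[t - s] + D * u[t]
    y_ref = torch.zeros_like(u)
    for t in range(l):
        for s in range(t + 1):
            y_ref[..., t] += k[:, s] * u[..., t - s]
    y_ref = y_ref + u * D.unsqueeze(-1)
    y_fft = fftconv(u, k, D)
    print('max abs difference:', (y_ref - y_fft).abs().max().item())  # ~1e-6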
@torch.jit.script
def mul_sum(q, y):
return (q * y).sum(dim=1)
class OptimModule(nn.Module):
""" Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters """
def register(self, name, tensor, lr=None, wd=0.0):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {}
if lr is not None: optim["lr"] = lr
if wd is not None: optim["weight_decay"] = wd
setattr(getattr(self, name), "_optim", optim)
class Sin(nn.Module):
"""The Sin activation function for the Hyena Filter function."""
def __init__(self, dim, w=10, train_freq=True):
super().__init__()
self.freq = nn.Parameter(w * torch.ones(1, dim)) if train_freq else w * torch.ones(1, dim)
def forward(self, x):
return torch.sin(self.freq * x)
class PositionalEmbedding(OptimModule):
def __init__(self, emb_dim: int, seq_len: int, lr_pos_emb: float=1e-5, **kwargs):
"""Complex exponential positional embeddings for Hyena filters."""
super().__init__()
self.seq_len = seq_len
        # The time embedding fed to the filters is normalized so that t_f = 1
t = torch.linspace(0, 1, self.seq_len)[None, :, None] # 1, L, 1
if emb_dim > 1:
bands = (emb_dim - 1) // 2
# To compute the right embeddings we use the "proper" linspace
t_rescaled = torch.linspace(0, seq_len - 1, seq_len)[None, :, None]
w = 2 * math.pi * t_rescaled / seq_len # 1, L, 1
f = torch.linspace(1e-4, bands - 1, bands)[None, None]
z = torch.exp(-1j * f * w)
z = torch.cat([t, z.real, z.imag], dim=-1)
self.register("z", z, lr=lr_pos_emb)
self.register("t", t, lr=0.0)
def forward(self, L):
return self.z[:, :L], self.t[:, :L]
class ExponentialModulation(OptimModule):
"""The window function applied to the output of the (MLP) filter function."""
def __init__(
self,
d_model,
fast_decay_pct=0.3,
slow_decay_pct=1.5,
target=1e-2,
modulation_lr=0.0,
modulate: bool=True,
shift: float = 0.05,
**kwargs
):
super().__init__()
self.modulate = modulate
self.shift = shift
max_decay = math.log(target) / fast_decay_pct
min_decay = math.log(target) / slow_decay_pct
deltas = torch.linspace(min_decay, max_decay, d_model)[None, None]
self.register("deltas", deltas, lr=modulation_lr)
def forward(self, t, x):
if self.modulate:
decay = torch.exp(-t * self.deltas.abs())
x = x * (decay + self.shift)
return x
class HyenaFilter(OptimModule):
def __init__(
self,
d_model,
emb_dim=3, # dim of input to MLP, augments with positional encoding
order=16, # width of the implicit MLP
fused_fft_conv=False,
seq_len=1024,
lr=1e-3,
lr_pos_emb=1e-5,
dropout=0.0,
w=1, # frequency of periodic activations
wd=0, # weight decay of kernel parameters
bias=True,
num_inner_mlps=2,
normalized=False,
**kwargs
):
"""
Implicit long filter with modulation.
Args:
d_model: number of channels in the input
emb_dim: dimension of the positional encoding (`emb_dim` - 1) // 2 is the number of bands
order: width of the FFN
num_inner_mlps: number of inner linear layers inside filter MLP
Note:
filter_dropout is not implemented
"""
super().__init__()
self.d_model = d_model
self.use_bias = bias
self.fused_fft_conv = fused_fft_conv
self.bias = nn.Parameter(torch.randn(self.d_model))
self.dropout = nn.Dropout(dropout)
act = Sin(dim=order, w=w)
self.emb_dim = emb_dim
        assert emb_dim % 2 != 0 and emb_dim >= 3, "emb_dim must be odd and greater than or equal to 3 (time, sine and cosine)"
self.seq_len = seq_len
self.pos_emb = PositionalEmbedding(emb_dim, seq_len, lr_pos_emb)
self.implicit_filter = nn.Sequential(
nn.Linear(emb_dim, order),
act,
)
for i in range(num_inner_mlps):
self.implicit_filter.append(nn.Linear(order, order))
self.implicit_filter.append(act)
self.implicit_filter.append(nn.Linear(order, d_model, bias=False))
self.modulation = ExponentialModulation(d_model, **kwargs)
self.normalized = normalized
for c in self.implicit_filter.children():
for name, v in c.state_dict().items():
optim = {"weight_decay": wd, "lr": lr}
setattr(getattr(c, name), "_optim", optim)
def filter(self, L, *args, **kwargs):
z, t = self.pos_emb(L)
h = self.implicit_filter(z)
h = self.modulation(t, h)
return h
def forward(self, x, L, k=None, bias=None, *args, **kwargs):
if k is None: k = self.filter(L)
# Ensure compatibility with filters that return a tuple
k = k[0] if type(k) is tuple else k
y = fftconv(x, k, bias)
return y
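# A small shape sketch (illustrative, not from the original notebook): the implicit
# MLP filter produces one length-L kernel per channel.
def _hyena_filter_shape_example():
    filt = HyenaFilter(d_model=8, emb_dim=3, order=16, seq_len=64)
    k = filt.filter(64)     # evaluate the implicit MLP on the positional features
    print(k.shape)          # -> torch.Size([1, 64, 8]): a 64-long kernel for each of the 8 channels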
class HyenaOperator(nn.Module):
def __init__(
self,
d_model,
l_max,
order=2,
filter_order=64,
dropout=0.0,
filter_dropout=0.0,
**filter_args,
):
r"""
Hyena operator described in the paper https://arxiv.org/pdf/2302.10866.pdf
Args:
d_model (int): Dimension of the input and output embeddings (width of the layer)
l_max: (int): Maximum input sequence length. Defaults to None
order: (int): Depth of the Hyena recurrence. Defaults to 2
dropout: (float): Dropout probability. Defaults to 0.0
filter_dropout: (float): Dropout probability for the filter. Defaults to 0.0
"""
super().__init__()
self.d_model = d_model
self.l_max = l_max
self.order = order
inner_width = d_model * (order + 1)
self.dropout = nn.Dropout(dropout)
self.in_proj = nn.Linear(d_model, inner_width)
self.out_proj = nn.Linear(d_model, d_model)
self.short_filter = nn.Conv1d(
inner_width,
inner_width,
3,
padding=2,
groups=inner_width
)
self.filter_fn = HyenaFilter(
d_model * (order - 1),
order=filter_order,
seq_len=l_max,
channels=1,
dropout=filter_dropout,
**filter_args
)
def forward(self, u, *args, **kwargs):
l = u.size(-2)
l_filter = min(l, self.l_max)
u = self.in_proj(u)
u = rearrange(u, 'b l d -> b d l')
uc = self.short_filter(u)[...,:l_filter]
*x, v = uc.split(self.d_model, dim=1)
k = self.filter_fn.filter(l_filter)[0]
k = rearrange(k, 'l (o d) -> o d l', o=self.order - 1)
bias = rearrange(self.filter_fn.bias, '(o d) -> o d', o=self.order - 1)
for o, x_i in enumerate(reversed(x[1:])):
v = self.dropout(v * x_i)
v = self.filter_fn(v, l_filter, k=k[o], bias=bias[o])
y = rearrange(v * x[0], 'b d l -> b l d')
y = self.out_proj(y)
return y
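# Minimal usage sketch (illustrative): like an attention mixer, the operator maps
# (batch, length, d_model) -> (batch, length, d_model).
def _hyena_operator_example():
    layer = HyenaOperator(d_model=32, l_max=128, order=2, filter_order=16)
    u = torch.randn(1, 128, 32)     # (batch, seq_len, width)
    y = layer(u)
    print(y.shape)                  # -> torch.Size([1, 128, 32])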
#@title Self-Attention (alternative)
"""
If you'd like to try the HyenaDNA model using attention instead, you can, i.e.,
use a regular decoder-only Transformer.
"""
class SelfAttention(nn.Module):
"""Implement the scaled dot product attention with softmax.
Arguments
---------
softmax_scale: The temperature to use for the softmax attention.
(default: 1/sqrt(d_keys) where d_keys is computed at
runtime)
attention_dropout: The dropout rate to apply to the attention
(default: 0.0)
"""
def __init__(self, causal=False, softmax_scale=None, attention_dropout=0.0):
super().__init__()
self.causal = causal
self.softmax_scale = softmax_scale
self.dropout_p = attention_dropout
def forward(self, qkv, causal=None, key_padding_mask=None):
"""Implements the multihead softmax attention.
Arguments
---------
qkv: The tensor containing the query, key, and value. (B, S, 3, H, D)
causal: if passed, will override self.causal
key_padding_mask: boolean mask to apply to the attention weights. True means to keep,
False means to mask out. (B, S)
"""
batch_size, seqlen = qkv.shape[0], qkv.shape[1]
causal = self.causal if causal is None else causal
q, k, v = qkv.unbind(dim=2)
softmax_scale = self.softmax_scale or 1.0 / math.sqrt(q.shape[-1])
scores = torch.einsum('bthd,bshd->bhts', q, k * softmax_scale)
if key_padding_mask is not None:
padding_mask = torch.full((batch_size, seqlen), -10000.0, dtype=scores.dtype,
device=scores.device)
padding_mask.masked_fill_(key_padding_mask, 0.0)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + rearrange(padding_mask, 'b s -> b 1 1 s')
if causal:
# "triu_tril_cuda_template" not implemented for 'BFloat16'
# So we have to construct the mask in float
causal_mask = torch.triu(torch.full((seqlen, seqlen), -10000.0, device=scores.device), 1)
# TD [2022-09-30]: Adding is faster than masked_fill_ (idk why, just better kernel I guess)
scores = scores + causal_mask.to(dtype=scores.dtype)
attention = torch.softmax(scores, dim=-1, dtype=v.dtype)
attention_drop = F.dropout(attention, self.dropout_p if self.training else 0.0)
output = torch.einsum('bhts,bshd->bthd', attention_drop, v)
return output
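# Illustrative sketch (not from the original notebook) of the packed qkv layout this
# module expects: (batch, seqlen, 3, heads, head_dim).
def _self_attention_example():
    attn = SelfAttention(causal=True)
    qkv = torch.randn(2, 16, 3, 4, 8)   # batch=2, seqlen=16, heads=4, head_dim=8
    out = attn(qkv)
    print(out.shape)                    # -> torch.Size([2, 16, 4, 8])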
class MHA(nn.Module):
"""Multi-head self-attention and cross-attention
"""
def __init__(self, embed_dim, num_heads, bias=True, dropout=0.0,
softmax_scale=None, causal=False, layer_idx=None, dwconv=False,return_residual=False,device=None, dtype=None) -> None:
"""
return_residual: whether to return the input x along with the output. This is for
performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.embed_dim = embed_dim
self.causal = causal
self.layer_idx = layer_idx
self.dwconv = dwconv
self.return_residual = return_residual
self.num_heads = num_heads
assert self.embed_dim % num_heads == 0, "self.kdim must be divisible by num_heads"
self.head_dim = self.embed_dim // num_heads
linear_cls = nn.Linear
linear_resid_cls = LinearResidual
inner_attn_cls = SelfAttention
if not self.return_residual:
self.Wqkv = linear_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
else:
self.Wqkv = linear_resid_cls(embed_dim, 3 * embed_dim, bias=bias, **factory_kwargs)
if self.dwconv:
self.dwconv_qkv = nn.Conv1d(3 * embed_dim, 3 * embed_dim, kernel_size=3, padding=2,
groups=3 * embed_dim)
self.inner_attn = inner_attn_cls(causal=causal, softmax_scale=softmax_scale,
attention_dropout=dropout)
# output projection always have the bias (for now)
self.out_proj = linear_cls(embed_dim, embed_dim, **factory_kwargs)
def forward(self, x, key_padding_mask=None, **kwargs):
"""
Arguments:
            x: (batch, seqlen, hidden_dim) (where hidden_dim = num heads * head dim) if
                cu_seqlens is None and max_seqlen is None, else (total, hidden_dim) where total
                is the sum of the sequence lengths in the batch.
cu_seqlens: (batch_size + 1,), dtype torch.int32. The cumulative sequence lengths
of the sequences in the batch, used to index into x. Only applicable when using
FlashAttention.
max_seqlen: int. Maximum sequence length in the batch.
key_padding_mask: boolean mask, True means to keep, False means to mask out.
(batch, seqlen). Only applicable when not using FlashAttention.
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
inference_params: for generation. Adapted from Megatron-LM (and Apex)
https://github.com/NVIDIA/apex/blob/3ff1a10f72ec07067c4e44759442329804ac5162/apex/transformer/testing/standalone_transformer_lm.py#L470
"""
kwargs = ({'key_padding_mask': key_padding_mask, **kwargs})
if not self.return_residual:
qkv = self.Wqkv(x)
else:
qkv, x = self.Wqkv(x)
if self.dwconv:
qkv = rearrange(self.dwconv_qkv(rearrange(qkv, 'b s d -> b d s'))[..., :-2],
'b d s -> b s d').contiguous()
qkv = rearrange(qkv, '... (three h d) -> ... three h d', three=3, d=self.head_dim)
context = self.inner_attn(qkv, **kwargs)
out = self.out_proj(rearrange(context, '... h d -> ... (h d)'))
return out if not self.return_residual else (out, x)
#@title MLP layer
"""
The MLP layer after the mixer layer (HyenaOperator).
"""
class Mlp(nn.Module):
def __init__(self, in_features, hidden_features=None, out_features=None, activation=F.gelu,
return_residual=False, device=None, dtype=None):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/mlp.py
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
self.return_residual = return_residual
self.fc1 = nn.Linear(in_features, hidden_features, **factory_kwargs)
self.activation = activation
self.fc2 = nn.Linear(hidden_features, out_features, **factory_kwargs)
def forward(self, x):
y = self.fc1(x)
y = self.activation(y)
y = self.fc2(y)
return y if not self.return_residual else (y, x)
#@title Block layer (Hyena + MLP layers)
"""
A block consists of a mixer layer (Hyena or attention) and an MLP layer.
"""
class LinearResidual(nn.Linear):
"""Wrap nn.Linear to return the residual as well. For compatibility with FusedDense.
"""
def forward(self, input: torch.Tensor) -> torch.Tensor:
return super().forward(input), input
class Block(nn.Module):
def __init__(self, dim, mixer_cls=None, mlp_cls=None, norm_cls=nn.LayerNorm,
dropout_cls=nn.Dropout, prenorm=True, resid_dropout1=0., resid_dropout2=0.,
drop_path1=0., drop_path2=0.,
return_residual=False,
residual_in_fp32=False):
"""
From https://github.com/HazyResearch/flash-attention/blob/main/flash_attn/modules/block.py
For prenorm=True, this Block has a slightly different structure compared to a regular
prenorm Transformer block.
The standard block is: LN -> MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add.
[Ref: https://arxiv.org/abs/2002.04745]
Here we have: Dropout -> Add -> LN -> MHA -> Dropout -> Add -> LN -> MLP, returning both
the hidden_states (output of the MLP) and the residual.
This is for performance reasons, as we can fuse the dropout, add and LayerNorm.
The residual needs to be provided (except for the very first block).
For prenorm=False, this Block has the same structure as a regular postnorm Transformer
block: MHA -> Dropout -> Add -> LN -> MLP -> Dropout -> Add -> LN.
return_residual: whether each of the sub-layers (mixer and mlp) will return the residual.
This is for performance reason: for post-norm architecture, returning the input allows us
to fuse the backward of nn.Linear with the residual connection.
"""
super().__init__()
self.prenorm = prenorm
self.return_residual = return_residual
self.residual_in_fp32 = residual_in_fp32
if self.residual_in_fp32:
assert self.prenorm, 'residual_in_fp32 is only compatible with prenorm=True'
if mixer_cls is None:
mixer_cls = partial(MHA, num_heads=dim // 64)
if mlp_cls is None:
mlp_cls = partial(Mlp, hidden_features=4 * dim)
self.mixer = mixer_cls()
self.dropout1 = dropout_cls(resid_dropout1)
self.drop_path1 = StochasticDepth(drop_path1, mode='row')
self.norm1 = norm_cls(dim)
self.mlp = mlp_cls(dim)
if not isinstance(self.mlp, nn.Identity):
self.dropout2 = dropout_cls(resid_dropout2)
self.drop_path2 = StochasticDepth(drop_path2, mode='row')
self.norm2 = norm_cls(dim)
def forward(self, hidden_states, residual = None,
mixer_subset=None, mixer_kwargs=None):
r"""Pass the input through the encoder layer.
Args:
hidden_states: the sequence to the encoder layer (required).
residual: if postnorm, residual=None, If prenorm, hidden_states = Attn/MLP(LN(residual))
mixer_subset: for cross-attention only. If not None, will take a subset of x
before applying the query projection. Useful for e.g., ViT where we only care
about the CLS token in the last layer.
"""
if self.prenorm:
dropped = self.drop_path1(self.dropout1(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm1(residual.to(dtype=self.norm1.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
if mixer_kwargs is None:
mixer_kwargs = {}
if mixer_subset is not None:
mixer_kwargs['mixer_subset'] = mixer_subset
hidden_states = self.mixer(hidden_states, **mixer_kwargs)
if mixer_subset is not None:
residual = residual[:, mixer_subset]
if not isinstance(self.mlp, nn.Identity):
dropped = self.drop_path2(self.dropout2(hidden_states))
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.norm2(residual.to(dtype=self.norm2.weight.dtype))
if self.residual_in_fp32:
residual = residual.to(torch.float32)
hidden_states = self.mlp(hidden_states)
return hidden_states, residual
else:
assert residual is None
mixer_out = self.mixer(
hidden_states, **(mixer_kwargs if mixer_kwargs is not None else {})
)
if self.return_residual: # mixer out is actually a pair here
mixer_out, hidden_states = mixer_out
hidden_states = self.norm1((self.drop_path1(self.dropout1(mixer_out))
+ hidden_states).to(dtype=self.norm1.weight.dtype))
if not isinstance(self.mlp, nn.Identity):
mlp_out = self.mlp(hidden_states)
if self.return_residual: # mlp out is actually a pair here
mlp_out, hidden_states = mlp_out
hidden_states = self.norm2((self.drop_path2(self.dropout2(mlp_out))
+ hidden_states).to(dtype=self.norm2.weight.dtype))
return hidden_states
def create_mixer_cls(layer=None,
attn_layer_idx=None, attn_cfg=None, layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
if attn_layer_idx is not None and layer_idx in attn_layer_idx:
causal = True if attn_cfg is None else attn_cfg.pop('causal', True)
mha_cls = MHA
mixer_cls = partial(mha_cls, causal=causal, layer_idx=layer_idx,
**(attn_cfg if attn_cfg is not None else {}),**factory_kwargs)
else:
# mixer_cls = instantiate(registry.layer, layer, partial=True, layer_idx=layer_idx, **factory_kwargs)
mixer_cls = partial(HyenaOperator, **layer)
return mixer_cls
def create_mlp_cls(d_model, d_inner=None, device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
inner_dim = d_inner if d_inner is not None else 4 * d_model
mlp_cls = partial(Mlp, hidden_features=inner_dim,
activation=partial(F.gelu, approximate='tanh'), **factory_kwargs)
return mlp_cls
def create_block(d_model, d_inner=None,
layer=None, attn_layer_idx=None,
attn_cfg=None, layer_norm_epsilon=1e-5,
resid_dropout1=0.0, resid_dropout2=0.0, residual_in_fp32=False,
layer_idx=None,
device=None, dtype=None):
factory_kwargs = {'device': device, 'dtype': dtype}
mixer_cls = create_mixer_cls(layer=layer,
attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_idx=layer_idx,
**factory_kwargs)
mlp_cls = create_mlp_cls(d_model, d_inner=d_inner,
**factory_kwargs)
norm_cls = partial(nn.LayerNorm, eps=layer_norm_epsilon, **factory_kwargs)
block = Block(d_model, mixer_cls, mlp_cls, norm_cls=norm_cls,
prenorm=True, resid_dropout1=resid_dropout1, resid_dropout2=resid_dropout2,residual_in_fp32=residual_in_fp32)
block.layer_idx = layer_idx
return block
# https://github.com/huggingface/transformers/blob/c28d04e9e252a1a099944e325685f14d242ecdcd/src/transformers/models/gpt2/modeling_gpt2.py#L454
def _init_weights(module, n_layer, initializer_range=0.02, rescale_prenorm_residual=True,
glu_act=False):
if isinstance(module, nn.Linear):
nn.init.normal_(module.weight, std=initializer_range)
if module.bias is not None:
nn.init.zeros_(module.bias)
elif isinstance(module, nn.Embedding):
nn.init.normal_(module.weight, std=initializer_range)
if rescale_prenorm_residual:
# Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
# > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
# > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
# > -- GPT-2 :: https://openai.com/blog/better-language-models/
#
# Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
for name, p in module.named_parameters():
if name in ["out_proj.weight", "fc2.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
# If using GLU activation for now, we scale the std by 2
elif name in ["output_linear.0.weight"]:
# Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
if not glu_act:
nn.init.normal_(p, mean=0.0, std=initializer_range / math.sqrt(2 * n_layer))
else:
out_features = p.shape[0]
# Multiplying the first half of the matrix by 2 since sigmoid scales it down by 0.5
# on average.
nn.init.normal_(p[:out_features // 2], mean=0.0, std=initializer_range / math.sqrt(2 * n_layer) * 2)
#@title Backbone model (stack of blocks)
"""
A backbone model consists of a stack of blocks. If you use attention, then
positional embeddings are included. When using Hyena, the positional embeddings
are effectively disabled (max_position_embeddings is 0); a small sketch follows
the GPT2Embeddings class below.
"""
class GPT2Embeddings(nn.Module):
def __init__(self, embed_dim, vocab_size, max_position_embeddings, padding_idx=None,
word_embed_proj_dim=None, device=None, dtype=None):
"""
        If max_position_embeddings <= 0, there are no position embeddings.
        If word_embed_proj_dim is not None (e.g., OPT-350m), we embed to that dimension
        and then project up to embed_dim.
"""
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if word_embed_proj_dim is None:
self.word_embeddings = nn.Embedding(vocab_size, embed_dim, padding_idx=padding_idx,
**factory_kwargs)
self.project_in = None
else:
self.word_embeddings = nn.Embedding(vocab_size, word_embed_proj_dim,
padding_idx=padding_idx, **factory_kwargs)
self.project_in = nn.Linear(word_embed_proj_dim, embed_dim, bias=False,
**factory_kwargs)
self.max_position_embeddings = max_position_embeddings
if self.max_position_embeddings > 0:
self.position_embeddings = nn.Embedding(max_position_embeddings, embed_dim,
**factory_kwargs)
def forward(self, input_ids, position_ids=None):
"""
input_ids: (batch, seqlen)
position_ids: (batch, seqlen)
"""
batch_size, seqlen = input_ids.shape
embeddings = self.word_embeddings(input_ids)
if self.project_in is not None:
embeddings = self.project_in(embeddings)
if self.max_position_embeddings > 0:
if position_ids is None:
position_ids = torch.arange(seqlen, dtype=torch.long, device=input_ids.device)
position_embeddings = self.position_embeddings(position_ids)
embeddings = embeddings + position_embeddings
return embeddings
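# Small sketch (illustrative): with max_position_embeddings=0, as used for Hyena,
# GPT2Embeddings reduces to a plain token-embedding lookup.
def _gpt2_embeddings_example():
    emb = GPT2Embeddings(embed_dim=16, vocab_size=12, max_position_embeddings=0)
    ids = torch.randint(0, 12, (2, 10))
    print(emb(ids).shape)   # -> torch.Size([2, 10, 16]); no position embeddings are added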
class LMBackbone(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
process_group=None, layer=None,
attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
self.process_group = process_group
self.residual_in_fp32 = residual_in_fp32
# note max_position_embeddings is 0 for Hyena, and therefore isn't used
self.embeddings = GPT2Embeddings(d_model, vocab_size, max_position_embeddings,
**factory_kwargs)
self.layers = nn.ModuleList([create_block(
d_model, d_inner=d_inner,
layer=layer, attn_layer_idx=attn_layer_idx,
attn_cfg=attn_cfg, layer_norm_epsilon=layer_norm_epsilon,
resid_dropout1=embed_dropout if i == 0 else resid_dropout,
resid_dropout2=resid_dropout, residual_in_fp32=residual_in_fp32,layer_idx=i,
**factory_kwargs,
) for i in range(n_layer)])
self.drop_f = nn.Dropout(resid_dropout)
self.ln_f = nn.LayerNorm(d_model, eps=layer_norm_epsilon, **factory_kwargs)
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
def forward(self, input_ids, position_ids=None):
hidden_states = self.embeddings(input_ids, position_ids=position_ids,)
residual = None
for layer in self.layers:
hidden_states, residual = layer(hidden_states, residual)
dropped = self.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.ln_f(residual.to(dtype=self.ln_f.weight.dtype))
return hidden_states
#@title Decoder head layer
"""
A simple decoder head (an MLP) to predict a sequence-level classification.
You can either average across all the tokens in a sequence or use the
"last" token to classify. Those two worked best for us, but we provide
other "modes" as well (a short usage sketch follows the class below).
We only need this head for classification; otherwise we use the hidden
states of the backbone as embeddings.
"""
class SequenceDecoder(nn.Module):
def __init__(
self, d_model, d_output=None, l_output=None, use_lengths=False, mode="last"
):
super().__init__()
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
if l_output is None:
self.l_output = None
self.squeeze = False
elif l_output == 0:
# Equivalent to getting an output of length 1 and then squeezing
self.l_output = 1
self.squeeze = True
else:
assert l_output > 0
self.l_output = l_output
self.squeeze = False
self.use_lengths = use_lengths
self.mode = mode
if mode == 'ragged':
assert not use_lengths
def forward(self, x, state=None, lengths=None, l_output=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.l_output is None:
if l_output is not None:
assert isinstance(l_output, int) # Override by pass in
else:
# Grab entire output
l_output = x.size(-2)
squeeze = False
else:
l_output = self.l_output
squeeze = self.squeeze
if self.mode == "last":
restrict = lambda x: x[..., -l_output:, :]
elif self.mode == "first":
restrict = lambda x: x[..., :l_output, :]
elif self.mode == "pool":
restrict = lambda x: (
torch.cumsum(x, dim=-2)
/ torch.arange(
1, 1 + x.size(-2), device=x.device, dtype=x.dtype
).unsqueeze(-1)
)[..., -l_output:, :]
            # note: this def intentionally replaces the lambda above with a more
            # numerically stable cumulative-average implementation of the same pooling
            def restrict(x):
L = x.size(-2)
s = x.sum(dim=-2, keepdim=True)
if l_output > 1:
c = torch.cumsum(x[..., -(l_output - 1) :, :].flip(-2), dim=-2)
c = F.pad(c, (0, 0, 1, 0))
s = s - c # (B, l_output, D)
s = s.flip(-2)
denom = torch.arange(
L - l_output + 1, L + 1, dtype=x.dtype, device=x.device
)
s = s / denom
return s
elif self.mode == "sum":
restrict = lambda x: torch.cumsum(x, dim=-2)[..., -l_output:, :]
# TODO use same restrict function as pool case
elif self.mode == 'ragged':
assert lengths is not None, "lengths must be provided for ragged mode"
# remove any additional padding (beyond max length of any sequence in the batch)
restrict = lambda x: x[..., : max(lengths), :]
else:
raise NotImplementedError(
"Mode must be ['last' | 'first' | 'pool' | 'sum']"
)
# Restrict to actual length of sequence
if self.use_lengths:
assert lengths is not None
x = torch.stack(
[
restrict(out[..., :length, :])
for out, length in zip(torch.unbind(x, dim=0), lengths)
],
dim=0,
)
else:
x = restrict(x)
if squeeze:
assert x.size(-2) == 1
x = x.squeeze(-2)
x = self.output_transform(x)
return x
def step(self, x, state=None):
# Ignore all length logic
return self.output_transform(x)
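# Hedged usage sketch (illustrative): 'pool' averages hidden states over the sequence,
# 'last' keeps the final token; l_output=0 squeezes away the length dimension.
def _sequence_decoder_example():
    x = torch.randn(2, 16, 8)                                    # (batch, length, d_model)
    head = SequenceDecoder(d_model=8, d_output=2, l_output=0, mode='pool')
    print(head(x).shape)                                         # -> torch.Size([2, 2])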
#@title Model (backbone + head)
"""
Putting it all together, the model consists of a backbone model
and a decoder head (you can turn the head off for embeddings only, too).
Here we use a simple head for multi-class classification, but you
can also swap the head to do next-token prediction; we defer to the main
HyenaDNA repo for that code, since pretraining with next-token prediction isn't quite
feasible on Colab. A tiny instantiation sketch follows the class below.
"""
class HyenaDNAModel(nn.Module):
def __init__(self, d_model: int, n_layer: int, d_inner: int, vocab_size: int,
layer=None, attn_layer_idx=None, attn_cfg=None, max_position_embeddings=0,
resid_dropout: float = 0.0, embed_dropout: float = 0.1,
layer_norm_epsilon: float = 1e-5, initializer_cfg=None,residual_in_fp32=False,
pad_vocab_size_multiple: int = 1, use_head=False, n_classes: int = 2,
device=None, dtype=None, **kwargs) -> None:
factory_kwargs = {'device': device, 'dtype': dtype}
super().__init__()
if vocab_size % pad_vocab_size_multiple != 0:
vocab_size += pad_vocab_size_multiple - (vocab_size % pad_vocab_size_multiple)
self.use_head = use_head
# check if layer (config) has d_model (HF code differs from main Safari code)
if 'd_model' not in layer:
layer['d_model'] = d_model
self.backbone = LMBackbone(
d_model=d_model, n_layer=n_layer, d_inner=d_inner, vocab_size=vocab_size,
layer=layer, attn_layer_idx=attn_layer_idx, attn_cfg=attn_cfg,
max_position_embeddings=max_position_embeddings,
resid_dropout=resid_dropout, embed_dropout=embed_dropout,
layer_norm_epsilon=layer_norm_epsilon,
initializer_cfg=initializer_cfg, residual_in_fp32=residual_in_fp32,
**factory_kwargs, **kwargs
)
# we only need a head if doing classification, otherwise we'll use the
# hidden states as embeddings
if self.use_head:
self.head = SequenceDecoder(d_model=d_model, d_output=n_classes, l_output=0, mode='pool')
# Initialize weights and apply final processing
self.apply(partial(_init_weights, n_layer=n_layer,
**(initializer_cfg if initializer_cfg is not None else {})))
# if self.use_head:
# self.tie_weights()
# def tie_weights(self):
# self.head.weight = self.backbone.embeddings.word_embeddings.weight
def forward(self, input_ids, position_ids=None, state=None): # state for the repo interface
hidden_states = self.backbone(input_ids, position_ids=position_ids)
if self.use_head:
return self.head(hidden_states)
else:
return hidden_states
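# Tiny instantiation sketch; the config values here are assumptions for illustration,
# not the released checkpoint settings. With use_head=False the forward pass returns
# per-token hidden states to use as embeddings.
def _tiny_hyenadna_example():
    layer_cfg = {'l_max': 128, 'order': 2, 'filter_order': 16}   # assumed toy Hyena config
    model = HyenaDNAModel(d_model=32, n_layer=2, d_inner=64, vocab_size=12,
                          layer=layer_cfg, use_head=False)
    ids = torch.randint(0, 12, (1, 128))
    print(model(ids).shape)     # -> torch.Size([1, 128, 32])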
"""# Data pipeline
"""
#@title Tokenizer
"""
Just a simple character-level tokenizer.
From: https://github.com/dariush-bahrami/character-tokenizer/blob/master/charactertokenizer/core.py
CharacterTokenizer for Hugging Face Transformers.
This is heavily inspired by CanineTokenizer in the transformers package.
"""
class CharacterTokenizer(PreTrainedTokenizer):
def __init__(self, characters: Sequence[str], model_max_length: int, padding_side: str='left', **kwargs):
"""Character tokenizer for Hugging Face transformers.
Args:
characters (Sequence[str]): List of desired characters. Any character which
is not included in this list will be replaced by a special token called
                [UNK] with id=6. The following is a list of all the special tokens with
                their corresponding ids:
"[CLS]": 0
"[SEP]": 1
"[BOS]": 2
"[MASK]": 3
"[PAD]": 4
"[RESERVED]": 5
"[UNK]": 6
an id (starting at 7) will be assigned to each character.
model_max_length (int): Model maximum sequence length.
"""
self.characters = characters
self.model_max_length = model_max_length
bos_token = AddedToken("[BOS]", lstrip=False, rstrip=False)
eos_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
sep_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
cls_token = AddedToken("[CLS]", lstrip=False, rstrip=False)
pad_token = AddedToken("[PAD]", lstrip=False, rstrip=False)
unk_token = AddedToken("[UNK]", lstrip=False, rstrip=False)
mask_token = AddedToken("[MASK]", lstrip=True, rstrip=False)
super().__init__(
bos_token=bos_token,
eos_token=sep_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
unk_token=unk_token,
add_prefix_space=False,
model_max_length=model_max_length,
padding_side=padding_side,
**kwargs,
)
self._vocab_str_to_int = {
"[CLS]": 0,
"[SEP]": 1,
"[BOS]": 2,
"[MASK]": 3,
"[PAD]": 4,
"[RESERVED]": 5,
"[UNK]": 6,
**{ch: i + 7 for i, ch in enumerate(characters)},
}
self._vocab_int_to_str = {v: k for k, v in self._vocab_str_to_int.items()}
@property
def vocab_size(self) -> int:
return len(self._vocab_str_to_int)
def _tokenize(self, text: str) -> List[str]:
return list(text)
def _convert_token_to_id(self, token: str) -> int:
return self._vocab_str_to_int.get(token, self._vocab_str_to_int["[UNK]"])
def _convert_id_to_token(self, index: int) -> str:
return self._vocab_int_to_str[index]
def convert_tokens_to_string(self, tokens):
return "".join(tokens)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = cls + token_ids_0 + sep
if token_ids_1 is not None:
result += token_ids_1 + sep
return result
def get_special_tokens_mask(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False,
) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0,
token_ids_1=token_ids_1,
already_has_special_tokens=True,
)
result = [1] + ([0] * len(token_ids_0)) + [1]
if token_ids_1 is not None:
result += ([0] * len(token_ids_1)) + [1]
return result
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = len(cls + token_ids_0 + sep) * [0]
if token_ids_1 is not None:
result += len(token_ids_1 + sep) * [1]
return result
def get_config(self) -> Dict:
return {
"char_ords": [ord(ch) for ch in self.characters],
"model_max_length": self.model_max_length,
}
@classmethod
def from_config(cls, config: Dict) -> "CharacterTokenizer":
cfg = {}
cfg["characters"] = [chr(i) for i in config["char_ords"]]
cfg["model_max_length"] = config["model_max_length"]
return cls(**cfg)
def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
cfg = self.get_config()
with open(cfg_file, "w") as f:
json.dump(cfg, f, indent=4)
@classmethod
def from_pretrained(cls, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
with open(cfg_file) as f:
cfg = json.load(f)
return cls.from_config(cfg)
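# Usage sketch (illustrative; assumes a transformers version compatible with this class,
# as in the original notebook): each nucleotide maps to a single id, and [CLS]/[SEP]
# are added around the sequence by default.
def _character_tokenizer_example():
    tok = CharacterTokenizer(characters=['A', 'C', 'G', 'T', 'N'], model_max_length=32)
    ids = tok('ACGT')['input_ids']
    print(ids)              # roughly [0, 7, 8, 9, 10, 1] -> [CLS] A C G T [SEP]
    print(tok.decode(ids))  # "[CLS]ACGT[SEP]"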
|
hyena-dna-main
|
standalone_hyenadna.py
|
#@title Huggingface Pretrained Wrapper
"""
This script is a simple HuggingFace wrapper around a HyenaDNA model, to enable a one-click example
of how to load the pretrained weights and get embeddings.
It will instantiate a HyenaDNA model (the model class is in `standalone_hyenadna.py`) and handle downloading the pretrained weights from HuggingFace.
Check out the Colab notebook for a simpler and more complete walkthrough of how to use HyenaDNA with pretrained weights.
"""
import json
import os
import subprocess
import torch
# import transformers
from transformers import PreTrainedModel
import re
from standalone_hyenadna import HyenaDNAModel
from standalone_hyenadna import CharacterTokenizer
# helper 1
def inject_substring(orig_str):
"""Hack to handle matching keys between models trained with and without
gradient checkpointing."""
# modify for mixer keys
pattern = r"\.mixer"
injection = ".mixer.layer"
modified_string = re.sub(pattern, injection, orig_str)
# modify for mlp keys
pattern = r"\.mlp"
injection = ".mlp.layer"
modified_string = re.sub(pattern, injection, modified_string)
return modified_string
# helper 2
def load_weights(scratch_dict, pretrained_dict, checkpointing=False):
"""Loads pretrained (backbone only) weights into the scratch state dict."""
# loop thru state dict of scratch
# find the corresponding weights in the loaded model, and set it
# need to do some state dict "surgery"
for key, value in scratch_dict.items():
if 'backbone' in key:
# the state dicts differ by one prefix, '.model', so we add that
key_loaded = 'model.' + key
# breakpoint()
# need to add an extra ".layer" in key
if checkpointing:
key_loaded = inject_substring(key_loaded)
try:
scratch_dict[key] = pretrained_dict[key_loaded]
except:
raise Exception('key mismatch in the state dicts!')
# scratch_dict has been updated
return scratch_dict
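# Illustrative sketch of the key "surgery" above; the key below is a made-up example.
# Pretrained checkpoints prefix every backbone key with 'model.', and checkpoints
# trained with gradient checkpointing wrap mixer/mlp submodules in an extra '.layer'.
def _key_surgery_example():
    scratch_key = 'backbone.layers.0.mixer.filter_fn.bias'
    loaded_key = 'model.' + scratch_key               # plain checkpoint
    loaded_key_ckpt = inject_substring(loaded_key)    # with gradient checkpointing
    print(loaded_key)       # model.backbone.layers.0.mixer.filter_fn.bias
    print(loaded_key_ckpt)  # model.backbone.layers.0.mixer.layer.filter_fn.bias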
class HyenaDNAPreTrainedModel(PreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
base_model_prefix = "hyenadna"
    def __init__(self, config):
        # note: from_pretrained() below returns a HyenaDNAModel directly, so this
        # wrapper class is never actually instantiated and __init__ is a no-op
        pass
def forward(self, input_ids, **kwargs):
return self.model(input_ids, **kwargs)
@classmethod
def from_pretrained(cls,
path,
model_name,
download=False,
config=None,
device='cpu',
use_head=False,
n_classes=2,
):
# first check if it is a local path
pretrained_model_name_or_path = os.path.join(path, model_name)
if os.path.isdir(pretrained_model_name_or_path) and download == False:
if config is None:
config = json.load(open(os.path.join(pretrained_model_name_or_path, 'config.json')))
else:
hf_url = f'https://huggingface.co/LongSafari/{model_name}'
subprocess.run(f'rm -rf {pretrained_model_name_or_path}', shell=True)
command = f'mkdir -p {path} && cd {path} && git lfs install && git clone {hf_url}'
subprocess.run(command, shell=True)
if config is None:
config = json.load(open(os.path.join(pretrained_model_name_or_path, 'config.json')))
scratch_model = HyenaDNAModel(**config, use_head=use_head, n_classes=n_classes) # the new model format
loaded_ckpt = torch.load(
os.path.join(pretrained_model_name_or_path, 'weights.ckpt'),
map_location=torch.device(device)
)
# need to load weights slightly different if using gradient checkpointing
if config.get("checkpoint_mixer", False):
            checkpointing = config["checkpoint_mixer"] == True
else:
checkpointing = False
# grab state dict from both and load weights
state_dict = load_weights(scratch_model.state_dict(), loaded_ckpt['state_dict'], checkpointing=checkpointing)
# scratch model has now been updated
scratch_model.load_state_dict(state_dict)
print("Loaded pretrained weights ok!")
return scratch_model
####################################################################################################
"""# Inference (450k to 1M tokens)!
If all you're interested in is getting embeddings on long DNA sequences
(inference), then we can do that right here in Colab!
* We provide an example how to load the weights from Huggingface.
* On the free tier, which uses a
T4 GPU w/16GB of memory, we can process 450k tokens / nucleotides.
* For processing 1M tokens, you'll need an A100, which Colab offers as a paid tier.
* (Don't forget to run the entire notebook above too)
--
To pretrain or fine-tune the 1M long-sequence model (8 layers, d_model=256),
you'll need 8 x 80GB A100s, and all that code is in the main repo!
"""
#@title Single example
import json
import os
import subprocess
# import transformers
from transformers import PreTrainedModel
def inference_single():
'''
this selects which backbone to use, and grabs weights/ config from HF
    5 options:
'hyenadna-tiny-1k-seqlen' # fine-tune on colab ok
'hyenadna-small-32k-seqlen'
'hyenadna-medium-160k-seqlen' # inference only on colab
'hyenadna-medium-450k-seqlen' # inference only on colab
'hyenadna-large-1m-seqlen' # inference only on colab
'''
# you only need to select which model to use here, we'll do the rest!
pretrained_model_name = 'hyenadna-small-32k-seqlen'
max_lengths = {
'hyenadna-tiny-1k-seqlen': 1024,
'hyenadna-small-32k-seqlen': 32768,
'hyenadna-medium-160k-seqlen': 160000,
'hyenadna-medium-450k-seqlen': 450000, # T4 up to here
'hyenadna-large-1m-seqlen': 1_000_000, # only A100 (paid tier)
}
max_length = max_lengths[pretrained_model_name] # auto selects
# data settings:
use_padding = True
rc_aug = False # reverse complement augmentation
add_eos = False # add end of sentence token
# we need these for the decoder head, if using
use_head = False
n_classes = 2 # not used for embeddings only
# you can override with your own backbone config here if you want,
# otherwise we'll load the HF one in None
backbone_cfg = None
device = 'cuda' if torch.cuda.is_available() else 'cpu'
print("Using device:", device)
# instantiate the model (pretrained here)
if pretrained_model_name in ['hyenadna-tiny-1k-seqlen',
'hyenadna-small-32k-seqlen',
'hyenadna-medium-160k-seqlen',
'hyenadna-medium-450k-seqlen',
'hyenadna-large-1m-seqlen']:
# use the pretrained Huggingface wrapper instead
model = HyenaDNAPreTrainedModel.from_pretrained(
'./checkpoints',
pretrained_model_name,
download=True,
config=backbone_cfg,
device=device,
use_head=use_head,
n_classes=n_classes,
)
# from scratch
elif pretrained_model_name is None:
model = HyenaDNAModel(**backbone_cfg, use_head=use_head, n_classes=n_classes)
# create tokenizer
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'], # add DNA characters, N is uncertain
model_max_length=max_length + 2, # to account for special tokens, like EOS
add_special_tokens=False, # we handle special tokens elsewhere
padding_side='left', # since HyenaDNA is causal, we pad on the left
)
#### Single embedding example ####
# create a sample 450k long, prepare
sequence = 'ACTG' * int(max_length/4)
tok_seq = tokenizer(sequence)
tok_seq = tok_seq["input_ids"] # grab ids
# place on device, convert to tensor
tok_seq = torch.LongTensor(tok_seq).unsqueeze(0) # unsqueeze for batch dim
tok_seq = tok_seq.to(device)
# prep model and forward
model.to(device)
model.eval()
with torch.inference_mode():
embeddings = model(tok_seq)
print(embeddings.shape) # embeddings here!
# run it! (to get embeddings)
inference_single()
# to run this, just call:
# python huggingface.py
|
hyena-dna-main
|
huggingface.py
|
import copy
import os
import random
import time
from functools import partial, wraps
from typing import Callable, List, Sequence
import hydra
import numpy as np
import pytorch_lightning as pl
import torch
import torch.nn as nn
import wandb
from hydra.utils import get_original_cwd
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.loggers import WandbLogger
from pytorch_lightning.utilities import rank_zero_only, rank_zero_warn
from pytorch_lightning.strategies.ddp import DDPStrategy
from tqdm.auto import tqdm
import src.models.nn.utils as U
import src.utils as utils
import src.utils.train
from src.dataloaders import SequenceDataset # TODO make registry
from src.tasks import decoders, encoders, tasks
from src.utils import registry
from src.utils.optim_groups import add_optimizer_hooks
log = src.utils.train.get_logger(__name__)
# Turn on TensorFloat32 (speeds up large model training substantially)
import torch.backends
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
OmegaConf.register_new_resolver('eval', eval)
OmegaConf.register_new_resolver('div_up', lambda x, y: (x + y - 1) // y)
# Lots of annoying hacks to get WandbLogger to continuously retry on failure
class DummyExperiment:
"""Dummy experiment."""
def nop(self, *args, **kw):
pass
def __getattr__(self, _):
return self.nop
def __getitem__(self, idx) -> "DummyExperiment":
# enables self.logger.experiment[0].add_image(...)
return self
def __setitem__(self, *args, **kwargs) -> None:
pass
def rank_zero_experiment(fn: Callable) -> Callable:
"""Returns the real experiment on rank 0 and otherwise the DummyExperiment."""
@wraps(fn)
def experiment(self):
@rank_zero_only
def get_experiment():
return fn(self)
return get_experiment() or DummyExperiment()
return experiment
class CustomWandbLogger(WandbLogger):
def __init__(self, *args, **kwargs):
"""Modified logger that insists on a wandb.init() call and catches wandb's error if thrown."""
super().__init__(*args, **kwargs)
@property
@rank_zero_experiment
def experiment(self):
r"""
Actual wandb object. To use wandb features in your
:class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
Example::
.. code-block:: python
self.logger.experiment.some_wandb_function()
"""
if self._experiment is None:
if self._offline:
os.environ["WANDB_MODE"] = "dryrun"
attach_id = getattr(self, "_attach_id", None)
if wandb.run is not None:
# wandb process already created in this instance
rank_zero_warn(
"There is a wandb run already in progress and newly created instances of `WandbLogger` will reuse"
" this run. If this is not desired, call `wandb.finish()` before instantiating `WandbLogger`."
)
self._experiment = wandb.run
elif attach_id is not None and hasattr(wandb, "_attach"):
# attach to wandb process referenced
self._experiment = wandb._attach(attach_id)
else:
# create new wandb process
while True:
try:
self._experiment = wandb.init(**self._wandb_init)
break
except Exception as e:
print("wandb Exception:\n", e)
t = random.randint(30, 60)
print(f"Sleeping for {t} seconds")
time.sleep(t)
# define default x-axis
if getattr(self._experiment, "define_metric", None):
self._experiment.define_metric("trainer/global_step")
self._experiment.define_metric("*", step_metric="trainer/global_step", step_sync=True)
return self._experiment
class SequenceLightningModule(pl.LightningModule):
def __init__(self, config):
# Disable profiling executor. This reduces memory and increases speed.
try:
torch._C._jit_set_profiling_executor(False)
torch._C._jit_set_profiling_mode(False)
except AttributeError:
pass
super().__init__()
# Passing in config expands it one level, so can access by self.hparams.train instead of self.hparams.config.train
self.save_hyperparameters(config, logger=False)
# Dataset arguments
self.dataset = SequenceDataset.registry[self.hparams.dataset._name_](
**self.hparams.dataset
)
# Check hparams
self._check_config()
# PL has some bugs, so add hooks and make sure they're only called once
self._has_setup = False
self.setup() ## Added by KS
def setup(self, stage=None):
if not self.hparams.train.disable_dataset:
self.dataset.setup()
# We need to set up the model in setup() because for some reason when training with DDP, one GPU uses much more memory than the others
# In order to not overwrite the model multiple times during different stages, we need this hack
# TODO PL 1.5 seems to have an option to skip hooks to avoid this
# https://github.com/PyTorchLightning/pytorch-lightning/issues/5410#issuecomment-762257024
if self._has_setup:
return
else:
self._has_setup = True
# Convenience feature: if model specifies encoder, combine it with main encoder
encoder_cfg = utils.to_list(self.hparams.encoder) + utils.to_list(
self.hparams.model.pop("encoder", None)
)
decoder_cfg = utils.to_list(
self.hparams.model.pop("decoder", None)
) + utils.to_list(self.hparams.decoder)
# Instantiate model
self.model = utils.instantiate(registry.model, self.hparams.model)
if (name := self.hparams.train.post_init_hook['_name_']) is not None:
kwargs = self.hparams.train.post_init_hook.copy()
del kwargs['_name_']
for module in self.modules():
if hasattr(module, name):
getattr(module, name)(**kwargs)
# Instantiate the task
self.task = utils.instantiate(
tasks.registry, self.hparams.task, dataset=self.dataset, model=self.model
)
# Create encoders and decoders
encoder = encoders.instantiate(
encoder_cfg, dataset=self.dataset, model=self.model
)
decoder = decoders.instantiate(
decoder_cfg, model=self.model, dataset=self.dataset
)
# Extract the modules so they show up in the top level parameter count
self.encoder = U.PassthroughSequential(self.task.encoder, encoder)
self.decoder = U.PassthroughSequential(decoder, self.task.decoder)
self.loss = self.task.loss
self.loss_val = self.task.loss
if hasattr(self.task, 'loss_val'):
self.loss_val = self.task.loss_val
self.metrics = self.task.metrics
self.train_torchmetrics = self.task.train_torchmetrics
self.val_torchmetrics = self.task.val_torchmetrics
self.test_torchmetrics = self.task.test_torchmetrics
def load_state_dict(self, state_dict, strict=False):
if self.hparams.train.pretrained_model_state_hook['_name_'] is not None:
model_state_hook = utils.instantiate(
registry.model_state_hook,
self.hparams.train.pretrained_model_state_hook.copy(),
partial=True,
)
state_dict = model_state_hook(self.model, state_dict)
print("Custom load_state_dict function is running.")
# strict==True will require all modules to match
# strict==False can allow encoder/decoder to be loaded from scratch too
return super().load_state_dict(state_dict, strict=strict)
def _check_config(self):
assert self.hparams.train.state.mode in [None, "none", "null", "reset", "bptt", "tbptt"]
assert (
(n := self.hparams.train.state.n_context) is None
or isinstance(n, int)
and n >= 0
)
assert (
(n := self.hparams.train.state.n_context_eval) is None
or isinstance(n, int)
and n >= 0
)
def _initialize_state(self):
"""Called at model setup and start of epoch to completely reset state"""
self._state = None
self._memory_chunks = []
def _reset_state(self, batch, device=None):
"""Called to construct default_state when necessary, e.g. during BPTT"""
device = device or batch[0].device
self._state = self.model.default_state(*batch[0].shape[:1], device=device)
def _detach_state(self, state):
if isinstance(state, torch.Tensor):
return state.detach()
elif isinstance(state, tuple):
return tuple(self._detach_state(s) for s in state)
elif isinstance(state, list):
return [self._detach_state(s) for s in state]
elif isinstance(state, dict):
return {k: self._detach_state(v) for k, v in state.items()}
elif state is None:
return None
else:
raise NotImplementedError
def _process_state(self, batch, batch_idx, train=True):
"""Handle logic for state context."""
# Number of context steps
key = "n_context" if train else "n_context_eval"
n_context = self.hparams.train.state.get(key)
# Don't need to do anything if 0 context steps. Make sure there is no state
if n_context == 0 and self.hparams.train.state.mode not in ['tbptt']:
self._initialize_state()
return
# Reset state if needed
if self.hparams.train.state.mode == "reset":
if batch_idx % (n_context + 1) == 0:
self._reset_state(batch)
# Pass through memory chunks
elif self.hparams.train.state.mode == "bptt":
self._reset_state(batch)
with torch.no_grad(): # should be unnecessary because individual modules should handle this
for _batch in self._memory_chunks:
self.forward(_batch)
# Prepare for next step
self._memory_chunks.append(batch)
self._memory_chunks = self._memory_chunks[-n_context:]
elif self.hparams.train.state.mode == 'tbptt':
_, _, z = batch
reset = z["reset"]
if reset:
self._reset_state(batch)
else:
self._state = self._detach_state(self._state)
# def forward(self, batch):
# """Passes a batch through the encoder, backbone, and decoder"""
# # z holds arguments such as sequence length
# x, y, *z = batch # z holds extra dataloader info such as resolution
# if len(z) == 0:
# z = {}
# else:
# assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
# z = z[0]
# x, w = self.encoder(x, **z) # w can model-specific constructions such as key_padding_mask for transformers or state for RNNs
# x, state = self.model(x, **w, state=self._state)
# self._state = state
# x, w = self.decoder(x, state=state, **z)
# return x, y, w
def forward(self, batch):
return self.task.forward(batch, self.encoder, self.model, self.decoder, self._state)
def step(self, x_t):
x_t, *_ = self.encoder(x_t) # Potential edge case for encoders that expect (B, L, H)?
x_t, state = self.model.step(x_t, state=self._state)
self._state = state
# x_t = x_t[:, None, ...] # Dummy length
# x_t, *_ = self.decoder(x_t, state=state)
# x_t = x_t[:, 0, ...]
x_t, *_ = self.decoder.step(x_t, state=state)
return x_t
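# Minimal autoregressive sketch (hypothetical loop, shown for context; it assumes the backbone
# exposes default_state/step as used above and that x holds a token sequence of length L):
#   self._initialize_state(); self._reset_state(batch)
#   outputs = [self.step(x[:, t]) for t in range(L)]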
def _shared_step(self, batch, batch_idx, prefix="train"):
self._process_state(batch, batch_idx, train=(prefix == "train"))
x, y, w = self.forward(batch)
# Loss
if prefix == 'train':
loss = self.loss(x, y, **w)
else:
loss = self.loss_val(x, y, **w)
# Metrics
metrics = self.metrics(x, y, **w)
metrics["loss"] = loss
metrics = {f"{prefix}/{k}": v for k, v in metrics.items()}
# Calculate torchmetrics
torchmetrics = getattr(self, f'{prefix}_torchmetrics')
torchmetrics(x, y, loss=loss)
log_on_step = 'eval' in self.hparams and self.hparams.eval.get('log_on_step', False) and prefix == 'train'
self.log_dict(
metrics,
on_step=log_on_step,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
# log the whole dict, otherwise lightning takes the mean to reduce it
# https://pytorch-lightning.readthedocs.io/en/stable/visualize/logging_advanced.html#enable-metrics-for-distributed-training
self.log_dict(
torchmetrics,
on_step=log_on_step,
on_epoch=True,
prog_bar=True,
add_dataloader_idx=False,
sync_dist=True,
)
return loss
def on_train_epoch_start(self):
# Reset training torchmetrics
self.task._reset_torchmetrics("train")
def training_epoch_end(self, outputs):
# Log training torchmetrics
super().training_epoch_end(outputs)
def on_validation_epoch_start(self):
# Reset all validation torchmetrics
for name in self.val_loader_names:
self.task._reset_torchmetrics(name)
def validation_epoch_end(self, outputs):
# Log all validation torchmetrics
super().validation_epoch_end(outputs)
def on_test_epoch_start(self):
# Reset all test torchmetrics
for name in self.test_loader_names:
self.task._reset_torchmetrics(name)
def test_epoch_end(self, outputs):
# Log all test torchmetrics
super().test_epoch_end(outputs)
def training_step(self, batch, batch_idx, dataloader_idx=0):
loss = self._shared_step(batch, batch_idx, prefix="train")
# Log the loss explicitly so it shows up in WandB
# Note that this currently runs into a bug in the progress bar with ddp (as of 1.4.6)
# https://github.com/PyTorchLightning/pytorch-lightning/pull/9142
# We additionally log the epochs under 'trainer' to get a consistent prefix with 'global_step'
loss_epoch = {"trainer/loss": loss, "trainer/epoch": self.current_epoch}
self.log_dict(
loss_epoch,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
# Log any extra info that the models want to expose (e.g. output norms)
metrics = {}
for module in list(self.modules())[1:]:
if hasattr(module, "metrics"):
metrics.update(module.metrics)
self.log_dict(
metrics,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
return loss
def validation_step(self, batch, batch_idx, dataloader_idx=0):
ema = (
self.val_loader_names[dataloader_idx].endswith("/ema")
and self.optimizers().optimizer.stepped
) # There's a bit of an annoying edge case with the first (0-th) epoch; it has to be excluded due to the initial sanity check
if ema:
self.optimizers().swap_ema()
loss = self._shared_step(
batch, batch_idx, prefix=self.val_loader_names[dataloader_idx]
)
if ema:
self.optimizers().swap_ema()
return loss
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self._shared_step(
batch, batch_idx, prefix=self.test_loader_names[dataloader_idx]
)
def configure_optimizers(self):
# Set zero weight decay for some params
if 'optimizer_param_grouping' in self.hparams.train:
add_optimizer_hooks(self.model, **self.hparams.train.optimizer_param_grouping)
# Normal parameters
all_params = list(self.parameters())
params = [p for p in all_params if not hasattr(p, "_optim")]
optimizer = utils.instantiate(registry.optimizer, self.hparams.optimizer, params)
del self.hparams.optimizer._name_
# Add parameters with special hyperparameters
hps = [getattr(p, "_optim") for p in all_params if hasattr(p, "_optim")]
hps = [
# dict(s) for s in set(frozenset(hp.items()) for hp in hps)
dict(s) for s in sorted(list(dict.fromkeys(frozenset(hp.items()) for hp in hps)))
# dict(s) for s in dict.fromkeys(frozenset(hp.items()) for hp in hps)
] # Unique dicts
print("Hyperparameter groups", hps)
for hp in hps:
params = [p for p in all_params if getattr(p, "_optim", None) == hp]
optimizer.add_param_group(
{"params": params, **self.hparams.optimizer, **hp}
)
### Layer Decay ###
if self.hparams.train.layer_decay['_name_'] is not None:
get_num_layer = utils.instantiate(
registry.layer_decay,
self.hparams.train.layer_decay['_name_'],
partial=True,
)
# Go through all parameters and get num layer
layer_wise_groups = {}
num_max_layers = 0
for name, p in self.named_parameters():
# Get layer id for each parameter in the model
layer_id = get_num_layer(name)
# Add to layer wise group
if layer_id not in layer_wise_groups:
layer_wise_groups[layer_id] = {
'params': [],
'lr': None,
'weight_decay': self.hparams.optimizer.weight_decay
}
layer_wise_groups[layer_id]['params'].append(p)
if layer_id > num_max_layers: num_max_layers = layer_id
# Update lr for each layer
for layer_id, group in layer_wise_groups.items():
group['lr'] = self.hparams.optimizer.lr * (self.hparams.train.layer_decay.decay ** (num_max_layers - layer_id))
# Reset the torch optimizer's param groups
optimizer.param_groups = []
for layer_id, group in layer_wise_groups.items():
optimizer.add_param_group(group)
# Print optimizer info for debugging
keys = set([k for hp in hps for k in hp.keys()]) # Special hparams
utils.train.log_optimizer(log, optimizer, keys)
# Configure scheduler
if "scheduler" not in self.hparams:
return optimizer
lr_scheduler = utils.instantiate(
registry.scheduler, self.hparams.scheduler, optimizer
)
scheduler = {
"scheduler": lr_scheduler,
"interval": self.hparams.train.interval, # 'epoch' or 'step'
"monitor": self.hparams.train.monitor,
"name": "trainer/lr", # default is e.g. 'lr-AdamW'
}
# See documentation for how to configure the return
# https://pytorch-lightning.readthedocs.io/en/latest/api/pytorch_lightning.core.lightning.html#pytorch_lightning.core.lightning.LightningModule.configure_optimizers
return [optimizer], [scheduler]
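# Illustrative convention (an inference from the "_optim" handling above, not an additional API):
# a module can tag a parameter with per-parameter hyperparameters, e.g.
#   self.A = nn.Parameter(torch.zeros(d))
#   setattr(self.A, "_optim", {"lr": 1e-3, "weight_decay": 0.0})
# and configure_optimizers will place it in its own param group with those overrides.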
def train_dataloader(self):
return self.dataset.train_dataloader(**self.hparams.loader)
def _eval_dataloaders_names(self, loaders, prefix):
"""Process loaders into a list of names and loaders"""
if utils.is_dict(loaders):
return [
f"{prefix}/{k}" if k is not None else prefix for k in loaders.keys()
], list(loaders.values())
elif utils.is_list(loaders):
return [f"{prefix}/{i}" for i in range(len(loaders))], loaders
else:
return [prefix], [loaders]
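# Example of the naming this helper produces (hypothetical loaders, shown for clarity only):
#   {"clean": dl1, "noisy": dl2} with prefix "val"  -> ["val/clean", "val/noisy"], [dl1, dl2]
#   [dl1, dl2]                   with prefix "test" -> ["test/0", "test/1"],       [dl1, dl2]
#   dl                           with prefix "val"  -> ["val"],                    [dl]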
def _eval_dataloaders(self):
# Return all val + test loaders
val_loaders = self.dataset.val_dataloader(**self.hparams.loader)
test_loaders = self.dataset.test_dataloader(**self.hparams.loader)
val_loader_names, val_loaders = self._eval_dataloaders_names(val_loaders, "val")
test_loader_names, test_loaders = self._eval_dataloaders_names(
test_loaders, "test"
)
# Duplicate datasets for ema
if self.hparams.train.ema > 0.0:
val_loader_names += [name + "/ema" for name in val_loader_names]
val_loaders = val_loaders + val_loaders
test_loader_names += [name + "/ema" for name in test_loader_names]
test_loaders = test_loaders + test_loaders
# adding option to only have val loader at eval (eg if test is duplicate)
if self.hparams.train.get("remove_test_loader_in_eval", False):
return val_loader_names, val_loaders
# adding option to only have test loader at eval
elif self.hparams.train.get("remove_val_loader_in_eval", False):
return test_loader_names, test_loaders
# default behavior is to add test loaders in eval
else:
return val_loader_names + test_loader_names, val_loaders + test_loaders
def val_dataloader(self):
val_loader_names, val_loaders = self._eval_dataloaders()
self.val_loader_names = val_loader_names
return val_loaders
def test_dataloader(self):
test_loader_names, test_loaders = self._eval_dataloaders()
self.test_loader_names = ["final/" + name for name in test_loader_names]
return test_loaders
### pytorch-lightning utils and entrypoint ###
def create_trainer(config, **kwargs):
callbacks: List[pl.Callback] = []
logger = None
# WandB Logging
if config.get("wandb") is not None:
# Pass in wandb.init(config=) argument to get the nice 'x.y.0.z' hparams logged
# Can pass in config_exclude_keys='wandb' to remove certain groups
import wandb
logger = CustomWandbLogger(
config=utils.to_dict(config, recursive=True),
settings=wandb.Settings(start_method="fork"),
**config.wandb,
)
# Lightning callbacks
if "callbacks" in config:
for _name_, callback in config.callbacks.items():
if config.get("wandb") is None and _name_ in ["learning_rate_monitor"]:
continue
log.info(f"Instantiating callback <{registry.callbacks[_name_]}>")
callback._name_ = _name_
callbacks.append(utils.instantiate(registry.callbacks, callback))
# Add ProgressiveResizing callback
if config.callbacks.get("progressive_resizing", None) is not None:
num_stages = len(config.callbacks.progressive_resizing.stage_params)
print(f"Progressive Resizing: {num_stages} stages")
for i, e in enumerate(config.callbacks.progressive_resizing.stage_params):
# Stage params are resolution and epochs, pretty print
print(f"\tStage {i}: {e['resolution']} @ {e['epochs']} epochs")
# Configure ddp automatically
n_devices = config.trainer.get('devices', 1)
if isinstance(n_devices, Sequence): # trainer.devices could be [1, 3] for example
n_devices = len(n_devices)
if n_devices > 1 and config.trainer.get('strategy', None) is None:
config.trainer.strategy = dict(
_target_='pytorch_lightning.strategies.DDPStrategy',
find_unused_parameters=False,
gradient_as_bucket_view=True, # https://pytorch-lightning.readthedocs.io/en/stable/advanced/advanced_gpu.html#ddp-optimizations
)
# Init lightning trainer
log.info(f"Instantiating trainer <{config.trainer._target_}>")
# special processing for seqlen warmup scheduler (reload)
if config.callbacks.get("seqlen_warmup_reload", None) is not None:
# we need to instantiate manually instead of with hydra, since it expects a dict instead of a hydra config for the accumulate_grad_batches
# so we convert everything to dicts (from hydra configs)
trainer_config_dict = dict(config.trainer)
epochs_cume = 0 # track cumulative epochs
accumulate_grad_schedule = {} # contains the accumulate_grad_batches schedule to init the trainer
for stage in config.callbacks.seqlen_warmup_reload.stage_params:
batch_size = stage['batch_size'] # curr batch size at this stage
grad_accum_factor = config.train.global_batch_size // batch_size # grad accum factor for this stage
accumulate_grad_schedule[epochs_cume] = grad_accum_factor # set the grad accum factor for this stage
epochs_cume += stage['epochs'] # increment epochs_cume for next stage
trainer_config_dict['accumulate_grad_batches'] = accumulate_grad_schedule # set the accumulate_grad_batches schedule
trainer_config_dict.pop('_target_') # only hydra uses this to instantiate
# Set DDPStrategy to work with pl.Trainer
config.trainer.pop('strategy')
trainer_config_dict['strategy'] = DDPStrategy(find_unused_parameters=False, gradient_as_bucket_view=True)
trainer = pl.Trainer(**trainer_config_dict, callbacks=callbacks, logger=logger)
else:
trainer = hydra.utils.instantiate(config.trainer, callbacks=callbacks, logger=logger)
return trainer
def train(config):
if config.train.seed is not None:
pl.seed_everything(config.train.seed, workers=True)
trainer = create_trainer(config)
model = SequenceLightningModule(config)
# Load pretrained_model if specified
if config.train.get("pretrained_model_path", None) is not None:
# PTL style. Note, method returns a new model object, and need to pass config.
model = SequenceLightningModule.load_from_checkpoint(
config.train.pretrained_model_path,
config=config,
strict=config.train.pretrained_model_strict_load,
)
# Run initial validation epoch (useful for debugging, finetuning)
if config.train.validate_at_start:
print("Running validation before training")
trainer.validate(model)
if config.train.ckpt is not None:
trainer.fit(model, ckpt_path=config.train.ckpt)
else:
trainer.fit(model)
if config.train.test:
trainer.test(model)
@hydra.main(config_path="configs", config_name="config.yaml")
def main(config: OmegaConf):
# Process config:
# - register evaluation resolver
# - filter out keys used only for interpolation
# - optional hooks, including disabling python warnings or debug friendly configuration
config = utils.train.process_config(config)
# Pretty print config using Rich library
utils.train.print_config(config, resolve=True)
train(config)
if __name__ == "__main__":
main()
|
hyena-dna-main
|
train.py
|
import torch
import torch.nn.functional as F
from einops import rearrange
from fftconv import fftconv_fwd, fftconv_bwd
def fftconv_ref(u, k, D, dropout_mask):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size) / fft_size
u_f = torch.fft.rfft(u.to(dtype=k.dtype), n=fft_size)
y = torch.fft.irfft(u_f * k_f, n=fft_size, norm='forward')[..., :seqlen]
out = y + u * D.unsqueeze(-1)
return (F.gelu(out) * rearrange(dropout_mask, 'b H -> b H 1')).to(dtype=u.dtype)
def fftconv_fast(u, k, D, dropout_mask):
"""Fuse padding + rfft + pointwise mult + ifft + multiply with D + gelu + dropout
"""
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size)
out = fftconv_fwd(u, k_f, D, dropout_mask, fft_size)
return out
def fftconv_fast_bwd(dout, u, k, D, dropout_mask=None):
seqlen = u.shape[-1]
fft_size = 2 * seqlen
k_f = torch.fft.rfft(k, n=fft_size)
dx, dk_f, dD = fftconv_bwd(dout, u, k_f, D, dropout_mask, fft_size)
dk = torch.fft.irfft(dk_f, n=fft_size, norm='forward')[..., :seqlen]
return dx, dk, dD
device = 'cuda'
dtype = torch.float32
# dtype = torch.float16
batch_size = 64
H = 256
fft_size = 2048
seqlen = 1024
dropout_prob = 0.37
torch.manual_seed(0)
u = torch.randn(batch_size, H, seqlen, device=device, dtype=dtype, requires_grad=True)
k = torch.randn(H, seqlen, device=device, requires_grad=True)
D = torch.randn(H, device=device, requires_grad=True)
dropout_mask = F.dropout(torch.ones(batch_size, H, device=device), dropout_prob)
out = fftconv_ref(u, k, D, dropout_mask)
out = fftconv_fast(u, k, D, dropout_mask)
g = torch.randn_like(out)
fftconv_fast_bwd(g, u, k, D, dropout_mask)
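# Optional sanity check, a minimal sketch added for illustration: it assumes the fused kernel
# implements the same semantics as fftconv_ref (zero-padding, the D skip term, GELU, and the
# dropout mask), and it only reports the maximum deviation rather than asserting a tolerance.
out_ref = fftconv_ref(u, k, D, dropout_mask)
out_fast = fftconv_fast(u, k, D, dropout_mask)
print('fwd max abs diff:', (out_ref - out_fast).abs().max().item())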
|
hyena-dna-main
|
csrc/fftconv/launch_fftconv.py
|
# Adapted from https://github.com/NVIDIA/apex/blob/master/setup.py
import torch
from torch.utils.cpp_extension import BuildExtension, CppExtension, CUDAExtension, CUDA_HOME
from setuptools import setup, find_packages
import subprocess
import sys
import warnings
import os
# ninja build does not work unless include_dirs are abs path
this_dir = os.path.dirname(os.path.abspath(__file__))
def get_cuda_bare_metal_version(cuda_dir):
raw_output = subprocess.check_output([cuda_dir + "/bin/nvcc", "-V"], universal_newlines=True)
output = raw_output.split()
release_idx = output.index("release") + 1
release = output[release_idx].split(".")
bare_metal_major = release[0]
bare_metal_minor = release[1][0]
return raw_output, bare_metal_major, bare_metal_minor
def check_cuda_torch_binary_vs_bare_metal(cuda_dir):
raw_output, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(cuda_dir)
torch_binary_major = torch.version.cuda.split(".")[0]
torch_binary_minor = torch.version.cuda.split(".")[1]
print("\nCompiling cuda extensions with")
print(raw_output + "from " + cuda_dir + "/bin\n")
if (bare_metal_major != torch_binary_major) or (bare_metal_minor != torch_binary_minor):
raise RuntimeError(
"Cuda extensions are being compiled with a version of Cuda that does "
"not match the version used to compile Pytorch binaries. "
"Pytorch binaries were compiled with Cuda {}.\n".format(torch.version.cuda)
+ "In some cases, a minor-version mismatch will not cause later errors: "
"https://github.com/NVIDIA/apex/pull/323#discussion_r287021798. "
"You can try commenting out this check (at your own risk)."
)
def raise_if_cuda_home_none(global_option: str) -> None:
if CUDA_HOME is not None:
return
raise RuntimeError(
f"{global_option} was requested, but nvcc was not found. Are you sure your environment has nvcc available? "
"If you're installing within a container from https://hub.docker.com/r/pytorch/pytorch, "
"only images whose names contain 'devel' will provide nvcc."
)
def append_nvcc_threads(nvcc_extra_args):
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) >= 11 and int(bare_metal_minor) >= 2:
return nvcc_extra_args + ["--threads", "4"]
return nvcc_extra_args
if not torch.cuda.is_available():
# https://github.com/NVIDIA/apex/issues/486
# Extension builds after https://github.com/pytorch/pytorch/pull/23408 attempt to query torch.cuda.get_device_capability(),
# which will fail if you are compiling in an environment without visible GPUs (e.g. during an nvidia-docker build command).
print(
"\nWarning: Torch did not find available GPUs on this system.\n",
"If your intention is to cross-compile, this is not an error.\n"
"By default, Apex will cross-compile for Pascal (compute capabilities 6.0, 6.1, 6.2),\n"
"Volta (compute capability 7.0), Turing (compute capability 7.5),\n"
"and, if the CUDA version is >= 11.0, Ampere (compute capability 8.0).\n"
"If you wish to cross-compile for a single specific architecture,\n"
'export TORCH_CUDA_ARCH_LIST="compute capability" before running setup.py.\n',
)
if os.environ.get("TORCH_CUDA_ARCH_LIST", None) is None:
_, bare_metal_major, bare_metal_minor = get_cuda_bare_metal_version(CUDA_HOME)
if int(bare_metal_major) == 11:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0"
if int(bare_metal_minor) > 0:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5;8.0;8.6"
else:
os.environ["TORCH_CUDA_ARCH_LIST"] = "6.0;6.1;6.2;7.0;7.5"
print("\n\ntorch.__version__ = {}\n\n".format(torch.__version__))
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
cmdclass = {}
ext_modules = []
raise_if_cuda_home_none("fftconv")
# Check if CUDA 11 is installed for compute capability 8.0
cc_flag = []
# cc_flag.append("-gencode")
# cc_flag.append("arch=compute_70,code=sm_70")
cc_flag.append("-gencode")
cc_flag.append("arch=compute_80,code=sm_80")
ext_modules.append(
CUDAExtension(
'fftconv', [
'fftconv.cpp',
'fftconv_cuda.cu',
],
extra_compile_args={'cxx': ['-g', '-march=native', '-funroll-loops'],
'nvcc': ['-O3', '--threads', '4', '-lineinfo', '--use_fast_math', '-std=c++17', '-arch=compute_70']
# extra_compile_args={'cxx': ['-O3'],
# 'nvcc': append_nvcc_threads(['-O3', '-lineinfo', '--use_fast_math', '-std=c++17'] + cc_flag)
},
include_dirs=[os.path.join(this_dir, 'mathdx/22.02/include')]
)
)
torch.utils.cpp_extension.COMMON_NVCC_FLAGS.remove('-D__CUDA_NO_HALF2_OPERATORS__')
setup(
name="fftconv",
version="0.1",
description="FFTConv for state-space models",
ext_modules=ext_modules,
cmdclass={"build_ext": BuildExtension} if ext_modules else {},
)
|
hyena-dna-main
|
csrc/fftconv/setup.py
|
import math
import re
import numpy as np
# N = 8192
N = 16384
# The case of 0 / N is special, we want to simplify it to 0 / 2 instead of 0 / 1
numerator = np.arange(1, N // 8 + 1)
gcd = np.gcd(numerator, N)
num = numerator // gcd
denom = N // gcd
lut_vals = ['T_2_0'] + [f'T_{d}_{n}' for n, d in zip(num, denom)]
lut_string = f"static const __device__ float2 lut_mine_sp_8_{N}[{N // 8 + 1}] = {{\n {','.join(lut_vals)}\n}};"
print(lut_string)
# Only define new values if it's not already in the cuFFTDx lookup table
cufftdx_lut_filename = 'mathdx/22.02/include/cufftdx/include/database/lut_defines_0.hpp.inc'
matches = set()
reg = re.compile(f'^#define T_{N}_([0-9]+) ')
with open(cufftdx_lut_filename, 'r') as f:
for line in f:
if (match := reg.match(line)) is not None:
matches.add(int(match[1]))
numerator = np.arange(1, N // 8 + 1, 2)
angle = -2 * math.pi * numerator.astype(np.float64) / N
cos, sin = np.cos(angle), np.sin(angle)
defs = [f'#define T_{N}_{n} {{{c:.40f},{s:.40f}}}' for n, c, s in zip(numerator, cos, sin) if n not in matches]
def_string = '\n'.join(defs)
print(def_string)
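# For illustration only (hypothetical small N, not part of the generated table): with N = 16 and
# n = 1 the angle is -2*pi/16, so the emitted line would read roughly
# "#define T_16_1 {0.9238795325...,-0.3826834324...}", i.e. the cos and sin of the twiddle factor
# exp(-2*pi*i*n/N).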
|
hyena-dna-main
|
csrc/fftconv/lut_code_gen.py
|
#!/usr/bin/env python3
import argparse
import yaml
from tqdm import tqdm
import typing as tp
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import OrderedDict
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.nn.functional as F
import pytorch_lightning as pl
from einops import rearrange, repeat
import sys, os
FILEDIR = os.path.dirname(os.path.realpath(__file__))  # directory of this script, so '..' resolves to the repo root
sys.path.append(os.path.join(FILEDIR, '..'))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from src.dataloaders.icl_genomics_dataloader import ICLGenomics
from src.dataloaders.genomics import ICLGenomics
def exists(x):
return x is not None
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def soft_prompting():
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt_path", help="Path to pretrained model checkpoint")
parser.add_argument("--dataset", default='none')
parser.add_argument("--config", default='./configs/evals/soft_prompting_genomics.yaml')
parser.add_argument("--results", default='./results/soft_prompting')
args = parser.parse_args()
os.makedirs(args.results, exist_ok=True)
# load configs
config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
cfg_model = config['model'].copy()
cfg_dataset = config['dataset'].copy()
cfg_tuning = config['tuning'].copy()
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
rng = np.random.RandomState(config['seed'])
# dataset_name num_seqs num_classes median_len std
# dummy_mouse_enhancers_ensembl 1210 2 2381 984.4
# demo_coding_vs_intergenomic_seqs 100_000 2 200 0
# demo_human_or_worm 100_000 2 200 0
# human_enhancers_cohn 27791 2 500 0
# human_enhancers_ensembl 154842 2 269 122.6
# human_ensembl_regulatory 289061 3 401 184.3
# human_nontata_promoters 36131 2 251 0
# human_ocr_ensembl 174756 2 315 108.1
# chrom_names = [
# 'chr11', 'chr13', 'chr15', 'chr17', 'chr19', 'chr21', 'chr2', 'chr4', 'chr6', 'chr8', 'chr10', 'chr12',
# 'chr14', 'chr16', 'chr18', 'chr20', 'chr22', 'chrX', 'chrY', 'chr1', 'chr3', 'chr5', 'chr7', 'chr9'
# ]
nuc_chars = list('ACGTN')
characters = nuc_chars # + chrom_names
label_to_token = {0: 'A', 1: 'N'}
datasets = {
'dummy_mouse_enhancers_ensembl': {
'max_length': 3200,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
# 'demo_coding_vs_intergenomic_seqs': {
# 'max_length': 202,
# 'd_output': 2,
# 'characters': characters,
# 'label_to_token': label_to_token
# },
# 'demo_human_or_worm': {
# 'max_length': 202,
# 'd_output': 2,
# 'characters': characters,
# 'label_to_token': label_to_token,
# },
'human_enhancers_cohn': {
'max_length': 502,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_nontata_promoters': {
'max_length': 251, #253
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_enhancers_ensembl': {
'max_length': 320,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_ensembl_regulatory': {
'max_length': 600,
'd_output': 3,
'characters': characters,
'label_to_token': {0: 'A', 1: 'G', 2: 'N'},
},
'human_ocr_ensembl': {
'max_length': 420,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
}
}
df_results = []
df_i = 0
ds_iter = datasets.items() if args.dataset=='none' else zip([args.dataset], [datasets[args.dataset]])
for dataset, dataset_cfg in ds_iter:
print(f'\nDataset {dataset}...')
for shots in cfg_dataset['shots']:
print(f'...with {shots} shots...')
cfg = cfg_dataset.copy()
cfg.update(dataset_cfg)
cfg['dataset_name'] = dataset
cfg['shots'] = shots
loader = ICLGenomics(**cfg)
loader.setup()
for soft_tokens in cfg_tuning['soft_tokens']:
print(f'...and {soft_tokens} soft tokens...')
# print('Pretrained model...')
pretrained_model = load_model(
cfg_model=cfg_model,
ckpt_path=args.ckpt_path,
n_soft_tokens=soft_tokens,
soft_token_pdrop=cfg_tuning['soft_token_pdrop'],
max_length=cfg['max_length'] if shots>0 else None
)
pretrained_model.to(DEVICE)
if soft_tokens>0: # we only tune when using soft tokens!
print('...tuning...')
pretrained_model = tune_model(
pretrained_model, #deepcopy(pretrained_model).to(DEVICE),
loader,
cfg_tuning,
rng=rng
)
print('...evaluating...')
acc = eval_on_loaders(pretrained_model, {dataset: loader})[dataset]
df_results.append(
pd.DataFrame({
'dataset': dataset,
'model': 'pretrained',
'shots': shots,
'soft_tokens': soft_tokens,
'eval_acc': acc
}, index=[df_i])
)
df_i += 1
pd.concat(df_results).to_csv(
os.path.join(
args.results,
f'soft_prompting_performance_{dataset}.csv'
)
)
del pretrained_model
def load_model(
cfg_model: tp.Dict,
ckpt_path: str=None,
n_soft_tokens: int=0,
soft_token_pdrop: float=0.,
max_length: int=None
):
model = ConvLMHeadModel(**cfg_model)
if ckpt_path is not None:
state_dict = torch.load(ckpt_path, map_location='cpu')
# load a checkpoint saved under DDP into a single model by stripping the "model." prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
# need to remove torchmetrics. to remove keys, need to convert to list first
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
model.load_state_dict(model_state_dict)
return LitModel(model, n_soft_tokens=n_soft_tokens, soft_token_pdrop=soft_token_pdrop, max_length=max_length)
class LitModel(pl.LightningModule):
def __init__(self,
model,
n_soft_tokens: int=0,
soft_token_pdrop: float=0.,
max_length: int=None
):
super().__init__()
self.model = model
requires_grad(self.model, False) # we only want to train soft tokens
self.max_length = max_length
d_model = self.model.lm_head.weight.shape[1]
self.n_soft_tokens = n_soft_tokens
soft_tokens = torch.nn.Parameter(torch.zeros(n_soft_tokens, d_model)) if n_soft_tokens>0 else None
if exists(soft_tokens):
torch.nn.init.normal_(soft_tokens, mean=0.0, std=0.02)
self.soft_tokens = soft_tokens
self.soft_tokens_drop = torch.nn.Dropout(soft_token_pdrop) if soft_token_pdrop>0 else torch.nn.Identity()
def forward(self, x: torch.Tensor):
# get embeddings
with torch.no_grad():
hidden_states = self.model.backbone.embeddings(x)
# attach soft tokens
if exists(self.soft_tokens):
hidden_states = torch.cat([
repeat(self.soft_tokens_drop(self.soft_tokens), 'n d -> b n d', b=hidden_states.shape[0]),
hidden_states
], dim=1)
# forward
residual = None
for layer in self.model.backbone.layers:
hidden_states, residual = layer(hidden_states, residual)
dropped = self.model.backbone.drop_f(hidden_states)
residual = (dropped + residual) if residual is not None else dropped
hidden_states = self.model.backbone.ln_f(residual.to(dtype=self.model.backbone.ln_f.weight.dtype))
return self.model.lm_head(hidden_states)
def step(self, batch: tp.Tuple[torch.Tensor], phase: str='train'):
# get ys
x, y = batch['x'].to(DEVICE), batch['y'].to(DEVICE)
labels_idx = x.shape[1]-1
if exists(self.max_length):
x = torch.cat([x, y], dim=1)
labels_idx = self.get_labels_idx(x)
y = x[:,labels_idx]
# forward
logits = self(x)
logits = logits[:,self.n_soft_tokens:] # we exclude soft tokens
logits = logits[:,labels_idx-1] # previous token predicts target
if logits.ndim>2:
logits = rearrange(logits, 'b n c -> (b n) c')
if y.ndim==2:
y = rearrange(y, 'b n -> (b n)')
# compute loss/acc
loss = F.cross_entropy(logits, y)
preds = logits.argmax(axis=-1)
acc = torch.mean((preds==y).to(torch.float32))
return {'loss': loss, 'acc': acc}
def get_labels_idx(self, x):
return np.concatenate([
[self.max_length+1],
np.arange((2*self.max_length)+4, x.shape[1], self.max_length+3)
])
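# Illustrative index layout (an inference from the arithmetic above, not a spec of ICLGenomics):
# with max_length L and k shots, the first label sits at position L+1 and each further shot adds
# L+3 tokens, so label positions are L+1, 2L+4, 3L+7, ... within the concatenated prompt, and the
# token one step earlier is the one whose logits predict that label.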
def tune_model(model, loader, cfg_tuning, verbose: bool=True, rng: np.random.RandomState=None):
rng = np.random.RandomState(0) if rng is None else rng
optimizer = torch.optim.AdamW(
model.parameters(),
weight_decay=float(cfg_tuning['weight_decay']),
lr=float(cfg_tuning['lr'])
)
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
optimizer=optimizer,
mode='min',
factor=0.1,
patience=0
)
best_model = deepcopy(model)
requires_grad(best_model, False)
step = 0
losses, accs, val_losses = [], [], []
for epoch in range(cfg_tuning['max_epochs']):
if verbose:
print(f'Epoch {epoch}...')
# train epoch:
model.train()
for i, (x,y) in enumerate(loader.train_dataloader()):
batch = {'x': x, 'y': y}
model.on_train_batch_start(batch=batch, batch_idx=step)
with torch.cuda.amp.autocast():
out = model.step(batch)
loss, acc = out['loss'], out['acc']
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg_tuning.get('gradient_clip_val', 1.0))
losses.append(loss.cpu().detach().numpy().mean())
accs.append(acc.cpu().detach().numpy())
# accumulate gradients of N batches
if (i + 1) % cfg_tuning['accumulate_grad_batches'] == 0:
optimizer.step()
optimizer.zero_grad()
# update_ema(ema, model, decay=cfg_tuning['ema_decay'])
step += 1
# eval epoch:
model.eval()
val_loss = []
with torch.no_grad():
for x, y in loader.val_dataloader():
batch = {'x': x, 'y': y}
model.on_train_batch_start(batch=batch, batch_idx=step)
out = model.step(batch)
loss, acc = out['loss'], out['acc']
val_loss.append(loss.cpu().detach().numpy())
val_losses.append(np.mean(val_loss))
if val_losses[-1]==np.min(val_losses): # also covers first epoch
update_ema(best_model, model, decay=0)
scheduler.step(val_losses[-1])
if verbose:
print(f'\tstep {step}; avg. val loss: {val_losses[-1]:1.4f}')
if (epoch > 0 and sum(val_losses[-1] >= val_losses[:-1])>1) or (epoch+1)>=cfg_tuning['max_epochs']:
break
best_model = best_model.to(DEVICE)
requires_grad(best_model, True) # we turn grads back on for completion, even though model will not be trained further...
return best_model #, ema
@torch.no_grad()
def update_ema(ema_model, model, decay=0.999):
ema_params = OrderedDict(ema_model.named_parameters())
model_params = OrderedDict(model.named_parameters())
for name, param in model_params.items():
ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
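# Minimal usage sketch (hypothetical training loop, shown for context): keep a frozen copy and
# blend it towards the live weights after each optimizer step.
#   ema = deepcopy(model); requires_grad(ema, False)
#   for batch in loader:
#       ...optimizer.step()...
#       update_ema(ema, model, decay=0.999)
# Note that tune_model above calls update_ema(best_model, model, decay=0), which simply copies
# the current weights into the snapshot.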
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def eval_on_loaders(model, loaders):
results = {}
for name, loader in loaders.items():
print(f'Evaluating on {name} data...')
all_acc = []
val_loader = loader.val_dataloader()
for x,y in tqdm(val_loader):
x = x.to(DEVICE)
with torch.no_grad():
logits = model(x)
logits = logits[:, -1]
logits = logits.cpu().detach().numpy()
batch_preds = logits.argmax(axis=-1)
# batch_preds = np.array(batch_preds)
y = y.cpu().detach().numpy()
batch_preds = batch_preds.flatten()
y = y.flatten()
acc = (batch_preds == y).mean()
all_acc.append(acc)
results[name] = np.mean(all_acc)
print(f"{name}; full eval. accuracy: {results[name]:1.4f}")
return results
if __name__ == "__main__":
soft_prompting()
|
hyena-dna-main
|
evals/soft_prompting_genomics.py
|
#!/usr/bin/env python3
import argparse
import yaml
from tqdm import tqdm
import typing as tp
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import OrderedDict
import torch
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.allow_tf32 = True
import torch.nn.functional as F
import pytorch_lightning as pl
from einops import rearrange
import sys, os
FILEDIR = os.path.dirname(os.path.realpath(__file__))  # directory of this script, so '..' resolves to the repo root
sys.path.append(os.path.join(FILEDIR, '..'))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from src.dataloaders.icl_genomics_dataloader import ICLGenomics
from src.dataloaders.genomics import ICLGenomics
# TODO:
# Make use of the maximum long context: either put the entire downstream dataset in context
# or add many tunable soft tokens (soft prompting)!
# -> just fill the context up one way or another and show what's possible!
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def instruction_tuned_ICL():
parser = argparse.ArgumentParser()
parser.add_argument("--ckpt_path", help="Path to pretrained model checkpoint")
parser.add_argument("--config", default='./configs/evals/instruction_tuned_genomics.yaml')
parser.add_argument("--results", default='./results/instruction_tuned_genomics')
args = parser.parse_args()
os.makedirs(args.results, exist_ok=True)
# load configs
config = yaml.load(open(args.config, 'r'), Loader=yaml.FullLoader)
cfg_model = config['model'].copy()
cfg_dataset = config['dataset'].copy()
cfg_tuning = config['tuning'].copy()
np.random.seed(config['seed'])
torch.manual_seed(config['seed'])
rng = np.random.RandomState(config['seed'])
# dataset_name num_seqs num_classes median_len std
# dummy_mouse_enhancers_ensembl 1210 2 2381 984.4
# demo_coding_vs_intergenomic_seqs 100_000 2 200 0
# demo_human_or_worm 100_000 2 200 0
# human_enhancers_cohn 27791 2 500 0
# human_enhancers_ensembl 154842 2 269 122.6
# human_ensembl_regulatory 289061 3 401 184.3
# human_nontata_promoters 36131 2 251 0
# human_ocr_ensembl 174756 2 315 108.1
nuc_chars = list('ACGTN')
characters = nuc_chars # + chrom_names
label_to_token = {0: 'A', 1: 'N'}
datasets = {
'human_enhancers_cohn': {
'max_length': 502,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_nontata_promoters': {
'max_length': 251, #253
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_enhancers_ensembl': {
'max_length': 320,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
},
'human_ensembl_regulatory': {
'max_length': 600,
'd_output': 3,
'characters': characters,
'label_to_token': {0: 'A', 1: 'G', 2: 'N'},
},
'human_ocr_ensembl': {
'max_length': 420,
'd_output': 2,
'characters': characters,
'label_to_token': label_to_token,
}
}
print('\n\nEvaluating instruction-tuned ICL performance... ')
df_results = []
df_i = 0
for tuning_samples in cfg_tuning['tuning_samples']:
print(f'...when tuning on {tuning_samples} samples...')
for shots in cfg_dataset['shots']:
print(f'...with {shots} shots...')
for dataset, dataset_cfg in datasets.items():
print(f'...from dataset {dataset}...')
print(f'Collecting tuning data...')
cfg = cfg_dataset.copy()
cfg.update(dataset_cfg)
cfg['dataset_name'] = dataset
cfg['shots'] = shots
loader = ICLGenomics(**cfg)
loader.setup()
# collect tuning samples
tuning_X = []
train_loader = iter(loader.train_dataloader())
samples_collected = 0
for x, y in tqdm(train_loader):
n = min(tuning_samples, x.shape[0])
tuning_X.append(torch.cat([x[:n], y[:n]], dim=1))
samples_collected += n
if samples_collected >= tuning_samples:
print(f'...stop because {tuning_samples} samples collected.')
break
tuning_X = torch.cat(tuning_X, dim=0)
if shots>0:
tuning_y_idx = np.concatenate([
[cfg['max_length']+1],
np.arange((2*cfg['max_length'])+4, tuning_X.shape[1], cfg['max_length']+3)
])
else:
tuning_y_idx = cfg['max_length']+1
tuning_y = tuning_X[:,tuning_y_idx]
tuning_loss_mask = tuning_y_idx-1 # prediction is always from previous token
print('Tuning pretrained model...')
pretrained_model = load_model(cfg_model, args.ckpt_path)
pretrained_model.to(DEVICE)
tuned_pretrained_model = tune_model(
deepcopy(pretrained_model).to(DEVICE),
tuning_X,
tuning_y,
cfg_tuning,
loss_mask=tuning_loss_mask,
rng=rng
)
# print('Tuning untrained model...')
# scratch_model = load_model(cfg_model)
# scratch_model.to(DEVICE)
# tuned_scratch_model = tune_model(
# scratch_model,
# tuning_X,
# tuning_y,
# cfg_tuning,
# loss_mask=tuning_loss_mask,
# rng=rng
# )
print('Evaluating ICL performance...')
for label, model in zip(
['tuned_pretrained'], #, 'scratchtrained'
[tuned_pretrained_model] # tuned_scratch_model
):
print(f'{label}:')
acc = eval_on_loaders(model, {dataset: loader})[dataset]
df_results.append(
pd.DataFrame({
'dataset': dataset,
'tuning_samples': tuning_samples,
'model': label,
'shots': shots,
'eval_acc': acc
}, index=[df_i])
)
df_i += 1
pd.concat(df_results).to_csv(
os.path.join(args.results, 'instruction_tuned_genomics.csv')
)
def load_model(cfg_model, ckpt_path: str=None):
model = ConvLMHeadModel(**cfg_model)
if ckpt_path is not None:
state_dict = torch.load(ckpt_path, map_location='cpu')
# load a checkpoint saved under DDP into a single model by stripping the "model." prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
# need to remove torchmetrics. to remove keys, need to convert to list first
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
model.load_state_dict(model_state_dict)
return LitModel(model)
class LitModel(pl.LightningModule):
def __init__(self, model):
super().__init__()
self.model = model
def forward(self, x: torch.Tensor):
return self.model(x)[0]
def step(self, batch: tp.Tuple[torch.Tensor], loss_mask: tp.Union[int, np.ndarray]=-1, phase: str='train'):
x, y = batch['x'].to(DEVICE), batch['y'].to(DEVICE)
loss_mask = -1 if loss_mask is None else loss_mask
out = self(x)
logits = out.logits[:,loss_mask]
if logits.ndim>2:
logits = rearrange(logits, 'b n c -> (b n) c')
if y.ndim==2:
y = rearrange(y, 'b n -> (b n)')
loss = F.cross_entropy(logits, y)
preds = logits.argmax(axis=-1)
acc = torch.mean((preds==y).to(torch.float32))
return {'loss': loss, 'acc': acc}
def tune_model(model, X, y, cfg_tuning, max_epochs: int=1, loss_mask=None, verbose: bool=True, rng: np.random.RandomState=None):
rng = np.random.RandomState(0) if rng is None else rng
# # we use an exponential moving average (EMA) of the model for downstream ICL...
# ema = deepcopy(model).to(DEVICE)
# requires_grad(ema, False)
# update_ema(ema, model, decay=0) # Ensure EMA is initialized with synced weights
# ema.eval()
optimizer = torch.optim.AdamW(
model.parameters(),
weight_decay=float(cfg_tuning['weight_decay']),
lr=float(cfg_tuning['lr'])
)
# split train/eval
n_samples = X.shape[0]
train_idx = np.arange(n_samples)
batch_size = min(len(train_idx), cfg_tuning['batch_size'])
epoch = 0
step = 0
losses, accs = [], []
stop_training = False
while not stop_training:
if verbose:
print(f'Epoch {epoch}...')
# train epoch:
model.train()
rng.shuffle(train_idx)
batch_i, batch_start = 0, 0
while batch_start+batch_size <= len(train_idx):
idx = train_idx[batch_start:batch_start+batch_size]
batch = {'x': X[idx], 'y': y[idx]}
model.on_train_batch_start(batch=batch, batch_idx=step)
out = model.step(batch, loss_mask=loss_mask)
loss, acc = out['loss'], out['acc']
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), cfg_tuning.get('gradient_clip_val', 1.0))
losses.append(loss.cpu().detach().numpy().mean())
accs.append(acc.cpu().detach().numpy())
# accumulate gradients of N batches
if (batch_i + 1) % cfg_tuning['accumulate_grad_batches'] == 0:
optimizer.step()
optimizer.zero_grad()
# update_ema(ema, model, decay=cfg_tuning['ema_decay'])
step += 1
print(f'step: {step}; train loss: {losses[-1]}, acc: {accs[-1]}')
batch_start += batch_size
batch_i += 1
epoch += 1
if epoch>=max_epochs:
stop_training = True
return model #, ema
@torch.no_grad()
def update_ema(ema_model, model, decay=0.999):
ema_params = OrderedDict(ema_model.named_parameters())
model_params = OrderedDict(model.named_parameters())
for name, param in model_params.items():
ema_params[name].mul_(decay).add_(param.data, alpha=1 - decay)
def requires_grad(model, flag=True):
for p in model.parameters():
p.requires_grad = flag
def eval_on_loaders(model, loaders):
results = {}
for name, loader in loaders.items():
print(f'Evaluating on {name} data...')
all_acc = []
val_loader = loader.val_dataloader()
for batch in tqdm(val_loader):
x, y = batch
x = x.to(DEVICE)
with torch.no_grad():
out = model(x)
if type(out) == tuple: out = out[0]
logits = out.logits[:, -1]
logits = logits.cpu().detach().numpy()
batch_preds = logits.argmax(axis=-1)
# batch_preds = np.array(batch_preds)
y = y.cpu().detach().numpy()
batch_preds = batch_preds.flatten()
y = y.flatten()
acc = (batch_preds == y).mean()
all_acc.append(acc)
results[name] = np.mean(all_acc)
print(f"{name}; full eval. accuracy: {results[name]:1.4f}")
return results
if __name__ == "__main__":
instruction_tuned_ICL()
|
hyena-dna-main
|
evals/instruction_tuned_genomics.py
|
import torch
import argparse
import os
import sys
import yaml
from tqdm import tqdm
import json
from src.models.sequence.long_conv_lm import DNAEmbeddingModel
from src.tasks.decoders import SequenceDecoder
from src.dataloaders import SequenceDataset
import numpy as np
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from src.dataloaders.genomic_bench_dataloader import GenomicBenchmark
from src.dataloaders.nucleotide_transformer_dataloader import NucleotideTransformer
try:
from tokenizers import Tokenizer
except ImportError:
pass
genomic_benchmark_datasets = ["dummy_mouse_enhancers_ensembl", "demo_coding_vs_intergenomic_seqs", "demo_human_or_worm", "human_enhancers_cohn", "human_enhancers_ensembl", "human_ensembl_regulatory", "human_nontata_promoters", "human_ocr_ensembl"]
nucleotide_datasets = [""]
class HG38Inference:
'''Model (backbone + decoder) inference, initially for the enhancer model, but can be modified for other classification tasks as well.
cfg, str: path to the YAML config describing the entire model (backbone and decoder head)
ckpt_path, str: path to the model checkpoint
max_seq_len, int: max seq len of the model (technically in the config already, but more explicit)
'''
def __init__(self, cfg, ckpt_path, max_seq_len, use_dataloader=False):
self.max_seq_len = max_seq_len
self.backbone, self.decoder, self.tokenizer = self.load_model(cfg, ckpt_path)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.backbone = self.backbone.to(self.device)
self.decoder = self.decoder.to(self.device)
# load dataloader if given
if use_dataloader:
self.loader = self.get_dataloader(cfg)
def get_dataloader(self, config):
cfg = yaml.load(open(config, 'r'), Loader=yaml.FullLoader)
dataset_name = cfg['dataset']["dataset_name"]
if dataset_name in genomic_benchmark_datasets:
loader = GenomicBenchmark(**cfg['dataset'])
else:
# assume the rest are in the nucleotide trans datasets
loader = NucleotideTransformer(**cfg['dataset'])
loader.setup()
return loader
def predict_on_list(self, seqs):
"""
makes predictions given just a list of string sequences; handles tokenization and tensor conversion
"""
preds = []
# sample code to loop thru each sample and tokenize first (char level)
for seq in tqdm(seqs):
if isinstance(self.tokenizer, Tokenizer):
seq = self.tokenizer.encode(seq).ids
else:
seq = self.tokenizer.encode(seq)
# can accept a batch, shape [B, seq_len, hidden_dim]
embeddings, _ = self.backbone(torch.tensor([seq]).to(device=self.device))
pred = self.decoder(embeddings)
preds.append(pred)
# we provide the predictions (you can pass back embeddings if you wish)
return preds
def predict_from_loader(self):
"""
Don't forget this returns a list of the labels too with the predictions
"""
all_preds = []
all_labels = []
# by default we'll use the test dataloader, but you can grab val_dataloader or train_dataloader too
for i, batch in enumerate(self.loader.test_dataloader()):
print('batch {}'.format(i))
x, y = batch
x = x.to(self.device)
# y = y.to(self.device)
# save the labels y
all_labels.append(y.cpu().detach().numpy())
embeddings, _ = self.backbone(x)
pred_batch = self.decoder(embeddings)
# take argmax of the predictions
pred_batch = torch.argmax(pred_batch, dim=1)
all_preds.append(pred_batch.cpu().detach().numpy())
# convert list to tensor
all_preds = np.concatenate(all_preds, axis=0)
all_labels = np.concatenate(all_labels, axis=0)
return all_preds, all_labels
def load_model(self, cfg, ckpt_path):
# get the configs
cfg = yaml.load(open(cfg, 'r'), Loader=yaml.FullLoader)
train_cfg = cfg['train'] # grab section `train` section of config
model_cfg = cfg['model'] # grab the `model` section of config
self.d_output = train_cfg['d_output'] # number of classes the head was trained on
# the state dict has both the backbone model and the decoder (normally as a Lightning module), but we need to instantiate both separately
# when not using Lightning.
# instantiate the model
backbone = DNAEmbeddingModel(**model_cfg) # instantiate the backbone separately from the decoder
# instantiate the decoder
decoder = SequenceDecoder(model_cfg['d_model'], d_output=self.d_output, l_output=0, mode='pool') # needs to know the d_model
state_dict = torch.load(ckpt_path, map_location='cpu') # has both backbone and decoder
# load a checkpoint saved under DDP into a single model by stripping the "model." prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
# need to remove torchmetrics. to remove keys, need to convert to list first
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
# the state_dict keys slightly mismatch from Lightning..., so we fix it here
decoder_state_dict = {}
decoder_state_dict['output_transform.weight'] = model_state_dict.pop('decoder.0.output_transform.weight')
decoder_state_dict['output_transform.bias'] = model_state_dict.pop('decoder.0.output_transform.bias')
# now actually load the state dict to the decoder and backbone separately
decoder.load_state_dict(decoder_state_dict, strict=True)
backbone.load_state_dict(model_state_dict, strict=True)
# setup tokenizer
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_seq_len + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
return backbone, decoder, tokenizer
if __name__ == "__main__":
"""
Example cmd for loading a pretrained model (that was fine-tuned). This checkpoint was trained on the 'human_nontata_promoters' dataset.
# (from safari-internal-inf root, note the -m and no '.py')
python -m evals.hg38_inference_decoder --config /home/workspace/eric/safari-internal/configs/evals/hg38_decoder.yaml \
--ckpt_path /home/workspace/eric/safari-internal/outputs/2023-04-14/04-32-17-578382/checkpoints/val/accuracy.ckpt
# enhancer (genomic benchmark)
python -m evals.hg38_inference_decoder --config /home/workspace/eric/safari-internal/configs/evals/hg38_decoder.yaml \
--ckpt_path /home/workspace/eric/safari-internal/outputs/2023-04-12/23-40-51-542457/checkpoints/val/mcc.ckpt --output_path /home/workspace/eric/safari-internal/outputs
# config is located here:
configs/evals/hg38_decoder.yaml
# download the checkpoints from google drive, and put it in the outputs/ dir
https://drive.google.com/drive/folders/11cDmLZgBHr3KkiCtS2V6sqI3Kf8lTW39?usp=share_link
# enhancer weights, from nucleotide transformer, binary classification
/home/workspace/eric/safari-internal/outputs/2023-04-12/23-40-51-542457/checkpoints/val/mcc.ckpt
https://drive.google.com/drive/folders/1wIijtwlqWwzNe_0d3meAXSk7oYJ2POMC?usp=share_link
# promoter tata weights
/home/workspace/eric/safari-internal/outputs/2023-05-01/04-13-05-495708/checkpoints/val/f1_macro.ckpt
note, this model is larger, 2 layers, d_model=256 (not 128!!), and d_inner=1024
https://drive.google.com/drive/folders/1tbIUYwScEox4SLFqeZIFp7Z4YvmIN0M3?usp=share_link
# In general, you need to make sure the config has the same model settings as the checkpoint was trained with.
"""
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
default=f"",
)
parser.add_argument(
"--ckpt_path",
default=f"",
help="Path to model state dict checkpoint"
)
parser.add_argument(
"--output_path",
default=f"",
help="Path to where to save npy file"
)
args = parser.parse_args()
task = HG38Inference(args.config, args.ckpt_path, max_seq_len=1024, use_dataloader=True)
# sample sequence, can pass a list of seqs (themselves a list of chars)
# seqs = ["ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT"]
# if you just have a list of sequences, as strings, you can use this function, returns list
# preds = task.predict_on_list(seqs) # return a list of predictions
# print(preds[0].shape) # shape is [batch, 2] for binary class prediction
# OR...
# or if you rather use the existing dataloader for the enhancer dataset, you can call this instead
# returns a np array
preds, labels = task.predict_from_loader()
# print(preds.shape) # shape is [batch, 2] for binary class prediction
# calculate accuracy of preds vs labels
acc = np.mean(preds.squeeze() == labels.squeeze())
print("Acc: ", acc)
pred_path = os.path.join(args.output_path, "preds.npy")
label_path = os.path.join(args.output_path, "labels.npy")
# save as numpy arr
preds_np = np.array(preds)
labels_np = np.array(labels)
with open(pred_path, 'wb') as f:
np.save(f, preds_np)
with open(label_path, 'wb') as f:
np.save(f, labels_np)
|
hyena-dna-main
|
evals/hg38_inference_decoder.py
|
import torch
import argparse
import os
import sys
import yaml
from tqdm import tqdm
import json
sys.path.append(os.environ.get("SAFARI_PATH", "."))
from src.models.sequence.long_conv_lm import ConvLMHeadModel
# from transformers import AutoTokenizer, GPT2LMHeadModel
# from spacy.lang.en.stop_words import STOP_WORDS
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
try:
from tokenizers import Tokenizer
except ImportError:
pass
# https://github.com/openai/gpt-2/issues/131#issuecomment-492786058
# def preprocess(text):
# text = text.replace("“", '"')
# text = text.replace("”", '"')
# return '\n'+text.strip()
class HG38Encoder:
"Encoder inference for HG38 sequences"
def __init__(self, model_cfg, ckpt_path, max_seq_len):
self.max_seq_len = max_seq_len
self.model, self.tokenizer = self.load_model(model_cfg, ckpt_path)
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model = self.model.to(self.device)
def encode(self, seqs):
results = []
# sample code to loop thru each sample and tokenize first (char level)
for seq in tqdm(seqs):
if isinstance(self.tokenizer, Tokenizer):
tokenized_seq = self.tokenizer.encode(seq).ids
else:
tokenized_seq = self.tokenizer.encode(seq)
# can accept a batch, shape [B, seq_len, hidden_dim]
logits, __ = self.model(torch.tensor([tokenized_seq]).to(device=self.device))
# Using head, so just have logits
results.append(logits)
return results
def load_model(self, model_cfg, ckpt_path):
config = yaml.load(open(model_cfg, 'r'), Loader=yaml.FullLoader)
model = ConvLMHeadModel(**config['model_config'])
state_dict = torch.load(ckpt_path, map_location='cpu')
# load a checkpoint saved under DDP into a single model by stripping the "model." prefix if necessary
torch.nn.modules.utils.consume_prefix_in_state_dict_if_present(
state_dict["state_dict"], "model."
)
model_state_dict = state_dict["state_dict"]
# need to remove torchmetrics. to remove keys, need to convert to list first
for key in list(model_state_dict.keys()):
if "torchmetrics" in key:
model_state_dict.pop(key)
model.load_state_dict(state_dict["state_dict"])
# setup tokenizer
if config['tokenizer_name'] == 'char':
print("**Using Char-level tokenizer**")
# add to vocab
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_seq_len + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
print(tokenizer._vocab_str_to_int)
else:
raise NotImplementedError("You need to provide a custom tokenizer!")
return model, tokenizer
if __name__ == "__main__":
SAFARI_PATH = os.getenv('SAFARI_PATH', '.')
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_cfg",
default=f"{SAFARI_PATH}/configs/evals/hyena_small_150b.yaml",
)
parser.add_argument(
"--ckpt_path",
default=f"",
help="Path to model state dict checkpoint"
)
args = parser.parse_args()
task = HG38Encoder(args.model_cfg, args.ckpt_path, max_seq_len=1024)
# sample sequence, can pass a list of seqs (themselves a list of chars)
seqs = ["ACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGTACGT"]
logits = task.encode(seqs)
print(logits)
print(logits[0].logits.shape)
breakpoint()
|
hyena-dna-main
|
evals/hg38_inference.py
|
import math
import torch
import torch.nn.functional as F
from functools import partial
import torchmetrics.functional as tm_f
import torch.distributions as dist
from sklearn.metrics import f1_score, roc_auc_score, matthews_corrcoef
from torchmetrics import Metric
from torchmetrics.classification import MulticlassRecall, MulticlassPrecision
class CorrectAggregatedMetric(Metric):
"""This is needed to calculate some metrics b/c small batch sizes cause aggregation via a simple
average to be off, as some classes might not be present in batch but will get penalized with a 0."""
def __init__(self, class_idx: int, dist_sync_on_step=False):
# call `self.add_state`for every internal state that is needed for the metrics computations
# dist_reduce_fx indicates the function that should be used to reduce
# state from multiple processes
super().__init__(dist_sync_on_step=dist_sync_on_step)
self.class_idx = torch.tensor(class_idx)
self.add_state("numerator", default=torch.tensor(0.0), dist_reduce_fx="sum")
self.add_state("denominator", default=torch.tensor(0.0), dist_reduce_fx="sum")
def _update(self, numerator, denominator, preds, y) -> tuple:
raise NotImplementedError
def update(self, logits: torch.Tensor, y: torch.Tensor):
# update metric states; flatten first so preds and targets line up even for sequence-shaped inputs
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
preds = torch.argmax(logits, dim=-1)
assert preds.shape == y.shape, f"preds shape {preds.shape} != y shape {y.shape}"
self.numerator, self.denominator = self._update(self.numerator, self.denominator, preds, y)
def compute(self):
# compute final result
value = self.numerator.float() / self.denominator if self.denominator > 0 else torch.tensor(0.0)
return value
def reset(self):
self.numerator = torch.tensor(0.0)
self.denominator = torch.tensor(0.0)
class AccuracyPerClass(CorrectAggregatedMetric):
"""Calculate per class accuracy, i.e. P(y_hat = class_idx AND y = class_idx OR y_hat != class_idx AND y != class_idx)
"""
def _update(self, numerator, denominator, preds, y) -> tuple:
# Filter down to the class of interest
class_idx = self.class_idx
relevant_idxs = (y == class_idx)
numerator += (preds[relevant_idxs] == class_idx).sum()
denominator += relevant_idxs.sum()
relevant_idxs = (y != class_idx)
numerator += (preds[relevant_idxs] != class_idx).sum()
denominator += relevant_idxs.sum()
return numerator, denominator
class PrecisionPerClass(CorrectAggregatedMetric):
"""Calculate per class precision, i.e. P(y_hat = y | y_hat = class_idx)
"""
def _update(self, numerator, denominator, preds, y) -> tuple:
# Filter down to the class of interest
class_idx = self.class_idx
relevant_idxs = (preds == class_idx)
numerator += (preds[relevant_idxs] == y[relevant_idxs]).sum()
denominator += relevant_idxs.sum()
return numerator, denominator
class RecallPerClass(CorrectAggregatedMetric):
"""Calculate per class recall, i.e. P(y_hat = y | y = class_idx)
"""
def _update(self, numerator, denominator, preds, y) -> tuple:
# Filter down to the class of interest
class_idx = self.class_idx
relevant_idxs = (y == class_idx)
numerator += (preds[relevant_idxs] == y[relevant_idxs]).sum()
denominator += relevant_idxs.sum()
return numerator, denominator
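# Usage sketch (illustrative tensors and loop, not from the repo): these metrics are meant to be
# updated per batch and computed once at epoch end, so classes absent from a small batch do not
# drag the aggregate towards zero.
#   recall_cls0 = RecallPerClass(class_idx=0)
#   for logits, y in loader:
#       recall_cls0.update(logits, y)
#   print(recall_cls0.compute())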
def mcc(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return matthews_corrcoef(y.cpu().numpy(), y_hat.cpu().numpy())
def last_k_ppl(logits, y, seq_len=1024, k=None):
'''
Calculate perplexity for last k tokens in a sequence.
logits: (batch_size * seq_len, vocab_size), note, already flattened
y: (batch_size * seq_len), note, already flattened
seq_len: int, length of each sequence in the batch
k: if None, use all tokens in sequence
returns: scalar perplexity, i.e. exp of the mean cross-entropy over the last k tokens across the batch
'''
if k is None:
k = 0 # use the entire sequence
# need to reshape logits and y to be (batch_size, seq_len, vocab_size) and (batch_size, seq_len)
# respectively
# breakpoint()
logits = logits.view(-1, seq_len, logits.shape[-1])
y = y.view(-1, seq_len)
# only use the last k values of seq dim in logits and y
logits = logits[:, -k:, :]
y = y[:, -k:]
# reshape to flatten the batch and seq_len dimensions
logits = logits.reshape(-1, logits.shape[-1])
y = y.reshape(-1)
# get avg and put on cpu
return F.cross_entropy(logits, y, reduction='none').view(y.shape[0], -1).mean().exp().cpu()
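# Usage sketch (illustrative, not from the original repo): with batch_size=2,
# seq_len=4, vocab_size=8 and k=2, only the last two tokens of each sequence
# contribute to the reported (scalar) perplexity.
#   logits = torch.randn(2 * 4, 8)   # already flattened, as documented above
#   y = torch.randint(0, 8, (2 * 4,))
#   ppl = last_k_ppl(logits, y, seq_len=4, k=2)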
def _student_t_map(mu, sigma, nu):
sigma = F.softplus(sigma)
nu = 2.0 + F.softplus(nu)
return mu.squeeze(axis=-1), sigma.squeeze(axis=-1), nu.squeeze(axis=-1)
def student_t_loss(outs, y):
mu, sigma, nu = outs[..., 0], outs[..., 1], outs[..., 2]
mu, sigma, nu = _student_t_map(mu, sigma, nu)
y = y.squeeze(axis=-1)
nup1_half = (nu + 1.0) / 2.0
part1 = 1.0 / nu * torch.square((y - mu) / sigma)
Z = (
torch.lgamma(nup1_half)
- torch.lgamma(nu / 2.0)
- 0.5 * torch.log(math.pi * nu)
- torch.log(sigma)
)
ll = Z - nup1_half * torch.log1p(part1)
return -ll.mean()
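# For reference, the quantity minimized above is the negative of the Student-t
# log-density (averaged over the batch):
#   log p(y | mu, sigma, nu) = lgamma((nu+1)/2) - lgamma(nu/2)
#                              - 0.5 * log(pi * nu) - log(sigma)
#                              - ((nu+1)/2) * log(1 + ((y - mu) / sigma)**2 / nu)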
def gaussian_ll_loss(outs, y):
mu, sigma = outs[..., 0], outs[..., 1]
y = y.squeeze(axis=-1)
sigma = F.softplus(sigma)
ll = -1.0 * (
torch.log(sigma)
+ 0.5 * math.log(2 * math.pi)
+ 0.5 * torch.square((y - mu) / sigma)
)
return -ll.mean()
def binary_cross_entropy(logits, y):
# BCE loss requires squeezing last dimension of logits so it has the same shape as y
# requires y to be float, since it's overloaded to represent a probability
return F.binary_cross_entropy_with_logits(logits.squeeze(-1), y.float())
def binary_accuracy(logits, y):
return torch.eq(logits.squeeze(-1) >= 0, y).float().mean()
def padded_cross_entropy(logits, y, pad_mask, pad_value=-1):
"""Will ignore the pad value in label (eg, -1)
logits: (batch_size, seq_len, vocab_size)
y: (batch_size, seq_len)
pad_mask: (batch_size, seq_len)
"""
# need to apply pad mask to y
y_pad = y + pad_mask * pad_value
logits = logits.view(-1, logits.shape[-1])
y_pad = y_pad.view(-1)
return F.cross_entropy(logits, y_pad, ignore_index=pad_value)
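# Sketch of the padding trick above (assumed example, not from the source): with
# pad_value=-1, padded positions (pad_mask == 1) whose labels are stored as 0 are
# shifted to -1 and therefore skipped by F.cross_entropy(..., ignore_index=-1).
#   y        = torch.tensor([[2, 5, 0, 0]])
#   pad_mask = torch.tensor([[0, 0, 1, 1]])
#   y_pad    = y + pad_mask * -1      # -> [[2, 5, -1, -1]]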
def cross_entropy(logits, y, ignore_index=-100):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return F.cross_entropy(logits, y, ignore_index=ignore_index)
def soft_cross_entropy(logits, y, label_smoothing=0.0):
logits = logits.view(-1, logits.shape[-1])
# target is now 2d (no target flattening)
return F.cross_entropy(logits, y, label_smoothing=label_smoothing)
def accuracy(logits, y):
logits = logits.view(-1, logits.shape[-1])
preds = torch.argmax(logits, dim=-1)
if y.numel() > logits.shape[0]:
# Mixup leads to this case: use argmax class
y = y.argmax(dim=-1)
y = y.view(-1)
return torch.eq(preds, y).float().mean()
def accuracy_ignore_index(logits, y, ignore_index=-100):
num_classes = logits.shape[-1]
preds = torch.argmax(logits, dim=-1)
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
accuracy = tm_f.classification.accuracy(preds, y, 'multiclass', num_classes=num_classes, ignore_index=ignore_index, average='micro')
return accuracy
def accuracy_at_k(logits, y, k=1):
logits = logits.view(-1, logits.shape[-1])
if y.numel() > logits.shape[0]:
# Mixup leads to this case: use argmax class
y = y.argmax(dim=-1)
y = y.view(-1)
return torch.topk(logits, k, dim=-1)[1].eq(y.unsqueeze(-1)).any(dim=-1).float().mean()
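# Illustrative example (not from the original file): with logits [[0.1, 0.5, 0.4]]
# and y = [2], accuracy_at_k is 0.0 for k=1 (the argmax is class 1) but 1.0 for k=2,
# because class 2 appears among the top-2 logits.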
def f1_binary(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="binary")
def f1_macro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="macro")
def f1_micro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
y_hat = torch.argmax(logits, dim=-1)
return f1_score(y.cpu().numpy(), y_hat.cpu().numpy(), average="micro")
def roc_auc_macro(logits, y):
logits = logits.view(
-1, logits.shape[-1]
).detach() # KS: had to add detach to eval while training
y = y.view(-1)
return roc_auc_score(
y.cpu().numpy(), F.softmax(logits, dim=-1).cpu().numpy()[:, 1], average="macro"
)
def roc_auc_micro(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return roc_auc_score(
y.cpu().numpy(), F.softmax(logits, dim=-1).cpu().numpy()[:, 1], average="micro"
)
def mse(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.mse_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
# TODO document the use case of this
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.mse_loss(outs_masked, y_masked)
def forecast_rmse(outs, y, len_batch=None):
# TODO: generalize, currently for Monash dataset
return torch.sqrt(F.mse_loss(outs, y, reduction='none').mean(1)).mean()
def mae(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.l1_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.l1_loss(outs_masked, y_masked)
# Metrics that can depend on the loss
def loss(x, y, loss_fn):
""" This metric may be useful because the training loss may add extra regularization (e.g. weight decay implemented as L2 penalty), while adding this as a metric skips the additional losses """
return loss_fn(x, y)
def bpb(x, y, loss_fn):
""" bits per byte (image density estimation, speech generation, char LM) """
return loss_fn(x, y) / math.log(2)
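# Note: F.cross_entropy returns the loss in nats; dividing by log(2) converts it to
# bits, e.g. a loss of ~0.6931 nats corresponds to 1 bit per symbol.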
def ppl(x, y, loss_fn):
return torch.exp(loss_fn(x, y))
# should have a better way to do this
output_metric_fns = {
"binary_cross_entropy": binary_cross_entropy,
"cross_entropy": cross_entropy,
"padded_cross_entropy": padded_cross_entropy,
"binary_accuracy": binary_accuracy,
"precision": MulticlassPrecision,
"precision_per_class": PrecisionPerClass,
"recall": MulticlassRecall,
"recall_per_class": RecallPerClass,
"accuracy": accuracy,
"accuracy_per_class": AccuracyPerClass,
"accuracy_ignore_index": accuracy_ignore_index,
'accuracy@3': partial(accuracy_at_k, k=3),
'accuracy@5': partial(accuracy_at_k, k=5),
'accuracy@10': partial(accuracy_at_k, k=10),
"eval_loss": loss,
"mcc": mcc,
"mse": mse,
"mae": mae,
"forecast_rmse": forecast_rmse,
"f1_binary": f1_binary,
"f1_macro": f1_macro,
"f1_micro": f1_micro,
"roc_auc_macro": roc_auc_macro,
"roc_auc_micro": roc_auc_micro,
"soft_cross_entropy": soft_cross_entropy, # only for pytorch 1.10+
"student_t": student_t_loss,
"gaussian_ll": gaussian_ll_loss,
}
loss_metric_fns = {
"loss": loss,
"bpb": bpb,
"ppl": ppl,
}
metric_fns = {**output_metric_fns, **loss_metric_fns} # TODO py3.9
|
hyena-dna-main
|
src/tasks/metrics.py
|
# Inspired by https://github.com/NVIDIA/NeMo/blob/main/nemo/collections/common/metrics/perplexity.py
# But we compute the perplexity correctly: exp(average(nll)), not average(exp(nll))
# Also adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/text/perplexity.py
# But we pass in the loss to avoid recomputation
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import Tensor
from torchmetrics import Metric
try:
from flash_attn.losses.cross_entropy import CrossEntropyLoss
except ImportError:
CrossEntropyLoss = torch.nn.CrossEntropyLoss
try:
from apex.transformer import parallel_state
except ImportError:
parallel_state = None
class Perplexity(Metric):
r"""
    Perplexity measures how well a language model predicts a text sample. Here it is computed as the
    exponential of the average negative log-likelihood per token, i.e. exp(mean NLL), over the sample.
Args:
kwargs:
Additional keyword arguments, see :ref:`Metric kwargs` for more info.
Examples:
>>> import torch
>>> preds = torch.rand(2, 8, 5, generator=torch.manual_seed(22))
>>> target = torch.randint(5, (2, 8), generator=torch.manual_seed(22))
>>> target[0, 6:] = -100
>>> metric = Perplexity(ignore_index=-100)
>>> metric(preds, target)
tensor(5.2545)
"""
is_differentiable = True
higher_is_better = False
full_state_update = False
total_log_probs: Tensor
count: Tensor
def __init__(self, **kwargs: Dict[str, Any]):
super().__init__(**kwargs)
self.add_state("total_log_probs", default=torch.tensor(0.0, dtype=torch.float64),
dist_reduce_fx="sum")
self.add_state("count", default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx="sum")
self.loss_fn = CrossEntropyLoss()
def update(self, preds: Tensor, target: Tensor, loss: Optional[Tensor] = None) -> None: # type: ignore
"""Compute and store intermediate statistics for Perplexity.
Args:
preds:
                Logits (unnormalized scores) for each token in a sequence with shape [batch_size, seq_len, vocab_size].
target:
Ground truth values with a shape [batch_size, seq_len].
"""
count = target.numel()
if loss is None:
loss = self.loss_fn(preds, target)
self.total_log_probs += loss.double() * count
self.count += count
def compute(self) -> Tensor:
"""Compute the Perplexity.
Returns:
Perplexity
"""
return torch.exp(self.total_log_probs / self.count)
class NumTokens(Metric):
"""Keep track of how many tokens we've seen.
"""
# TODO: how do we prevent the reset between the epochs? The reset happens on the 1st batch
# of the next epoch.
# Right now the hack is that we override reset(), which would mess up the forward method.
# We then override forward to do the right thing.
is_differentiable = False
higher_is_better = False
full_state_update = False
count: Tensor
def __init__(self, **kwargs: Dict[str, Any]):
super().__init__(**kwargs)
self.add_state("count", default=torch.tensor(0, dtype=torch.int64), dist_reduce_fx="sum",
persistent=True) # We want the count to be saved to state-dict
if parallel_state is not None and not parallel_state.is_unitialized():
self.tensor_parallel_world_size = parallel_state.get_tensor_model_parallel_world_size()
else:
self.tensor_parallel_world_size = 1
def update(self, preds: Tensor, target: Tensor, loss: Optional[Tensor] = None) -> None: # type: ignore
self.count += target.numel() // self.tensor_parallel_world_size
def compute(self) -> Tensor:
return self.count
def reset(self):
count = self.count
super().reset()
self.count = count
# Adapted from https://github.com/Lightning-AI/metrics/blob/master/src/torchmetrics/metric.py
def _forward_reduce_state_update(self, *args: Any, **kwargs: Any) -> Any:
"""forward computation using single call to `update` to calculate the metric value on the current batch and
accumulate global state.
        This can be done when the global metric state is a simple reduction of batch states.
"""
self.update(*args, **kwargs)
return self.compute()
torchmetric_fns = {
"perplexity": Perplexity,
"num_tokens": NumTokens,
}
|
hyena-dna-main
|
src/tasks/torchmetrics.py
|
from typing import Optional, List, Tuple
import math
import functools
import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from omegaconf import ListConfig
from src.models.nn.components import ReversibleInstanceNorm1dInput, ReversibleInstanceNorm1dOutput, \
TSNormalization, TSInverseNormalization
from src.models.nn.adaptive_softmax import AdaptiveEmbedding, ProjectedAdaptiveLogSoftmax
import src.tasks.metrics as M
from src.tasks.torchmetrics import torchmetric_fns as tm_mine
import src.models.nn.utils as U
import torchmetrics as tm
from src.utils.config import to_list, instantiate
from torchmetrics import MetricCollection
class BaseTask:
""" Abstract class that takes care of:
- loss function
- arbitrary metrics
- forward pass
- (optional) encoder module that interfaces with dataset (inputs) and model
- (optional) decoder module that interfaces with dataset (targets) and model
"""
encoder = None
decoder = None
def __init__(self, dataset=None, model=None, loss=None, loss_val=None, metrics=None, torchmetrics=None):
""" This class is allowed to grab attributes directly off a constructed dataset and model object """
self.dataset = dataset
self.model = model
if metrics is None: metrics = []
self.metric_names = to_list(metrics)
if torchmetrics is None: torchmetrics = []
self.torchmetric_names = to_list(torchmetrics)
self._tracked_torchmetrics = {}
# The decoder might pass through arguments that the loss needs (e.g. sequence lengths)
# but might also pass through extraneous arguments (e.g. sampling rate)
# Wrap loss and metrics so that they accept kwargs and
# Create loss function
self.loss = instantiate(M.output_metric_fns, loss, partial=True)
self.loss = U.discard_kwargs(self.loss)
if loss_val is not None:
self.loss_val = instantiate(M.output_metric_fns, loss_val, partial=True)
self.loss_val = U.discard_kwargs(self.loss_val)
torchmetrics = MetricCollection(self._init_torchmetrics())
self.train_torchmetrics = torchmetrics.clone(prefix='train/')
self.val_torchmetrics = torchmetrics.clone(prefix='val/')
self.test_torchmetrics = torchmetrics.clone(prefix='test/')
def _init_torchmetrics(self):
"""
Instantiate torchmetrics.
"""
tracked_torchmetrics = {}
for name in self.torchmetric_names:
if name in tm_mine:
tracked_torchmetrics[name] = tm_mine[name]().to('cuda')
elif name in ['AUROC', 'StatScores', 'Precision', 'Recall', 'F1', 'F1Score']:
tracked_torchmetrics[name] = getattr(tm, name)(average='macro', num_classes=self.dataset.d_output, compute_on_step=False).to('cuda')
elif '@' in name:
k = int(name.split('@')[1])
mname = name.split('@')[0]
tracked_torchmetrics[name] = getattr(tm, mname)(average='macro', num_classes=self.dataset.d_output, compute_on_step=False, top_k=k).to('cuda')
else:
tracked_torchmetrics[name] = getattr(tm, name)(compute_on_step=False).to('cuda')
return tracked_torchmetrics
def _reset_torchmetrics(self, prefix=None):
"""
Reset torchmetrics for a prefix
associated with a particular dataloader (e.g. train, val, test).
Generally do this at the start of an epoch.
"""
all_prefixes = [prefix] if prefix is not None else self._tracked_torchmetrics
for prefix in all_prefixes:
if prefix in self._tracked_torchmetrics:
self._tracked_torchmetrics[prefix].reset()
def get_torchmetrics(self, prefix):
"""
Compute torchmetrics for a prefix associated with
a particular dataloader (e.g. train, val, test).
Generally do this at the end of an epoch.
"""
return {name: self._tracked_torchmetrics[prefix][name].compute() for name in self.torchmetric_names}
def torchmetrics(self, x, y, prefix, loss=None):
"""
Update torchmetrics with new x, y .
Prefix corresponds to a particular dataloader (e.g. train, val, test).
Generally call this every batch.
"""
if prefix not in self._tracked_torchmetrics:
self._init_torchmetrics(prefix)
self._tracked_torchmetrics[prefix](x, y, loss=loss)
# for name in self.torchmetric_names:
# if name.startswith('Accuracy'):
# if len(x.shape) > 2:
# # Multi-dimensional, multi-class
# self._tracked_torchmetrics[prefix][name].update(x.transpose(1, 2), y.squeeze())
# continue
# self._tracked_torchmetrics[prefix][name].update(x, y)
def get_torchmetrics(self, prefix):
return self._tracked_torchmetrics[prefix]
def metrics(self, x, y, **kwargs):
"""
Metrics are just functions
output metrics are a function of output and target
loss metrics are a function of loss (e.g. perplexity)
"""
output_metrics = {
name: U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
for name in self.metric_names if name in M.output_metric_fns
}
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
return {**output_metrics, **loss_metrics}
def forward(self, batch, encoder, model, decoder, _state):
"""Passes a batch through the encoder, backbone, and decoder"""
# z holds arguments such as sequence length
x, y, *z = batch # z holds extra dataloader info such as resolution
if len(z) == 0:
z = {}
else:
assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
z = z[0]
        x, w = encoder(x, **z) # w can be model-specific constructions such as key_padding_mask for transformers or state for RNNs
x, state = model(x, **w, state=_state)
self._state = state
x, w = decoder(x, state=state, **z)
return x, y, w
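    # Illustrative data flow (assumed shapes, not from the original code): for token
    # ids x of shape (B, L), an embedding encoder produces (B, L, d_model), the
    # backbone returns features of the same shape plus a state, and the decoder maps
    # them to (B, L, d_output), which the loss compares against y.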
class Scalar(nn.Module):
def __init__(self, c=1):
super().__init__()
self.c = c
def forward(self, x):
return x * self.c
class LMTask(BaseTask):
def forward(self, batch, encoder, model, decoder, _state):
"""Passes a batch through the encoder, backbone, and decoder"""
# z holds arguments such as sequence length
x, y, *z = batch # z holds extra dataloader info such as resolution
if len(z) == 0:
z = {}
else:
assert len(z) == 1 and isinstance(z[0], dict), "Dataloader must return dictionary of extra arguments"
z = z[0]
        x, w = encoder(x, **z) # w can be model-specific constructions such as key_padding_mask for transformers or state for RNNs
x, state = model(x, **w, state=_state)
self._state = state
x, w = decoder(x, state=state, **z)
x = x.logits
x = rearrange(x, '... C -> (...) C')
y = rearrange(y, '... -> (...)')
return x, y, w
class MultiClass(BaseTask):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.continual_metrics = {}
for name in self.metric_names:
if name.endswith('_per_class'):
for spec_idx, spec in enumerate(self.dataset.species):
self.continual_metrics[name + '_' + spec] = M.output_metric_fns[name](spec_idx)
def metrics(self, x, y, **kwargs):
output_metrics = {}
for name in self.metric_names:
if name in M.output_metric_fns:
if name.endswith('_per_class'):
for spec_idx, spec in enumerate(self.dataset.species):
self.continual_metrics[name + '_' + spec] = self.continual_metrics[name + '_' + spec].to(x.device)
self.continual_metrics[name + '_' + spec].update(x, y)
output_metrics[name + '_' + spec] = self.continual_metrics[name + '_' + spec].compute()
elif name in ['precision', 'recall']:
self.continual_metrics[name] = self.continual_metrics[name].to(x.device)
output_metrics[name] = self.continual_metrics[name](x, y)
else:
output_metrics[name] = U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
return {**output_metrics, **loss_metrics}
def _reset_torchmetrics(self, prefix=None):
super()._reset_torchmetrics(prefix)
for name in self.metric_names:
if name.endswith('_per_class'):
for spec_idx, spec in enumerate(self.dataset.species):
self.continual_metrics[name + '_' + spec].reset()
class HG38Task(LMTask):
def __init__(self, dataset=None, model=None, loss=None, loss_val=None, metrics=None, torchmetrics=None, last_k_ppl=None, per_token_ppl=None):
""" Extending LMTask to add custom metrics for HG38 task
last_k_ppl: config for custom ppl, with hparams to pass with it
per_token_ppl: config for per token ppl calc, with list of k (ppls) to track
"""
self.dataset = dataset
self.model = model
if metrics is None: metrics = []
self.metric_names = to_list(metrics)
self.last_k_ppl = last_k_ppl
self.per_token_ppl = per_token_ppl
if torchmetrics is None: torchmetrics = []
self.torchmetric_names = to_list(torchmetrics)
self._tracked_torchmetrics = {}
# The decoder might pass through arguments that the loss needs (e.g. sequence lengths)
# but might also pass through extraneous arguments (e.g. sampling rate)
# Wrap loss and metrics so that they accept kwargs and
# Create loss function
self.loss = instantiate(M.output_metric_fns, loss, partial=True)
self.loss = U.discard_kwargs(self.loss)
if loss_val is not None:
self.loss_val = instantiate(M.output_metric_fns, loss_val, partial=True)
self.loss_val = U.discard_kwargs(self.loss_val)
torchmetrics = MetricCollection(self._init_torchmetrics())
self.train_torchmetrics = torchmetrics.clone(prefix='train/')
self.val_torchmetrics = torchmetrics.clone(prefix='val/')
self.test_torchmetrics = torchmetrics.clone(prefix='test/')
# Create custom metrics for last k ppl
        # last_k_ppl is a list of k values, so loop thru them
if self.last_k_ppl is not None:
self.custom_ppl_dict = {}
for k in self.last_k_ppl:
key_name = "last_" + str(k) + "_ppl"
# create config
custom_ppl_config = {"_name_": "last_k_ppl", "k": k, "seq_len": self.dataset.max_length}
k_ppl_fn = instantiate(M.output_metric_fns, custom_ppl_config, partial=True)
k_ppl_fn = U.discard_kwargs(k_ppl_fn)
self.custom_ppl_dict[key_name] = k_ppl_fn
# Create custom metric for per token ppl
if self.per_token_ppl is not None:
per_token_ppl_config = {"_name_": "per_token_ppl", "ks": self.per_token_ppl["ks"], "seq_len": self.dataset.max_length}
per_token_fn = instantiate(M.output_metric_fns, per_token_ppl_config, partial=True)
per_token_fn = U.discard_kwargs(per_token_fn)
self.per_token_fn = per_token_fn
def metrics(self, x, y, **kwargs):
"""
Need to modify metrics to include custom metrics
"""
output_metrics = {
name: U.discard_kwargs(M.output_metric_fns[name])(x, y, **kwargs)
for name in self.metric_names if name in M.output_metric_fns
}
loss_metrics = {
name: U.discard_kwargs(M.loss_metric_fns[name])(x, y, self.loss, **kwargs)
for name in self.metric_names if name in M.loss_metric_fns
}
# loop thru all custom ppls and add them to output_metrics
if self.last_k_ppl is not None:
for key_name, k_ppl_fn in self.custom_ppl_dict.items():
output_metrics[key_name] = k_ppl_fn(x, y, **kwargs)
        # compute per-token ppl at the configured k values and add them to output_metrics
if self.per_token_ppl is not None:
# returns k ppl values, (averaged over batch)
per_k_ppl = self.per_token_fn(x, y, **kwargs)
# loop over ks to log metric
for ind, k in enumerate(self.per_token_ppl["ks"]):
key_name = "ppl_at_{}".format(k)
k = k-1 # 0 index in the background
output_metrics[key_name] = per_k_ppl[ind] # should be in order
return {**output_metrics, **loss_metrics}
class AdaptiveLMTask(BaseTask):
def __init__(
self,
div_val,
cutoffs : List[int],
tie_weights : bool,
tie_projs : List[bool],
init_scale=1.0,
bias_scale=0.0,
dropemb=0.0,
dropsoft=0.0,
**kwargs,
):
super().__init__(**kwargs)
n_tokens = self.dataset.n_tokens
d_model = self.model.d_model
d_output = self.model.d_output
encoder = AdaptiveEmbedding(
n_tokens,
d_model,
d_model,
cutoffs=cutoffs,
div_val=div_val,
init_scale=init_scale,
dropout=dropemb,
)
if tie_weights:
assert d_model == d_output
emb_layers = [i.weight for i in encoder.emb_layers]
else:
emb_layers = None
# Construct decoder/loss
emb_projs = encoder.emb_projs
loss = ProjectedAdaptiveLogSoftmax(
n_tokens, d_output, d_output,
cutoffs, div_val=div_val,
tie_projs=tie_projs,
out_projs=emb_projs,
out_layers_weights=emb_layers,
bias_scale=bias_scale,
dropout=dropsoft,
)
self.encoder = encoder
self.loss = loss
registry = {
'base': BaseTask,
'multiclass': MultiClass,
'lm': LMTask,
'hg38': HG38Task,
}
|
hyena-dna-main
|
src/tasks/tasks.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange, reduce
import src.models.nn.utils as U
import src.utils as utils
import src.utils.config
import src.utils.train
log = src.utils.train.get_logger(__name__)
class Decoder(nn.Module):
"""This class doesn't do much but just signals the interface that Decoders are expected to adhere to
TODO: is there a way to enforce the signature of the forward method?
"""
def forward(self, x, **kwargs):
"""
x: (batch, length, dim) input tensor
state: additional state from the model backbone
*args, **kwargs: additional info from the dataset
Returns:
y: output tensor
*args: other arguments to pass into the loss function
"""
return x
def step(self, x):
"""
x: (batch, dim)
"""
return self.forward(x.unsqueeze(1)).squeeze(1)
class SequenceDecoder(Decoder):
def __init__(
self, d_model, d_output=None, l_output=None, use_lengths=False, mode="last"
):
super().__init__()
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
if l_output is None:
self.l_output = None
self.squeeze = False
elif l_output == 0:
# Equivalent to getting an output of length 1 and then squeezing
self.l_output = 1
self.squeeze = True
else:
assert l_output > 0
self.l_output = l_output
self.squeeze = False
self.use_lengths = use_lengths
self.mode = mode
if mode == 'ragged':
assert not use_lengths
def forward(self, x, state=None, lengths=None, l_output=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.l_output is None:
if l_output is not None:
assert isinstance(l_output, int) # Override by pass in
else:
# Grab entire output
l_output = x.size(-2)
squeeze = False
else:
l_output = self.l_output
squeeze = self.squeeze
if self.mode == "last":
restrict = lambda x: x[..., -l_output:, :]
elif self.mode == "first":
restrict = lambda x: x[..., :l_output, :]
elif self.mode == "pool":
restrict = lambda x: (
torch.cumsum(x, dim=-2)
/ torch.arange(
1, 1 + x.size(-2), device=x.device, dtype=x.dtype
).unsqueeze(-1)
)[..., -l_output:, :]
def restrict(x):
L = x.size(-2)
s = x.sum(dim=-2, keepdim=True)
if l_output > 1:
c = torch.cumsum(x[..., -(l_output - 1) :, :].flip(-2), dim=-2)
c = F.pad(c, (0, 0, 1, 0))
s = s - c # (B, l_output, D)
s = s.flip(-2)
denom = torch.arange(
L - l_output + 1, L + 1, dtype=x.dtype, device=x.device
)
s = s / denom
return s
elif self.mode == "sum":
restrict = lambda x: torch.cumsum(x, dim=-2)[..., -l_output:, :]
# TODO use same restrict function as pool case
elif self.mode == 'ragged':
assert lengths is not None, "lengths must be provided for ragged mode"
# remove any additional padding (beyond max length of any sequence in the batch)
restrict = lambda x: x[..., : max(lengths), :]
else:
raise NotImplementedError(
"Mode must be ['last' | 'first' | 'pool' | 'sum']"
)
# Restrict to actual length of sequence
if self.use_lengths:
assert lengths is not None
x = torch.stack(
[
restrict(out[..., :length, :])
for out, length in zip(torch.unbind(x, dim=0), lengths)
],
dim=0,
)
else:
x = restrict(x)
if squeeze:
assert x.size(-2) == 1
x = x.squeeze(-2)
x = self.output_transform(x)
return x
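    # Sketch of the "pool" mode above (illustrative values only): for a length-4
    # sequence with scalar features [1, 2, 3, 4], the running means are
    # [1.0, 1.5, 2.0, 2.5]; with l_output=2 the decoder keeps the last two, [2.0, 2.5].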
def step(self, x, state=None):
# Ignore all length logic
return self.output_transform(x)
class TokenDecoder(Decoder):
"""Decoder for token level classification"""
def __init__(
self, d_model, d_output=3
):
super().__init__()
self.output_transform = nn.Linear(d_model, d_output)
def forward(self, x, state=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
x = self.output_transform(x)
return x
class NDDecoder(Decoder):
"""Decoder for single target (e.g. classification or regression)"""
def __init__(
self, d_model, d_output=None, mode="pool"
):
super().__init__()
assert mode in ["pool", "full"]
self.output_transform = nn.Identity() if d_output is None else nn.Linear(d_model, d_output)
self.mode = mode
def forward(self, x, state=None):
"""
x: (n_batch, l_seq, d_model)
Returns: (n_batch, l_output, d_output)
"""
if self.mode == 'pool':
x = reduce(x, 'b ... h -> b h', 'mean')
x = self.output_transform(x)
return x
class StateDecoder(Decoder):
"""Use the output state to decode (useful for stateful models such as RNNs or perhaps Transformer-XL if it gets implemented"""
def __init__(self, d_model, state_to_tensor, d_output):
super().__init__()
self.output_transform = nn.Linear(d_model, d_output)
self.state_transform = state_to_tensor
def forward(self, x, state=None):
return self.output_transform(self.state_transform(state))
class RetrievalHead(nn.Module):
def __init__(self, d_input, d_model, n_classes, nli=True, activation="relu"):
super().__init__()
self.nli = nli
if activation == "relu":
activation_fn = nn.ReLU()
elif activation == "gelu":
activation_fn = nn.GELU()
else:
raise NotImplementedError
if (
self.nli
): # Architecture from https://github.com/mlpen/Nystromformer/blob/6539b895fa5f798ea0509d19f336d4be787b5708/reorganized_code/LRA/model_wrapper.py#L74
self.classifier = nn.Sequential(
nn.Linear(4 * d_input, d_model),
activation_fn,
nn.Linear(d_model, n_classes),
)
else: # Head from https://github.com/google-research/long-range-arena/blob/ad0ff01a5b3492ade621553a1caae383b347e0c1/lra_benchmarks/models/layers/common_layers.py#L232
self.classifier = nn.Sequential(
nn.Linear(2 * d_input, d_model),
activation_fn,
nn.Linear(d_model, d_model // 2),
activation_fn,
nn.Linear(d_model // 2, n_classes),
)
def forward(self, x):
"""
x: (2*batch, dim)
"""
outs = rearrange(x, "(z b) d -> z b d", z=2)
outs0, outs1 = outs[0], outs[1] # (n_batch, d_input)
if self.nli:
features = torch.cat(
[outs0, outs1, outs0 - outs1, outs0 * outs1], dim=-1
) # (batch, dim)
else:
features = torch.cat([outs0, outs1], dim=-1) # (batch, dim)
logits = self.classifier(features)
return logits
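    # The NLI branch uses the common sentence-pair feature construction
    # [u, v, u - v, u * v] (as popularized by InferSent-style classifiers), which is
    # why the first Linear layer of the classifier expects 4 * d_input features.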
class RetrievalDecoder(Decoder):
"""Combines the standard FeatureDecoder to extract a feature before passing through the RetrievalHead"""
def __init__(
self,
d_input,
n_classes,
d_model=None,
nli=True,
activation="relu",
*args,
**kwargs
):
super().__init__()
if d_model is None:
d_model = d_input
self.feature = SequenceDecoder(
d_input, d_output=None, l_output=0, *args, **kwargs
)
self.retrieval = RetrievalHead(
d_input, d_model, n_classes, nli=nli, activation=activation
)
def forward(self, x, state=None, **kwargs):
x = self.feature(x, state=state, **kwargs)
x = self.retrieval(x)
return x
class PackedDecoder(Decoder):
def forward(self, x, state=None):
x, _ = nn.utils.rnn.pad_packed_sequence(x, batch_first=True)
return x
# For every type of encoder/decoder, specify:
# - constructor class
# - list of attributes to grab from dataset
# - list of attributes to grab from model
registry = {
"stop": Decoder,
"id": nn.Identity,
"linear": nn.Linear,
"sequence": SequenceDecoder,
"nd": NDDecoder,
"retrieval": RetrievalDecoder,
"state": StateDecoder,
"pack": PackedDecoder,
"token": TokenDecoder,
}
model_attrs = {
"linear": ["d_output"],
"sequence": ["d_output"],
"nd": ["d_output"],
"retrieval": ["d_output"],
"state": ["d_state", "state_to_tensor"],
"forecast": ["d_output"],
"token": ["d_output"],
}
dataset_attrs = {
"linear": ["d_output"],
"sequence": ["d_output", "l_output"],
"nd": ["d_output"],
"retrieval": ["d_output"],
"state": ["d_output"],
"forecast": ["d_output", "l_output"],
"token": ["d_output"],
}
def _instantiate(decoder, model=None, dataset=None):
"""Instantiate a single decoder"""
if decoder is None:
return None
if isinstance(decoder, str):
name = decoder
else:
name = decoder["_name_"]
# Extract arguments from attribute names
dataset_args = utils.config.extract_attrs_from_obj(
dataset, *dataset_attrs.get(name, [])
)
model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))
# Instantiate decoder
obj = utils.instantiate(registry, decoder, *model_args, *dataset_args)
return obj
def instantiate(decoder, model=None, dataset=None):
"""Instantiate a full decoder config, e.g. handle list of configs
Note that arguments are added in reverse order compared to encoder (model first, then dataset)
"""
decoder = utils.to_list(decoder)
return U.PassthroughSequential(
*[_instantiate(d, model=model, dataset=dataset) for d in decoder]
)
|
hyena-dna-main
|
src/tasks/decoders.py
|
import datetime
import math
from typing import ForwardRef
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange, repeat
import src.models.nn.utils as U
import src.utils as utils
import src.utils.config
from src.models.sequence.block import SequenceResidualBlock
from src.models.nn.components import Normalization
class Encoder(nn.Module):
"""Encoder abstraction
Accepts a tensor and optional kwargs. Outside of the main tensor, all other arguments should be kwargs.
Returns a tensor and optional kwargs.
Encoders are combined via U.PassthroughSequential which passes these kwargs through in a pipeline. The resulting kwargs are accumulated and passed into the model backbone.
"""
def forward(self, x, **kwargs):
"""
x: input tensor
*args: additional info from the dataset (e.g. sequence lengths)
Returns:
y: output tensor
*args: other arguments to pass into the model backbone
"""
return x, {}
class PositionalIDEncoder(Encoder):
def forward(self, x):
position_ids = torch.arange(x.shape[-1], dtype=torch.long, device=x.device)
position_ids = repeat(position_ids, 'l -> b l', b=x.shape[0])
return x, { 'position_ids': position_ids }
# Adapted from https://github.com/pytorch/examples/blob/master/word_language_model/model.py
class PositionalEncoder(Encoder):
r"""Inject some information about the relative or absolute position of the tokens
in the sequence. The positional encodings have the same dimension as
the embeddings, so that the two can be summed. Here, we use sine and cosine
functions of different frequencies.
.. math::
        \text{PosEncoder}(pos, 2i) = \sin(pos/10000^{2i/d_{model}})
        \text{PosEncoder}(pos, 2i+1) = \cos(pos/10000^{2i/d_{model}})
        \text{where pos is the word position and i is the embed idx}
Args:
d_model: the embed dim (required).
dropout: the dropout value (default=0.1).
max_len: the max. length of the incoming sequence (default=5000).
Examples:
>>> pos_encoder = PositionalEncoder(d_model)
"""
def __init__(self, d_model, dropout=0.1, max_len=16384, pe_init=None):
super().__init__()
self.dropout = nn.Dropout(p=dropout)
if pe_init is not None:
self.pe = nn.Parameter(torch.empty(max_len, 1, d_model))
nn.init.normal_(self.pe, 0, pe_init)
# self.pe = pe.unsqueeze(1)
else:
pe = torch.zeros(max_len, d_model)
position = torch.arange(0.0, max_len).unsqueeze(1)
div_term = torch.exp(
-math.log(10000.0) * torch.arange(0.0, d_model, 2.0) / d_model
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
self.register_buffer("pe", pe)
self.attn_mask = None
def forward(self, x):
r"""Inputs of forward function
Args:
x: the sequence fed to the positional encoder model (required).
lens: actual lengths of sequences
Shape:
x: [l_sequence, n_batch, d_model]
Returns: [l_sequence, n_batch, d_model]
attn_mask: [l_sequence, l_sequence]
padding_mask:
"""
x = x + self.pe[: x.size(-2)]
return self.dropout(x)
class ClassEmbedding(Encoder):
# Should also be able to define this by subclassing Embedding
def __init__(self, n_classes, d_model):
super().__init__()
self.embedding = nn.Embedding(n_classes, d_model)
def forward(self, x, y):
x = x + self.embedding(y).unsqueeze(-2) # (B, L, D)
return x
class Conv1DEncoder(Encoder):
def __init__(self, d_input, d_model, kernel_size=25, stride=1, padding='same'):
super().__init__()
self.conv = nn.Conv1d(
in_channels=d_input,
out_channels=d_model,
kernel_size=kernel_size,
stride=stride,
padding=padding,
)
def forward(self, x):
# BLD -> BLD
x = self.conv(x.transpose(1, 2)).transpose(1, 2)
return x
class LayerEncoder(Encoder):
"""Use an arbitrary SequenceModule layer"""
def __init__(self, d_model, prenorm=False, norm='layer', layer=None):
super().__init__()
# Simple stack of blocks
layer["transposed"] = False
self.layer = SequenceResidualBlock(
d_input=d_model,
prenorm=prenorm,
layer=layer,
residual='R',
norm=norm,
pool=None,
)
def forward(self, x):
x, _ = self.layer(x) # Discard state
return x
class TimestampEmbeddingEncoder(Encoder):
"""
General time encoder for Pandas Timestamp objects (encoded as torch tensors).
See MonashDataset for an example of how to return time features as 'z's.
"""
cardinalities = {
'day': (1, 31),
'hour': (0, 23),
'minute': (0, 59),
'second': (0, 59),
'month': (1, 12),
'year': (1950, 2010), # (1800, 3000) used to be (1970, datetime.datetime.now().year + 1) but was not enough for all datasets in monash
'dayofweek': (0, 6),
'dayofyear': (1, 366),
'quarter': (1, 4),
'week': (1, 53),
'is_month_start': (0, 1),
'is_month_end': (0, 1),
'is_quarter_start': (0, 1),
'is_quarter_end': (0, 1),
'is_year_start': (0, 1),
'is_year_end': (0, 1),
'is_leap_year': (0, 1),
}
def __init__(self, d_model, table=False, features=None):
super().__init__()
self.table = table
self.ranges = {k: max_val - min_val + 2 for k, (min_val, max_val) in self.cardinalities.items()} # padding for null included
if features is None:
pass
else:
self.cardinalities = {k: v for k, v in self.cardinalities.items() if k in features}
if table:
self.embedding = nn.ModuleDict({
attr: nn.Embedding(maxval - minval + 2, d_model, padding_idx=0)
for attr, (minval, maxval) in self.cardinalities.items()
})
else:
self.embedding = nn.ModuleDict({
attr: nn.Linear(1, d_model)
for attr in self.cardinalities
})
def forward(self, x, timestamps=None):
for attr in timestamps:
mask = timestamps[attr] == -1
timestamps[attr] = timestamps[attr] - self.cardinalities[attr][0]
timestamps[attr][mask] = 0
if self.table:
x = x + self.embedding[attr](timestamps[attr].to(torch.long))
else:
x = x + self.embedding[attr]((2 * timestamps[attr] / self.ranges[attr] - 1).unsqueeze(-1))
#x = x + self.embedding(timestamps[attr].to(torch.float)).unsqueeze(1)
return x
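    # Illustrative example (assumed values): in the non-table branch each feature is
    # rescaled to roughly [-1, 1] before its Linear layer; an 'hour' value of 12 with
    # range 25 (0..23 plus null padding) maps to 2 * 12 / 25 - 1 = -0.04.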
class TimeEncoder(Encoder):
def __init__(self, n_tokens_time, d_model, timeenc=0):
super().__init__()
self.timeenc = timeenc
if self.timeenc == 0:
self.encoders = nn.ModuleList(
[nn.Embedding(v, d_model) for v in n_tokens_time]
)
else:
self.encoders = nn.Linear(len(n_tokens_time), d_model)
self.mask_embed = nn.Embedding(2, d_model)
def forward(self, x, mark=None, mask=None):
assert mark is not None and mask is not None, "Extra arguments should be returned by collate function"
if self.timeenc == 0:
assert mark.size(-1) == len(self.encoders)
embeddings = [
embed(z) for embed, z in zip(self.encoders, torch.unbind(mark, dim=-1))
]
time_encode = torch.sum(torch.stack(embeddings), dim=0)
else:
time_encode = self.encoders(mark)
mask_encode = self.mask_embed(mask.squeeze(-1))
return x + time_encode + mask_encode # (B, L, d_model)
class PackedEncoder(Encoder):
def forward(self, x, len_batch=None):
assert len_batch is not None
x = nn.utils.rnn.pack_padded_sequence(
x, len_batch.cpu(), enforce_sorted=False, batch_first=True,
)
return x
class OneHotEncoder(Encoder):
def __init__(self, n_tokens, d_model):
super().__init__()
assert n_tokens <= d_model
self.d_model = d_model
def forward(self, x):
return F.one_hot(x.squeeze(-1), self.d_model).float()
class Conv2DPatchEncoder(Encoder):
"""
For encoding images into a sequence of patches.
"""
def __init__(self, d_input, d_model, filter_sizes, flat=False):
"""
d_input: dim of encoder input (data dimension)
d_model: dim of encoder output (model dimension)
filter_sizes: tuple with fh, fw
flat: if image is flattened from dataloader (like in cifar),
then we need to reshape back to 2D before conv
"""
fh, fw = filter_sizes
self.flat = flat
super().__init__()
assert len(filter_sizes) == 2
self.encoder = nn.Conv2d(d_input, d_model, kernel_size=(fh, fw), stride=(fh, fw))
def forward(self, x):
"""
x shape expected = [b, h, w, c]
returns tuple with x, with new shape = [b, seq_len, c_out]
"""
x = rearrange(x, 'b h w c -> b c h w')
x = self.encoder(x)
x = rearrange(x, 'b c h w -> b (h w) c')
return x
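    # Illustrative shape walk-through (assumed numbers): a (b, 32, 32, 3) input with
    # filter_sizes=(4, 4) yields an 8x8 grid of patches, i.e. an output of shape
    # (b, 64, d_model).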
# For every type of encoder/decoder, specify:
# - constructor class
# - list of attributes to grab from dataset
# - list of attributes to grab from model
registry = {
"stop": Encoder,
"id": nn.Identity,
"embedding": nn.Embedding,
"linear": nn.Linear,
"position": PositionalEncoder,
"position_id": PositionalIDEncoder,
"class": ClassEmbedding,
"pack": PackedEncoder,
"time": TimeEncoder,
"onehot": OneHotEncoder,
"conv1d": Conv1DEncoder,
"patch2d": Conv2DPatchEncoder,
"timestamp_embedding": TimestampEmbeddingEncoder,
"layer": LayerEncoder,
}
dataset_attrs = {
"embedding": ["n_tokens"],
"linear": ["d_input"], # TODO make this d_data?
"class": ["n_classes"],
"time": ["n_tokens_time"],
"onehot": ["n_tokens"],
"conv1d": ["d_input"],
"patch2d": ["d_input"],
}
model_attrs = {
"embedding": ["d_model"],
"linear": ["d_model"],
"position": ["d_model"],
"class": ["d_model"],
"time": ["d_model"],
"onehot": ["d_model"],
"conv1d": ["d_model"],
"patch2d": ["d_model"],
"timestamp_embedding": ["d_model"],
"layer": ["d_model"],
}
def _instantiate(encoder, dataset=None, model=None):
"""Instantiate a single encoder"""
if encoder is None:
return None
if isinstance(encoder, str):
name = encoder
else:
name = encoder["_name_"]
# Extract dataset/model arguments from attribute names
dataset_args = utils.config.extract_attrs_from_obj(
dataset, *dataset_attrs.get(name, [])
)
model_args = utils.config.extract_attrs_from_obj(model, *model_attrs.get(name, []))
# Instantiate encoder
obj = utils.instantiate(registry, encoder, *dataset_args, *model_args)
return obj
def instantiate(encoder, dataset=None, model=None):
encoder = utils.to_list(encoder)
return U.PassthroughSequential(
*[_instantiate(e, dataset=dataset, model=model) for e in encoder]
)
|
hyena-dna-main
|
src/tasks/encoders.py
|
from typing import Any
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
class ParamsLog(pl.Callback):
""" Log the number of parameters of the model """
def __init__(
self,
total: bool = True,
trainable: bool = True,
fixed: bool = True,
):
super().__init__()
self._log_stats = AttributeDict(
{
'total_params_log': total,
'trainable_params_log': trainable,
'non_trainable_params_log': fixed,
}
)
@rank_zero_only
def on_fit_start(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
logs = {}
if self._log_stats.total_params_log:
logs["params/total"] = sum(p.numel() for p in pl_module.parameters())
if self._log_stats.trainable_params_log:
logs["params/trainable"] = sum(p.numel() for p in pl_module.parameters()
if p.requires_grad)
if self._log_stats.non_trainable_params_log:
logs["params/fixed"] = sum(p.numel() for p in pl_module.parameters()
if not p.requires_grad)
if trainer.logger:
trainer.logger.log_hyperparams(logs)
|
hyena-dna-main
|
src/callbacks/params.py
|
import torch
from pytorch_lightning import Callback, Trainer, LightningModule
import logging
log = logging.getLogger(__name__) # We want a logger for each process, not just the rank 0
def l2_promote():
import ctypes
_libcudart = ctypes.CDLL('libcudart.so')
# Set device limit on the current device
# cudaLimitMaxL2FetchGranularity = 0x05
pValue = ctypes.cast((ctypes.c_int*1)(), ctypes.POINTER(ctypes.c_int))
_libcudart.cudaDeviceSetLimit(ctypes.c_int(0x05), ctypes.c_int(128))
_libcudart.cudaDeviceGetLimit(pValue, ctypes.c_int(0x05))
assert pValue.contents.value == 128
def set_affinity(trainer):
try:
from src.utils.gpu_affinity import set_affinity
nproc_per_node = torch.cuda.device_count()
affinity = set_affinity(trainer.local_rank, nproc_per_node, 'socket_unique_continuous')
log.info(f'{trainer.local_rank}: thread affinity: {affinity}')
# TD [2022-05-07] Somehow calling this causes GPU 0 to allocate extra ~800MB of memory per
# number of GPUs (e.g., 6.4GB of extra memory in a 8-GPU setup). H/t Dan.
# l2_promote()
except:
pass
class GpuAffinity(Callback):
"""Set GPU affinity and increase the L2 fetch granularity.
Adapted from https://github.com/NVIDIA/DeepLearningExamples/tree/master/PyTorch/LanguageModeling/Transformer-XL
"""
def setup(self, trainer: Trainer, pl_module: LightningModule, stage=None) -> None:
set_affinity(trainer)
|
hyena-dna-main
|
src/callbacks/gpu_affinity.py
|
### https://github.com/HazyResearch/transformers/blob/master/src/callbacks/wandb_callbacks.py
import glob
import os
from typing import List
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sn
import torch
import wandb
from pytorch_lightning import Callback, Trainer
from pytorch_lightning.loggers import LoggerCollection, WandbLogger
from pytorch_lightning.utilities import rank_zero_only
from sklearn import metrics
from sklearn.metrics import f1_score, precision_score, recall_score
def get_wandb_logger(trainer: Trainer) -> WandbLogger:
"""Safely get Weights&Biases logger from Trainer."""
if isinstance(trainer.logger, WandbLogger):
return trainer.logger
if isinstance(trainer.logger, LoggerCollection):
for logger in trainer.logger:
if isinstance(logger, WandbLogger):
return logger
raise Exception(
"You are using wandb related callback, but WandbLogger was not found for some reason..."
)
class WatchModel(Callback):
"""Make wandb watch model at the beginning of the run."""
def __init__(self, log: str = "gradients", log_freq: int = 100):
self.log = log
self.log_freq = log_freq
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
logger.watch(model=trainer.model, log=self.log, log_freq=self.log_freq)
class UploadCodeAsArtifact(Callback):
"""Upload all *.py files to wandb as an artifact, at the beginning of the run."""
def __init__(self, code_dir: str):
self.code_dir = code_dir
@rank_zero_only
def on_train_start(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
code = wandb.Artifact("project-source", type="code")
for path in glob.glob(os.path.join(self.code_dir, "**/*.py"), recursive=True):
code.add_file(path)
experiment.log_artifact(code)
class UploadCheckpointsAsArtifact(Callback):
"""Upload checkpoints to wandb as an artifact, at the end of run."""
def __init__(self, ckpt_dir: str = "checkpoints/", upload_best_only: bool = False):
self.ckpt_dir = ckpt_dir
self.upload_best_only = upload_best_only
@rank_zero_only
def on_train_end(self, trainer, pl_module):
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
ckpts = wandb.Artifact("experiment-ckpts", type="checkpoints")
if self.upload_best_only:
ckpts.add_file(trainer.checkpoint_callback.best_model_path)
else:
for path in glob.glob(os.path.join(self.ckpt_dir, "**/*.ckpt"), recursive=True):
ckpts.add_file(path)
experiment.log_artifact(ckpts)
class LogConfusionMatrix(Callback):
"""Generate confusion matrix every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module) -> None:
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate confusion matrix."""
if self.ready:
logger = get_wandb_logger(trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
confusion_matrix = metrics.confusion_matrix(y_true=targets, y_pred=preds)
# set figure size
plt.figure(figsize=(14, 8))
# set labels size
sn.set(font_scale=1.4)
# set font size
sn.heatmap(confusion_matrix, annot=True, annot_kws={"size": 8}, fmt="g")
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"confusion_matrix/{experiment.name}": wandb.Image(plt)}, commit=False)
# according to wandb docs this should also work but it crashes
# experiment.log(f{"confusion_matrix/{experiment.name}": plt})
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogF1PrecRecHeatmap(Callback):
"""Generate f1, precision, recall heatmap every epoch and send it to wandb.
Expects validation step to return predictions and targets.
"""
def __init__(self, class_names: List[str] = None):
self.preds = []
self.targets = []
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_batch_end(
self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx
):
"""Gather data from single batch."""
if self.ready:
self.preds.append(outputs["preds"])
self.targets.append(outputs["targets"])
def on_validation_epoch_end(self, trainer, pl_module):
"""Generate f1, precision and recall heatmap."""
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
preds = torch.cat(self.preds).cpu().numpy()
targets = torch.cat(self.targets).cpu().numpy()
f1 = f1_score(preds, targets, average=None)
r = recall_score(preds, targets, average=None)
p = precision_score(preds, targets, average=None)
data = [f1, p, r]
# set figure size
plt.figure(figsize=(14, 3))
# set labels size
sn.set(font_scale=1.2)
# set font size
sn.heatmap(
data,
annot=True,
annot_kws={"size": 10},
fmt=".3f",
yticklabels=["F1", "Precision", "Recall"],
)
            # names should be unique or else charts from different experiments in wandb will overlap
experiment.log({f"f1_p_r_heatmap/{experiment.name}": wandb.Image(plt)}, commit=False)
# reset plot
plt.clf()
self.preds.clear()
self.targets.clear()
class LogImagePredictions(Callback):
"""Logs a validation batch and their predictions to wandb.
Example adapted from:
https://wandb.ai/wandb/wandb-lightning/reports/Image-Classification-using-PyTorch-Lightning--VmlldzoyODk1NzY
"""
def __init__(self, num_samples: int = 8):
super().__init__()
self.num_samples = num_samples
self.ready = True
def on_sanity_check_start(self, trainer, pl_module):
self.ready = False
def on_sanity_check_end(self, trainer, pl_module):
"""Start executing this callback only after all validation sanity checks end."""
self.ready = True
def on_validation_epoch_end(self, trainer, pl_module):
if self.ready:
logger = get_wandb_logger(trainer=trainer)
experiment = logger.experiment
            # get a validation batch from the validation data loader
val_samples = next(iter(trainer.datamodule.val_dataloader()))
val_imgs, val_labels = val_samples
# run the batch through the network
val_imgs = val_imgs.to(device=pl_module.device)
logits = pl_module(val_imgs)
preds = torch.argmax(logits, axis=-1)
# log the images as wandb Image
experiment.log(
{
f"Images/{experiment.name}": [
wandb.Image(x, caption=f"Pred:{pred}, Label:{y}")
for x, pred, y in zip(
val_imgs[: self.num_samples],
preds[: self.num_samples],
val_labels[: self.num_samples],
)
]
}
)
class LogDT(Callback):
""" Log the dt values (from NeurIPS 2021 LSSL submission) """
def on_train_epoch_end(self, trainer, pl_module):
log_dict = {}
for name, m in pl_module.model.named_modules():
if pl_module.hparams.train.get('log_dt', False) \
and hasattr(m, "log_dt"):
log_dict[f"{name}.log_dt"] = (
m.log_dt.detach().cpu().numpy().flatten()
)
log_dict[f"{name}.log_dt.image"] = wandb.Image(
m.log_dt.detach().cpu().numpy().flatten().reshape(1, -1)
)
log_dict[f"{name}.log_dt"] = wandb.Table(
dataframe=pd.DataFrame(
{"log_dt": m.log_dt.detach().cpu().numpy().flatten()}
)
)
if torch.distributed.is_initialized() and torch.distributed.get_rank() == 0:
if trainer.logger is not None:
trainer.logger.experiment.log(log_dict)
|
hyena-dna-main
|
src/callbacks/wandb.py
|
### https://github.com/HazyResearch/transformers/blob/master/src/callbacks/speed_monitor.py
# Adapted from https://pytorch-lightning.readthedocs.io/en/latest/_modules/pytorch_lightning/callbacks/gpu_stats_monitor.html#GPUStatsMonitor
# We only need the speed monitoring, not the GPU monitoring
import time
from typing import Any
from pytorch_lightning import Callback, Trainer, LightningModule
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from pytorch_lightning.utilities.types import STEP_OUTPUT
class Timer(Callback):
"""Monitor the speed of each step and each epoch.
"""
def __init__(
self,
step: bool = True,
inter_step: bool = True,
epoch: bool = True,
val: bool = True,
):
super().__init__()
self._log_stats = AttributeDict( {
'step_time': step,
'inter_step_time': inter_step,
'epoch_time': epoch,
'val_time': val,
})
def on_train_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_epoch_time = None
def on_train_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_step_time = None
self._snap_inter_step_time = None
self._snap_epoch_time = time.time()
def on_train_batch_start(
self,
trainer: Trainer,
pl_module: LightningModule,
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.step_time:
self._snap_step_time = time.time()
if not self._should_log(trainer):
return
logs = {}
if self._log_stats.inter_step_time and self._snap_inter_step_time:
# First log at beginning of second step
logs["timer/inter_step"] = (time.time() - self._snap_inter_step_time) # * 1000
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_batch_end(
self,
trainer: Trainer,
pl_module: LightningModule,
outputs: STEP_OUTPUT,
batch: Any,
batch_idx: int,
) -> None:
if self._log_stats.inter_step_time:
self._snap_inter_step_time = time.time()
if not self._should_log(trainer):
return
logs = {}
if self._log_stats.step_time and self._snap_step_time:
logs["timer/step"] = (time.time() - self._snap_step_time) # * 1000
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
@rank_zero_only
def on_train_epoch_end(self, trainer: Trainer, pl_module: LightningModule,) -> None:
logs = {}
if self._log_stats.epoch_time and self._snap_epoch_time:
logs["timer/epoch"] = time.time() - self._snap_epoch_time
if trainer.logger: trainer.logger.log_metrics(logs, step=trainer.global_step)
def on_validation_epoch_start(self, trainer: Trainer, pl_module: LightningModule) -> None:
self._snap_val_time = time.time()
@rank_zero_only
def on_validation_epoch_end(self, trainer: Trainer, pl_module: LightningModule,) -> None:
logs = {}
if self._log_stats.val_time and self._snap_val_time:
logs["timer/validation"] = time.time() - self._snap_val_time
if trainer.logger: trainer.logger.log_metrics(logs) # , step=trainer.global_step)
@staticmethod
def _should_log(trainer) -> bool:
return (trainer.global_step + 1) % trainer.log_every_n_steps == 0 or trainer.should_stop
|
hyena-dna-main
|
src/callbacks/timer.py
|
r"""
Sequence Length Warmup by Reloading
====================
Change sequence lengths according to a stage schedule. Each stage's parameters set the sequence length
and batch size.
TODO (not yet supported):
If batch size is not provided for that stage, calculate the batch size based on the
sequence length reshaping into the batch size.
"""
import numpy as np
from pytorch_lightning.callbacks import Callback
import src.utils as utils
from src.utils import registry
class SeqlenWarmupReload(Callback):
def __init__(self, stage_params: list):
"""
stage_params is a list of dicts
e.g. stage_params = [
{'seq_len': 512, 'epochs': 50},
{'seq_len': 256, 'epochs': 30},
{'seq_len': 128, 'epochs': 20},
]
"""
super().__init__()
assert len(stage_params) > 0, 'No stages specified'
assert all([{'seq_len', 'epochs'} <= set(stage.keys()) for stage in stage_params]), \
'stage_params must contain keys: seq_len and epochs'
self.stage_params = stage_params
self.stage_epochs_cume = np.cumsum([stage['epochs'] for stage in stage_params])
self._current_stage = 0
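        # e.g. for the stage_params sketched in the docstring above, stage_epochs_cume
        # is [50, 80, 100]: stage 0 covers epochs [0, 50), stage 1 covers [50, 80), etc.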
def _verify_stages(self, trainer, model):
# Double-check that stage parameters are correct, otherwise we'll fail in the middle of training
for stage in self.stage_params:
if hasattr(stage, 'scheduler'):
# Verify that we can actually create the scheduler when we need to update it in each stage
scheduler = utils.instantiate(registry.scheduler, {**model.hparams.scheduler, **stage['scheduler']}, trainer.optimizers[0])
del scheduler
def on_train_start(self, trainer, model) -> None:
# Verify all the stage parameters are correct
self._verify_stages(trainer, model)
print(f"Training starts at {trainer.current_epoch}")
if trainer.current_epoch == 0:
# Update the model to the first stage
self._update_to_current_stage(trainer, model)
else:
# Preemption or resumption of progressive resizing
# Update the stage to the current one
self._current_stage = int(np.searchsorted(self.stage_epochs_cume - 1, trainer.current_epoch))
self._starting_stage = np.any(trainer.current_epoch == self.stage_epochs_cume)
print("Seq Len Warmup: Restarting at Stage {}".format(self._current_stage))
if self._starting_stage:
self._update_lr_scheduler(trainer, model)
# Set the dataloader and model
self._update_dataloaders(trainer, model)
# self._update_model(trainer, model) # we don't need to update the model, yet
return super().on_train_start(trainer, model)
def _update_lr_scheduler(self, trainer, model):
if not hasattr(self.stage_params[self._current_stage], 'scheduler'):
# No scheduler specified, so don't update the current scheduler
return
assert len(trainer.lr_schedulers) == 1
# Reinitialize the scheduler
# We don't need to carry over information from the last scheduler e.g. the last_epoch property,
# because that will mess with the new scheduler when we step it
hparams = {**model.hparams.scheduler, **self.stage_params[self._current_stage]['scheduler']}
# Note that passing in the optimizer below is okay: the scheduler will be reinitialized and doesn't seem to inherit any current lr info from the optimizer
trainer.lr_schedulers[0]['scheduler'] = utils.instantiate(registry.scheduler, hparams, trainer.optimizers[0])
print("\tChanged scheduler to {}".format(hparams))
def _update_dataloaders(self, trainer, model):
# Set the train resolution and reset the dataloader
# set new seq len and reset the dataloader
# max_length should be set in the config of the dataloader
seq_len = self.stage_params[self._current_stage]['seq_len']
model.hparams.loader.max_length = seq_len
# we need to resize the batch size too
batch_size = self.stage_params[self._current_stage].get('batch_size', None)
# need to change the dataset params, and the set the phase, which reinits the dataset
model.dataset.max_length = seq_len # progressively update the seq len
# model.dataset.max_length_val = seq_len # we update the val len to be same as train
# model.dataset.max_length_test = seq_len # we don't change the test set, always the longest
model.dataset.batch_size = batch_size # need to adjust the batch size
# model.dataset.batch_size_eval = batch_size * 2 #
# model.dataset.dataset_train.max_length = seq_len
model.dataset.init_datasets() # reinit the datasets with new batch size and seq len
trainer.reset_train_dataloader(model) # tells PTL to use the new dataloaders/datasets
trainer.reset_val_dataloader(model)
print('\tAt epoch {}, changed Seq Len to {}, and batch size to {}'.format(trainer.current_epoch, seq_len, batch_size))
# def _update_model(self, trainer, model):
# if not hasattr(self.stage_params[self._current_stage], 'bandlimit'):
# return
# Update the bandlimit value for the model: this is a hack to make sure the model is updated
# Iterate over all the modules
# for module in model.modules():
# if hasattr(module, 'bandlimit'):
# module.bandlimit = self.stage_params[self._current_stage]['bandlimit']
# print('\tChanged bandlimit to {}'.format(self.stage_params[self._current_stage]['bandlimit']))
def _update_to_current_stage(self, trainer, model):
print("Seq Len Warmup: Moving to Stage {}".format(self._current_stage))
# Update the train dataloader, model and scheduler
self._update_dataloaders(trainer, model)
# self._update_model(trainer, model)
self._update_lr_scheduler(trainer, model)
def on_train_epoch_end(self, trainer, model):
"""
Check to see if new stage is reached for the next epoch, and if so, prepare the new stage by
changing the dataloader.
(We do next epoch so that the dataloader is prepared before the next epoch)
"""
next_epoch = trainer.current_epoch + 1
# Check if stage should be increased
if next_epoch >= self.stage_epochs_cume[self._current_stage] and self._current_stage < len(self.stage_params) - 1:
self._current_stage += 1
self._update_to_current_stage(trainer, model)
return super().on_train_epoch_end(trainer, model)
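# --- Illustrative sketch (not part of the original callback) ---
# A small, standalone example of how the cumulative-epoch schedule above maps an epoch
# back to its warmup stage; the stage values are invented for illustration. The
# searchsorted expression is the same one on_train_start uses when resuming mid-run.
if __name__ == "__main__":
    _demo_stages = [
        {'seq_len': 1024, 'epochs': 5},
        {'seq_len': 2048, 'epochs': 5},
        {'seq_len': 4096, 'epochs': 10},
    ]
    _demo_cume = np.cumsum([s['epochs'] for s in _demo_stages])  # [5, 10, 20]
    for _epoch in (0, 4, 5, 12, 19):
        _stage = int(np.searchsorted(_demo_cume - 1, _epoch))
        print(f"epoch {_epoch:2d} -> stage {_stage} (seq_len={_demo_stages[_stage]['seq_len']})")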
|
hyena-dna-main
|
src/callbacks/seqlen_warmup_reload.py
|
import torch
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_only
from pytorch_lightning.utilities.parsing import AttributeDict
from omegaconf import OmegaConf
class TrackNorms(pl.Callback):
# TODO do callbacks happen before or after the method in the main LightningModule?
# @rank_zero_only # needed?
def on_after_training_step(self, batch, batch_idx, trainer: pl.Trainer, pl_module: pl.LightningModule):
# Log extra metrics
metrics = {}
if hasattr(pl_module, "_grad_norms"):
metrics.update(pl_module._grad_norms)
        pl_module.log_dict(
metrics,
on_step=True,
on_epoch=False,
prog_bar=False,
add_dataloader_idx=False,
sync_dist=True,
)
def on_after_backward(self, trainer: pl.Trainer, pl_module: pl.LightningModule):
# example to inspect gradient information in tensorboard
if OmegaConf.select(trainer.hparams, 'trainer.track_grad_norms'): # TODO dot notation should work with omegaconf?
norms = {}
for name, p in pl_module.named_parameters():
if p.grad is None:
continue
# param_norm = float(p.grad.data.norm(norm_type))
                # note: this logs the mean squared gradient per parameter tensor (a cheap proxy, not a true L2 norm)
                param_norm = torch.mean(p.grad.data ** 2)
norms[f"grad_norm.{name}"] = param_norm
pl_module._grad_norms = norms
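# --- Illustrative note (not part of the original file) ---
# on_after_backward only populates pl_module._grad_norms when trainer.track_grad_norms is
# set in the OmegaConf config; the values then surface as "grad_norm.<param_name>" metrics
# through the logging hook above.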
|
hyena-dna-main
|
src/callbacks/norms.py
|
import numpy as np
from pytorch_lightning.callbacks import Callback
import src.utils as utils
from src.utils import registry
class ProgressiveResizing(Callback):
def __init__(self, stage_params: list):
"""
stage_params is a list of dicts
e.g. stage_params = [
{'resolution': 4, 'epochs': 50}, # 32 x 32
{'resolution': 2, 'epochs': 30}, # 64 x 64
{'resolution': 1, 'epochs': 20}, # 128 x 128
]
"""
super().__init__()
assert len(stage_params) > 0, 'No stages specified'
assert all([{'resolution', 'epochs'} <= set(stage.keys()) for stage in stage_params]), \
'stage_params must contain keys: resolution and epochs'
self.stage_params = stage_params
self.stage_epochs_cume = np.cumsum([stage['epochs'] for stage in stage_params])
self._current_stage = 0
def _verify_stages(self, trainer, model):
# Double-check that stage parameters are correct, otherwise we'll fail in the middle of training
for stage in self.stage_params:
            if 'scheduler' in stage:  # stages are plain dicts, so check key membership (hasattr would always be False)
# Verify that we can actually create the scheduler when we need to update it in each stage
scheduler = utils.instantiate(registry.scheduler, {**model.hparams.scheduler, **stage['scheduler']}, trainer.optimizers[0])
del scheduler
def on_train_start(self, trainer, model) -> None:
# Verify all the stage parameters are correct
self._verify_stages(trainer, model)
print(f"Training starts at {trainer.current_epoch}")
if trainer.current_epoch == 0:
# Update the model to the first stage
self._update_to_current_stage(trainer, model)
else:
# Preemption or resumption of progressive resizing
# Update the stage to the current one
self._current_stage = int(np.searchsorted(self.stage_epochs_cume - 1, trainer.current_epoch))
self._starting_stage = np.any(trainer.current_epoch == self.stage_epochs_cume)
print("Progressive Resizing: Restarting at Stage {}".format(self._current_stage))
if self._starting_stage:
self._update_lr_scheduler(trainer, model)
# Set the dataloader and model
self._update_dataloaders(trainer, model)
self._update_model(trainer, model)
return super().on_train_start(trainer, model)
def _update_lr_scheduler(self, trainer, model):
        if 'scheduler' not in self.stage_params[self._current_stage]:
# No scheduler specified, so don't update the current scheduler
return
assert len(trainer.lr_schedulers) == 1
# Reinitialize the scheduler
# We don't need to carry over information from the last scheduler e.g. the last_epoch property,
# because that will mess with the new scheduler when we step it
hparams = {**model.hparams.scheduler, **self.stage_params[self._current_stage]['scheduler']}
# Note that passing in the optimizer below is okay: the scheduler will be reinitialized and doesn't seem to inherit any current lr info from the optimizer
trainer.lr_schedulers[0]['scheduler'] = utils.instantiate(registry.scheduler, hparams, trainer.optimizers[0])
print("\tChanged scheduler to {}".format(hparams))
def _update_dataloaders(self, trainer, model):
# Set the train resolution and reset the dataloader
model.hparams.loader.train_resolution = self.stage_params[self._current_stage]['resolution']
trainer.reset_train_dataloader(model)
print('\tChanged resolution to {}'.format(self.stage_params[self._current_stage]['resolution']))
def _update_model(self, trainer, model):
        if 'bandlimit' not in self.stage_params[self._current_stage]:
return
# Update the bandlimit value for the model: this is a hack to make sure the model is updated
# Iterate over all the modules
for module in model.modules():
if hasattr(module, 'bandlimit'):
module.bandlimit = self.stage_params[self._current_stage]['bandlimit']
print('\tChanged bandlimit to {}'.format(self.stage_params[self._current_stage]['bandlimit']))
def _update_to_current_stage(self, trainer, model):
print("Progressive Resizing: Moving to Stage {}".format(self._current_stage))
# Update the train dataloader, model and scheduler
self._update_dataloaders(trainer, model)
self._update_model(trainer, model)
self._update_lr_scheduler(trainer, model)
def on_train_epoch_end(self, trainer, model):
"""
Check to see if new stage is reached for the next epoch, and if so, prepare the new stage by
changing the dataloader.
(We do next epoch so that the dataloader is prepared before the next epoch)
"""
next_epoch = trainer.current_epoch + 1
# Check if stage should be increased
if next_epoch >= self.stage_epochs_cume[self._current_stage] and self._current_stage < len(self.stage_params) - 1:
self._current_stage += 1
self._update_to_current_stage(trainer, model)
return super().on_train_epoch_end(trainer, model)
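# --- Illustrative sketch (not part of the original callback) ---
# A hypothetical stage schedule showing the optional keys the methods above look for:
# 'scheduler' (a partial hparams override) is picked up by _update_lr_scheduler and
# 'bandlimit' by _update_model; the concrete values here are invented for illustration.
_example_stage_params = [
    {'resolution': 4, 'epochs': 50},                                   # 32 x 32
    {'resolution': 2, 'epochs': 30, 'bandlimit': 0.5},                 # 64 x 64
    {'resolution': 1, 'epochs': 20, 'bandlimit': 1.0,
     'scheduler': {'num_training_steps': 20000}},                      # 128 x 128
]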
|
hyena-dna-main
|
src/callbacks/progressive_resizing.py
|
"""
ET Dataset from Informer Paper.
Dataset: https://github.com/zhouhaoyi/ETDataset
Dataloader: https://github.com/zhouhaoyi/Informer2020
"""
from typing import List
import os
import numpy as np
import pandas as pd
from pandas.tseries import offsets
from pandas.tseries.frequencies import to_offset
import torch
from torch.utils import data
from torch.utils.data import Dataset, DataLoader
import warnings
warnings.filterwarnings("ignore")
from src.dataloaders.base import SequenceDataset, default_data_path
class TimeFeature:
def __init__(self):
pass
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
pass
def __repr__(self):
return self.__class__.__name__ + "()"
class SecondOfMinute(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.second / 59.0 - 0.5
class MinuteOfHour(TimeFeature):
"""Minute of hour encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.minute / 59.0 - 0.5
class HourOfDay(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.hour / 23.0 - 0.5
class DayOfWeek(TimeFeature):
"""Hour of day encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return index.dayofweek / 6.0 - 0.5
class DayOfMonth(TimeFeature):
"""Day of month encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.day - 1) / 30.0 - 0.5
class DayOfYear(TimeFeature):
"""Day of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.dayofyear - 1) / 365.0 - 0.5
class MonthOfYear(TimeFeature):
"""Month of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.month - 1) / 11.0 - 0.5
class WeekOfYear(TimeFeature):
"""Week of year encoded as value between [-0.5, 0.5]"""
def __call__(self, index: pd.DatetimeIndex) -> np.ndarray:
return (index.isocalendar().week - 1) / 52.0 - 0.5
def time_features_from_frequency_str(freq_str: str) -> List[TimeFeature]:
"""
Returns a list of time features that will be appropriate for the given frequency string.
Parameters
----------
freq_str
Frequency string of the form [multiple][granularity] such as "12H", "5min", "1D" etc.
"""
features_by_offsets = {
offsets.YearEnd: [],
offsets.QuarterEnd: [MonthOfYear],
offsets.MonthEnd: [MonthOfYear],
offsets.Week: [DayOfMonth, WeekOfYear],
offsets.Day: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.BusinessDay: [DayOfWeek, DayOfMonth, DayOfYear],
offsets.Hour: [HourOfDay, DayOfWeek, DayOfMonth, DayOfYear],
offsets.Minute: [
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
offsets.Second: [
SecondOfMinute,
MinuteOfHour,
HourOfDay,
DayOfWeek,
DayOfMonth,
DayOfYear,
],
}
offset = to_offset(freq_str)
for offset_type, feature_classes in features_by_offsets.items():
if isinstance(offset, offset_type):
return [cls() for cls in feature_classes]
supported_freq_msg = f"""
Unsupported frequency {freq_str}
The following frequencies are supported:
Y - yearly
alias: A
M - monthly
W - weekly
D - daily
B - business days
H - hourly
T - minutely
alias: min
S - secondly
"""
raise RuntimeError(supported_freq_msg)
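# --- Illustrative usage (not part of the original file) ---
# A small sketch of the mapping above: an hourly frequency yields four encoders, each
# returning values in [-0.5, 0.5]. Wrapped in a helper so nothing runs at import time.
def _example_hourly_features():
    feats = time_features_from_frequency_str("h")  # [HourOfDay(), DayOfWeek(), DayOfMonth(), DayOfYear()]
    index = pd.DatetimeIndex(["2021-01-01 00:00", "2021-01-01 01:00", "2021-01-01 02:00"])
    return np.vstack([feat(index) for feat in feats]).transpose(1, 0)  # shape (3, 4)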
def time_features(dates, timeenc=1, freq="h"):
"""
    > `time_features` takes in a `dates` dataframe with a 'date' column and extracts the date down to `freq` where freq can be any of the following if `timeenc` is 0:
> * m - [month]
> * w - [month]
> * d - [month, day, weekday]
> * b - [month, day, weekday]
> * h - [month, day, weekday, hour]
> * t - [month, day, weekday, hour, *minute]
>
> If `timeenc` is 1, a similar, but different list of `freq` values are supported (all encoded between [-0.5 and 0.5]):
> * Q - [month]
> * M - [month]
> * W - [Day of month, week of year]
> * D - [Day of week, day of month, day of year]
> * B - [Day of week, day of month, day of year]
> * H - [Hour of day, day of week, day of month, day of year]
> * T - [Minute of hour*, hour of day, day of week, day of month, day of year]
> * S - [Second of minute, minute of hour, hour of day, day of week, day of month, day of year]
*minute returns a number from 0-3 corresponding to the 15 minute period it falls into.
"""
if timeenc == 0:
dates["month"] = dates.date.apply(lambda row: row.month, 1)
dates["day"] = dates.date.apply(lambda row: row.day, 1)
dates["weekday"] = dates.date.apply(lambda row: row.weekday(), 1)
dates["hour"] = dates.date.apply(lambda row: row.hour, 1)
dates["minute"] = dates.date.apply(lambda row: row.minute, 1)
dates["minute"] = dates.minute.map(lambda x: x // 15)
freq_map = {
"y": [],
"m": ["month"],
"w": ["month"],
"d": ["month", "day", "weekday"],
"b": ["month", "day", "weekday"],
"h": ["month", "day", "weekday", "hour"],
"t": ["month", "day", "weekday", "hour", "minute"],
}
return dates[freq_map[freq.lower()]].values
if timeenc == 1:
dates = pd.to_datetime(dates.date.values)
return np.vstack(
[feat(dates) for feat in time_features_from_frequency_str(freq)]
).transpose(1, 0)
class StandardScaler:
def __init__(self):
self.mean = 0.0
self.std = 1.0
def fit(self, data):
self.mean = data.mean(0)
self.std = data.std(0)
def transform(self, data):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data - mean) / std
def inverse_transform(self, data):
mean = (
torch.from_numpy(self.mean).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.mean
)
std = (
torch.from_numpy(self.std).type_as(data).to(data.device)
if torch.is_tensor(data)
else self.std
)
return (data * std) + mean
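# --- Illustrative usage (not part of the original file) ---
# Minimal round trip through the scaler on a toy array; the values are arbitrary.
def _example_standard_scaler():
    scaler = StandardScaler()
    data = np.array([[1.0], [2.0], [3.0]])
    scaler.fit(data)                         # mean=2.0, std (population) ~0.816
    scaled = scaler.transform(data)          # standardized values
    return scaler.inverse_transform(scaled)  # recovers the original array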
class InformerDataset(Dataset):
def __init__(
self,
root_path,
flag="train",
size=None,
features="S",
data_path="ETTh1.csv",
target="OT",
scale=True,
inverse=False,
timeenc=0,
freq="h",
cols=None,
eval_stamp=False,
eval_mask=False,
):
# size [seq_len, label_len, pred_len]
# info
        if size is None:
self.seq_len = 24 * 4 * 4
self.label_len = 24 * 4
self.pred_len = 24 * 4
else:
self.seq_len = size[0]
self.label_len = size[1]
self.pred_len = size[2]
# init
assert flag in ["train", "test", "val"]
type_map = {"train": 0, "val": 1, "test": 2}
self.set_type = type_map[flag]
self.features = features
self.target = target
self.scale = scale
self.inverse = inverse
self.timeenc = timeenc
self.freq = freq
self.cols = cols
self.eval_stamp = eval_stamp
self.eval_mask = eval_mask
self.forecast_horizon = self.pred_len
self.root_path = root_path
self.data_path = data_path
self.__read_data__()
def _borders(self, df_raw):
num_train = int(len(df_raw) * 0.7)
num_test = int(len(df_raw) * 0.2)
num_vali = len(df_raw) - num_train - num_test
border1s = [0, num_train - self.seq_len, len(df_raw) - num_test - self.seq_len]
border2s = [num_train, num_train + num_vali, len(df_raw)]
return border1s, border2s
def _process_columns(self, df_raw):
if self.cols:
cols = self.cols.copy()
cols.remove(self.target)
else:
cols = list(df_raw.columns)
cols.remove(self.target)
cols.remove("date")
return df_raw[["date"] + cols + [self.target]]
def __read_data__(self):
self.scaler = StandardScaler()
df_raw = pd.read_csv(os.path.join(self.root_path, self.data_path))
df_raw = self._process_columns(df_raw)
border1s, border2s = self._borders(df_raw)
border1 = border1s[self.set_type]
border2 = border2s[self.set_type]
if self.features == "M" or self.features == "MS":
cols_data = df_raw.columns[1:]
df_data = df_raw[cols_data]
elif self.features == "S":
df_data = df_raw[[self.target]]
if self.scale:
train_data = df_data[border1s[0] : border2s[0]]
self.scaler.fit(train_data.values)
data = self.scaler.transform(df_data.values)
else:
data = df_data.values
df_stamp = df_raw[["date"]][border1:border2]
df_stamp["date"] = pd.to_datetime(df_stamp.date)
data_stamp = time_features(df_stamp, timeenc=self.timeenc, freq=self.freq)
self.data_x = data[border1:border2]
if self.inverse:
self.data_y = df_data.values[border1:border2]
else:
self.data_y = data[border1:border2]
self.data_stamp = data_stamp
def __getitem__(self, index):
s_begin = index
s_end = s_begin + self.seq_len
r_begin = s_end - self.label_len
r_end = r_begin + self.label_len + self.pred_len
seq_x = self.data_x[s_begin:s_end]
seq_x = np.concatenate(
[seq_x, np.zeros((self.pred_len, self.data_x.shape[-1]))], axis=0
)
if self.inverse:
seq_y = np.concatenate(
[
self.data_x[r_begin : r_begin + self.label_len],
self.data_y[r_begin + self.label_len : r_end],
],
0,
)
raise NotImplementedError
else:
# seq_y = self.data_y[r_begin:r_end] # OLD in Informer codebase
seq_y = self.data_y[s_end:r_end]
# OLD in Informer codebase
# seq_x_mark = self.data_stamp[s_begin:s_end]
# seq_y_mark = self.data_stamp[r_begin:r_end]
if self.eval_stamp:
mark = self.data_stamp[s_begin:r_end]
else:
mark = self.data_stamp[s_begin:s_end]
mark = np.concatenate([mark, np.zeros((self.pred_len, mark.shape[-1]))], axis=0)
if self.eval_mask:
mask = np.concatenate([np.zeros(self.seq_len), np.ones(self.pred_len)], axis=0)
else:
mask = np.concatenate([np.zeros(self.seq_len), np.zeros(self.pred_len)], axis=0)
mask = mask[:, None]
# Add the mask to the timestamps: # 480, 5
# mark = np.concatenate([mark, mask[:, np.newaxis]], axis=1)
seq_x = seq_x.astype(np.float32)
seq_y = seq_y.astype(np.float32)
if self.timeenc == 0:
mark = mark.astype(np.int64)
else:
mark = mark.astype(np.float32)
mask = mask.astype(np.int64)
return torch.tensor(seq_x), torch.tensor(seq_y), torch.tensor(mark), torch.tensor(mask)
def __len__(self):
return len(self.data_x) - self.seq_len - self.pred_len + 1
def inverse_transform(self, data):
return self.scaler.inverse_transform(data)
@property
def d_input(self):
return self.data_x.shape[-1]
@property
def d_output(self):
if self.features in ["M", "S"]:
return self.data_x.shape[-1]
elif self.features == "MS":
return 1
else:
raise NotImplementedError
@property
def n_tokens_time(self):
if self.freq == 'h':
return [13, 32, 7, 24]
elif self.freq == 't':
return [13, 32, 7, 24, 4]
else:
raise NotImplementedError
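# --- Illustrative note (not part of the original file) ---
# Shape bookkeeping for one sample under the default size (seq_len=384, label_len=96,
# pred_len=96) with d input features:
#   seq_x : (seq_len + pred_len, d)  context window followed by zero-padded forecast slots
#   seq_y : (pred_len, d)            targets immediately after the context window
#   mark  : (seq_len + pred_len, k)  time features (k depends on timeenc and freq)
#   mask  : (seq_len + pred_len, 1)  ones over the forecast region when eval_mask=True
# and len(dataset) = len(data) - seq_len - pred_len + 1 sliding windows.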
class _Dataset_ETT_hour(InformerDataset):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 - self.seq_len,
12 * 30 * 24 + 4 * 30 * 24 - self.seq_len,
]
border2s = [
12 * 30 * 24,
12 * 30 * 24 + 4 * 30 * 24,
12 * 30 * 24 + 8 * 30 * 24,
]
return border1s, border2s
def _process_columns(self, df_raw):
return df_raw
@property
def n_tokens_time(self):
assert self.freq == "h"
return [13, 32, 7, 24]
class _Dataset_ETT_minute(_Dataset_ETT_hour):
def __init__(self, data_path="ETTm1.csv", freq="t", **kwargs):
super().__init__(data_path=data_path, freq=freq, **kwargs)
def _borders(self, df_raw):
border1s = [
0,
12 * 30 * 24 * 4 - self.seq_len,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4 - self.seq_len,
]
border2s = [
12 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 4 * 30 * 24 * 4,
12 * 30 * 24 * 4 + 8 * 30 * 24 * 4,
]
return border1s, border2s
@property
def n_tokens_time(self):
assert self.freq == "t"
return [13, 32, 7, 24, 4]
class _Dataset_Weather(InformerDataset):
def __init__(self, data_path="WTH.csv", target="WetBulbCelsius", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class _Dataset_ECL(InformerDataset):
def __init__(self, data_path="ECL.csv", target="MT_320", **kwargs):
super().__init__(data_path=data_path, target=target, **kwargs)
class InformerSequenceDataset(SequenceDataset):
@property
def n_tokens_time(self):
# Shape of the dates: depends on `timeenc` and `freq`
return self.dataset_train.n_tokens_time # data_stamp.shape[-1]
@property
def d_input(self):
return self.dataset_train.d_input
@property
def d_output(self):
return self.dataset_train.d_output
@property
def l_output(self):
return self.dataset_train.pred_len
def _get_data_filename(self, variant):
return self.variants[variant]
_collate_arg_names = ["mark", "mask"] # Names of the two extra tensors that the InformerDataset returns
def setup(self):
self.data_dir = self.data_dir or default_data_path / 'informer' / self._name_
self.dataset_train = self._dataset_cls(
root_path=self.data_dir,
flag="train",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_val = self._dataset_cls(
root_path=self.data_dir,
flag="val",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
self.dataset_test = self._dataset_cls(
root_path=self.data_dir,
flag="test",
size=self.size,
features=self.features,
data_path=self._get_data_filename(self.variant),
target=self.target,
scale=self.scale,
inverse=self.inverse,
timeenc=self.timeenc,
freq=self.freq,
cols=self.cols,
eval_stamp=self.eval_stamp,
eval_mask=self.eval_mask,
)
class ETTHour(InformerSequenceDataset):
_name_ = "etth"
_dataset_cls = _Dataset_ETT_hour
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "ETTh1.csv",
1: "ETTh2.csv",
}
class ETTMinute(InformerSequenceDataset):
_name_ = "ettm"
_dataset_cls = _Dataset_ETT_minute
init_defaults = {
"size": None,
"features": "S",
"target": "OT",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "t",
"cols": None,
}
variants = {
0: "ETTm1.csv",
1: "ETTm2.csv",
}
class Weather(InformerSequenceDataset):
_name_ = "weather"
_dataset_cls = _Dataset_Weather
init_defaults = {
"size": None,
"features": "S",
"target": "WetBulbCelsius",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "WTH.csv",
}
class ECL(InformerSequenceDataset):
_name_ = "ecl"
_dataset_cls = _Dataset_ECL
init_defaults = {
"size": None,
"features": "S",
"target": "MT_320",
"variant": 0,
"scale": True,
"inverse": False,
"timeenc": 0,
"freq": "h",
"cols": None,
}
variants = {
0: "ECL.csv",
}
|
hyena-dna-main
|
src/dataloaders/et.py
|
from . import et, genomics
from .base import SequenceDataset
|
hyena-dna-main
|
src/dataloaders/__init__.py
|
# Adapted from https://github.com/huggingface/transformers/blob/master/examples/pytorch/language-modeling/run_clm.py
# Adapted from https://github.com/HazyResearch/flash-attention/blob/main/training/src/datamodules/language_modeling_hf.py
from pathlib import Path
from typing import Any, List, Union
from torch.utils.data.dataloader import DataLoader, Dataset
from transformers import AutoTokenizer
from datasets import Dataset
from src.dataloaders.base import SequenceDataset, default_data_path
from src.dataloaders.fault_tolerant_sampler import RandomFaultTolerantSampler
from src.dataloaders.fault_tolerant_sampler import FaultTolerantDistributedSampler
# genomics datasets
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from src.dataloaders.datasets.hg38_dataset import HG38Dataset
from src.dataloaders.datasets.genomic_bench_dataset import GenomicBenchmarkDataset
from src.dataloaders.datasets.nucleotide_transformer_dataset import NucleotideTransformerDataset
from src.dataloaders.datasets.chromatin_profile_dataset import ChromatinProfileDataset
from src.dataloaders.datasets.species_dataset import SpeciesDataset
from src.dataloaders.datasets.icl_genomics_dataset import ICLGenomicsDataset
from src.dataloaders.datasets.hg38_fixed_dataset import HG38FixedDataset
"""
Dataloaders for genomics datasets, including pretraining and downstream tasks. First works in HyenaDNA project, May 2023.
"""
class HG38(SequenceDataset):
"""
Base class, other dataloaders can inherit from this class.
You must implement the following functions:
- __init__
- setup
You can then use (already have access to) the following functions:
- train_dataloader
- val_dataloader
- test_dataloader
"""
###### very important to set this! ######
_name_ = "hg38" # this name is how the dataset config finds the right dataloader
#########################################
def __init__(self, bed_file, fasta_file, tokenizer_name=None, dataset_config_name=None, max_length=1024, d_output=2, rc_aug=False,
max_length_val=None, max_length_test=None, val_ratio=0.0005, val_split_seed=2357, use_fixed_len_val=False,
add_eos=True, detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, replace_N_token=False, pad_interval=False,
*args, **kwargs):
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
self.d_output = d_output
        self.rc_aug = rc_aug  # reverse complement augmentation
self.max_length = max_length
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
self.bed_file = bed_file
self.fasta_file = fasta_file
self.use_fixed_len_val = use_fixed_len_val
self.replace_N_token = replace_N_token
self.pad_interval = pad_interval
# handle if file paths are None (default paths)
if self.bed_file is None:
self.bed_file = default_data_path / self._name_ / 'human-sequences.bed'
if self.fasta_file is None:
self.fasta_file = default_data_path / self._name_ / 'hg38.ml.fa'
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
"""Set up the tokenizer and init the datasets."""
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
elif self.tokenizer_name == 'bpe':
print("**using pretrained AIRI tokenizer**")
self.tokenizer = AutoTokenizer.from_pretrained('AIRI-Institute/gena-lm-bert-base')
self.vocab_size = len(self.tokenizer)
self.init_datasets() # creates the datasets. You can also just create this inside the setup() here.
def init_datasets(self):
"""Init the datasets (separate from the tokenizer)"""
# delete old datasets to free memory
if hasattr(self, 'dataset_train'):
self.dataset_train.fasta.seqs.close()
del self.dataset_train.fasta.seqs
# delete old datasets to free memory
if hasattr(self, 'dataset_test'):
self.dataset_test.fasta.seqs.close()
del self.dataset_test.fasta.seqs
# Create all splits: torch datasets
self.dataset_train, self.dataset_val, self.dataset_test = [
HG38Dataset(split=split,
bed_file=self.bed_file,
fasta_file=self.fasta_file,
max_length=max_len,
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
add_eos=self.add_eos,
return_seq_indices=False,
shift_augs=None,
rc_aug=self.rc_aug,
return_augs=False,
replace_N_token=self.replace_N_token,
pad_interval=self.pad_interval)
for split, max_len in zip(['train', 'valid', 'test'], [self.max_length, self.max_length_val, self.max_length_test])
]
if self.use_fixed_len_val:
# we're placing the fixed test set in the val dataloader, for visualization!!!
            # that means checkpointing/early stopping should monitor the test loss, not the val loss
# new option to use fixed val set
print("Using fixed length val set!")
# start end of chr14 and chrX grabbed from Enformer
chr_ranges = {'chr14': [19726402, 106677047],
'chrX': [2825622, 144342320],
}
self.dataset_val = HG38FixedDataset(
chr_ranges=chr_ranges,
fasta_file=self.fasta_file,
max_length=self.max_length,
pad_max_length=self.max_length,
tokenizer=self.tokenizer,
add_eos=True,
)
return
def train_dataloader(self, *args: Any, **kwargs: Any) -> DataLoader:
""" The train dataloader """
if self.shuffle and self.fault_tolerant:
shuffle = False
# TD [2022-12-26]: We need the distributed_sampler_kwargs in case of model parallel:
# In that case the number of replicas and the data parallel rank are more complicated.
distributed_sampler_kwargs = self.trainer.distributed_sampler_kwargs
sampler = (FaultTolerantDistributedSampler(self.dataset_train,
**self.trainer.distributed_sampler_kwargs)
if self.ddp else RandomFaultTolerantSampler(self.dataset_train))
# TD [2022-08-06]: Only the DDP sampler supports fast-forwarding for now
# We assume that it's being resumed with the same number of GPUs
if self.ddp and self.fast_forward_epochs is not None and self.fast_forward_batches is not None:
sampler.load_state_dict({
'epoch': self.fast_forward_epochs,
'counter': self.fast_forward_batches * self.batch_size
})
else:
shuffle = self.shuffle
sampler = None
return self._data_loader(self.dataset_train, batch_size=self.batch_size,
shuffle=shuffle, sampler=sampler)
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
return self._data_loader(self.dataset_test, batch_size=self.batch_size_eval)
def _data_loader(self, dataset: Dataset, batch_size: int, shuffle: bool = False,
sampler=None) -> DataLoader:
return DataLoader(
dataset,
batch_size=batch_size,
num_workers=1, # Data is already in memory, we don't need many workers
shuffle=shuffle,
sampler=sampler,
drop_last=self.drop_last,
pin_memory=self.pin_memory,
)
def load_state_dict(self, checkpoint):
if self.fault_tolerant:
self.fast_forward_epochs = checkpoint['loops']['fit_loop']['epoch_progress']['current']['completed']
# TD [2022-08-07] ['epoch_loop.batch_progress']['total']['completed'] is 1 iteration
# behind, so we're using the optimizer's progress. This is set correctly in seq.py.
self.fast_forward_batches = checkpoint['loops']['fit_loop']['epoch_loop.batch_progress']['current']['completed']
# At this point the train loader hasn't been constructed yet
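# --- Illustrative usage (not part of the original file) ---
# A hedged sketch of driving the HG38 datamodule by hand; the file paths below are
# placeholders and must point at a real .bed/.fa pair (normally this wiring comes from
# the experiment configs rather than direct construction).
def _example_hg38_loader():
    dm = HG38(bed_file='data/hg38/human-sequences.bed',
              fasta_file='data/hg38/hg38.ml.fa',
              tokenizer_name='char', max_length=1024, batch_size=4)
    dm.setup()
    batch = next(iter(dm.train_dataloader()))
    return batch  # typically a (token_ids, target_ids) pair for next-token prediction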
class GenomicBenchmark(HG38):
_name_ = "genomic_benchmark"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, dataset_name, dest_path=None, tokenizer_name='char', d_output=None, rc_aug=False,
max_length=1024, use_padding=True, max_length_val=None, max_length_test=None,
padding_side='left', val_ratio=0.0005, val_split_seed=2357, add_eos=False,
detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=True, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, *args, **kwargs):
self.dataset_name = dataset_name
self.dest_path = dest_path
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug
self.max_length = max_length
self.use_padding = use_padding
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.padding_side = padding_side
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if self.dest_path is None:
self.dest_path = default_data_path / self._name_
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
padding_side=self.padding_side,
)
# Create all splits: torch datasets (only train/test in this benchmark)
self.dataset_train, self.dataset_val = [
GenomicBenchmarkDataset(split=split,
max_length=max_len,
dataset_name=self.dataset_name,
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
use_padding=self.use_padding,
d_output=self.d_output,
add_eos=self.add_eos,
dest_path=self.dest_path,
rc_aug=self.rc_aug,
return_augs=False)
for split, max_len in zip(['train', 'val'], [self.max_length, self.max_length_val])
]
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader, it's a dummy loader just to make the trainer happy, we don't use it."""
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
class NucleotideTransformer(HG38):
_name_ = "nucleotide_transformer"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, dataset_name, dest_path=None, tokenizer_name='char', d_output=None, rc_aug=False,
max_length=1024, use_padding=True, max_length_val=None, max_length_test=None,
padding_side='left', val_ratio=0.0005, val_split_seed=2357, add_eos=False,
detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=True, shuffle_eval=None, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, *args, **kwargs):
self.dataset_name = dataset_name
self.dest_path = dest_path
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug
self.max_length = max_length
self.use_padding = use_padding
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.padding_side = padding_side
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.shuffle_eval = shuffle_eval if shuffle_eval is not None else shuffle # default is to use the same as train shuffle arg
self.pin_memory = pin_memory
self.drop_last = drop_last
if self.dest_path is None:
self.dest_path = default_data_path / self._name_
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
padding_side=self.padding_side,
)
# Create all splits: torch datasets (only train/test in this benchmark)
self.dataset_train, self.dataset_val = [
NucleotideTransformerDataset(split=split,
max_length=max_len,
tokenizer=self.tokenizer, # pass the tokenize wrapper
dataset_name = self.dataset_name,
tokenizer_name=self.tokenizer_name,
use_padding=self.use_padding,
d_output=self.d_output,
add_eos=self.add_eos,
dest_path=self.dest_path,
rc_aug=self.rc_aug,
return_augs=False)
for split, max_len in zip(['train', 'val'], [self.max_length, self.max_length_val])
]
def val_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The val dataloader """
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval, shuffle=self.shuffle_eval)
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader """
# note: we're combining val/test into one
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval, shuffle=self.shuffle_eval)
class ChromatinProfile(HG38):
_name_= 'chromatin_profile'
l_output = 0 # need to set this for decoder to work correctly for seq level
def __init__(self, data_path, ref_genome_path, ref_genome_version=None,
tokenizer_name=None, dataset_config_name=None,
max_length=1000, d_output=2, rc_aug=False, add_eos=True, val_only=False,
batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None,
*args, **kwargs):
self.data_path = data_path
self.ref_genome_path = ref_genome_path
self.ref_genome_version = ref_genome_version
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
self.d_output = d_output
        self.rc_aug = rc_aug  # reverse complement augmentation
self.max_length = max_length
self.add_eos = add_eos
self.val_only=val_only
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
elif self.tokenizer_name == 'bpe':
print("**using pretrained AIRI tokenizer**")
self.tokenizer = AutoTokenizer.from_pretrained('AIRI-Institute/gena-lm-bert-base')
self.vocab_size = len(self.tokenizer)
# Create all splits: torch datasets
if self.val_only:
splits=['val']*3
else:
splits=['train','val','test']
self.dataset_train, self.dataset_val, self.dataset_test = [
ChromatinProfileDataset(
max_length=self.max_length,
ref_genome_path = self.ref_genome_path,
ref_genome_version = self.ref_genome_version,
coords_target_path = f'{self.data_path}/{split}_{self.ref_genome_version}_coords_targets.csv',
tokenizer=self.tokenizer,
tokenizer_name=self.tokenizer_name,
use_padding=True,
)
for split in splits
]
class Species(HG38):
_name_ = "species"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, species: list, species_dir: str, tokenizer_name=None, dataset_config_name=None, d_output=None, max_length=1024, rc_aug=False,
max_length_val=None, max_length_test=None, cache_dir=None, val_ratio=0.0005, val_split_seed=2357,
add_eos=True, detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=1,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, chromosome_weights='uniform', species_weights='uniform',
total_size=None, task='species_classification', remove_tail_ends=False, cutoff_train=0.1, cutoff_test=0.2,
*args, **kwargs):
self.dataset_config_name = dataset_config_name
self.tokenizer_name = tokenizer_name
        self.rc_aug = rc_aug  # reverse complement augmentation
self.cache_dir = None if cache_dir is None else Path(cache_dir).expanduser()
self.max_length = max_length
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.add_eos = add_eos
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
self.species = species # list of species to load
self.species_dir = species_dir
self.chromosome_weights = chromosome_weights
self.species_weights = species_weights
self.total_size = total_size
self.task = task
self.remove_tail_ends = remove_tail_ends
self.cutoff_train = cutoff_train
self.cutoff_test = cutoff_test
self.d_output = len(self.species)
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
def setup(self, stage=None):
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
elif self.tokenizer_name == 'bpe':
print("**using pretrained AIRI tokenizer**")
self.tokenizer = AutoTokenizer.from_pretrained('AIRI-Institute/gena-lm-bert-base')
else:
raise ValueError(f"Invalid tokenizer name: {self.tokenizer_name}")
self.vocab_size = len(self.tokenizer)
# Create datasets
self.init_datasets()
def init_datasets(self):
# delete old datasets
# NOTE: For some reason only works to close files for train
if hasattr(self, 'dataset_train'):
for spec in list(self.dataset_train.fastas.keys()):
for chromosome in list(self.dataset_train.fastas[spec].keys()):
self.dataset_train.fastas[spec][chromosome].close()
del self.dataset_train.fastas[spec][chromosome]
if hasattr(self, 'dataset_val'):
pass
if hasattr(self, 'dataset_test'):
pass
# Create all splits: torch datasets
self.dataset_train, self.dataset_val, self.dataset_test = [
SpeciesDataset(species=self.species,
species_dir=self.species_dir,
split=split,
max_length=max_len,
total_size=self.total_size * (1 if split == 'test' else (self.max_length_test + 2) // max_len), # See the same # of tokens every epoch across train/val/test
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
add_eos=self.add_eos,
rc_aug=self.rc_aug,
chromosome_weights=self.chromosome_weights,
species_weights=self.species_weights,
task=self.task,
remove_tail_ends=self.remove_tail_ends,
cutoff_train=self.cutoff_train,
cutoff_test=self.cutoff_test,
)
for split, max_len in zip(['train', 'valid', 'test'], [self.max_length, self.max_length_val, self.max_length_test])
]
return
class ICLGenomics(HG38):
_name_ = "icl_genomics"
l_output = 0 # need to set this for decoder to work correctly
def __init__(self, dataset_name, dest_path=None, tokenizer_name='char', d_output=None, rc_aug=False,
max_length=1024, use_padding=True, max_length_val=None, max_length_test=None, shots=1, label_to_token=None,
add_eos=True, characters=None, padding_side='left', val_ratio=0.0005, val_split_seed=2357,
detokenize=False, val_only=False, batch_size=32, batch_size_eval=None, num_workers=0,
shuffle=True, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None,
use_shmem=True, *args, **kwargs):
self.dataset_name = dataset_name
self.dest_path = dest_path
self.tokenizer_name = tokenizer_name
self.d_output = d_output
self.rc_aug = rc_aug
self.max_length = max_length
self.use_padding = use_padding
self.max_length_val = max_length_val if max_length_val is not None else max_length
self.max_length_test = max_length_test if max_length_test is not None else max_length
self.padding_side = padding_side
self.val_ratio = val_ratio
self.val_split_seed = val_split_seed
self.val_only = val_only
self.shots = shots # num shots in ICL sample
self.label_to_token = label_to_token # this maps the label to a token in the vocab already, arbitrary
self.add_eos = add_eos
self.characters = list('ACTGN') if characters is None else characters
self.detokenize = detokenize
self.batch_size = batch_size
self.batch_size_eval = batch_size_eval if batch_size_eval is not None else self.batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
self.use_shmem = use_shmem
# if self.use_shmem:
# assert cache_dir is not None
def setup(self, stage=None):
# TODO instantiate with registry
if self.tokenizer_name == 'char':
print("**Using Char-level tokenizer**")
self.tokenizer = CharacterTokenizer(
characters=self.characters,
model_max_length=self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
self.vocab_size = len(self.tokenizer)
# Create all splits: torch datasets
self.dataset_train, self.dataset_val = [
ICLGenomicsDataset(
dataset_name=self.dataset_name,
split=split,
shots=self.shots,
use_padding=self.use_padding,
d_output=self.d_output,
max_length=max_len,
dest_path=self.dest_path,
tokenizer=self.tokenizer, # pass the tokenize wrapper
tokenizer_name=self.tokenizer_name,
label_to_token=self.label_to_token,
rc_aug=self.rc_aug,
add_eos=self.add_eos,
)
for split, max_len in zip(['train', 'val'], [self.max_length, self.max_length_val])
]
def test_dataloader(self, *args: Any, **kwargs: Any) -> Union[DataLoader, List[DataLoader]]:
""" The test dataloader, it's a dummy loader just to make the trainer happy, we don't use it."""
return self._data_loader(self.dataset_val, batch_size=self.batch_size_eval)
class HG38Fixed(HG38):
_name_ = "hg38_fixed"
"""Just used for testing a fixed length, *non-overlapping* dataset for HG38."""
def __init__(self, fasta_file=None, chr_ranges=None, pad_max_length=None, batch_size=32,
max_length=None, num_workers=1, add_eos=True,
shuffle=False, pin_memory=False, drop_last=False, fault_tolerant=False, ddp=False,
fast_forward_epochs=None, fast_forward_batches=None, *args, **kwargs):
self.fasta_file = fasta_file
self.chr_ranges = chr_ranges
self.max_length = max_length
self.pad_max_length = pad_max_length
self.add_eos = add_eos
self.batch_size = batch_size
self.batch_size_eval = batch_size
self.num_workers = num_workers
self.shuffle = shuffle
self.pin_memory = pin_memory
self.drop_last = drop_last
if fault_tolerant:
assert self.shuffle
self.fault_tolerant = fault_tolerant
if ddp:
assert fault_tolerant
self.ddp = ddp
self.fast_forward_epochs = fast_forward_epochs
self.fast_forward_batches = fast_forward_batches
if self.fast_forward_epochs is not None or self.fast_forward_batches is not None:
assert ddp and fault_tolerant
if self.fasta_file is None:
self.fasta_file = default_data_path / "hg38" / 'hg38.ml.fa'
if self.chr_ranges is None:
# start end of chr14 and chrX grabbed from Enformer
self.chr_ranges = {'chr14': [19726402, 106677047],
'chrX': [2825622, 144342320],
}
def setup(self, stage=None):
# Create tokenizer
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
model_max_length= self.max_length + 2, # add 2 since default adds eos/eos tokens, crop later
add_special_tokens=False,
)
# we only need one
self.dataset_train = HG38FixedDataset(
fasta_file=self.fasta_file,
chr_ranges=self.chr_ranges, # a dict of chr: (start, end) to use for test set
max_length=self.max_length,
pad_max_length=self.pad_max_length,
tokenizer=tokenizer,
add_eos=self.add_eos,
)
self.dataset_val = self.dataset_train
self.dataset_test = self.dataset_train
# if __name__ == '__main__':
# """Quick test using dataloader. Can't call from here though."""
# loader = HG38(
# bed_file='/home/exnx/enformer-pytorch/data/basenji/human-sequences.bed',
# fasta_file='/home/exnx/enformer-pytorch/data/basenji/hg38.ml.fa',
# tokenizer_name='char_level', max_length=2000
# )
# breakpoint()
# it = iter(ds)
# elem = next(it)
# print(len(elem))
# breakpoint()
|
hyena-dna-main
|
src/dataloaders/genomics.py
|
# Adapted from https://github.com/Lightning-AI/lightning/blob/2845e7565dbe6b765ae32870e7d2bc456529c30a/tests/tests_pytorch/utilities/test_auto_restart.py#L1397
from typing import Iterator
import math
import torch
from torch.utils.data import RandomSampler, DistributedSampler
class RandomFaultTolerantSampler(RandomSampler):
def __init__(self, *args, generator=None, **kwargs):
# generator = torch.Generator().manual_seed(seed)
# super().__init__(*args, generator=generator, **kwargs)
# TD [2022-07-17]: We don't force the seed to be zero. We generate random seed,
# which should be reproducible if pl.seed_everything was called before hand.
# This means that changing the seed of the experiment will also change the
# sampling order.
if generator is None:
seed = int(torch.empty((), dtype=torch.int64).random_().item())
generator = torch.Generator().manual_seed(seed)
super().__init__(*args, generator=generator, **kwargs)
self.counter = 0
# self.start_counter = 0
self.restarting = False
def state_dict(self):
return {"random_state": self.state, "counter": self.counter}
def load_state_dict(self, state_dict):
self.generator.set_state(state_dict.get("random_state"))
self.counter = state_dict["counter"]
# self.start_counter = self.counter
self.restarting = True
# TD [2022-08-28] Setting the len will cause PL to think there are only a few batches left per
# epoch, and subsequent epoch will have very few batches.
# def __len__(self):
# # We need a separate self.start_counter because PL seems to call len repeatedly.
# # If we use len(self.data_source) - self.counter then PL will think the epoch ends
# # when we're only half way through.
# return len(self.data_source) - self.start_counter
def __iter__(self) -> Iterator[int]:
n = len(self.data_source)
self.state = self.generator.get_state()
indices = torch.randperm(n, generator=self.generator).tolist()
if not self.restarting:
self.counter = 0
else:
indices = indices[self.counter:]
self.restarting = False
# self.start_counter = self.counter
for index in indices:
self.counter += 1
yield index
self.counter = 0
# self.start_counter = self.counter
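# --- Illustrative sketch (not part of the original file) ---
# How the fault-tolerant state round-trips: after consuming part of an epoch we snapshot
# the sampler, rebuild it, and resume the same permutation at the same offset.
def _example_fault_tolerant_resume():
    data = list(range(10))
    sampler = RandomFaultTolerantSampler(data)
    it = iter(sampler)
    seen = [next(it) for _ in range(4)]   # consume part of an epoch
    state = sampler.state_dict()          # {'random_state': <generator state>, 'counter': 4}
    resumed = RandomFaultTolerantSampler(data)
    resumed.load_state_dict(state)        # restore RNG state and position
    rest = list(resumed)                  # the remaining 6 indices of the same permutation
    return seen, rest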
class FaultTolerantDistributedSampler(DistributedSampler):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.counter = 0
# self.start_counter = 0
self.restarting = False
def state_dict(self):
return {"epoch": self.epoch, "counter": self.counter}
def load_state_dict(self, state_dict):
self.epoch = state_dict["epoch"]
self.counter = state_dict["counter"]
# self.start_counter = self.counter
self.restarting = True
# TD [2022-08-28] Setting the len will cause PL to think there are only a few batches left per
# epoch, and subsequent epoch will have very few batches.
# def __len__(self) -> int:
# return self.num_samples - self.start_counter
def __iter__(self):
if self.shuffle:
# deterministically shuffle based on epoch and seed
g = torch.Generator()
g.manual_seed(self.seed + self.epoch)
indices = torch.randperm(len(self.dataset), generator=g).tolist() # type: ignore[arg-type]
else:
indices = list(range(len(self.dataset))) # type: ignore[arg-type]
if not self.drop_last:
# add extra samples to make it evenly divisible
padding_size = self.total_size - len(indices)
if padding_size <= len(indices):
indices += indices[:padding_size]
else:
indices += (indices * math.ceil(padding_size / len(indices)))[:padding_size]
else:
# remove tail of data to make it evenly divisible.
indices = indices[:self.total_size]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
if not self.restarting:
self.counter = 0
else:
indices = indices[self.counter:]
self.restarting = False
# self.start_counter = self.counter
for index in indices:
self.counter += 1
yield index
self.counter = 0
# self.start_counter = self.counter
|
hyena-dna-main
|
src/dataloaders/fault_tolerant_sampler.py
|
""" Datasets for core experimental results """
import os
import pickle
from functools import partial
from pathlib import Path
import numpy as np
import torch
import torchvision
from einops import rearrange
from einops.layers.torch import Rearrange
from src.utils import is_list, permutations
from torch.nn import functional as F
def deprecated(cls_or_func):
def _deprecated(*args, **kwargs):
print(f"{cls_or_func} is deprecated")
return cls_or_func(*args, **kwargs)
return _deprecated
# Default data path is environment variable or hippo/data
if (default_data_path := os.getenv("DATA_PATH")) is None:
default_data_path = Path(__file__).parent.parent.parent.absolute()
default_data_path = default_data_path / "data"
else:
default_data_path = Path(default_data_path).absolute()
class DefaultCollateMixin:
"""Controls collating in the DataLoader
The CollateMixin classes instantiate a dataloader by separating collate arguments with the rest of the dataloader arguments. Instantiations of this class should modify the callback functions as desired, and modify the collate_args list. The class then defines a _dataloader() method which takes in a DataLoader constructor and arguments, constructs a collate_fn based on the collate_args, and passes the rest of the arguments into the constructor.
"""
@classmethod
def _collate_callback(cls, x, *args, **kwargs):
"""
Modify the behavior of the default _collate method.
"""
return x
_collate_arg_names = []
@classmethod
def _return_callback(cls, return_value, *args, **kwargs):
"""
Modify the return value of the collate_fn.
Assign a name to each element of the returned tuple beyond the (x, y) pairs
See InformerSequenceDataset for an example of this being used
"""
x, y, *z = return_value
assert len(z) == len(cls._collate_arg_names), "Specify a name for each auxiliary data item returned by dataset"
return x, y, {k: v for k, v in zip(cls._collate_arg_names, z)}
@classmethod
def _collate(cls, batch, *args, **kwargs):
        # From https://github.com/pytorch/pytorch/blob/master/torch/utils/data/_utils/collate.py
elem = batch[0]
if isinstance(elem, torch.Tensor):
out = None
if torch.utils.data.get_worker_info() is not None:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum(x.numel() for x in batch)
storage = elem.storage()._new_shared(numel)
out = elem.new(storage)
x = torch.stack(batch, dim=0, out=out)
# Insert custom functionality into the collate_fn
x = cls._collate_callback(x, *args, **kwargs)
return x
else:
return torch.tensor(batch)
@classmethod
def _collate_fn(cls, batch, *args, **kwargs):
"""
Default collate function.
Generally accessed by the dataloader() methods to pass into torch DataLoader
Arguments:
batch: list of (x, y) pairs
args, kwargs: extra arguments that get passed into the _collate_callback and _return_callback
"""
x, y, *z = zip(*batch)
x = cls._collate(x, *args, **kwargs)
y = cls._collate(y)
z = [cls._collate(z_) for z_ in z]
return_value = (x, y, *z)
return cls._return_callback(return_value, *args, **kwargs)
# List of loader arguments to pass into collate_fn
collate_args = []
def _dataloader(self, dataset, **loader_args):
collate_args = {k: loader_args[k] for k in loader_args if k in self.collate_args}
loader_args = {k: loader_args[k] for k in loader_args if k not in self.collate_args}
loader_cls = loader_registry[loader_args.pop("_name_", None)]
return loader_cls(
dataset=dataset,
collate_fn=partial(self._collate_fn, **collate_args),
**loader_args,
)
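def _default_collate_sketch():
    # Minimal usage sketch of DefaultCollateMixin._collate_fn: it stacks a list of
    # (x, y) pairs into batched tensors and returns an (empty) dict of extras.
    # The toy tensors below are assumptions for illustration only.
    batch = [(torch.zeros(8), torch.tensor(0)), (torch.ones(8), torch.tensor(1))]
    x, y, extra = DefaultCollateMixin._collate_fn(batch)
    assert x.shape == (2, 8) and y.shape == (2,) and extra == {}
    return x, y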
class SequenceResolutionCollateMixin(DefaultCollateMixin):
"""self.collate_fn(resolution) produces a collate function that subsamples elements of the sequence"""
@classmethod
def _collate_callback(cls, x, resolution=None):
if resolution is None:
pass
else:
# Assume x is (B, L_0, L_1, ..., L_k, C) for x.ndim > 2 and (B, L) for x.ndim = 2
assert x.ndim >= 2
n_resaxes = max(1, x.ndim - 2) # [AG 22/07/02] this line looks suspicious... are there cases with 2 axes?
# rearrange: b (l_0 res_0) (l_1 res_1) ... (l_k res_k) ... -> res_0 res_1 .. res_k b l_0 l_1 ...
lhs = "b " + " ".join([f"(l{i} res{i})" for i in range(n_resaxes)]) + " ..."
rhs = " ".join([f"res{i}" for i in range(n_resaxes)]) + " b " + " ".join([f"l{i}" for i in range(n_resaxes)]) + " ..."
x = rearrange(x, lhs + " -> " + rhs, **{f'res{i}': resolution for i in range(n_resaxes)})
x = x[tuple([0] * n_resaxes)]
return x
@classmethod
def _return_callback(cls, return_value, resolution=None):
return *return_value, {"rate": resolution}
collate_args = ['resolution']
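def _resolution_collate_sketch():
    # Minimal sketch of the `resolution` collate argument: with resolution=2 the
    # callback keeps every 2nd time step, i.e. x[:, ::2, :] for a (B, L, C) batch.
    # The toy shape below is an assumption for illustration only.
    x = torch.arange(2 * 8 * 3, dtype=torch.float32).reshape(2, 8, 3)
    sub = SequenceResolutionCollateMixin._collate_callback(x, resolution=2)
    assert torch.equal(sub, x[:, ::2, :])
    return sub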
class ImageResolutionCollateMixin(SequenceResolutionCollateMixin):
"""self.collate_fn(resolution, img_size) produces a collate function that resizes inputs to size img_size/resolution"""
_interpolation = torchvision.transforms.InterpolationMode.BILINEAR
_antialias = True
@classmethod
def _collate_callback(cls, x, resolution=None, img_size=None, channels_last=True):
if x.ndim < 4:
return super()._collate_callback(x, resolution=resolution)
if img_size is None:
x = super()._collate_callback(x, resolution=resolution)
else:
x = rearrange(x, 'b ... c -> b c ...') if channels_last else x
_size = round(img_size/resolution)
x = torchvision.transforms.functional.resize(
x,
size=[_size, _size],
interpolation=cls._interpolation,
antialias=cls._antialias,
)
x = rearrange(x, 'b c ... -> b ... c') if channels_last else x
return x
@classmethod
def _return_callback(cls, return_value, resolution=None, img_size=None, channels_last=True):
return *return_value, {"rate": resolution}
collate_args = ['resolution', 'img_size', 'channels_last']
# class SequenceDataset(LightningDataModule):
# [21-09-10 AG] Subclassing LightningDataModule fails due to trying to access _has_setup_fit. No idea why. So we just provide our own class with the same core methods as LightningDataModule (e.g. setup)
class SequenceDataset(DefaultCollateMixin):
registry = {}
_name_ = NotImplementedError("Dataset must have shorthand name")
# Since subclasses do not specify __init__ which is instead handled by this class
# Subclasses can provide a list of default arguments which are automatically registered as attributes
# TODO it might be possible to write this as a @dataclass, but it seems tricky to separate from the other features of this class such as the _name_ and d_input/d_output
@property
def init_defaults(self):
return {}
# https://www.python.org/dev/peps/pep-0487/#subclass-registration
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.registry[cls._name_] = cls
def __init__(self, _name_, data_dir=None, **dataset_cfg):
assert _name_ == self._name_
self.data_dir = Path(data_dir).absolute() if data_dir is not None else None
# Add all arguments to self
init_args = self.init_defaults.copy()
init_args.update(dataset_cfg)
for k, v in init_args.items():
setattr(self, k, v)
# The train, val, test datasets must be set by `setup()`
self.dataset_train = self.dataset_val = self.dataset_test = None
self.init()
def init(self):
"""Hook called at end of __init__, override this instead of __init__"""
pass
def setup(self):
"""This method should set self.dataset_train, self.dataset_val, and self.dataset_test."""
raise NotImplementedError
def split_train_val(self, val_split):
"""
Randomly split self.dataset_train into a new (self.dataset_train, self.dataset_val) pair.
"""
train_len = int(len(self.dataset_train) * (1.0 - val_split))
self.dataset_train, self.dataset_val = torch.utils.data.random_split(
self.dataset_train,
(train_len, len(self.dataset_train) - train_len),
generator=torch.Generator().manual_seed(
getattr(self, "seed", 42)
), # PL is supposed to have a way to handle seeds properly, but doesn't seem to work for us
)
def train_dataloader(self, **kwargs):
return self._train_dataloader(self.dataset_train, **kwargs)
def _train_dataloader(self, dataset, **kwargs):
if dataset is None: return
        kwargs['shuffle'] = 'sampler' not in kwargs  # shuffle can't be True if we have a custom sampler
return self._dataloader(dataset, **kwargs)
def val_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_val, **kwargs)
def test_dataloader(self, **kwargs):
return self._eval_dataloader(self.dataset_test, **kwargs)
def _eval_dataloader(self, dataset, **kwargs):
if dataset is None: return
# Note that shuffle=False by default
return self._dataloader(dataset, **kwargs)
def __str__(self):
return self._name_
class ResolutionSequenceDataset(SequenceDataset, SequenceResolutionCollateMixin):
def _train_dataloader(self, dataset, train_resolution=None, eval_resolutions=None, **kwargs):
if train_resolution is None: train_resolution = [1]
if not is_list(train_resolution): train_resolution = [train_resolution]
assert len(train_resolution) == 1, "Only one train resolution supported for now."
return super()._train_dataloader(dataset, resolution=train_resolution[0], **kwargs)
def _eval_dataloader(self, dataset, train_resolution=None, eval_resolutions=None, **kwargs):
if dataset is None: return
if eval_resolutions is None: eval_resolutions = [1]
if not is_list(eval_resolutions): eval_resolutions = [eval_resolutions]
dataloaders = []
for resolution in eval_resolutions:
dataloaders.append(super()._eval_dataloader(dataset, resolution=resolution, **kwargs))
return (
{
None if res == 1 else str(res): dl
for res, dl in zip(eval_resolutions, dataloaders)
}
if dataloaders is not None else None
)
class ImageResolutionSequenceDataset(ResolutionSequenceDataset, ImageResolutionCollateMixin):
pass
# Registry for dataloader class
loader_registry = {
None: torch.utils.data.DataLoader, # default case
}
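def _sequence_dataset_registry_sketch():
    # Minimal sketch of the registration pattern above: subclassing SequenceDataset
    # registers the class in SequenceDataset.registry under its `_name_`, and the
    # `init_defaults` entries become attributes in __init__. "toy" is an assumed
    # name for illustration only, not a real dataset in this repo.
    class ToyDataset(SequenceDataset):
        _name_ = "toy"
        init_defaults = {"max_length": 16}
        def setup(self):
            self.dataset_train = self.dataset_val = self.dataset_test = None
    ds = SequenceDataset.registry["toy"](_name_="toy")
    assert ds.max_length == 16 and str(ds) == "toy"
    return ds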
|
hyena-dna-main
|
src/dataloaders/base.py
|
import torch
import csv
import pandas as pd
import numpy as np
from tqdm import tqdm
import liftover
from pathlib import Path
from pyfaidx import Fasta
from random import randrange, random
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
        # if bp not in complement map, keep the same bp
else:
rev_comp += base
return rev_comp
class FastaInterval():
def __init__(
self,
*,
fasta_file,
# max_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file))
self.return_seq_indices = return_seq_indices
# self.max_length = max_length # -1 for adding sos or eos token
self.shift_augs = shift_augs
self.rc_aug = rc_aug
# calc len of each chromosome in fasta file, store in dict
self.chr_lens = {}
for chr_name in self.seqs.keys():
# remove tail end, might be gibberish code
# truncate_len = int(len(self.seqs[chr_name]) * 0.9)
# self.chr_lens[chr_name] = truncate_len
self.chr_lens[chr_name] = len(self.seqs[chr_name])
def __call__(self, chr_name, start, end, max_length, return_augs = False):
"""
max_length passed from dataset, not from init
"""
interval_length = end - start
chromosome = self.seqs[chr_name]
# chromosome_length = len(chromosome)
chromosome_length = self.chr_lens[chr_name]
if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
left_padding = right_padding = 0
# checks if not enough sequence to fill up the start to end
if interval_length < max_length:
extra_seq = max_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
# Added support! need to allow shorter seqs
if interval_length > max_length:
end = start + max_length
seq = str(chromosome[start:end])
if self.rc_aug and coin_flip():
seq = string_reverse_complement(seq)
seq = ('.' * left_padding) + seq + ('.' * right_padding)
return seq
class ChromatinProfileDataset(torch.utils.data.Dataset):
'''
Recreation of chromatin profile prediction benchmark from BigBird paper https://arxiv.org/abs/2007.14062
Original sequence coordinates and target labels are provided via a csv.
Original sequences have a length of 1000. This is changed to be max_length on the fly.
Target labels are read into a LongTensor. Coordinates are read into a DataFrame with columns "Chr_No" (0-based), "Start" and "End".
Original coordinates are in hg19 format named as train_hg19_coords_targets.csv etc.
Hg19 coordinates will be translated to hg38 if ref_genome_version=='hg38'.
    The translated coordinates can be saved to a new file, e.g. train_hg38_coords_targets.csv, so this only needs to be done once.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
max_length,
ref_genome_path=None,
ref_genome_version=None,
coords_target_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=False,
return_seq_indices=False,
shift_augs=None,
rc_aug=False,
return_augs=False,
save_liftover=False,
):
self.max_length = max_length
assert max_length%2==0 # check window is divisible by 2
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.rc_aug = rc_aug
self.ref_genome_version = ref_genome_version
# self.ref_genome = FastaInterval(fasta_file=ref_genome_path, max_length=self.max_length)
self.ref_genome = FastaInterval(fasta_file=ref_genome_path)
# Original data coordinates are from hg19.
# If ref genome is hg38 and original coordinates are provided these must be translated by liftover.
# Conversion only needs to be done once so save liftover coordinates to file optionally.
if self.ref_genome_version=='hg19':
if 'hg19' in coords_target_path.split('/')[-1]:
self.load_csv_data(coords_target_path)
else:
raise ValueError('Make sure data coordinates are in hg19 format (and put "hg19" in filename)')
elif self.ref_genome_version=='hg38':
if 'hg38' in coords_target_path.split('/')[-1]:
self.load_csv_data(coords_target_path)
elif 'hg19' in coords_target_path.split('/')[-1]:
self.load_csv_data(coords_target_path)
print('ref_genome_version = "hg38" but target coordinates are labelled "hg19"')
self.convert_coordinates(coords_target_path, save_liftover)
else:
raise ValueError('Make sure data coordinates have correct hg19/hg38 in filename')
else:
raise ValueError('ref_genome_version must be "hg19" or "hg38"')
# Move start/end to new window
# Window = 1000 used in raw coordinate data
self.coords['Start'] = self.coords['Start']-int((max_length-1000)/2)
self.coords['End'] = self.coords['End']+int((max_length-1000)/2)
def load_csv_data(self, coords_target_path):
# Grab sequence coordinates from csv
self.coords = pd.read_csv(
coords_target_path,
usecols=['Chr_No','Start','End'],
dtype={'Chr_No':np.int64,'Start':np.int64,'End':np.int64}
).reset_index(drop=True) # Note Chr_No is zero-based
# Quickly grab target column names
with open(coords_target_path, "r") as f:
reader = csv.reader(f)
header = next(reader)
self.target_columns = [col for col in header if col[:2]=='y_' ]
# Grab targets from csv and convert to torch long format
self.targets = torch.from_numpy(
pd.read_csv(
coords_target_path,
usecols=self.target_columns,
dtype={k:bool for k in self.target_columns}
).to_numpy()
).long()
def __len__(self):
return len(self.coords)
def __getitem__(self, idx):
y = self.targets[idx]
coord = self.coords.iloc[idx]
seq = self.ref_genome(
'chr{}'.format(coord['Chr_No']+1), # Make chromosome id 1-based
coord['Start'],
coord['End'],
max_length=self.max_length,
)
# # apply rc_aug here if using
# if self.rc_aug and coin_flip():
# seq = string_reverse_complement(seq)
if self.tokenizer==None:
return seq, y
        x = self.tokenizer(seq.upper())  # Apply upper() in case ref genome is soft masked
        x = torch.LongTensor(x["input_ids"])  # Grab input ids and convert to LongTensor
return x, y
def convert_coordinates(self, coords_target_path, save_liftover):
'''
Loop through coordinates and translate from hg19 to hg38.
Filter entries where liftover fails.
Save this to file so we only have to do it once.
'''
converter = liftover.get_lifter('hg19', 'hg38')
print("Translating coordinates from hg19 to hg38:")
for i in tqdm(range(len(self.coords))):
row = self.coords.iloc[i]
new_start = converter['chr{}'.format(row['Chr_No']+1)][row['Start']]
new_end = converter['chr{}'.format(row['Chr_No']+1)][row['End']]
            if (len(new_start) == 0) or (len(new_end) == 0):
                # If liftover fails set -999 for filtering
                # (assign via .at: chained .iloc[i][col] indexing writes to a copy, not the DataFrame)
                self.coords.at[i, 'Start'] = -999
            else:
                self.coords.at[i, 'Start'] = new_start[0][1]
                self.coords.at[i, 'End'] = new_end[0][1]
# Filter unmapped coordinates
n_before = len(self.coords)
self.coords = self.coords.query('Start!=-999')
n_after = len(self.coords)
print('Filtered {} unmapped coordinates. There are {} samples remaining'.format(n_before-n_after, n_after))
# Filter incorrect window sizes
n_before=n_after
self.coords = self.coords.query('End-Start==1000')
n_after = len(self.coords)
print('Filtered {} incorrect window sizes. There are {} samples remaining'.format(n_before-n_after, n_after))
# Reindex targets based on filtered coordinates and reset coordinate index
self.targets = self.targets[self.coords.index.to_numpy()]
self.coords.reset_index(inplace=True, names=['filter_index'])
assert len(self.targets) == len(self.coords) # Sanity check
if save_liftover: # save liftover coords in original format and change filename accordingly
hg38_coords_targets = pd.concat([self.coords, pd.DataFrame(columns=self.target_columns, data=self.targets)], axis=1)
print('Saving translated and filtered data to {}'.format(coords_target_path.replace('hg19','hg38')))
hg38_coords_targets.to_csv(coords_target_path.replace('hg19','hg38'))
del hg38_coords_targets
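def _chromatin_window_sketch():
    # Minimal sketch of the window recentering in __init__ above: the original
    # 1000 bp record window is grown symmetrically to max_length around its centre.
    # The numbers below are assumptions for illustration only.
    max_length, start, end = 2000, 10_000, 11_000
    start -= int((max_length - 1000) / 2)   # 10_000 -> 9_500
    end += int((max_length - 1000) / 2)     # 11_000 -> 11_500
    assert end - start == max_length
    return start, end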
|
hyena-dna-main
|
src/dataloaders/datasets/chromatin_profile_dataset.py
|
from pyfaidx import Fasta
import torch
from random import random
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
        # if bp not in complement map, keep the same bp
else:
rev_comp += base
return rev_comp
class NucleotideTransformerDataset(torch.utils.data.Dataset):
'''
Loop thru fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
split,
max_length,
dataset_name=None,
d_output=2, # default binary classification
dest_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=False,
rc_aug=False,
return_augs=False
):
self.max_length = max_length
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.d_output = d_output # needed for decoder to grab
self.rc_aug = rc_aug
# change "val" split to "test". No val available, just test
if split == "val":
split = "test"
# use Path object
base_path = Path(dest_path) / dataset_name
assert base_path.exists(), 'path to fasta file must exist'
for file in (base_path.iterdir()):
if str(file).endswith('.fasta') and split in str(file):
self.seqs = Fasta(str(file), read_long_names=True)
self.label_mapper = {}
for i, key in enumerate(self.seqs.keys()):
self.label_mapper[i] = (key, int(key.rstrip()[-1]))
def __len__(self):
return len(self.seqs.keys())
def __getitem__(self, idx):
seq_id = self.label_mapper[idx][0]
x = self.seqs[seq_id][:].seq # only one sequence
y = self.label_mapper[idx][1] # 0 or 1 for binary classification
# apply rc_aug here if using
if self.rc_aug and coin_flip():
x = string_reverse_complement(x)
seq = self.tokenizer(x,
add_special_tokens=False,
padding="max_length" if self.use_padding else None,
max_length=self.max_length,
truncation=True,
) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
        # convert to tensor
        seq = torch.LongTensor(seq)
        # wrap the label in a list so it becomes a 1-element LongTensor
        target = torch.LongTensor([y])
return seq, target
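def _nt_label_parsing_sketch():
    # Minimal sketch of how labels are derived above: the label is the last
    # character of the (stripped) fasta record name, 0 or 1 for binary
    # classification. The record name below is an assumption for illustration only.
    key = "enhancer_sample_17|label|1"
    label = int(key.rstrip()[-1])
    assert label == 1
    return label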
|
hyena-dna-main
|
src/dataloaders/datasets/nucleotide_transformer_dataset.py
|
from itertools import islice
from functools import partial
import os
import functools
# import json
# from pathlib import Path
# from pyfaidx import Fasta
# import polars as pl
# import pandas as pd
import torch
import torch.nn.functional as F  # needed by seq_indices_to_one_hot below
from random import randrange, random
import numpy as np
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from genomic_benchmarks.data_check import info
from genomic_benchmarks.data_check import list_datasets
from genomic_benchmarks.loc2seq import download_dataset
from genomic_benchmarks.dataset_getters import pytorch_datasets
from genomic_benchmarks.data_check import is_downloaded
from src.dataloaders.base import default_data_path
"""
Genomic Benchmarks Dataset, from:
https://github.com/ML-Bioinfo-CEITEC/genomic_benchmarks
"""
# helper functions
def exists(val):
return val is not None
def identity(t):
return t
def cast_list(t):
return t if isinstance(t, list) else [t]
def coin_flip():
return random() > 0.5
# genomic function transforms
seq_indices_embed = torch.zeros(256).long()
seq_indices_embed[ord('a')] = 0
seq_indices_embed[ord('c')] = 1
seq_indices_embed[ord('g')] = 2
seq_indices_embed[ord('t')] = 3
seq_indices_embed[ord('n')] = 4
seq_indices_embed[ord('A')] = 0
seq_indices_embed[ord('C')] = 1
seq_indices_embed[ord('G')] = 2
seq_indices_embed[ord('T')] = 3
seq_indices_embed[ord('N')] = 4
seq_indices_embed[ord('.')] = -1
one_hot_embed = torch.zeros(256, 4)
one_hot_embed[ord('a')] = torch.Tensor([1., 0., 0., 0.])
one_hot_embed[ord('c')] = torch.Tensor([0., 1., 0., 0.])
one_hot_embed[ord('g')] = torch.Tensor([0., 0., 1., 0.])
one_hot_embed[ord('t')] = torch.Tensor([0., 0., 0., 1.])
one_hot_embed[ord('n')] = torch.Tensor([0., 0., 0., 0.])
one_hot_embed[ord('A')] = torch.Tensor([1., 0., 0., 0.])
one_hot_embed[ord('C')] = torch.Tensor([0., 1., 0., 0.])
one_hot_embed[ord('G')] = torch.Tensor([0., 0., 1., 0.])
one_hot_embed[ord('T')] = torch.Tensor([0., 0., 0., 1.])
one_hot_embed[ord('N')] = torch.Tensor([0., 0., 0., 0.])
one_hot_embed[ord('.')] = torch.Tensor([0.25, 0.25, 0.25, 0.25])
reverse_complement_map = torch.Tensor([3, 2, 1, 0, 4]).long()
def torch_fromstring(seq_strs):
batched = not isinstance(seq_strs, str)
seq_strs = cast_list(seq_strs)
np_seq_chrs = list(map(lambda t: np.fromstring(t, dtype = np.uint8), seq_strs))
seq_chrs = list(map(torch.from_numpy, np_seq_chrs))
return torch.stack(seq_chrs) if batched else seq_chrs[0]
def str_to_seq_indices(seq_strs):
seq_chrs = torch_fromstring(seq_strs)
return seq_indices_embed[seq_chrs.long()]
def str_to_one_hot(seq_strs):
seq_chrs = torch_fromstring(seq_strs)
return one_hot_embed[seq_chrs.long()]
def seq_indices_to_one_hot(t, padding = -1):
is_padding = t == padding
t = t.clamp(min = 0)
one_hot = F.one_hot(t, num_classes = 5)
out = one_hot[..., :4].float()
out = out.masked_fill(is_padding[..., None], 0.25)
return out
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
        # if bp not in complement map, keep the same bp
else:
rev_comp += base
return rev_comp
def seq_indices_reverse_complement(seq_indices):
complement = reverse_complement_map[seq_indices.long()]
return torch.flip(complement, dims = (-1,))
def one_hot_reverse_complement(one_hot):
*_, n, d = one_hot.shape
assert d == 4, 'must be one hot encoding with last dimension equal to 4'
return torch.flip(one_hot, (-1, -2))
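def _one_hot_transform_sketch():
    # Minimal sketch of the encoding tables above: 'A' maps to [1, 0, 0, 0] and the
    # '.' padding character maps to a uniform 0.25 distribution. The input string
    # is an assumption for illustration only.
    oh = str_to_one_hot("ACGT.")
    assert oh.shape == (5, 4)
    assert torch.equal(oh[0], torch.tensor([1., 0., 0., 0.]))
    assert torch.allclose(oh[4], torch.full((4,), 0.25))
    return oh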
class GenomicBenchmarkDataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
split,
max_length,
dataset_name="human_nontata_promoters",
d_output=2, # default binary classification
dest_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=False,
rc_aug=False,
return_augs=False
):
self.max_length = max_length
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.d_output = d_output # needed for decoder to grab
self.rc_aug = rc_aug
if not is_downloaded(dataset_name, cache_path=dest_path):
print("downloading {} to {}".format(dataset_name, dest_path))
download_dataset(dataset_name, version=0, dest_path=dest_path)
else:
print("already downloaded {}-{}".format(split, dataset_name))
# change "val" split to "test". No val available, just test
if split == "val":
split = "test"
# use Path object
base_path = Path(dest_path) / dataset_name / split
self.all_paths = []
self.all_labels = []
label_mapper = {}
for i, x in enumerate(base_path.iterdir()):
label_mapper[x.stem] = i
for label_type in label_mapper.keys():
for x in (base_path / label_type).iterdir():
self.all_paths.append(x)
self.all_labels.append(label_mapper[label_type])
def __len__(self):
return len(self.all_paths)
def __getitem__(self, idx):
txt_path = self.all_paths[idx]
with open(txt_path, "r") as f:
content = f.read()
x = content
y = self.all_labels[idx]
# apply rc_aug here if using
if self.rc_aug and coin_flip():
x = string_reverse_complement(x)
seq = self.tokenizer(x,
add_special_tokens=False,
padding="max_length" if self.use_padding else None,
max_length=self.max_length,
truncation=True,
) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
        # convert to tensor
        seq = torch.LongTensor(seq)
        # wrap the label in a list so it becomes a 1-element LongTensor
        target = torch.LongTensor([y])
return seq, target
if __name__ == '__main__':
"""Quick test loading dataset.
example
python -m src.dataloaders.datasets.genomic_bench_dataset
"""
max_length = 300 # max len of seq grabbed
use_padding = True
dest_path = "data/genomic_benchmark/"
tokenizer = CharacterTokenizer(
characters=['A', 'C', 'G', 'T', 'N'],
# not sure why tokenizer needs max len
        model_max_length=max_length + 2,  # add 2 since tokenizer adds cls/eos tokens by default, crop later
add_special_tokens=False,
padding_side='left',
)
ds = GenomicBenchmarkDataset(
max_length = max_length,
use_padding = use_padding,
split = 'train', #
tokenizer=tokenizer,
tokenizer_name='char',
dest_path=dest_path,
# add_eos=False,
)
# it = iter(ds)
# elem = next(it)
# print('elem[0].shape', elem[0].shape)
# print(elem)
# breakpoint()
|
hyena-dna-main
|
src/dataloaders/datasets/genomic_bench_dataset.py
|
"""
From: https://github.com/dariush-bahrami/character-tokenizer/blob/master/charactertokenizer/core.py
CharacterTokenizer for Hugging Face Transformers.
This is heavily inspired by the CanineTokenizer in the transformers package.
"""
import json
import os
from pathlib import Path
from typing import Dict, List, Optional, Sequence, Union
from transformers.tokenization_utils import AddedToken, PreTrainedTokenizer
class CharacterTokenizer(PreTrainedTokenizer):
def __init__(self, characters: Sequence[str], model_max_length: int, padding_side: str='left', **kwargs):
"""Character tokenizer for Hugging Face transformers.
Args:
characters (Sequence[str]): List of desired characters. Any character which
is not included in this list will be replaced by a special token called
                [UNK] with id=6. Following is a list of all of the special tokens with
their corresponding ids:
"[CLS]": 0
"[SEP]": 1
"[BOS]": 2
"[MASK]": 3
"[PAD]": 4
"[RESERVED]": 5
"[UNK]": 6
an id (starting at 7) will be assigned to each character.
model_max_length (int): Model maximum sequence length.
"""
self.characters = characters
self.model_max_length = model_max_length
bos_token = AddedToken("[BOS]", lstrip=False, rstrip=False)
eos_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
sep_token = AddedToken("[SEP]", lstrip=False, rstrip=False)
cls_token = AddedToken("[CLS]", lstrip=False, rstrip=False)
pad_token = AddedToken("[PAD]", lstrip=False, rstrip=False)
unk_token = AddedToken("[UNK]", lstrip=False, rstrip=False)
mask_token = AddedToken("[MASK]", lstrip=True, rstrip=False)
super().__init__(
bos_token=bos_token,
eos_token=sep_token,
sep_token=sep_token,
cls_token=cls_token,
pad_token=pad_token,
mask_token=mask_token,
unk_token=unk_token,
add_prefix_space=False,
model_max_length=model_max_length,
padding_side=padding_side,
**kwargs,
)
self._vocab_str_to_int = {
"[CLS]": 0,
"[SEP]": 1,
"[BOS]": 2,
"[MASK]": 3,
"[PAD]": 4,
"[RESERVED]": 5,
"[UNK]": 6,
**{ch: i + 7 for i, ch in enumerate(characters)},
}
self._vocab_int_to_str = {v: k for k, v in self._vocab_str_to_int.items()}
@property
def vocab_size(self) -> int:
return len(self._vocab_str_to_int)
def _tokenize(self, text: str) -> List[str]:
return list(text)
def _convert_token_to_id(self, token: str) -> int:
return self._vocab_str_to_int.get(token, self._vocab_str_to_int["[UNK]"])
def _convert_id_to_token(self, index: int) -> str:
return self._vocab_int_to_str[index]
def convert_tokens_to_string(self, tokens):
return "".join(tokens)
def build_inputs_with_special_tokens(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = cls + token_ids_0 + sep
if token_ids_1 is not None:
result += token_ids_1 + sep
return result
def get_special_tokens_mask(
self,
token_ids_0: List[int],
token_ids_1: Optional[List[int]] = None,
already_has_special_tokens: bool = False,
) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_0=token_ids_0,
token_ids_1=token_ids_1,
already_has_special_tokens=True,
)
result = [1] + ([0] * len(token_ids_0)) + [1]
if token_ids_1 is not None:
result += ([0] * len(token_ids_1)) + [1]
return result
def create_token_type_ids_from_sequences(
self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
) -> List[int]:
sep = [self.sep_token_id]
cls = [self.cls_token_id]
result = len(cls + token_ids_0 + sep) * [0]
if token_ids_1 is not None:
result += len(token_ids_1 + sep) * [1]
return result
def get_config(self) -> Dict:
return {
"char_ords": [ord(ch) for ch in self.characters],
"model_max_length": self.model_max_length,
}
@classmethod
def from_config(cls, config: Dict) -> "CharacterTokenizer":
cfg = {}
cfg["characters"] = [chr(i) for i in config["char_ords"]]
cfg["model_max_length"] = config["model_max_length"]
return cls(**cfg)
def save_pretrained(self, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
cfg = self.get_config()
with open(cfg_file, "w") as f:
json.dump(cfg, f, indent=4)
@classmethod
def from_pretrained(cls, save_directory: Union[str, os.PathLike], **kwargs):
cfg_file = Path(save_directory) / "tokenizer_config.json"
with open(cfg_file) as f:
cfg = json.load(f)
return cls.from_config(cfg)
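def _character_tokenizer_sketch():
    # Minimal sketch of the vocabulary described in the class docstring: special
    # tokens take ids 0-6 and characters are assigned ids starting at 7, so with
    # the DNA alphabet below 'A' -> 7, 'C' -> 8, 'G' -> 9, 'T' -> 10. The call is
    # an assumed usage for illustration only.
    tok = CharacterTokenizer(characters=['A', 'C', 'G', 'T', 'N'], model_max_length=16)
    ids = tok("ACGT", add_special_tokens=False)["input_ids"]
    assert ids == [7, 8, 9, 10]
    return ids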
|
hyena-dna-main
|
src/dataloaders/datasets/hg38_char_tokenizer.py
|
from pathlib import Path
from pyfaidx import Fasta
import polars as pl
import pandas as pd
import torch
from random import randrange, random
import numpy as np
"""
Dataset for sampling arbitrary intervals from the human genome.
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
        # if bp not in complement map, keep the same bp
else:
rev_comp += base
return rev_comp
class FastaInterval():
def __init__(
self,
*,
fasta_file,
# max_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False,
pad_interval = False,
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file))
self.return_seq_indices = return_seq_indices
# self.max_length = max_length # -1 for adding sos or eos token
self.shift_augs = shift_augs
self.rc_aug = rc_aug
self.pad_interval = pad_interval
# calc len of each chromosome in fasta file, store in dict
self.chr_lens = {}
for chr_name in self.seqs.keys():
# remove tail end, might be gibberish code
# truncate_len = int(len(self.seqs[chr_name]) * 0.9)
# self.chr_lens[chr_name] = truncate_len
self.chr_lens[chr_name] = len(self.seqs[chr_name])
def __call__(self, chr_name, start, end, max_length, return_augs = False):
"""
max_length passed from dataset, not from init
"""
interval_length = end - start
chromosome = self.seqs[chr_name]
# chromosome_length = len(chromosome)
chromosome_length = self.chr_lens[chr_name]
if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
left_padding = right_padding = 0
# checks if not enough sequence to fill up the start to end
if interval_length < max_length:
extra_seq = max_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
# Added support! need to allow shorter seqs
if interval_length > max_length:
end = start + max_length
seq = str(chromosome[start:end])
if self.rc_aug and coin_flip():
seq = string_reverse_complement(seq)
if self.pad_interval:
seq = ('.' * left_padding) + seq + ('.' * right_padding)
return seq
class HG38Dataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
'''
def __init__(
self,
split,
bed_file,
fasta_file,
max_length,
pad_max_length=None,
tokenizer=None,
tokenizer_name=None,
add_eos=False,
return_seq_indices=False,
shift_augs=None,
rc_aug=False,
return_augs=False,
replace_N_token=False, # replace N token with pad token
pad_interval = False, # options for different padding
):
self.max_length = max_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.replace_N_token = replace_N_token
self.pad_interval = pad_interval
bed_path = Path(bed_file)
assert bed_path.exists(), 'path to .bed file must exist'
# read bed file
df_raw = pd.read_csv(str(bed_path), sep = '\t', names=['chr_name', 'start', 'end', 'split'])
# select only split df
self.df = df_raw[df_raw['split'] == split]
self.fasta = FastaInterval(
fasta_file = fasta_file,
# max_length = max_length,
return_seq_indices = return_seq_indices,
shift_augs = shift_augs,
rc_aug = rc_aug,
pad_interval = pad_interval,
)
def __len__(self):
return len(self.df)
def replace_value(self, x, old_value, new_value):
return torch.where(x == old_value, new_value, x)
def __getitem__(self, idx):
"""Returns a sequence of specified len"""
# sample a random row from df
row = self.df.iloc[idx]
# row = (chr, start, end, split)
chr_name, start, end = (row[0], row[1], row[2])
seq = self.fasta(chr_name, start, end, max_length=self.max_length, return_augs=self.return_augs)
if self.tokenizer_name == 'char':
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
add_special_tokens=False) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
elif self.tokenizer_name == 'bpe':
seq = self.tokenizer(seq,
# add_special_tokens=False,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
)
# get input_ids
if self.add_eos:
seq = seq["input_ids"][1:] # remove the bos, keep the eos token
else:
seq = seq["input_ids"][1:-1] # remove both special tokens
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
if self.replace_N_token:
# replace N token with a pad token, so we can ignore it in the loss
seq = self.replace_value(seq, self.tokenizer._vocab_str_to_int['N'], self.tokenizer.pad_token_id)
data = seq[:-1].clone() # remove eos
target = seq[1:].clone() # offset by 1, includes eos
return data, target
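def _next_token_pair_sketch():
    # Minimal sketch of the (data, target) construction in __getitem__ above: input
    # and target are the same token stream offset by one position, so the model
    # predicts the next token (including the final eos). The toy ids below are
    # assumptions for illustration only.
    seq = torch.LongTensor([7, 8, 9, 10, 1])            # e.g. A C G T [SEP]
    data, target = seq[:-1].clone(), seq[1:].clone()
    assert torch.equal(data, torch.LongTensor([7, 8, 9, 10]))
    assert torch.equal(target, torch.LongTensor([8, 9, 10, 1]))
    return data, target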
|
hyena-dna-main
|
src/dataloaders/datasets/hg38_dataset.py
|
# Inspired by https://github.com/NVIDIA/Megatron-LM/blob/main/tasks/zeroshot_gpt/datasets.py
# Except we don't pad the last block and don't use overlapping eval
# And we return both the input and the target
import math
import numpy as np
import torch
class LMDataset(torch.utils.data.Dataset):
def __init__(self, tokens, seq_len, drop_last=True):
"""tokens should be a numpy array
"""
self.seq_len = seq_len
ntokens = len(tokens)
if drop_last:
ntokens = ((ntokens - 1) // seq_len) * seq_len + 1
self.ntokens = ntokens
# We're careful not to slice tokens, since it could be a memmap'ed array or H5 dataset,
# and slicing would load it to memory.
self.tokens = tokens
self.total_sequences = math.ceil((self.ntokens - 1) / self.seq_len)
def __len__(self):
return self.total_sequences
def __getitem__(self, idx):
start_idx = idx * self.seq_len
seq_len = min(self.seq_len, self.ntokens - 1 - start_idx)
data = torch.as_tensor(self.tokens[start_idx:(start_idx + seq_len + 1)].astype(np.int64))
return data[:-1], data[1:].clone()
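def _lm_dataset_sketch():
    # Minimal sketch of the chunking arithmetic above: 10 tokens with seq_len=4 and
    # drop_last=True keep 9 usable tokens and yield 2 non-overlapping (input, target)
    # pairs; the final partial chunk is dropped. The toy token array is an
    # assumption for illustration only.
    tokens = np.arange(10, dtype=np.int64)
    ds = LMDataset(tokens, seq_len=4)
    assert len(ds) == 2
    x, y = ds[0]
    assert torch.equal(x, torch.tensor([0, 1, 2, 3]))
    assert torch.equal(y, torch.tensor([1, 2, 3, 4]))
    return ds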
|
hyena-dna-main
|
src/dataloaders/datasets/lm_dataset.py
|
import torch
from random import random, randint
import numpy as np
from pathlib import Path
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
from genomic_benchmarks.loc2seq import download_dataset
from genomic_benchmarks.data_check import is_downloaded
"""
In-Context learning version of Genomic Benchmarks Dataset
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
        # if bp not in complement map, keep the same bp
else:
rev_comp += base
return rev_comp
class ICLGenomicsDataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
split: str,
shots: int,
max_length: int,
label_to_token: dict=None,
dataset_name="human_nontata_promoters",
d_output=2, # default binary classification
dest_path=None,
tokenizer=None,
tokenizer_name=None,
use_padding=None,
add_eos=True, # need this for current ICL setup
eos_token=None, # end of sequence token (None defaults to tokenizer.sep_token)
rc_aug=False,
):
self.shots = shots
self.label_to_token = {0: 'A', 1: 'N'} if label_to_token is None else label_to_token
self.max_length = max_length
self.use_padding = use_padding
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.add_eos = add_eos
self.eos_token = eos_token
self.d_output = d_output # needed for decoder to grab
self.rc_aug = rc_aug
if not is_downloaded(dataset_name, cache_path=dest_path):
print("downloading {} to {}".format(dataset_name, dest_path))
download_dataset(dataset_name, version=0, dest_path=dest_path)
else:
print("already downloaded {}-{}".format(split, dataset_name))
# change "val" split to "test". No val available, just test
if split == "val":
split = "test"
# use Path object
base_path = Path(dest_path) / dataset_name / split
self.all_paths = []
self.all_labels = []
label_mapper = {}
for i, x in enumerate(base_path.iterdir()):
label_mapper[x.stem] = i
for label_type in label_mapper.keys():
for x in (base_path / label_type).iterdir():
self.all_paths.append(x)
self.all_labels.append(label_mapper[label_type])
self.unique_labels = label_mapper.values()
self.n_samples = len(self.all_paths)
def __len__(self):
return self.n_samples
def get_sample_from_idx(self, idx):
txt_path = self.all_paths[idx]
with open(txt_path, "r") as f:
content = f.read()
x = content
y = self.all_labels[idx]
# apply rc_aug here if using
if self.rc_aug and coin_flip():
x = string_reverse_complement(x)
seq = self.tokenizer(x,
add_special_tokens=False,
padding="max_length" if self.use_padding else None,
max_length=self.max_length,
truncation=True,
) # add cls and eos token (+2)
seq = seq["input_ids"] # get input_ids
if len(self.label_to_token[y])>1:
# to get cls token, we can't use the normal self.tokenizer, which will split into separate chars,
# we need to lookup the vocab dict directly, while using UNK by default if not found
            # use the multi-character label string as the cls token
target = [self.tokenizer._vocab_str_to_int.get(self.label_to_token[y], self.tokenizer._vocab_str_to_int["[UNK]"])]
else:
target = self.tokenizer(self.label_to_token[y], add_special_tokens=False)['input_ids']
# need to handle eos here
eos_token = [self.tokenizer.sep_token_id] if not exists(self.eos_token) else self.tokenizer(self.eos_token, add_special_tokens=False)['input_ids']
if self.add_eos:
seq = seq + eos_token
if self.add_eos:
target = target + eos_token
# convert to tensor
seq = torch.LongTensor(seq)
target = torch.LongTensor(target)
return seq, target
def __getitem__(self, idx):
test_seq, test_target = self.get_sample_from_idx(idx)
test_target = test_target[0].unsqueeze(0)
if self.shots==0:
return test_seq, test_target
shot_indices = {}
for label in self.unique_labels:
label_indices = np.where(np.array(self.all_labels)==label)[0]
label_indices = np.array([i for i in label_indices if i!=idx])
shot_indices[label] = np.random.choice(label_indices, size=self.shots, replace=False)
shots = []
for shot in range(self.shots):
for label in shot_indices:
seq, target = self.get_sample_from_idx(shot_indices[label][shot])
shots.append(torch.cat([seq, target],dim=0))
# lets shuffle the shots to avoid always having the same order
np.random.shuffle(shots)
shots = torch.cat([torch.cat(shots, dim=0), test_seq], dim=0)
return shots, test_target
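def _icl_shot_assembly_sketch():
    # Minimal sketch of the few-shot prompt assembly in __getitem__ above: each shot
    # is its tokenized sequence concatenated with its label (and eos) tokens, the
    # shots are concatenated, and the query sequence is appended last. The toy id
    # tensors below are assumptions for illustration only.
    shot_a = torch.LongTensor([7, 8, 9, 7, 1])    # sequence + label token + eos
    shot_b = torch.LongTensor([10, 9, 8, 11, 1])
    query = torch.LongTensor([8, 8, 10])
    prompt = torch.cat([torch.cat([shot_a, shot_b], dim=0), query], dim=0)
    assert prompt.shape[0] == len(shot_a) + len(shot_b) + len(query)
    return prompt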
|
hyena-dna-main
|
src/dataloaders/datasets/icl_genomics_dataset.py
|
from itertools import islice
from functools import partial
# import tensorflow as tf
import os
import functools
import json
from pathlib import Path
from pyfaidx import Fasta
import polars as pl
import pandas as pd
import torch
from random import randrange, random, randint
import numpy as np
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
"""
Modifying the hg38 pretraining dataset to include the chromosome token as a class token at the end. This
will help introduce the concept of class appending for ICL downstream.
"""
# helper functions
def exists(val):
return val is not None
def coin_flip():
return random() > 0.5
# augmentations
string_complement_map = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A', 'a': 't', 'c': 'g', 'g': 'c', 't': 'a'}
def string_reverse_complement(seq):
rev_comp = ''
for base in seq[::-1]:
if base in string_complement_map:
rev_comp += string_complement_map[base]
        # if bp not in complement map, keep the same bp
else:
rev_comp += base
return rev_comp
class FastaInterval():
def __init__(
self,
*,
fasta_file,
max_length = None,
return_seq_indices = False,
shift_augs = None,
rc_aug = False
):
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file), sequence_always_upper=True)
self.return_seq_indices = return_seq_indices
self.max_length = max_length # -1 for adding sos or eos token
self.shift_augs = shift_augs
self.rc_aug = rc_aug
# calc len of each chromosome in fasta file, store in dict
self.chr_lens = {}
for chr_name in self.seqs.keys():
self.chr_lens[chr_name] = len(self.seqs[chr_name])
def __call__(self, chr_name, start, end, return_augs = False):
interval_length = end - start
chromosome = self.seqs[chr_name]
chromosome_length = self.chr_lens[chr_name]
if exists(self.shift_augs):
min_shift, max_shift = self.shift_augs
max_shift += 1
min_shift = max(start + min_shift, 0) - start
max_shift = min(end + max_shift, chromosome_length) - end
rand_shift = randrange(min_shift, max_shift)
start += rand_shift
end += rand_shift
left_padding = right_padding = 0
# checks if not enough sequence to fill up the start to end
if exists(self.max_length) and interval_length < self.max_length:
extra_seq = self.max_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
# Added support! need to allow shorter seqs
        if exists(self.max_length) and interval_length > self.max_length:
end = start + self.max_length
seq = str(chromosome[start:end])
if self.rc_aug and coin_flip():
seq = string_reverse_complement(seq)
seq = ('.' * left_padding) + seq + ('.' * right_padding)
return seq
class ICL_HG38Dataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
split,
bed_file,
fasta_file,
max_length,
min_length=None,
variable_length=False, # if you want a var length between min and max length, else len = max_length always
pad_max_length=None,
tokenizer=None,
tokenizer_name=None,
add_eos=False,
return_seq_indices=False,
shift_augs=None,
rc_aug=False,
return_augs=False
):
self.min_length = min_length if min_length is not None else 0.25 * max_length
self.max_length = max_length
self.variable_length = variable_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
bed_path = Path(bed_file)
assert bed_path.exists(), 'path to .bed file must exist'
# read bed file
df_raw = pd.read_csv(str(bed_path), sep = '\t', names=['chr_name', 'start', 'end', 'split'])
# select only split df
self.df = df_raw[df_raw['split'] == split]
self.fasta = FastaInterval(
fasta_file = fasta_file,
max_length = max_length,
return_seq_indices = return_seq_indices,
shift_augs = shift_augs,
rc_aug = rc_aug,
)
def __len__(self):
return len(self.df)
def __getitem__(self, idx):
"""Returns a sequence of specified len"""
# sample a random row from df
row = self.df.iloc[idx]
# row = (chr, start, end, split)
chr_name, start, end = (row[0], row[1], row[2])
seq = self.fasta(chr_name, start, end, return_augs=self.return_augs)
if self.variable_length:
# sample a random len between min and max
seq_len = randint(self.min_length, self.max_length)
seq = seq[:seq_len]
if self.variable_length:
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.max_length,
truncation=True,
add_special_tokens=False,
)
else:
# fixed size each time
seq = self.tokenizer(seq,
add_special_tokens=False,
max_length=self.pad_max_length
)
seq = seq["input_ids"] # get input_ids
sep_token = self.tokenizer.sep_token_id
# to get cls token, we can't use the normal self.tokenizer, which will split into separate chars,
# we need to lookup the vocab dict directly, while using UNK by default if not found
# use the chr_name as the cls token
cls_token = self.tokenizer._vocab_str_to_int.get(chr_name, self.tokenizer._vocab_str_to_int["[UNK]"])
# build token ICL sample structure
# x = seq[1:] + sep + cls
# remove 1 from left side (pad side) so that we can add an extra sep_token between, and still have max_length seq
# need to wrap single tokens in a list to be able to add this way
seq_sample = seq[1:] + [sep_token] + [cls_token]
# convert to tensor
seq_sample = torch.LongTensor(seq_sample)
data = seq_sample[:-1].clone() # remove cls token in data, (or input x)
target = seq_sample[1:].clone() # offset by 1, includes cls token
return data, target
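def _icl_hg38_sample_sketch():
    # Minimal sketch of the ICL sample structure built in __getitem__ above: the
    # chromosome class token is appended after a separator, and data/target are the
    # stream offset by one so the final prediction is the class token. The toy ids
    # below are assumptions for illustration only.
    seq, sep_token, cls_token = [7, 8, 9, 10], 1, 12
    seq_sample = torch.LongTensor(seq[1:] + [sep_token] + [cls_token])
    data, target = seq_sample[:-1].clone(), seq_sample[1:].clone()
    assert data[-1].item() == sep_token and target[-1].item() == cls_token
    return data, target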
|
hyena-dna-main
|
src/dataloaders/datasets/hg38_icl_dataset.py
|
import os
from pathlib import Path
from pyfaidx import Fasta
import torch
import shutil
import gzip
import random
from typing import Optional, Union, Dict, List
from src.dataloaders.datasets.hg38_char_tokenizer import CharacterTokenizer
import collections
"""
Dataset that randomly samples sequences of length (X) from a species' whole genome.
Given a specific species, it will...
1. Randomly sample a chromosome from that species
2. Randomly sample a sequence of length X from that chromosome
All sampled sequences will be the same size.
If a sequence is truncated by the end of a chromosome, it will be padded with 'N'
Char sequences (not one hots yet)
No augmentations yet.
"""
# Determine chromosomes to use for train/test split
SPECIES_CHROMOSOME_SPLITS = {
'human' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'lemur' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'goat' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', '27', '28', '29', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'sheep' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', '19', '20', '21', '22', '23', '24', '25', '26', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'pig' : {
'train' : [ '2', '4', '6', '8','14', '15', '16', '17', '18', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'mouse' : {
'train' : [ '2', '4', '6', '8', '14', '15', '16', '17', '18', '19', 'X', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'gorilla' : {
'train' : [ '2A', '2B', '4', '6', '8', '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'orangutan' : {
'train' : [ '2A', '2B', '4', '6', '8', '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'chimpanzee' : {
'train' : [ '2A', '2B', '4', '6', '8', '14', '15', '16', '17', '18', '19', '20', '21', '22', 'X', 'Y', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
},
'hippo' : {
'train' : [ '2', '4', '6', '8', '14', '15', '16', '17', 'X', ],
'valid' : ['1', '3', '12', '13',],
'test' : [ '5', '7', '9', '10', '11',],
}
}
class SpeciesDataset(torch.utils.data.Dataset):
'''
Loop thru fasta files (separated by chromosome) and return a sequence of length `max_length` from a random chromosome.
'''
def __init__(
self,
species: list,
species_dir: str,
split: str,
max_length,
total_size,
pad_max_length=None,
tokenizer=None,
tokenizer_name=None,
add_eos=False,
rc_aug=False,
return_augs=False,
chromosome_weights: Optional[Union[Dict[str, List[float]], str]]='uniform',
species_weights: Optional[Union[List[float], str]]='uniform',
task='species_classification|next_token_pred',
remove_tail_ends=False,
cutoff_train=0.1,
cutoff_test=0.2,
):
"""
`chromosome_weights` => can be either...
- String of form 'uniform|weighted_by_bp', in which case every species' chromosomes will be sampled accordingly
- Dict of form {species: [chromosome weight1, chromosome weight 2, ...]
`species_weights` => can be either...
- String of form 'uniform|weighted_by_bp'
- List of form [ species weight1, species weight2, ... ]
"""
self.max_length = max_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer_name = tokenizer_name
self.tokenizer = tokenizer
self.return_augs = return_augs
self.add_eos = add_eos
self.species = species
self.species_dir = species_dir
self.split = split
self.total_size = total_size
self.task = task
self.d_output = len(self.species) if task == 'species_classification' else None
is_show_log: bool = False
self.remove_tail_ends = remove_tail_ends
self.cutoff_train = cutoff_train
self.cutoff_test = cutoff_test
if task == 'species_classification' and self.d_output < 2:
print(f'Note that `d_output` should be >= 2 for task `{task}`, otherwise you are only predicting one class. Got {self.d_output}')
# Store FASTAs for each species
self.fastas: Dict[str, Dict[str, Fasta]] = collections.defaultdict(dict) # [key] = species -> dict where [key] = chromosome, [value] = Fasta object
self.chromosomes: Dict[str, List[str]] = {} # [key] = species, [value] = list of chromosomes in this split
self.chromosome_weights: Dict[str, List[float]] = {} # [key] = species, [value] = list where [idx] = self.chromosomes[species][idx], [value] = weight
self.species_weights: List[float] = [] # [idx] = self.species[idx], [value] = weight
# For every species in `self.species`, load all chromosomes belonging to `split`
for spec in self.species:
species_path = Path(self.species_dir) / spec
assert species_path.exists(), f'The path `{species_path}` does not exist for species `{spec}`. Please point to a valid directory containing your species fna.gz files.'
# Select chromosomes for this split
assert spec in SPECIES_CHROMOSOME_SPLITS, f'Unrecognized species `{spec}`. Valid species are: {list(SPECIES_CHROMOSOME_SPLITS.keys())}.'
self.chromosomes[spec] = SPECIES_CHROMOSOME_SPLITS[spec][split]
# Load all .fna files of chromosomes in this split
for chromosome in self.chromosomes[spec]:
# Unzip if necessary
gz_file_path = os.path.join(species_path, f'chr{chromosome}.fna.gz')
if os.path.exists(gz_file_path) and not (
os.path.exists(os.path.join(species_path, f'chr{chromosome}.fna')) or
os.path.exists(os.path.join(species_path, f'chr{chromosome}.fa'))
):
if is_show_log:
print(f"Unzipping {gz_file_path}...")
with gzip.open(gz_file_path, 'rb') as f_in:
with open(os.path.join(species_path, f'chr{chromosome}.fna'), 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
# Read .fna or .fa file, whichever we can find
file_paths = [ os.path.join(species_path, x) for x in [ f'chr{chromosome}.fna', f'chr{chromosome}.fa' ] ]
is_file_found: bool = False
for file_path in file_paths:
if os.path.exists(file_path):
if chromosome not in self.fastas[spec]:
self.fastas[spec][chromosome] = Fasta(file_path, sequence_always_upper=True)
is_file_found = True
if not is_file_found:
raise FileNotFoundError(f'Could not find any of these files: `{file_paths}`. Please point to a valid directory containing all .fna files for species `{spec}`.\nExpected chromosomes: {self.chromosomes[spec]}.')
if is_show_log:
print(f"Species: {spec}")
print(f"Split: {split}")
print(f"Chromosomes: {self.chromosomes[spec]}")
print(f"Loaded {len(self.fastas[spec])} FASTA files from {species_path}: {list(self.fastas[spec].keys())}")
# Set chromosome weights for sampling
if isinstance(chromosome_weights, dict):
assert len(chromosome_weights) == len(self.species), f"`chromosome_weights` must have a weight for each species. Expected {len(self.species)} weights, instead got {len(chromosome_weights)}."
self.chromosome_weights = chromosome_weights
elif chromosome_weights == 'uniform':
self.chromosome_weights = {
spec: 'uniform'
for spec in self.species
}
elif chromosome_weights == 'weighted_by_bp':
self.chromosome_weights = {
spec: 'weighted_by_bp'
for spec in self.species
}
else:
raise ValueError(f"Invalid chromosome_weights: {chromosome_weights}. Must be 'uniform', 'weighted_by_bp', or a dict of species -> chromosome weights.")
for spec, strategy_or_weights in self.chromosome_weights.items():
if isinstance(strategy_or_weights, str):
if strategy_or_weights == 'uniform':
# Uniform weights
self.chromosome_weights[spec] = [1] * len(self.chromosomes[spec])
elif strategy_or_weights == 'weighted_by_bp':
# Weight by number of base pairs in each chromosome
self.chromosome_weights[spec] = [
len(self.fastas[spec][chromosome])
for chromosome in self.chromosomes[spec]
]
self.chromosome_weights[spec] = [w / sum(self.chromosome_weights[spec]) for w in self.chromosome_weights[spec]]
else:
raise ValueError(f"Invalid chromosome_weights strategy: {strategy_or_weights}. Must be 'uniform' or 'weighted_by_bp'.")
elif isinstance(strategy_or_weights, list):
                # Check that a weight is provided for every chromosome in this split
                assert len(strategy_or_weights) == len(self.chromosomes[spec]), f"`chromosome_weights` must have a weight for each chromosome. Expected {len(self.chromosomes[spec])} weights, instead got {len(strategy_or_weights)}."
self.chromosome_weights[spec] = strategy_or_weights
else:
raise ValueError(f"Invalid chromosome_weights: {chromosome_weights}. Must be 'uniform', 'weighted_by_bp', or a dict of species -> chromosome weights.")
# Set species weights for sampling
if isinstance(species_weights, list):
assert len(species_weights) == len(self.species), f"`species_weights` must have a weight for each species. Expected {len(self.species)} weights, instead got {len(species_weights)}."
self.species_weights = species_weights
elif species_weights == 'uniform':
# Uniform weights
self.species_weights = [1] * len(self.species)
elif species_weights == 'weighted_by_bp':
# Weight by number of base pairs in each chromosome
self.species_weights = [
sum([
len(fasta)
for fasta in self.fastas[spec].values()
])
for spec in self.species
]
self.species_weights = [w / sum(self.species_weights) for w in self.species_weights]
else:
raise ValueError(f"Invalid species_weights: {species_weights}. Must be 'uniform', 'weighted_by_bp', or a dict of species -> chromosome weights.")
if is_show_log:
print(f"Species weights: {list(zip(self.species, self.species_weights))}")
print(f"Chromosome weights: {self.chromosome_weights}")
def __len__(self):
assert self.total_size is not None, "Must set the `total_size` kwarg when you initialize `SpeciesDataset` before calling `__len__`."
return self.total_size
def __getitem__(self, idx):
"""Returns a sequence of length `max_length` from a random chromosome of a random species."""
is_show_log: bool = False
# sample a random species (according to weighting)
# rand = random.Random() # maps idx -> random seed, without affecting global random state
# rand.seed(idx)
spec: str = random.choices(self.species, weights=self.species_weights, k=1)[0]
# sample a random chromosome (according to weighting)
# rand = random.Random() # maps idx -> random seed, without affecting global random state
# rand.seed(idx + 1)
chromosome = random.choices(self.chromosomes[spec], weights=self.chromosome_weights[spec], k=1)[0]
# sample a random sequence of length `self.max_length` from this chromosome
# print("****", spec, chromosome, self.fastas[spec].keys(), idx)
fasta = self.fastas[spec][chromosome][0] # idx into 0 b/c only one fasta per chromosome
chromosome_length: int = len(fasta)
# rand = random.Random() # maps idx -> random seed, without affecting global random state
# rand.seed(idx + 2)
if self.remove_tail_ends:
if self.split == 'train':
cutoff = self.cutoff_train
else:
cutoff = self.cutoff_test
            # cut off the first `cutoff` fraction of the chromosome length to remove repeats
            left = int(chromosome_length * cutoff)
            # cut off the last `cutoff` fraction of the chromosome length to remove repeats
            right = int(chromosome_length * (1 - cutoff))
else:
left = 0
right = chromosome_length - self.max_length
start: int = random.randint(left, right)
end: int = start + self.max_length
seq = str(fasta[start:min(end, right)])
# left-pad with Ns if the slice was clipped at the right boundary
seq = seq.rjust(end - start, "N")
assert len(seq) == self.max_length, f'Length of sequence ({len(seq)}) from interval ({start}, {end}) of chromosome {chromosome} (len={chromosome_length}) is not equal to `self.max_length` ({self.max_length})'
if is_show_log:
print(f"Sampled species: {spec}")
print(f"Sampled chromosome: {chromosome}")
print(f"Sampled sequence ({start}, {end}) of len={len(seq)}: {seq[:10]}...{seq[-10:]}")
assert self.tokenizer is not None, f"Tokenizer cannot be `None`."
if self.tokenizer_name == 'char':
seq = self.tokenizer(seq, add_special_tokens=False)  # no special tokens here; eos is appended below if requested
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
elif self.tokenizer_name == 'bpe':
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
) # add cls and eos token (+2)
# get input_ids
if self.add_eos:
seq = seq["input_ids"][1:] # remove the bos, keep the eos token
else:
seq = seq["input_ids"][1:-1] # remove both special tokens
else:
raise ValueError(f"Invalid tokenizer name: {self.tokenizer_name}")
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
data = seq[:-1].clone() # remove eos
if self.task == 'next_token_pred':
target = seq[1:].clone() # offset by 1, includes eos
elif self.task == 'species_classification':
target = self.species.index(spec)
else:
raise ValueError(f"Invalid task: {self.task}")
if is_show_log:
print(f"Sampled tokens of len={len(seq)}: {seq[:10]}...{seq[-10:]}")
print(f"Sampled target: {target}")
return data, target
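# Illustrative usage sketch (not from the original file): a minimal standalone version of
# the two-level weighted sampling used in __getitem__ above, first picking a species and
# then a chromosome within it. The species names, chromosomes, and weights are made up.
if __name__ == "__main__":
    import random

    species = ["human", "mouse"]
    species_weights = [0.6, 0.4]
    chromosomes = {"human": ["chr1", "chr2"], "mouse": ["chr1"]}
    chromosome_weights = {"human": [0.7, 0.3], "mouse": [1.0]}

    spec = random.choices(species, weights=species_weights, k=1)[0]
    chrom = random.choices(chromosomes[spec], weights=chromosome_weights[spec], k=1)[0]
    print(f"sampled {spec} / {chrom}")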
|
hyena-dna-main
|
src/dataloaders/datasets/species_dataset.py
|
from pathlib import Path
from pyfaidx import Fasta
import torch
"""
Just a fixed length dataset for 2 test chromosomes, to ensure the test set is the same.
"""
# helper functions
def exists(val):
return val is not None
class HG38FixedDataset(torch.utils.data.Dataset):
'''
Loop thru bed file, retrieve (chr, start, end), query fasta file for sequence.
Returns a generator that retrieves the sequence.
'''
def __init__(
self,
fasta_file,
chr_ranges, # a dict of chr: (start, end) to use for test set
max_length,
pad_max_length=None,
tokenizer=None,
add_eos=False,
rc_aug=False, # not yet implemented
):
self.max_length = max_length
self.pad_max_length = pad_max_length if pad_max_length is not None else max_length
self.tokenizer = tokenizer
self.add_eos = add_eos
# create a list of intervals from chr_ranges, from start to end of size max_length
self.intervals = self.create_fixed_intervals(chr_ranges, self.max_length)
# open fasta file
fasta_file = Path(fasta_file)
assert fasta_file.exists(), 'path to fasta file must exist'
self.seqs = Fasta(str(fasta_file), sequence_always_upper=True)
def create_fixed_intervals(self, chr_ranges, max_length):
"""
This creates a list of non-overlapping intervals of max_length, which ensures that the test set is the same every epoch.
It loops through each chr and its start / end range, and creates samples of max_length.
"""
print("creating new test set with fixed intervals of max_length...")
intervals = []
# loop thru each chr in chr_ranges, and create intervals of max_length from start to end
for chr_name, (start, end) in chr_ranges.items():
# create a list of intervals from start to end of size max_length
for i in range(start, end, max_length):
interval_end = min(i + max_length, end)
intervals.append((chr_name, i, interval_end))
return intervals
def __len__(self):
return len(self.intervals)
def replace_value(self, x, old_value, new_value):
return torch.where(x == old_value, new_value, x)
def __getitem__(self, idx):
"""Returns a sequence of specified len"""
row = self.intervals[idx]
chr_name, start, end = (row[0], row[1], row[2])
seq = str(self.seqs[chr_name][start:end])
seq = self.tokenizer(seq,
padding="max_length",
max_length=self.pad_max_length,
truncation=True,
add_special_tokens=False)  # no special tokens added by the tokenizer here
seq = seq["input_ids"] # get input_ids
# need to handle eos here
if self.add_eos:
# # remove first token
# seq = seq[1:]
# append list seems to be faster than append tensor
seq.append(self.tokenizer.sep_token_id)
# convert to tensor
seq = torch.LongTensor(seq) # hack, remove the initial cls tokens for now
# replace N token with a pad token, so we can ignore it in the loss
seq = self.replace_value(seq, 11, self.tokenizer.pad_token_id)
data = seq[:-1].clone() # remove eos
target = seq[1:].clone() # offset by 1, includes eos
return data, target
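# Illustrative sketch (not from the original file): the fixed-interval chunking done by
# create_fixed_intervals above, shown standalone. The chromosome ranges are made up.
if __name__ == "__main__":
    chr_ranges = {"chr14": (0, 2_500), "chrX": (0, 1_200)}
    max_length = 1_000
    intervals = []
    for chr_name, (start, end) in chr_ranges.items():
        for i in range(start, end, max_length):
            intervals.append((chr_name, i, min(i + max_length, end)))
    # Non-overlapping (chr, start, end) windows of at most max_length each
    print(intervals)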
|
hyena-dna-main
|
src/dataloaders/datasets/hg38_fixed_dataset.py
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import os
from collections import Counter
from collections import OrderedDict
import torch
import src.utils as utils
class Vocab(object):
def __init__(self, special=[], min_freq=0, max_size=None, lower_case=True,
delimiter=None, vocab_file=None):
self.counter = Counter()
self.special = special
self.min_freq = min_freq
self.max_size = max_size
self.lower_case = lower_case
self.delimiter = delimiter
self.vocab_file = vocab_file
def tokenize(self, line, add_eos=False, add_double_eos=False):
line = line.strip()
# convert to lower case
if self.lower_case:
line = line.lower()
# empty delimiter '' will evaluate False
if self.delimiter == '':
symbols = line
else:
symbols = line.split(self.delimiter)
if add_double_eos: # lm1b
return ['<S>'] + symbols + ['<S>']
elif add_eos:
return symbols + ['<eos>']
else:
return symbols
def count_file(self, path, verbose=False, add_eos=False):
if verbose:
print('counting file {} ...'.format(path))
assert os.path.exists(path)
sents = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos)
self.counter.update(symbols)
sents.append(symbols)
return sents
def count_sents(self, sents, verbose=False):
"""
sents : a list of sentences, each a list of tokenized symbols
"""
if verbose:
print('counting {} sents ...'.format(len(sents)))
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
self.counter.update(symbols)
def _build_from_file(self, vocab_file):
self.idx2sym = []
self.sym2idx = OrderedDict()
with open(vocab_file, 'r', encoding='utf-8') as f:
for line in f:
symb = line.strip().split()[0]
self.add_symbol(symb)
self.unk_idx = self.sym2idx['<UNK>']
def build_vocab(self):
if self.vocab_file:
print('building vocab from {}'.format(self.vocab_file))
self._build_from_file(self.vocab_file)
print('final vocab size {}'.format(len(self)))
else:
print('building vocab with min_freq={}, max_size={}'.format(
self.min_freq, self.max_size))
self.idx2sym = []
self.sym2idx = OrderedDict()
for sym in self.special:
self.add_special(sym)
for sym, cnt in self.counter.most_common(self.max_size):
if cnt < self.min_freq:
break
self.add_symbol(sym)
print('final vocab size {} from {} unique tokens'.format(
len(self), len(self.counter)))
def encode_file(self, path, ordered=False, verbose=False, add_eos=True,
add_double_eos=False):
if verbose:
print('encoding file {} ...'.format(path))
assert os.path.exists(path)
encoded = []
with open(path, 'r', encoding='utf-8') as f:
for idx, line in enumerate(f):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
symbols = self.tokenize(line, add_eos=add_eos,
add_double_eos=add_double_eos)
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def encode_sents(self, sents, ordered=False, verbose=False):
if verbose:
print('encoding {} sents ...'.format(len(sents)))
encoded = []
for idx, symbols in enumerate(sents):
if verbose and idx > 0 and idx % 500000 == 0:
print(' line {}'.format(idx))
encoded.append(self.convert_to_tensor(symbols))
if ordered:
encoded = torch.cat(encoded)
return encoded
def add_special(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
setattr(self, '{}_idx'.format(sym.strip('<>')), self.sym2idx[sym])
def add_symbol(self, sym):
if sym not in self.sym2idx:
self.idx2sym.append(sym)
self.sym2idx[sym] = len(self.idx2sym) - 1
def get_sym(self, idx):
assert 0 <= idx < len(self), 'Index {} out of range'.format(idx)
return self.idx2sym[idx]
def get_idx(self, sym):
if sym in self.sym2idx:
return self.sym2idx[sym]
else:
# print('encounter unk {}'.format(sym))
assert '<eos>' not in sym
assert hasattr(self, 'unk_idx')
return self.sym2idx.get(sym, self.unk_idx)
def get_symbols(self, indices):
return [self.get_sym(idx) for idx in indices]
def get_indices(self, symbols):
return [self.get_idx(sym) for sym in symbols]
def convert_to_tensor(self, symbols):
return torch.LongTensor(self.get_indices(symbols))
def convert_to_sent(self, indices, exclude=None):
if exclude is None:
return ' '.join([self.get_sym(idx) for idx in indices])
else:
return ' '.join([self.get_sym(idx) for idx in indices if idx not in exclude])
def __len__(self):
return len(self.idx2sym)
# Class OpenAIVocab has been adapted from
# https://github.com/cybertronai/transformer-xl/blob/master/utils/vocabulary.py
class OpenAIVocab(Vocab):
def __init__(self, max_size=None, vocab_file=None):
from transformers import GPT2Tokenizer
self.tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
self.EOT = self.tokenizer.encoder['<|endoftext|>']
self.max_size = max_size
self.vocab_file = vocab_file
pad = 8
vocab_size = len(self.tokenizer)
padded_vocab_size = (vocab_size + pad - 1) // pad * pad
for i in range(0, padded_vocab_size - vocab_size):
token = f'madeupword{i:09d}'
self.tokenizer.add_tokens([token])
def __len__(self):
return len(self.tokenizer)
def count_file(self, path, verbose=False, add_eos=False):
# TODO: train from scratch, respect self.max_size
pass
def build_vocab(self):
pass
def encode_file(self, path, ordered=False, verbose=False, add_eos=True, add_double_eos=False) -> torch.LongTensor:
cached = path + '.bpe'
if os.path.exists(cached):
return torch.load(cached)
print(f'encoding file {path} ...')
assert os.path.exists(path), f"{path} doesn't exist"
with open(path, encoding='utf-8') as f:
# Suppress warnings about length.
with open(os.devnull, "w") as devnull, contextlib.redirect_stderr(devnull):
out = torch.LongTensor(self.tokenizer.encode(f.read()) + [self.EOT])
with utils.distributed.sync_workers() as rank:
if rank == 0:
torch.save(out, cached)
return out
def tokenize(self, line, add_eos=False, add_double_eos=False):
return self.tokenizer.encode(line)
def convert_to_tensor(self, symbols):
return torch.LongTensor(symbols)
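# Illustrative usage sketch (not from the original file): building a small word-level
# Vocab from in-memory sentences and encoding them with the methods defined above.
if __name__ == "__main__":
    vocab = Vocab(special=['<eos>'], lower_case=True, delimiter=None)
    sents = [vocab.tokenize("Hello world", add_eos=True),
             vocab.tokenize("Hello again", add_eos=True)]
    vocab.count_sents(sents)
    vocab.build_vocab()                     # '<eos>' becomes vocab.eos_idx
    encoded = vocab.encode_sents(sents, ordered=True)
    print(len(vocab), encoded)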
|
hyena-dna-main
|
src/dataloaders/utils/vocabulary.py
|
"""Utilities for special optimizer hyperparameters.
group_parameters_for_optimizer is a modification of timm's optimizer logic, which is currently unused
add_optimizer_hooks is an improved version that uses this codebase's _optim dictionary
"""
import inspect
import torch.nn as nn
import hydra
def add_optimizer_hooks(
model,
bias_weight_decay=False,
normalization_weight_decay=False,
):
"""Set weight_decay=0.0 for parameters in model.no_weight_decay, for parameters with
attribute _no_weight_decay==True, for bias parameters if bias_weight_decay==False, for
normalization parameters if normalization_weight_decay==False
"""
# Separate out all parameters to those that will and won't experience regularizing weight decay
blacklist_weight_modules = (nn.Embedding, )
if not normalization_weight_decay:
blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
# Not compatible with Pytorch 1.8.1
# nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
nn.GroupNorm, nn.SyncBatchNorm,
nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
nn.LayerNorm, nn.LocalResponseNorm)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
if (not bias_weight_decay and pn.endswith('bias')) \
or getattr(p, '_no_weight_decay', False) \
or isinstance(m, blacklist_weight_modules):
setattr(p, "_optim", {"weight_decay": 0.0})
def group_parameters_for_optimizer(
model,
optimizer_cfg,
bias_weight_decay=False,
normalization_weight_decay=False,
):
"""Set weight_decay=0.0 for parameters in model.no_weight_decay, for parameters with
attribute _no_weight_decay==True, for bias parameters if bias_weight_decay==False, for
normalization parameters if normalization_weight_decay==False
"""
# Get the weight decay from the config, or from the default value of the optimizer constructor
# if it's not specified in the config.
if 'weight_decay' in optimizer_cfg:
weight_decay = optimizer_cfg.weight_decay
else:
# https://stackoverflow.com/questions/12627118/get-a-function-arguments-default-value
signature = inspect.signature(hydra.utils.get_class(optimizer_cfg._target_))
if 'weight_decay' in signature.parameters:
weight_decay = signature.parameters['weight_decay'].default
if weight_decay is inspect.Parameter.empty:
weight_decay = 0.0
else:
weight_decay = 0.0
# If none of the parameters have weight decay anyway, and there are no parameters with special
# optimization params
if weight_decay == 0.0 and not any(hasattr(p, '_optim') for p in model.parameters()):
return model.parameters()
skip = model.no_weight_decay() if hasattr(model, 'no_weight_decay') else set()
skip_keywords = (model.no_weight_decay_keywords() if hasattr(model, 'no_weight_decay_keywords')
else set())
# Adapted from https://github.com/karpathy/minGPT/blob/master/mingpt/model.py#L134
"""
This long function is unfortunately doing something very simple and is being very defensive:
We are separating out all parameters of the model into two buckets: those that will experience
weight decay for regularization and those that won't (biases, and layernorm/embedding weights).
We then return parameter groups that can be passed directly to the PyTorch optimizer.
"""
# separate out all parameters to those that will and won't experience regularizing weight decay
decay = set()
no_decay = set()
special = set()
whitelist_weight_modules = (nn.Linear, )
blacklist_weight_modules = (nn.Embedding, )
if not normalization_weight_decay:
blacklist_weight_modules += (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d,
# Not compatible with Pytorch 1.8.1
# nn.LazyBatchNorm1d, nn.LazyBatchNorm2d, nn.LazyBatchNorm3d,
nn.GroupNorm, nn.SyncBatchNorm,
nn.InstanceNorm1d, nn.InstanceNorm2d, nn.InstanceNorm3d,
nn.LayerNorm, nn.LocalResponseNorm)
for mn, m in model.named_modules():
for pn, p in m.named_parameters():
fpn = '%s.%s' % (mn, pn) if mn else pn # full param name
if not p.requires_grad:
continue # frozen weights
if hasattr(p, '_optim'):
special.add(fpn)
elif fpn in skip or any(skip_keyword in fpn for skip_keyword in skip_keywords):
no_decay.add(fpn)
elif getattr(p, '_no_weight_decay', False):
no_decay.add(fpn)
elif not bias_weight_decay and pn.endswith('bias'):
no_decay.add(fpn)
elif pn.endswith('weight') and isinstance(m, whitelist_weight_modules):
# weights of whitelist modules will be weight decayed
decay.add(fpn)
elif isinstance(m, blacklist_weight_modules):
# weights of blacklist modules will NOT be weight decayed
no_decay.add(fpn)
param_dict = {pn: p for pn, p in model.named_parameters() if p.requires_grad}
# special case the position embedding parameter in the root GPT module as not decayed
if 'pos_emb' in param_dict:
no_decay.add('pos_emb')
# In case of parameter sharing, some parameters show up in decay but are not in param_dict.keys()
decay &= param_dict.keys()
decay |= (param_dict.keys() - no_decay - special)
# validate that we considered every parameter
inter_params = decay & no_decay
union_params = decay | no_decay
assert len(inter_params) == 0, f"Parameters {str(inter_params)} made it into both decay/no_decay sets!"
assert len(param_dict.keys() - special - union_params) == 0, f"parameters {str(param_dict.keys() - union_params)} were not separated into either decay/no_decay set!"
if weight_decay == 0.0 or not no_decay:
param_groups = [{"params": [param_dict[pn] for pn in sorted(list(no_decay | decay))],
"weight_decay": weight_decay}]
else:
param_groups = [
{"params": [param_dict[pn] for pn in sorted(list(decay))], "weight_decay": weight_decay},
{"params": [param_dict[pn] for pn in sorted(list(no_decay))], "weight_decay": 0.0},
]
# Add parameters with special hyperparameters
# Unique dicts
hps = [dict(s) for s in set(frozenset(param_dict[pn]._optim.items()) for pn in special)]
for hp in hps:
params = [param_dict[pn] for pn in sorted(list(special)) if param_dict[pn]._optim == hp]
param_groups.append({"params": params, **hp})
return param_groups
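# Illustrative usage sketch (not from the original file): mark bias and normalization
# parameters with a zero-weight-decay "_optim" override via add_optimizer_hooks, then
# build optimizer parameter groups from that attribute by hand.
if __name__ == "__main__":
    import torch

    model = nn.Sequential(nn.Linear(8, 8), nn.LayerNorm(8))
    add_optimizer_hooks(model)
    decay = [p for p in model.parameters() if not hasattr(p, "_optim")]
    no_decay = [p for p in model.parameters() if hasattr(p, "_optim")]
    optimizer = torch.optim.AdamW(
        [{"params": decay, "weight_decay": 0.1},
         {"params": no_decay, "weight_decay": 0.0}],
        lr=1e-3,
    )
    print(len(decay), "decayed tensors,", len(no_decay), "non-decayed tensors")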
|
hyena-dna-main
|
src/utils/optim_groups.py
|
""" Utilities for dealing with collection objects (lists, dicts) and configs """
from typing import Sequence, Mapping, Optional, Callable
import functools
import hydra
from omegaconf import ListConfig, DictConfig
# TODO this is usually used in a pattern where it's turned into a list, so can just do that here
def is_list(x):
return isinstance(x, Sequence) and not isinstance(x, str)
def is_dict(x):
return isinstance(x, Mapping)
def to_dict(x, recursive=True):
"""Convert Sequence or Mapping object to dict
lists get converted to {0: x[0], 1: x[1], ...}
"""
if is_list(x):
x = {i: v for i, v in enumerate(x)}
if is_dict(x):
if recursive:
return {k: to_dict(v, recursive=recursive) for k, v in x.items()}
else:
return dict(x)
else:
return x
def to_list(x, recursive=False):
"""Convert an object to list.
If Sequence (e.g. list, tuple, ListConfig): convert it to a plain list
Special case: If non-recursive and not a list, wrap in list
"""
if is_list(x):
if recursive:
return [to_list(_x) for _x in x]
else:
return list(x)
else:
if recursive:
return x
else:
return [x]
def extract_attrs_from_obj(obj, *attrs):
if obj is None:
assert len(attrs) == 0
return []
return [getattr(obj, attr, None) for attr in attrs]
def auto_assign_attrs(cls, **kwargs):
for k, v in kwargs.items():
setattr(cls, k, v)
def instantiate(registry, config, *args, partial=False, wrap=None, **kwargs):
"""
registry: Dictionary mapping names to functions or target paths (e.g. {'model': 'models.SequenceModel'})
config: Dictionary with a '_name_' key indicating which element of the registry to grab, and kwargs to be passed into the target constructor
wrap: wrap the target class (e.g. ema optimizer or tasks.wrap)
*args, **kwargs: additional arguments to override the config to pass into the target constructor
"""
# Case 1: no config
if config is None:
return None
# Case 2a: string means _name_ was overloaded
if isinstance(config, str):
_name_ = None
_target_ = registry[config]
config = {}
# Case 2b: grab the desired callable from name
else:
_name_ = config.pop("_name_")
_target_ = registry[_name_]
# Retrieve the right constructor automatically based on type
if isinstance(_target_, str):
fn = hydra.utils.get_method(path=_target_)
elif isinstance(_target_, Callable):
fn = _target_
else:
raise NotImplementedError("instantiate target must be string or callable")
# Instantiate object
if wrap is not None:
fn = wrap(fn)
obj = functools.partial(fn, *args, **config, **kwargs)
# Restore _name_
if _name_ is not None:
config["_name_"] = _name_
if partial:
return obj
else:
return obj()
def get_class(registry, _name_):
return hydra.utils.get_class(path=registry[_name_])
def omegaconf_filter_keys(d, fn=None):
"""Only keep keys where fn(key) is True. Support nested DictConfig.
# TODO can make this inplace?
"""
if fn is None:
fn = lambda _: True
if is_list(d):
return ListConfig([omegaconf_filter_keys(v, fn) for v in d])
elif is_dict(d):
return DictConfig(
{k: omegaconf_filter_keys(v, fn) for k, v in d.items() if fn(k)}
)
else:
return d
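# Illustrative usage sketch (not from the original file): resolving a registry entry with
# instantiate. The registry below is made up; the real ones live in src/utils/registry.py.
if __name__ == "__main__":
    import torch
    import torch.nn as nn

    toy_registry = {"adamw": "torch.optim.AdamW"}
    params = nn.Linear(4, 4).parameters()
    # config supplies _name_ plus constructor kwargs; extra kwargs override the config
    opt = instantiate(toy_registry, {"_name_": "adamw", "lr": 1e-3}, params, weight_decay=0.01)
    print(type(opt).__name__)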
|
hyena-dna-main
|
src/utils/config.py
|
optimizer = {
"adam": "torch.optim.Adam",
"adamw": "torch.optim.AdamW",
"rmsprop": "torch.optim.RMSprop",
"sgd": "torch.optim.SGD",
"lamb": "src.utils.optim.lamb.JITLamb",
}
scheduler = {
"constant": "transformers.get_constant_schedule",
"plateau": "torch.optim.lr_scheduler.ReduceLROnPlateau",
"step": "torch.optim.lr_scheduler.StepLR",
"multistep": "torch.optim.lr_scheduler.MultiStepLR",
"cosine": "torch.optim.lr_scheduler.CosineAnnealingLR",
"constant_warmup": "transformers.get_constant_schedule_with_warmup",
"linear_warmup": "transformers.get_linear_schedule_with_warmup",
"cosine_warmup": "transformers.get_cosine_schedule_with_warmup",
"cosine_warmup_timm": "src.utils.optim.schedulers.TimmCosineLRScheduler",
}
model = {
# Backbones from this repo
"model": "src.models.sequence.SequenceModel",
"lm": "src.models.sequence.long_conv_lm.ConvLMHeadModel",
"lm_simple": "src.models.sequence.simple_lm.SimpleLMHeadModel",
"vit_b_16": "src.models.baselines.vit_all.vit_base_patch16_224",
"dna_embedding": "src.models.sequence.dna_embedding.DNAEmbeddingModel",
"bpnet": "src.models.sequence.hyena_bpnet.HyenaBPNet"
}
layer = {
"id": "src.models.sequence.base.SequenceIdentity",
"ff": "src.models.sequence.ff.FF",
"mha": "src.models.sequence.mha.MultiheadAttention",
"s4d": "src.models.sequence.ssm.s4d.S4D",
"s4_simple": "src.models.sequence.ssm.s4_simple.SimpleS4Wrapper",
"long-conv": "src.models.sequence.long_conv.LongConv",
"h3": "src.models.sequence.h3.H3",
"h3-conv": "src.models.sequence.h3_conv.H3Conv",
"hyena": "src.models.sequence.hyena.HyenaOperator",
"hyena-filter": "src.models.sequence.hyena.HyenaFilter",
"vit": "src.models.sequence.mha.VitAttention",
}
callbacks = {
"timer": "src.callbacks.timer.Timer",
"params": "src.callbacks.params.ParamsLog",
"learning_rate_monitor": "pytorch_lightning.callbacks.LearningRateMonitor",
"model_checkpoint": "pytorch_lightning.callbacks.ModelCheckpoint",
"early_stopping": "pytorch_lightning.callbacks.EarlyStopping",
"swa": "pytorch_lightning.callbacks.StochasticWeightAveraging",
"rich_model_summary": "pytorch_lightning.callbacks.RichModelSummary",
"rich_progress_bar": "pytorch_lightning.callbacks.RichProgressBar",
"progressive_resizing": "src.callbacks.progressive_resizing.ProgressiveResizing",
"seqlen_warmup": "src.callbacks.seqlen_warmup.SeqlenWarmup",
"seqlen_warmup_reload": "src.callbacks.seqlen_warmup_reload.SeqlenWarmupReload",
"gpu_affinity": "src.callbacks.gpu_affinity.GpuAffinity"
}
model_state_hook = {
'load_backbone': 'src.models.sequence.long_conv_lm.load_backbone',
}
|
hyena-dna-main
|
src/utils/registry.py
|
from .config import is_list, is_dict, to_list, to_dict, get_class, instantiate
|
hyena-dna-main
|
src/utils/__init__.py
|
import math
import numpy as np
import torch
### Bit reversal permutation
def bitreversal_po2(n):
m = int(math.log(n)/math.log(2))
perm = np.arange(n).reshape(n,1)
for i in range(m):
n1 = perm.shape[0]//2
perm = np.hstack((perm[:n1],perm[n1:]))
return perm.squeeze(0)
def bitreversal_permutation(n):
m = int(math.ceil(math.log(n)/math.log(2)))
N = 1 << m
perm = bitreversal_po2(N)
return np.extract(perm < n, perm)
def transpose_permutation(h, w):
indices = np.arange(h*w)
indices = indices.reshape((h, w))
indices = indices.T
indices = indices.reshape(h*w)
return indices
def snake_permutation(h, w):
indices = np.arange(h*w)
indices = indices.reshape((h, w))
indices[1::2, :] = indices[1::2, ::-1]
indices = indices.reshape(h*w)
return indices
def hilbert_permutation(n):
m = int(math.log2(n))
assert n == 2**m
inds = decode(list(range(n*n)), 2, m)
ind_x, ind_y = inds.T
indices = np.arange(n*n).reshape((n, n))
indices = indices[ind_x, ind_y]
return(indices)
""" Hilbert curve utilities taken from https://github.com/PrincetonLIPS/numpy-hilbert-curve """
def decode(hilberts, num_dims, num_bits):
''' Decode an array of Hilbert integers into locations in a hypercube.
This is a vectorized-ish version of the Hilbert curve implementation by John
Skilling as described in:
Skilling, J. (2004, April). Programming the Hilbert curve. In AIP Conference
Proceedings (Vol. 707, No. 1, pp. 381-387). American Institute of Physics.
Params:
-------
hilberts - An ndarray of Hilbert integers. Must be an integer dtype and
cannot have fewer bits than num_dims * num_bits.
num_dims - The dimensionality of the hypercube. Integer.
num_bits - The number of bits for each dimension. Integer.
Returns:
--------
The output is an ndarray of unsigned integers with the same shape as hilberts
but with an additional dimension of size num_dims.
'''
if num_dims*num_bits > 64:
raise ValueError(
'''
num_dims=%d and num_bits=%d for %d bits total, which can't be encoded
into a uint64. Are you sure you need that many points on your Hilbert
curve?
''' % (num_dims, num_bits, num_dims*num_bits)
)
# Handle the case where we got handed a naked integer.
hilberts = np.atleast_1d(hilberts)
# Keep around the shape for later.
orig_shape = hilberts.shape
# Treat each of the hilberts as a sequence of eight uint8.
# This treats all of the inputs as uint64 and makes things uniform.
hh_uint8 = np.reshape(hilberts.ravel().astype('>u8').view(np.uint8), (-1, 8))
# Turn these lists of uints into lists of bits and then truncate to the size
# we actually need for using Skilling's procedure.
hh_bits = np.unpackbits(hh_uint8, axis=1)[:,-num_dims*num_bits:]
# Take the sequence of bits and Gray-code it.
gray = binary2gray(hh_bits)
# There has got to be a better way to do this.
# I could index them differently, but the eventual packbits likes it this way.
gray = np.swapaxes(
np.reshape(gray, (-1, num_bits, num_dims)),
axis1=1, axis2=2,
)
# Iterate backwards through the bits.
for bit in range(num_bits-1, -1, -1):
# Iterate backwards through the dimensions.
for dim in range(num_dims-1, -1, -1):
# Identify which ones have this bit active.
mask = gray[:,dim,bit]
# Where this bit is on, invert the 0 dimension for lower bits.
gray[:,0,bit+1:] = np.logical_xor(gray[:,0,bit+1:], mask[:,np.newaxis])
# Where the bit is off, exchange the lower bits with the 0 dimension.
to_flip = np.logical_and(
np.logical_not(mask[:,np.newaxis]),
np.logical_xor(gray[:,0,bit+1:], gray[:,dim,bit+1:])
)
gray[:,dim,bit+1:] = np.logical_xor(gray[:,dim,bit+1:], to_flip)
gray[:,0,bit+1:] = np.logical_xor(gray[:,0,bit+1:], to_flip)
# Pad back out to 64 bits.
extra_dims = 64 - num_bits
padded = np.pad(gray, ((0,0), (0,0), (extra_dims,0)),
mode='constant', constant_values=0)
# Now chop these up into blocks of 8.
locs_chopped = np.reshape(padded[:,:,::-1], (-1, num_dims, 8, 8))
# Take those blocks and turn them unto uint8s.
locs_uint8 = np.squeeze(np.packbits(locs_chopped, bitorder='little', axis=3))
# Finally, treat these as uint64s.
flat_locs = locs_uint8.view(np.uint64)
# Return them in the expected shape.
return np.reshape(flat_locs, (*orig_shape, num_dims))
def right_shift(binary, k=1, axis=-1):
''' Right shift an array of binary values.
Parameters:
-----------
binary: An ndarray of binary values.
k: The number of bits to shift. Default 1.
axis: The axis along which to shift. Default -1.
Returns:
--------
Returns an ndarray with zero prepended and the ends truncated, along
whatever axis was specified.
'''
# If we're shifting the whole thing, just return zeros.
if binary.shape[axis] <= k:
return np.zeros_like(binary)
# Determine the padding pattern.
padding = [(0,0)] * len(binary.shape)
padding[axis] = (k,0)
# Determine the slicing pattern to eliminate just the last one.
slicing = [slice(None)] * len(binary.shape)
slicing[axis] = slice(None, -k)
shifted = np.pad(binary[tuple(slicing)], padding,
mode='constant', constant_values=0)
return shifted
def binary2gray(binary, axis=-1):
''' Convert an array of binary values into Gray codes.
This uses the classic X ^ (X >> 1) trick to compute the Gray code.
Parameters:
-----------
binary: An ndarray of binary values.
axis: The axis along which to compute the gray code. Default=-1.
Returns:
--------
Returns an ndarray of Gray codes.
'''
shifted = right_shift(binary, axis=axis)
# Do the X ^ (X >> 1) trick.
gray = np.logical_xor(binary, shifted)
return gray
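# Illustrative usage sketch (not from the original file): a few of the permutations above
# on tiny inputs, plus decoding the first Hilbert indices of a 2-D curve with 3 bits/dim.
if __name__ == "__main__":
    print(bitreversal_permutation(8))      # bit-reversed ordering of 0..7
    print(snake_permutation(2, 4))         # boustrophedon ordering of a 2x4 grid
    locs = decode(np.arange(8), num_dims=2, num_bits=3)
    print(locs)                            # (8, 2) array of grid coordinates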
|
hyena-dna-main
|
src/utils/permutations.py
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from contextlib import contextmanager
import torch
def init_distributed(cuda):
"""
Initializes distributed backend.
:param cuda: (bool) if True initializes nccl backend, if False initializes
gloo backend
"""
world_size = int(os.environ.get('WORLD_SIZE', 1))
distributed = (world_size > 1)
if distributed:
backend = 'nccl' if cuda else 'gloo'
torch.distributed.init_process_group(backend=backend,
init_method='env://')
assert torch.distributed.is_initialized()
return distributed
def barrier():
"""
Call torch.distributed.barrier() if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
torch.distributed.barrier()
def get_rank():
"""
Gets distributed rank or returns zero if distributed is not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
rank = torch.distributed.get_rank()
else:
rank = 0
return rank
def get_world_size():
"""
Gets total number of distributed workers or returns one if distributed is
not initialized.
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
world_size = torch.distributed.get_world_size()
else:
world_size = 1
return world_size
def all_reduce_item(value, op='sum'):
"""
All-reduces single scalar value if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = torch.tensor(value, device=device)
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor.item()
else:
ret = value
return ret
def all_reduce_tensor(value, op='sum'):
"""
All-reduces a tensor in place if distributed is in use
"""
if torch.distributed.is_available() and torch.distributed.is_initialized():
if op == 'sum' or op == 'mean':
dop = torch.distributed.ReduceOp.SUM
elif op == 'min':
dop = torch.distributed.ReduceOp.MIN
elif op == 'max':
dop = torch.distributed.ReduceOp.MAX
elif op == 'product':
dop = torch.distributed.ReduceOp.PRODUCT
else:
raise RuntimeError('Unsupported reduce op')
backend = torch.distributed.get_backend()
if backend == torch.distributed.Backend.NCCL:
device = torch.device('cuda')
elif backend == torch.distributed.Backend.GLOO:
device = torch.device('cpu')
else:
raise RuntimeError('Unsupported distributed backend')
tensor = value
torch.distributed.all_reduce(tensor, dop)
if op == 'mean':
tensor /= get_world_size()
ret = tensor
else:
ret = value
return ret
@contextmanager
def sync_workers():
"""
Yields distributed rank and synchronizes all workers on exit.
"""
rank = get_rank()
yield rank
barrier()
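# Illustrative usage sketch (not from the original file): these helpers fall back to
# single-process behaviour when torch.distributed is not initialized, so the same code
# path works with or without a distributed launcher.
if __name__ == "__main__":
    distributed = init_distributed(cuda=torch.cuda.is_available())
    mean_loss = all_reduce_item(0.5, op='mean')     # == 0.5 when world_size == 1
    with sync_workers() as rank:
        if rank == 0:
            print(f"distributed={distributed} world_size={get_world_size()} mean_loss={mean_loss}")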
|
hyena-dna-main
|
src/utils/distributed.py
|
""" Utils for the training loop. Copied from https://github.com/HazyResearch/transformers/blob/master/src/utils/utils.py """
import logging
import os
import warnings
from typing import List, Sequence
import torch.nn as nn
import pytorch_lightning as pl
import rich.syntax
import rich.tree
from omegaconf import DictConfig, OmegaConf
from pytorch_lightning.utilities import rank_zero_only
from src.utils.config import omegaconf_filter_keys
# Copied from https://docs.python.org/3/howto/logging-cookbook.html#using-a-context-manager-for-selective-logging
class LoggingContext:
def __init__(self, logger, level=None, handler=None, close=True):
self.logger = logger
self.level = level
self.handler = handler
self.close = close
def __enter__(self):
if self.level is not None:
self.old_level = self.logger.level
self.logger.setLevel(self.level)
if self.handler:
self.logger.addHandler(self.handler)
def __exit__(self, et, ev, tb):
if self.level is not None:
self.logger.setLevel(self.old_level)
if self.handler:
self.logger.removeHandler(self.handler)
if self.handler and self.close:
self.handler.close()
# implicit return of None => don't swallow exceptions
def get_logger(name=__name__, level=logging.INFO) -> logging.Logger:
"""Initializes multi-GPU-friendly python logger."""
logger = logging.getLogger(name)
logger.setLevel(level)
# this ensures all logging levels get marked with the rank zero decorator
# otherwise logs would get multiplied for each GPU process in multi-GPU setup
for level in ("debug", "info", "warning", "error", "exception", "fatal", "critical"):
setattr(logger, level, rank_zero_only(getattr(logger, level)))
return logger
def process_config(config: DictConfig) -> DictConfig: # TODO because of filter_keys, this is no longer in place
"""A couple of optional utilities, controlled by main config file:
- disabling warnings
- easier access to debug mode
- forcing debug friendly configuration
Modifies DictConfig in place.
Args:
config (DictConfig): Configuration composed by Hydra.
"""
log = get_logger()
# Filter out keys that were used just for interpolation
# config = dictconfig_filter_keys(config, lambda k: not k.startswith('__'))
config = omegaconf_filter_keys(config, lambda k: not k.startswith('__'))
# enable adding new keys to config
OmegaConf.set_struct(config, False)
# disable python warnings if <config.ignore_warnings=True>
if config.get("ignore_warnings"):
log.info("Disabling python warnings! <config.ignore_warnings=True>")
warnings.filterwarnings("ignore")
if config.get("debug"):
log.info("Running in debug mode! <config.debug=True>")
config.trainer.fast_dev_run = True
# force debugger friendly configuration
log.info("Forcing debugger friendly configuration! <config.trainer.fast_dev_run=True>")
# Debuggers don't like GPUs or multiprocessing
if config.trainer.get("gpus"):
config.trainer.gpus = 0
if config.loader.get("pin_memory"):
config.loader.pin_memory = False
if config.loader.get("num_workers"):
config.loader.num_workers = 0
# disable adding new keys to config
# OmegaConf.set_struct(config, True) # [21-09-17 AG] I need this for .pop(_name_) pattern among other things
return config
@rank_zero_only
def print_config(
config: DictConfig,
resolve: bool = True,
save_cfg=True,
) -> None:
"""Prints content of DictConfig using Rich library and its tree structure.
Args:
config (DictConfig): Configuration composed by Hydra.
save_cfg (bool, optional): Whether to also write the rendered config tree to config_tree.txt.
resolve (bool, optional): Whether to resolve reference fields of DictConfig.
"""
style = "dim"
tree = rich.tree.Tree("CONFIG", style=style, guide_style=style)
fields = config.keys()
for field in fields:
branch = tree.add(field, style=style, guide_style=style)
config_section = config.get(field)
branch_content = str(config_section)
if isinstance(config_section, DictConfig):
branch_content = OmegaConf.to_yaml(config_section, resolve=resolve)
branch.add(rich.syntax.Syntax(branch_content, "yaml"))
rich.print(tree)
if save_cfg:
with open("config_tree.txt", "w") as fp:
rich.print(tree, file=fp)
def log_optimizer(logger, optimizer, keys):
""" Log values of particular keys from the optimizer's param groups """
keys = sorted(keys)
for i, g in enumerate(optimizer.param_groups):
group_hps = {k: g.get(k, None) for k in keys}
logger.info(' | '.join([
f"Optimizer group {i}",
f"{len(g['params'])} tensors",
] + [f"{k} {v}" for k, v in group_hps.items()]))
class OptimModule(nn.Module):
""" Interface for Module that allows registering buffers/parameters with configurable optimizer hyperparameters """
def register(self, name, tensor, lr=None, wd=0.0):
"""Register a tensor with a configurable learning rate and 0 weight decay"""
if lr == 0.0:
self.register_buffer(name, tensor)
else:
self.register_parameter(name, nn.Parameter(tensor))
optim = {}
if lr is not None: optim["lr"] = lr
if wd is not None: optim["weight_decay"] = wd
setattr(getattr(self, name), "_optim", optim)
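# Illustrative usage sketch (not from the original file): an OptimModule subclass that
# registers a frozen buffer (lr == 0.0) and a parameter carrying its own lr override.
if __name__ == "__main__":
    import torch

    class ToyFilter(OptimModule):
        def __init__(self):
            super().__init__()
            self.register("t", torch.linspace(0, 1, 16), lr=0.0)      # plain buffer
            self.register("decay", torch.ones(16), lr=1e-4, wd=0.0)   # parameter with custom hyperparameters

    m = ToyFilter()
    print(m.decay._optim)   # e.g. {'lr': 0.0001, 'weight_decay': 0.0}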
|
hyena-dna-main
|
src/utils/train.py
|
import torch
import torch.utils.benchmark as benchmark
def _get_gpu_mem(synchronize=True, empty_cache=True):
return torch.cuda.memory_allocated() / (
(2**20) * 1000
), torch.cuda.memory_cached() / ((2**20) * 1000)
def _generate_mem_hook(handle_ref, mem, idx, hook_type, exp):
def hook(self, *args):
if len(mem) == 0 or mem[-1]["exp"] != exp:
call_idx = 0
else:
call_idx = mem[-1]["call_idx"] + 1
mem_all, mem_cached = _get_gpu_mem()
torch.cuda.synchronize()
mem.append(
{
"layer_idx": idx,
"call_idx": call_idx,
"layer_type": type(self).__name__,
"exp": exp,
"hook_type": hook_type,
"mem_all": mem_all,
"mem_cached": mem_cached,
}
)
return hook
def _add_memory_hooks(idx, model, mem_log, exp, hr):
h = model.register_forward_pre_hook(
_generate_mem_hook(hr, mem_log, idx, "pre", exp)
)
hr.append(h)
h = model.register_forward_hook(_generate_mem_hook(hr, mem_log, idx, "fwd", exp))
hr.append(h)
h = model.register_backward_hook(_generate_mem_hook(hr, mem_log, idx, "bwd", exp))
hr.append(h)
def log_memory(model, inp, mem_log=None, exp=None):
mem_log = mem_log or []
exp = exp or f"exp_{len(mem_log)}"
hr = []
for idx, module in enumerate(model.modules()):
_add_memory_hooks(idx, module, mem_log, exp, hr)
out = model(inp)
if type(out) == tuple:
out = out[0].logits
loss = out.sum()
loss.backward()
[h.remove() for h in hr]
return mem_log
def benchmark_forward(
fn, *inputs, min_run_time=0.2, repeats=10, desc="", verbose=True, **kwinputs
):
"""Use Pytorch Benchmark on the forward pass of an arbitrary function."""
if verbose:
print(desc, "- Forward pass")
t = benchmark.Timer(
stmt="fn(*inputs, **kwinputs)",
globals={"fn": fn, "inputs": inputs, "kwinputs": kwinputs},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
def benchmark_memory(fn, *inputs, desc="", verbose=True, **kwinputs):
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
torch.cuda.synchronize()
fn(*inputs, **kwinputs)
torch.cuda.synchronize()
mem = torch.cuda.max_memory_allocated() / ((2**20) * 1000)
if verbose:
print(f"{desc} max memory: {mem}GB")
torch.cuda.empty_cache()
return mem
def benchmark_memory_bwd(fn, *inputs, desc="", verbose=True, **kwinputs):
torch.cuda.empty_cache()
torch.cuda.reset_peak_memory_stats()
for input in inputs:
input = input.requires_grad_(True)
torch.cuda.synchronize()
y = fn(*inputs, **kwinputs)
y.sum().backward()
torch.cuda.synchronize()
mem = torch.cuda.max_memory_allocated() / ((2**20) * 1000)
if verbose:
print(f"{desc} max memory: {mem}GB")
torch.cuda.empty_cache()
return mem
def benchmark_backward(
fn, *inputs, grad=None, repeats=10, desc="", verbose=True, **kwinputs
):
"""Use Pytorch Benchmark on the backward pass of an arbitrary function."""
if verbose:
print(desc, "- Backward pass")
y = fn(*inputs, **kwinputs)
if not hasattr(y, "shape"):
y = y[0]
if grad is None:
grad = torch.randn_like(y)
else:
if grad.shape != y.shape:
raise RuntimeError("Grad shape does not match output shape")
t = benchmark.Timer(
stmt="y.backward(grad, retain_graph=True)",
globals={"y": y, "grad": grad},
num_threads=torch.get_num_threads(),
)
m = t.timeit(repeats)
if verbose:
print(m)
return t, m
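# Illustrative usage sketch (not from the original file): timing a small matmul on CPU
# with benchmark_forward; the memory helpers above additionally require a CUDA device.
if __name__ == "__main__":
    a = torch.randn(256, 256)
    b = torch.randn(256, 256)
    benchmark_forward(torch.matmul, a, b, repeats=5, desc="matmul 256x256", verbose=True)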
|
hyena-dna-main
|
src/utils/profiling.py
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# MIT License
#
# Copyright (c) 2019 cybertronai
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Lamb optimizer."""
import torch
from torch.optim import Optimizer
class Lamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-6)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super(Lamb, self).__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
# Decay the first and second moment running average coefficient
# m_t
exp_avg.mul_(beta1).add_(1 - beta1, grad)
# v_t
exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad)
# Paper v3 does not use debiasing.
# bias_correction1 = 1 - beta1 ** state['step']
# bias_correction2 = 1 - beta2 ** state['step']
# Apply bias to lr to avoid broadcast.
step_size = group['lr'] # * math.sqrt(bias_correction2) / bias_correction1
weight_norm = p.data.norm(p=2).clamp_(0, 10)
adam_step = exp_avg / exp_avg_sq.sqrt().add(group['eps'])
if group['weight_decay'] != 0:
adam_step.add_(group['weight_decay'], p.data)
adam_norm = adam_step.norm(p=2)
if weight_norm == 0.0 or adam_norm == 0.0:
trust_ratio = 1
else:
trust_ratio = weight_norm / (adam_norm + group['eps'])
state['weight_norm'] = weight_norm
state['adam_norm'] = adam_norm
state['trust_ratio'] = trust_ratio
if self.adam:
trust_ratio = 1
p.data.add_(-step_size * trust_ratio, adam_step)
return loss
@torch.jit.script
def lamb_kernel(param, grad, exp_avg, exp_avg_sq, beta1: float,
beta2: float, step_size: float, eps: float, weight_decay: float):
exp_avg = exp_avg * beta1 + (1 - beta1) * grad
exp_avg_sq = exp_avg_sq * beta2 + (1 - beta2) * (grad * grad)
adam_step = exp_avg / (exp_avg_sq.sqrt() + eps)
adam_step = adam_step + weight_decay * param
weight_norm = param.norm(p=2).clamp(0, 10)
adam_norm = adam_step.norm(p=2)
trust_ratio = weight_norm / (adam_norm + eps)
trust_ratio = (weight_norm == 0.0) * 1.0 + (weight_norm != 0.0) * trust_ratio
trust_ratio = (adam_norm == 0.0) * 1.0 + (adam_norm != 0.0) * trust_ratio
trust_ratio = trust_ratio.float()
param = param - step_size * trust_ratio * adam_step
return param, exp_avg, exp_avg_sq
class JITLamb(Optimizer):
r"""Implements Lamb algorithm.
It has been proposed in `Large Batch Optimization for Deep Learning: Training BERT in 76 minutes`_.
Arguments:
params (iterable): iterable of parameters to optimize or dicts defining
parameter groups
lr (float, optional): learning rate (default: 1e-3)
betas (Tuple[float, float], optional): coefficients used for computing
running averages of gradient and its square (default: (0.9, 0.999))
eps (float, optional): term added to the denominator to improve
numerical stability (default: 1e-6)
weight_decay (float, optional): weight decay (L2 penalty) (default: 0)
adam (bool, optional): always use trust ratio = 1, which turns this into
Adam. Useful for comparison purposes.
.. _Large Batch Optimization for Deep Learning: Training BERT in 76 minutes:
https://arxiv.org/abs/1904.00962
"""
def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-6,
weight_decay=0, adam=False):
if not 0.0 <= lr:
raise ValueError("Invalid learning rate: {}".format(lr))
if not 0.0 <= eps:
raise ValueError("Invalid epsilon value: {}".format(eps))
if not 0.0 <= betas[0] < 1.0:
raise ValueError("Invalid beta parameter at index 0: {}".format(betas[0]))
if not 0.0 <= betas[1] < 1.0:
raise ValueError("Invalid beta parameter at index 1: {}".format(betas[1]))
defaults = dict(lr=lr, betas=betas, eps=eps,
weight_decay=weight_decay)
self.adam = adam
super().__init__(params, defaults)
def step(self, closure=None):
"""Performs a single optimization step.
Arguments:
closure (callable, optional): A closure that reevaluates the model
and returns the loss.
"""
loss = None
if closure is not None:
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad.data
if grad.is_sparse:
raise RuntimeError('Lamb does not support sparse gradients.')
state = self.state[p]
# State initialization
if len(state) == 0:
state['step'] = 0
# Exponential moving average of gradient values
state['exp_avg'] = torch.zeros_like(p.data)
# Exponential moving average of squared gradient values
state['exp_avg_sq'] = torch.zeros_like(p.data)
exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq']
beta1, beta2 = group['betas']
state['step'] += 1
step_size = group['lr']
param, exp_avg, exp_avg_sq = lamb_kernel(p.data, grad, exp_avg,
exp_avg_sq, beta1,
beta2, step_size,
group['eps'],
group['weight_decay'],
)
state['exp_avg'] = exp_avg
state['exp_avg_sq'] = exp_avg_sq
p.data = param
return loss
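# Illustrative usage sketch (not from the original file): a single optimization step with
# the JIT variant on a tiny linear model; data and shapes are arbitrary.
if __name__ == "__main__":
    model = torch.nn.Linear(10, 1)
    opt = JITLamb(model.parameters(), lr=1e-3, weight_decay=0.01)
    loss = model(torch.randn(4, 10)).pow(2).mean()
    loss.backward()
    opt.step()
    print("step done, loss was", float(loss))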
|
hyena-dna-main
|
src/utils/optim/lamb.py
|
"""Custom learning rate schedulers"""
import math
import warnings
import torch
from timm.scheduler import CosineLRScheduler
# https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html
class CosineWarmup(torch.optim.lr_scheduler.CosineAnnealingLR):
def __init__(self, optimizer, T_max, eta_min=0, warmup_step=0, **kwargs):
self.warmup_step = warmup_step
super().__init__(optimizer, T_max - warmup_step, eta_min, **kwargs)
# Copied from CosineAnnealingLR, but adding warmup and changing self.last_epoch to
# self.last_epoch - self.warmup_step.
def get_lr(self):
if not self._get_lr_called_within_step:
warnings.warn("To get the last learning rate computed by the scheduler, "
"please use `get_last_lr()`.", UserWarning)
if self.last_epoch == self.warmup_step: # also covers the case where both are 0
return self.base_lrs
elif self.last_epoch < self.warmup_step:
return [base_lr * (self.last_epoch + 1) / self.warmup_step for base_lr in self.base_lrs]
elif (self.last_epoch - self.warmup_step - 1 - self.T_max) % (2 * self.T_max) == 0:
return [group['lr'] + (base_lr - self.eta_min) *
(1 - math.cos(math.pi / self.T_max)) / 2
for base_lr, group in zip(self.base_lrs, self.optimizer.param_groups)]
return [(1 + math.cos(math.pi * (self.last_epoch - self.warmup_step) / self.T_max)) /
(1 + math.cos(math.pi * (self.last_epoch - self.warmup_step - 1) / self.T_max)) *
(group['lr'] - self.eta_min) + self.eta_min
for group in self.optimizer.param_groups]
_get_closed_form_lr = None
def InvSqrt(optimizer, warmup_step):
""" Originally used for Transformer (in Attention is all you need)
"""
def lr_lambda(step):
# return a multiplier instead of a learning rate
if step == warmup_step: # also covers the case where both are 0
return 1.
else:
return 1. / (step ** 0.5) if step > warmup_step else (step + 1) / (warmup_step ** 1.5)
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
def Constant(optimizer, warmup_step):
def lr_lambda(step):
if step == warmup_step: # also covers the case where both are 0
return 1.
else:
return 1. if step > warmup_step else (step + 1) / warmup_step
return torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda=lr_lambda)
class TimmCosineLRScheduler(CosineLRScheduler, torch.optim.lr_scheduler._LRScheduler):
""" Wrap timm.scheduler.CosineLRScheduler so we can call scheduler.step() without passing in epoch.
It supports resuming as well.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._last_epoch = -1
self.step(epoch=0)
def step(self, epoch=None):
if epoch is None:
self._last_epoch += 1
else:
self._last_epoch = epoch
# We call either step or step_update, depending on whether we're using the scheduler every
# epoch or every step.
# Otherwise, lightning will always call step (i.e., meant for each epoch), and if we set
# scheduler interval to "step", then the learning rate update will be wrong.
if self.t_in_epochs:
super().step(epoch=self._last_epoch)
else:
super().step_update(num_updates=self._last_epoch)
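# Illustrative usage sketch (not from the original file): the inverse-sqrt warmup schedule
# applied to a dummy optimizer, printing the learning rate over the first few steps.
if __name__ == "__main__":
    param = torch.nn.Parameter(torch.zeros(1))
    opt = torch.optim.SGD([param], lr=1.0)
    sched = InvSqrt(opt, warmup_step=4)
    for step in range(8):
        opt.step()
        sched.step()
        print(step, round(opt.param_groups[0]["lr"], 4))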
|
hyena-dna-main
|
src/utils/optim/schedulers.py
|
""" Implementations of different types of residual functions. """
import torch
from torch import nn
class Residual(nn.Module):
""" Residual connection with constant affine weights. Can simulate standard residual, no residual, and "constant gates". """
def __init__(self, i_layer, d_input, d_model, alpha=1.0, beta=1.0):
# print("ConstantResidual extra kwargs", kwargs)
super().__init__()
assert (d_input == d_model) or alpha == 0.0
self.i_layer = i_layer
self.d_input = d_input
self.d_model = d_model
self.alpha = alpha
self.beta = beta
@property
def d_output(self):
return self.d_model
def forward(self, x, y, transposed): # TODO documentation of transposed
y = self.beta*y if self.beta != 1.0 else y
return self.alpha * x + y if self.alpha else y
class Affine(Residual):
""" Residual connection with learnable scalar multipliers on the main branch
scalar: Single scalar multiplier, or one per dimension
gamma: the multiplier is initialized to beta * i_layer**(-gamma)
"""
def __init__(self, *args, scalar=True, gamma=0.0, **kwargs):
# print("ConstantResidual extra kwargs", kwargs)
super().__init__(*args, **kwargs)
self.scalar = scalar
self.gamma = gamma
c = self.beta * self.i_layer ** (-self.gamma)
d = 1 if self.scalar else self.d_input
self.affine = nn.Parameter(c * torch.ones(d))
def forward(self, x, y, transposed): # TODO documentation of transposed
c = self.affine
if transposed: c = c.unsqueeze(-1)
return self.alpha * x + c * y
class Feedforward(Residual):
def __init__(self, *args):
# print("Feedforward extra kwargs", kwargs)
super().__init__(*args, alpha=0.0, beta=1.0)
class Highway(Residual):
def __init__(self, *args, scaling_correction=False, elemwise=False):
super().__init__(*args)
self.scaling_correction = 1.732 if scaling_correction else 1.0 # TODO
self.elemwise = elemwise
self.Wx = nn.Linear(self.d_input, self.d_input)
if self.elemwise:
self.Wy = nn.Parameter(torch.randn(self.d_input))
else:
self.Wy = nn.Linear(self.d_input, self.d_input)
def forward(self, x, y, transposed=False): # TODO handle this case
if self.elemwise:
y = self.Wy * y
else:
y = self.Wy(y)
r = torch.sigmoid(self.Wx(x) + y)
z = self.scaling_correction * (1.-r) * x + r * y
return z
class DecayResidual(Residual):
""" Residual connection that can decay the linear combination depending on depth. """
def __init__(self, *args, power=0.5, l2=True):
# print("DecayResidual extra kwargs", kwargs)
super().__init__(*args)
self.power = power
self.l2 = l2
def forward(self, x, y, transposed):
beta = self.i_layer ** (-self.power)
if self.l2:
alpha = (1. - beta**2)**0.5
else:
alpha = 1. - beta
return alpha * x + beta * y
registry = {
'F': Feedforward,
'N': Feedforward,
'R': Residual,
'H': Highway,
'D': DecayResidual,
'A': Affine,
'none': Feedforward,
'ff': Feedforward,
'feedforward': Feedforward,
'residual': Residual,
'highway': Highway,
'decay': DecayResidual,
'affine': Affine,
}
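# Illustrative usage sketch (not from the original file): picking residual variants from
# the registry above and applying them to a skip input x and a branch output y.
if __name__ == "__main__":
    x = torch.randn(2, 8, 16)    # (batch, length, d_model)
    y = torch.randn(2, 8, 16)
    plain = registry['R'](i_layer=1, d_input=16, d_model=16)
    decay = registry['D'](1, 16, 16, power=0.5)
    print(plain(x, y, transposed=False).shape, decay(x, y, transposed=False).shape)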
|
hyena-dna-main
|
src/models/nn/residual.py
|
# Copyright (c) 2019-2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import List, Optional
import functools
import torch
import torch.nn as nn
import torch.nn.functional as F
class OptionalParameterList(nn.ParameterList):
def extra_repr(self):
child_lines = []
for k, p in self._parameters.items():
if p is not None:
size_str = 'x'.join(str(size) for size in p.size())
device_str = '' if not p.is_cuda else ' (GPU {})'.format(p.get_device())
parastr = 'Parameter containing: [{} of size {}{}]'.format(
torch.typename(p), size_str, device_str)
child_lines.append(' (' + str(k) + '): ' + parastr)
tmpstr = '\n'.join(child_lines)
return tmpstr
class ProjectedAdaptiveLogSoftmax(nn.Module):
def __init__(self, n_token, d_embed, d_proj, cutoffs, div_val=1,
tie_projs=None, out_layers_weights=None, out_projs=None,
keep_order=False,
bias_scale=0.0,
dropout=0.0,
):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.d_proj = d_proj
self.cutoffs = list(cutoffs) + [n_token]
self.cutoff_ends = [0] + self.cutoffs
self.div_val = div_val
self.shortlist_size = self.cutoffs[0]
self.n_clusters = len(self.cutoffs) - 1
self.head_size = self.shortlist_size + self.n_clusters
# bake the first False into the definition, just as [0] is built into the cutoffs
if tie_projs is None: tie_projs = []
elif isinstance(tie_projs, bool): tie_projs = [tie_projs] * len(cutoffs)
else: tie_projs = list(tie_projs)
tie_projs = [False] + tie_projs
self.tie_projs = tie_projs
if self.n_clusters > 0:
self.cluster_weight = nn.Parameter(torch.zeros(self.n_clusters, self.d_embed))
self.cluster_bias = nn.Parameter(torch.zeros(self.n_clusters))
if not out_layers_weights:
self.out_layers_weights = nn.ParameterList()
else:
self.out_layers_weights = out_layers_weights
self.out_layers_biases = nn.ParameterList()
self.shared_out_projs = out_projs
self.out_projs = OptionalParameterList()
self.dropout = dropout
self.drop = nn.Dropout(dropout)
if div_val == 1:
if d_proj != d_embed:
for i in range(len(self.cutoffs)):
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_embed))
)
else:
self.out_projs.append(None)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(n_token))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(n_token, d_embed))
)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i+1]
d_emb_i = d_embed // (div_val ** i)
if tie_projs[i]:
self.out_projs.append(None)
else:
self.out_projs.append(
nn.Parameter(torch.zeros(d_proj, d_emb_i))
)
self.out_layers_biases.append(
nn.Parameter(torch.zeros(r_idx - l_idx))
)
if not out_layers_weights:
self.out_layers_weights.append(
nn.Parameter(torch.zeros(r_idx - l_idx, d_emb_i))
)
for bias in self.out_layers_biases:
bound = bias_scale * d_proj ** -.5
nn.init.uniform_(bias, -bound, bound)
self.keep_order = keep_order
def _compute_logit(self, hidden, weight, bias, proj):
if proj is None:
logit = F.linear(hidden, weight, bias=bias)
else:
if self.dropout > 0.0:
logit = hidden @ proj
logit = self.drop(logit)
logit = logit @ weight.t()
else:
logit = torch.einsum('bd,de,ev->bv', (hidden, proj, weight.t()))
if bias is not None:
logit = logit + bias
return logit
def get_out_proj(self, i):
if self.tie_projs[i]:
if len(self.shared_out_projs) == 0:
return None
elif len(self.shared_out_projs) == 1:
return self.shared_out_projs[0]
else:
return self.shared_out_projs[i]
else:
return self.out_projs[i]
def forward(self, hidden, target, keep_order=False, key_padding_mask=None, *args, **kwargs):
# [21-09-15 AG]: TODO may need to handle key_padding_mask
'''
hidden :: [len*bsz x d_proj]
target :: [len*bsz]
'''
hidden = hidden.reshape(-1, hidden.size(-1))
target = target.reshape(-1)
if hidden.size(0) != target.size(0):
print(hidden.shape, target.shape)
raise RuntimeError('Input and target should have the same size '
'in the batch dimension.')
if self.n_clusters == 0:
logit = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
nll = -F.log_softmax(logit, dim=-1) \
.gather(1, target.unsqueeze(1)).squeeze(1)
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
nll = torch.zeros_like(target, dtype=hidden.dtype, device=hidden.device)
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
mask_i = (target >= l_idx) & (target < r_idx)
indices_i = mask_i.nonzero(as_tuple=False).squeeze()
if indices_i.numel() == 0:
continue
target_i = target.index_select(0, indices_i) - l_idx
head_logprob_i = head_logprob.index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i.gather(1, target_i[:, None]).squeeze(1)
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden.index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
# First term accounts for cluster probabilities
logprob_i = head_logprob_i[:, -i] \
+ tail_logprob_i.gather(1, target_i[:, None]).squeeze(1)
if self.keep_order or keep_order:
nll.index_copy_(0, indices_i, -logprob_i)
else:
nll[offset:offset+logprob_i.size(0)].copy_(-logprob_i)
offset += logprob_i.size(0) # TODO: likely a bug in the original implementation; this update should also run in the empty-cluster `continue` case above
return nll.mean() # TODO maybe cases for length or padding_mask
def compute_logits(self, hidden):
"""Compute full vector of logits
Adapted from https://github.com/kimiyoung/transformer-xl/issues/88
"""
hidden = hidden.reshape(-1, hidden.size(-1))
if self.n_clusters == 0:
logits = self._compute_logit(hidden, self.out_layers_weights[0],
self.out_layers_biases[0], self.get_out_proj(0))
return logits
else:
# construct weights and biases
weights, biases = [], []
for i in range(len(self.cutoffs)):
if self.div_val == 1:
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
weight_i = self.out_layers_weights[0][l_idx:r_idx]
bias_i = self.out_layers_biases[0][l_idx:r_idx]
else:
weight_i = self.out_layers_weights[i]
bias_i = self.out_layers_biases[i]
if i == 0:
weight_i = torch.cat(
[weight_i, self.cluster_weight], dim=0)
bias_i = torch.cat(
[bias_i, self.cluster_bias], dim=0)
weights.append(weight_i)
biases.append(bias_i)
head_weight, head_bias, head_proj = weights[0], biases[0], self.get_out_proj(0)
head_logit = self._compute_logit(hidden, head_weight, head_bias, head_proj)
head_logprob = F.log_softmax(head_logit, dim=1)
out_full_logps = [head_logprob[:, :self.cutoffs[0]]]
offset = 0
cutoff_values = [0] + self.cutoffs
for i in range(1, len(cutoff_values) - 1):
l_idx, r_idx = cutoff_values[i], cutoff_values[i + 1]
head_logprob_i = head_logprob # .index_select(0, indices_i)
if i == 0:
logprob_i = head_logprob_i
else:
weight_i, bias_i, proj_i = weights[i], biases[i], self.get_out_proj(i)
hidden_i = hidden # .index_select(0, indices_i)
tail_logit_i = self._compute_logit(hidden_i, weight_i, bias_i, proj_i)
tail_logprob_i = F.log_softmax(tail_logit_i, dim=1)
logprob_i = head_logprob_i[:, -i].view(-1, 1) + tail_logprob_i
offset += logprob_i.size(0)
out_full_logps.append(logprob_i)
out_full_logps = torch.cat(out_full_logps, dim = 1)
# print(torch.sum(out_full_ps), out_full_ps.shape)
return out_full_logps
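# Illustrative usage sketch: the criterion splits the vocabulary at `cutoffs` into
# a head shortlist plus tail clusters and returns the mean NLL over the flattened
# len*bsz tokens. All sizes and token ids below are made-up assumptions; tie_projs
# is passed explicitly here because the default (None) only covers the head and is
# indexed once per cutoff when there is more than one cluster.
def _demo_projected_adaptive_logsoftmax():
    n_token, d_embed, d_proj = 1000, 64, 32
    crit = ProjectedAdaptiveLogSoftmax(n_token, d_embed, d_proj,
                                       cutoffs=[200, 600], div_val=1,
                                       tie_projs=False)
    hidden = torch.randn(12, d_proj)                      # [len*bsz, d_proj]
    target = torch.tensor([10, 50, 150, 0, 5,             # head bucket [0, 200)
                           250, 300, 450,                 # tail cluster [200, 600)
                           700, 800, 950, 999])           # tail cluster [600, 1000)
    loss = crit(hidden, target)                           # scalar mean NLL
    full_logprobs = crit.compute_logits(hidden)           # [12, n_token] log-probabilities
    return loss, full_logprobs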
class AdaptiveEmbedding(nn.Module):
""" Copy of transformers.AdaptiveEmbedding that works with fp16 by replacing the index_put_ operation
Initialization has been fixed for the case when d_proj = d_embed
"""
def __init__(self, n_token, d_embed, d_proj, cutoffs : List[int], div_val=1, init_scale=1.0, sample_softmax=False, dropout=0.0):
super().__init__()
self.n_token = n_token
self.d_embed = d_embed
self.cutoffs = list(cutoffs) + [n_token]
self.div_val = div_val
self.d_proj = d_proj
self.drop = nn.Dropout(dropout) if dropout > 0.0 else nn.Identity()
self.emb_scale = d_proj ** 0.5
self.cutoff_ends = [0] + self.cutoffs
self.emb_layers = nn.ModuleList()
self.emb_projs = nn.ParameterList()
if div_val == 1:
self.emb_layers.append(nn.Embedding(n_token, d_embed, sparse=sample_softmax > 0))
_init_embed(self.emb_layers[-1].weight, d_embed, init_scale)
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_embed ** -.5)
if d_proj != d_embed: # TODO
# self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_embed)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
else:
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
d_emb_i = d_embed // (div_val ** i)
self.emb_layers.append(nn.Embedding(r_idx - l_idx, d_emb_i))
# torch.nn.init.normal_(self.emb_layers[-1].weight, mean=0, std=init_scale * d_emb_i ** -.5)
_init_embed(self.emb_layers[-1].weight, d_emb_i, init_scale)
self.emb_projs.append(nn.Parameter(torch.FloatTensor(d_proj, d_emb_i)))
# torch.nn.init.normal_(self.emb_projs[-1], mean=0, std=init_scale * 1./self.emb_scale)
_init_proj(self.emb_projs[-1], d_proj, init_scale)
def forward(self, inp):
if self.div_val == 1:
embed = self.emb_layers[0](inp)
embed = self.drop(embed)
if self.d_proj != self.d_embed:
embed = F.linear(embed, self.emb_projs[0])
else:
param = next(self.parameters())
inp_flat = inp.reshape(-1)
# Changes from original impl
# emb_flat = torch.zeros([inp_flat.size(0), self.d_proj], dtype=param.dtype, device=param.device)
embeddings = []
indices = torch.zeros_like(inp_flat) # empty should work as long as cutoffs[-1] > max token
_total_tokens = 0
# emb_flat = inp.new_zeros(inp_flat.size(0), self.d_proj)
for i in range(len(self.cutoffs)):
l_idx, r_idx = self.cutoff_ends[i], self.cutoff_ends[i + 1]
mask_i = (inp_flat >= l_idx) & (inp_flat < r_idx)
indices_i = mask_i.nonzero().squeeze(-1) # shape (_tokens,)
_tokens = indices_i.numel()
if _tokens == 0:
continue
inp_i = inp_flat.index_select(0, indices_i) - l_idx
emb_i = self.emb_layers[i](inp_i)
emb_i = self.drop(emb_i)
emb_i = F.linear(emb_i, self.emb_projs[i])
# Changes
embeddings.append(emb_i)
indices.index_put_(
(indices_i,),
torch.arange(_tokens, device=inp.device) + _total_tokens
)
_total_tokens += _tokens
# emb_flat.index_copy_(0, indices_i, emb_i)
embeddings = torch.cat(embeddings, dim=0)
emb_flat = embeddings[indices]
embed_shape = inp.size() + (self.d_proj,)
embed = emb_flat.view(embed_shape)
embed.mul_(self.emb_scale)
# embed.div_(self.emb_scale)
return embed
def _init_weight(weight, d : int, init_scale : Optional[float], default=None):
assert init_scale or default
if init_scale is None:
std = default
else:
std = init_scale * (d ** -0.5)
nn.init.normal_(weight, mean=0, std=std)
_init_embed = functools.partial(_init_weight, default=0.02)
_init_proj = functools.partial(_init_weight, default=0.01)
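# Illustrative usage sketch: AdaptiveEmbedding mirrors the adaptive-softmax
# partition on the input side; with div_val > 1 each tail cluster gets a narrower
# embedding (d_embed // div_val**i) that is projected back up to d_proj. The
# vocabulary size, cutoffs, and token ids below are made-up assumptions only.
def _demo_adaptive_embedding():
    emb = AdaptiveEmbedding(n_token=1000, d_embed=64, d_proj=64,
                            cutoffs=[200, 600], div_val=2)
    tokens = torch.tensor([[3, 150, 250, 999],
                           [0, 700, 450, 601]])  # [bsz, len] token ids
    return emb(tokens)                           # [2, 4, 64], scaled by d_proj ** 0.5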
|
hyena-dna-main
|
src/models/nn/adaptive_softmax.py
|