# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths.cpu(), batch_first=True)  # lengths must live on CPU in recent PyTorch
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
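# Note on the helpers above (not part of the original comments): pack_wrapper applies
# `module` (in practice att_embed) only to the valid, unpadded region features.
# sort_pack_padded_sequence sorts the batch by length, as pack_padded_sequence requires,
# and returns the inverse permutation; pad_unsort_packed_sequence re-pads the output and
# restores the original batch order. Inferred shapes: att_feats is (B, L, att_feat_size),
# att_masks is (B, L) with 1s marking real regions, and the result is (B, L, rnn_size),
# with padded positions filled with zeros by pad_packed_sequence.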
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
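# p_att_feats is the ctx2att projection of the embedded region features; computing it once
# here means the per-step Attention module does not have to re-project att_feats at every
# timestep. Note that this base implementation keeps the original AttModel signature
# (fc_feats, att_feats, att_masks), while _forward/_sample below call _prepare_feature with
# trace_feats/box_feats as well, so the trace-aware subclasses presumably override both
# clip_att and _prepare_feature.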
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
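# Scheduled sampling (Bengio et al., 2015): with probability ss_prob, per example and per
# step, the input token `it` is drawn from the model's own previous softmax distribution
# instead of the ground-truth word, gradually exposing the model to its own predictions
# during training.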
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, state, trace_feats_to_decoder, trace_masks, task, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state,
trace_feats_to_decoder, trace_masks, task)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state, trace_feats_to_decoder, trace_masks, task, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state,
tmp_trace_feats, trace_masks, task,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('bool')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # pad tokens of already-finished sequences so eos_idx is not overwritten
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
### for decoder evaluation: cut at the ground-truth caption length
for i in range(trace_masks.shape[0]):
tmp_num = trace_masks[i].sum().long()
seq[i, tmp_num:] = 0
seqLogprobs[i, tmp_num:, :] = 0
return seq, seqLogprobs
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('bool')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx)
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_next_c = torch.tanh(next_c)
next_h = out_gate * tanh_next_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_next_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
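# AdaAtt_lstm is a hand-written multi-layer LSTM whose top layer, in addition to the usual
# hidden/cell states, emits a "fake region" (the visual sentinel of the AdaAtt paper): a
# gated view of the new cell state that the attention module can attend to instead of any
# image region. When use_maxout is set, the candidate cell input is the elementwise max of
# two rnn_size chunks rather than a tanh.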
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # the first (sentinel) position is always kept unmasked
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
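# The adaptive attention above concatenates the sentinel ("fake region") with the att_size
# image regions and computes a single softmax PI over att_size + 1 candidates; the weight
# on the first slot acts as the visual-sentinel gate deciding how much to rely on the
# language state versus the image. The attended vector is added to the projected hidden
# state and passed through att2h to produce the output.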
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
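# UpDownCore follows the two-LSTM decoder of the Bottom-Up and Top-Down attention paper
# cited at the top of the file: the attention LSTM takes [previous language-LSTM hidden
# state, fc image feature, word embedding], its hidden state h_att queries the Attention
# module over region features, and the language LSTM consumes [attended feature, h_att] to
# produce the output. `state` stacks the (h, c) pairs of the two LSTMs along dim 0.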
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
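# Attention is standard additive (Bahdanau-style) attention. Illustrative shapes, inferred
# from how AttModel uses it (not stated in the original file):
#   h           : (B, rnn_size)         decoder hidden state
#   att_feats   : (B, L, rnn_size)      embedded region features
#   p_att_feats : (B, L, att_hid_size)  precomputed ctx2att projection
#   att_masks   : (B, L) or None
#   returns     : (B, rnn_size)         mask-renormalized weighted sum of att_feats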
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
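# Att2in2Core is a single-layer LSTM in which the attended image feature enters only
# through the cell input: a2c(att_res) is added to the 2*rnn_size candidate chunk, which is
# then reduced by an elementwise max (maxout), while the input/forget/output gates see only
# the word embedding and the previous hidden state, matching the att2in design from the
# self-critical paper referenced in the header.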
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note: this is my attempt to replicate the att2all model in the self-critical paper.
However, it is not actually a correct replication. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
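# NewFCModel.core feeds the image feature through the LSTM exactly once, on the first
# decoding step, detected by checking whether each example's state is still all zeros
# (is_first_step). The partial-first-step branch handles batches where only some
# beams/samples are at t == 0 (e.g. diverse beam search, as the commented notes above
# suggest); afterwards the word embedding xt is processed as a normal LSTM step.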
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
# === End of captioning/models/AttModel_both_backup_2020_11_07.py (repo: connect-caption-and-trace-main) ===
"""
BertCapModel uses the Huggingface transformers BERT model as a seq2seq model.
The result is not as good as the original transformer.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import torch
import torch.nn as nn
import torch.nn.functional as F
import copy
import math
import numpy as np
from .CaptionModel import CaptionModel
from .AttModel import sort_pack_padded_sequence, pad_unsort_packed_sequence, pack_wrapper, AttModel
try:
from transformers import BertModel, BertConfig
except ImportError:
print('Huggingface transformers not installed; please visit https://github.com/huggingface/transformers')
from .TransformerModel import subsequent_mask, TransformerModel, Generator
class EncoderDecoder(nn.Module):
"""
A standard Encoder-Decoder architecture. Base for this and many
other models.
"""
def __init__(self, encoder, decoder, generator):
super(EncoderDecoder, self).__init__()
self.encoder = encoder
self.decoder = decoder
self.generator = generator
def forward(self, src, tgt, src_mask, tgt_mask):
"Take in and process masked src and target sequences."
return self.decode(self.encode(src, src_mask), src_mask,
tgt, tgt_mask)
def encode(self, src, src_mask):
return self.encoder(inputs_embeds=src,
attention_mask=src_mask)[0]
def decode(self, memory, src_mask, tgt, tgt_mask):
return self.decoder(input_ids=tgt,
attention_mask=tgt_mask,
encoder_hidden_states=memory,
encoder_attention_mask=src_mask)[0]
class BertCapModel(TransformerModel):
def make_model(self, src_vocab, tgt_vocab, N_enc=6, N_dec=6,
d_model=512, d_ff=2048, h=8, dropout=0.1):
"Helper: Construct a model from hyperparameters."
enc_config = BertConfig(vocab_size=1,
hidden_size=d_model,
num_hidden_layers=N_enc,
num_attention_heads=h,
intermediate_size=d_ff,
hidden_dropout_prob=dropout,
attention_probs_dropout_prob=dropout,
max_position_embeddings=1,
type_vocab_size=1)
dec_config = BertConfig(vocab_size=tgt_vocab,
hidden_size=d_model,
num_hidden_layers=N_dec,
num_attention_heads=h,
intermediate_size=d_ff,
hidden_dropout_prob=dropout,
attention_probs_dropout_prob=dropout,
max_position_embeddings=17,
type_vocab_size=1,
is_decoder=True)
encoder = BertModel(enc_config)
def return_embeds(*args, **kwargs):
return kwargs['inputs_embeds']
del encoder.embeddings; encoder.embeddings = return_embeds
decoder = BertModel(dec_config)
model = EncoderDecoder(
encoder,
decoder,
Generator(d_model, tgt_vocab))
return model
def __init__(self, opt):
super(BertCapModel, self).__init__(opt)
def core(self, it, fc_feats_ph, att_feats_ph, memory, state, mask):
"""
state = [ys.unsqueeze(0)]
"""
if len(state) == 0:
ys = it.unsqueeze(1)
else:
ys = torch.cat([state[0][0], it.unsqueeze(1)], dim=1)
out = self.model.decode(memory, mask,
ys,
subsequent_mask(ys.size(1))
.to(memory.device))
return out[:, -1], [ys.unsqueeze(0)]
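# BertCapModel keeps no recurrent state; `state` holds the token sequence generated so far
# (ys), which is re-decoded through the BERT decoder with a causal (subsequent) mask at
# every step. Only the last position's output is returned, together with the updated token
# history as the new state.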
# === End of captioning/models/BertCapModel.py (repo: connect-caption-and-trace-main) ===
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the image feature embedding and word embedding are the same as in AdaAtt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import reduce
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths.cpu(), batch_first=True)  # lengths must live on CPU in recent PyTorch
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task)
else:
output, state, word_box_attn = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks,
trace_feats_to_decoder, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
if task == 'caption':
return logprobs, state, word_box_attn
else:
return logprobs, state,
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
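# In this version, get_logprobs_state is task-dependent: for 'show' it returns only
# (logprobs, state); for 'caption' it additionally returns word_box_attn, the per-word
# attention over boxes produced by the core; and for 'both' (presumably joint caption and
# trace generation) it also returns predicted trace boxes via the generator_trace head.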
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
trace_feats_to_decoder, trace_masks, show_gate_labels, task, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, show_gate_labels, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state, word_box_attn = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
tmp_trace_feats, trace_masks, show_gate_labels, task, state,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('bool')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
### for decoder evaluation: cut at the ground-truth caption length, since in controlled caption generation we assume the caption length is known
# if task != 'both':
# for i in range(trace_masks.shape[0]):
# tmp_num = trace_masks[i].sum().long()
# seq[i, tmp_num:] = 0
# seqLogprobs[i, tmp_num:, :] = 0
if task != 'both':
if task == 'caption':
return seq, seqLogprobs, torch.cat([word_box_attn,
torch.zeros([seq.shape[0], seq.shape[1]-word_box_attn.shape[1], word_box_attn.shape[2]]).to(seq.device)], 1)
else:
return seq, seqLogprobs
else:
tmp_trace_feats = tmp_trace_feats[:, 1:-1]
return seq, seqLogprobs, torch.cat([tmp_trace_feats,
torch.zeros([seq.shape[0], seq.shape[1]-tmp_trace_feats.shape[1], tmp_trace_feats.shape[2]]).to(seq.device)], 1)
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('bool')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx) # unfinished if previous token is neither pad nor eos
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
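# --- Illustrative sketch, not part of the original repo: how the trigram-blocking
# --- penalty used in _sample / _diverse_sample above behaves. The _demo_* name,
# --- toy vocabulary and toy trigram are hypothetical; alpha and the ln(1/2)
# --- factor mirror the code above.
def _demo_trigram_blocking():
    import torch
    logprobs = torch.log(torch.full((1, 5), 0.2))    # uniform toy distribution over 5 tokens
    trigrams = [{(3, 1): [4]}]                       # trigram (3, 1, 4) was generated earlier
    prev_two = (3, 1)                                # the last two sampled tokens
    mask = torch.zeros_like(logprobs)
    for j in trigrams[0][prev_two]:
        mask[0, j] += 1
    alpha = 2.0
    # each repetition of a seen trigram costs alpha * ln(2) nats, i.e. the token
    # becomes 2**alpha times less likely rather than strictly forbidden
    return logprobs + mask * (-0.693) * alpha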
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_next_c = torch.tanh(next_c)
next_h = out_gate * tanh_next_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_next_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
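# --- Illustrative sketch, not part of the original repo: the maxout input
# --- transform used by AdaAtt_lstm above. With use_maxout, the candidate cell
# --- input is the element-wise max of two rnn_size-wide halves instead of a
# --- single tanh. The _demo_* name and toy sizes are hypothetical.
def _demo_maxout_transform(rnn_size=4):
    import torch
    all_input_sums = torch.randn(2, 5 * rnn_size)    # batch of 2, five rnn_size-wide chunks
    in_transform = all_input_sums.narrow(1, 3 * rnn_size, 2 * rnn_size)
    in_transform = torch.max(in_transform.narrow(1, 0, rnn_size),
                             in_transform.narrow(1, rnn_size, rnn_size))
    return in_transform                              # shape (2, rnn_size)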
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assumes att_masks has a 1 in its first column, so the sentinel slot stays unmasked.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
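# --- Illustrative sketch, not part of the original repo: AdaAtt_attention above
# --- attends over att_size spatial features plus one "fake region" (visual
# --- sentinel), so the softmax runs over att_size + 1 candidates. Shapes are
# --- toy values and input_encoding_size is taken equal to rnn_size here.
def _demo_sentinel_attention(batch=2, att_size=3, rnn_size=4):
    import torch
    import torch.nn.functional as F
    conv_feat = torch.randn(batch, att_size, rnn_size)
    fake_region = torch.randn(batch, 1, rnn_size)          # the sentinel produced by the LSTM
    img_all = torch.cat([fake_region, conv_feat], 1)       # batch x (att_size + 1) x rnn_size
    scores = torch.randn(batch, att_size + 1)              # stand-in for the alpha_net output
    PI = F.softmax(scores, dim=1)
    visAtt = torch.bmm(PI.unsqueeze(1), img_all).squeeze(1)
    return visAtt                                          # batch x rnn_size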
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
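# --- Illustrative sketch, not part of the original repo: UpDownCore keeps a
# --- two-layer state that stacks the attention-LSTM and language-LSTM hidden
# --- and cell states, which is why UpDownModel sets num_layers = 2.
def _demo_updown_state(batch=2, rnn_size=4):
    import torch
    h_att, h_lang = torch.zeros(batch, rnn_size), torch.ones(batch, rnn_size)
    c_att, c_lang = torch.zeros(batch, rnn_size), torch.ones(batch, rnn_size)
    state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
    assert state[0].shape == (2, batch, rnn_size)   # layer 0: attention LSTM, layer 1: language LSTM
    prev_h = state[0][-1]                           # what forward() reads as prev_h, i.e. h_lang
    return prev_h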
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
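# --- Illustrative sketch, not part of the original repo: how the Attention
# --- module above is typically called. p_att_feats stands for the features
# --- already projected by ctx2att; the _demo_* name and toy sizes are hypothetical.
def _demo_attention_shapes():
    import torch
    from types import SimpleNamespace
    opt = SimpleNamespace(rnn_size=8, att_hid_size=16)
    att = Attention(opt)
    h = torch.randn(2, 8)                 # decoder hidden state
    att_feats = torch.randn(2, 5, 8)      # 5 regions, already embedded to rnn_size
    p_att_feats = torch.randn(2, 5, 16)   # the same regions projected to att_hid_size
    att_masks = torch.ones(2, 5)
    att_res = att(h, att_feats, p_att_feats, att_masks)
    return att_res.shape                  # torch.Size([2, 8])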
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
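# --- Illustrative sketch, not part of the original repo: in Att2in2Core the
# --- attended feature att_res is injected only into the cell candidate (via
# --- a2c), not into the gates; Att2all2Core below adds it to all gates instead.
# --- The _demo_* name and toy sizes are hypothetical.
def _demo_att2in_cell_input(rnn_size=4):
    import torch
    import torch.nn as nn
    a2c = nn.Linear(rnn_size, 2 * rnn_size)
    all_input_sums = torch.randn(2, 5 * rnn_size)
    att_res = torch.randn(2, rnn_size)
    in_transform = all_input_sums.narrow(1, 3 * rnn_size, 2 * rnn_size) + a2c(att_res)
    in_transform = torch.max(in_transform.narrow(1, 0, rnn_size),
                             in_transform.narrow(1, rnn_size, rnn_size))
    return in_transform                   # shape (2, rnn_size), fed into the cell update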
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not actually a correct replication. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
# three cases
# normal mle training
# Sample
# beam search (diverse beam search)
# fixed captioning module.
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
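# --- Illustrative sketch, not part of the original repo: how NewFCModel.core
# --- above detects the first decoding step. A state that is all zeros for a
# --- batch element means the image feature has not been fed to the LSTM yet.
def _demo_first_step_detection(batch=3, rnn_size=4, num_layers=1):
    import torch
    state = (torch.zeros(num_layers, batch, rnn_size),
             torch.zeros(num_layers, batch, rnn_size))
    state[0][:, 1] = 1.0                            # pretend element 1 has already taken a step
    is_first_step = (state[0] == 0).all(2).all(0)   # size: batch
    return is_first_step                            # tensor([True, False, True])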
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_for_coco_caption_baseline.py
|
# This file contains Att2in2, AdaAtt, AdaAttMO, UpDown model
# AdaAtt is from Knowing When to Look: Adaptive Attention via A Visual Sentinel for Image Captioning
# https://arxiv.org/abs/1612.01887
# AdaAttMO is a modified version with maxout lstm
# Att2in is from Self-critical Sequence Training for Image Captioning
# https://arxiv.org/abs/1612.00563
# In this file we only have Att2in2, which is a slightly different version of att2in,
# in which the img feature embedding and word embedding is the same as what in adaatt.
# UpDown is from Bottom-Up and Top-Down Attention for Image Captioning and VQA
# https://arxiv.org/abs/1707.07998
# However, it may not be identical to the author's architecture.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from . import utils
from torch.nn.utils.rnn import PackedSequence, pack_padded_sequence, pad_packed_sequence
from .CaptionModel import CaptionModel
bad_endings = ['a','an','the','in','for','at','of','with','before','after','on','upon','near','to','is','are','am']
bad_endings += ['the']
def sort_pack_padded_sequence(input, lengths):
sorted_lengths, indices = torch.sort(lengths, descending=True)
tmp = pack_padded_sequence(input[indices], sorted_lengths, batch_first=True)
inv_ix = indices.clone()
inv_ix[indices] = torch.arange(0,len(indices)).type_as(inv_ix)
return tmp, inv_ix
def pad_unsort_packed_sequence(input, inv_ix):
tmp, _ = pad_packed_sequence(input, batch_first=True)
tmp = tmp[inv_ix]
return tmp
def pack_wrapper(module, att_feats, att_masks):
if att_masks is not None:
packed, inv_ix = sort_pack_padded_sequence(att_feats, att_masks.data.long().sum(1))
return pad_unsort_packed_sequence(PackedSequence(module(packed[0]), packed[1]), inv_ix)
else:
return module(att_feats)
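# --- Illustrative sketch, not part of the original repo: pack_wrapper above
# --- applies a module only to the valid (unpadded) positions of att_feats,
# --- using the att_masks row sums as lengths. Toy shapes are hypothetical.
def _demo_pack_wrapper():
    import torch
    import torch.nn as nn
    att_feats = torch.randn(2, 4, 6)                 # batch of 2, up to 4 regions, feature dim 6
    att_masks = torch.tensor([[1., 1., 1., 1.],
                              [1., 1., 0., 0.]])     # second image has only 2 valid regions
    module = nn.Linear(6, 3)
    out = pack_wrapper(module, att_feats, att_masks)
    return out.shape                                 # torch.Size([2, 4, 3]); padded positions stay zero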
class AttModel(CaptionModel):
def __init__(self, opt):
super(AttModel, self).__init__()
self.vocab_size = opt.vocab_size
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.seq_length = getattr(opt, 'max_length', 20) or opt.seq_length # maximum sample length
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.bos_idx = getattr(opt, 'bos_idx', 0)
self.eos_idx = getattr(opt, 'eos_idx', 0)
self.pad_idx = getattr(opt, 'pad_idx', 0)
self.use_bn = getattr(opt, 'use_bn', 0)
self.ss_prob = 0.0 # Schedule sampling probability
self.embed = nn.Sequential(nn.Embedding(self.vocab_size + 1, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fc_embed = nn.Sequential(nn.Linear(self.fc_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.att_embed = nn.Sequential(*(
((nn.BatchNorm1d(self.att_feat_size),) if self.use_bn else ())+
(nn.Linear(self.att_feat_size, self.rnn_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))+
((nn.BatchNorm1d(self.rnn_size),) if self.use_bn==2 else ())))
self.logit_layers = getattr(opt, 'logit_layers', 1)
if self.logit_layers == 1:
self.logit = nn.Linear(self.rnn_size, self.vocab_size + 1)
else:
self.logit = [[nn.Linear(self.rnn_size, self.rnn_size), nn.ReLU(), nn.Dropout(0.5)] for _ in range(opt.logit_layers - 1)]
self.logit = nn.Sequential(*(reduce(lambda x,y:x+y, self.logit) + [nn.Linear(self.rnn_size, self.vocab_size + 1)]))
self.ctx2att = nn.Linear(self.rnn_size, self.att_hid_size)
# For removing bad endings
self.vocab = opt.vocab
self.bad_endings_ix = [int(k) for k,v in self.vocab.items() if v in bad_endings]
def init_hidden(self, bsz):
weight = self.logit.weight \
if hasattr(self.logit, "weight") \
else self.logit[0].weight
return (weight.new_zeros(self.num_layers, bsz, self.rnn_size),
weight.new_zeros(self.num_layers, bsz, self.rnn_size))
def clip_att(self, att_feats, box_feats, att_masks):
# Clip the length of att_masks and att_feats to the maximum length
if att_masks is not None:
max_len = att_masks.data.long().sum(1).max()
att_feats = att_feats[:, :max_len].contiguous()
att_masks = att_masks[:, :max_len].contiguous()
box_feats = box_feats[:, :max_len].contiguous()
return att_feats, box_feats, att_masks
def _prepare_feature(self, fc_feats, att_feats, att_masks):
att_feats, att_masks = self.clip_att(att_feats, att_masks)
# embed fc and att feats
fc_feats = self.fc_embed(fc_feats)
att_feats = pack_wrapper(self.att_embed, att_feats, att_masks)
# Project the attention feats first to reduce memory and computation consumption.
p_att_feats = self.ctx2att(att_feats)
return fc_feats, att_feats, p_att_feats, att_masks
def _forward(self, fc_feats, att_feats, trace_feats, seq, att_masks=None, trace_masks=None):
batch_size = fc_feats.size(0)
if seq.ndim == 3: # B * seq_per_img * seq_len
seq = seq.reshape(-1, seq.shape[2])
seq_per_img = seq.shape[0] // batch_size
state = self.init_hidden(batch_size*seq_per_img)
outputs = fc_feats.new_zeros(batch_size*seq_per_img, seq.size(1), self.vocab_size+1)
# Prepare the features
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, trace_feats, att_masks, trace_masks)
# pp_att_feats is used for attention, we cache it in advance to reduce computation cost
if seq_per_img > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(seq_per_img,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
for i in range(seq.size(1)):
if self.training and i >= 1 and self.ss_prob > 0.0: # otherwise no need to sample
sample_prob = fc_feats.new(batch_size*seq_per_img).uniform_(0, 1)
sample_mask = sample_prob < self.ss_prob
if sample_mask.sum() == 0:
it = seq[:, i].clone()
else:
sample_ind = sample_mask.nonzero().view(-1)
it = seq[:, i].data.clone()
prob_prev = torch.exp(outputs[:, i-1].detach()) # fetch prev distribution: shape Nx(M+1)
it.index_copy_(0, sample_ind, torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
else:
it = seq[:, i].clone()
# break if all the sequences end
if i >= 1 and seq[:, i].sum() == 0:
break
output, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state)
outputs[:, i] = output
return outputs
def get_logprobs_state(self, it, fc_feats, att_feats, p_att_feats, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=1):
# 'it' contains a word index
xt = self.embed(it)
if task == 'caption' or task == 'show':
if task == 'show':
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task)
else:
output, state = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks,
trace_feats_to_decoder, trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state
elif task == 'both':
output, state, output_trace = self.core(xt, fc_feats, att_feats, p_att_feats, state, att_masks, trace_feats_to_decoder,
trace_masks, task=task)
if output_logsoftmax:
logprobs = F.log_softmax(self.logit(output), dim=1)
else:
logprobs = self.logit(output)
return logprobs, state, self.model.generator_trace(output_trace)
def _old_sample_beam(self, fc_feats, att_feats, att_masks=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
for k in range(batch_size):
state = self.init_hidden(beam_size)
tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks = utils.repeat_tensors(beam_size,
[p_fc_feats[k:k+1], p_att_feats[k:k+1], pp_att_feats[k:k+1], p_att_masks[k:k+1] if att_masks is not None else None]
)
for t in range(1):
if t == 0: # input <bos>
it = fc_feats.new_full([beam_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, state)
self.done_beams[k] = self.old_beam_search(state, logprobs, tmp_fc_feats, tmp_att_feats, tmp_p_att_feats, tmp_att_masks, opt=opt)
if sample_n == beam_size:
for _n in range(sample_n):
seq[k*sample_n+_n, :] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :] = self.done_beams[k][_n]['logps']
else:
seq[k, :] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample_beam(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
beam_size = opt.get('beam_size', 10)
group_size = opt.get('group_size', 1)
sample_n = opt.get('sample_n', 10)
# when sample_n == beam_size then each beam is a sample.
assert sample_n == 1 or sample_n == beam_size // group_size, 'when using beam search, sample_n must be 1 or beam_size // group_size'
batch_size = fc_feats.size(0)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
assert beam_size <= self.vocab_size + 1, 'lets assume this for now, otherwise this corner case causes a few headaches down the road. can be dealt with in future if needed'
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
# lets process every image independently for now, for simplicity
self.done_beams = [[] for _ in range(batch_size)]
state = self.init_hidden(batch_size)
# first step, feed bos
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
trace_feats_to_decoder, trace_masks, show_gate_labels, task, state)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels = utils.repeat_tensors(beam_size,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels]
)
self.done_beams = self.beam_search(state, logprobs, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, opt=opt)
for k in range(batch_size):
if sample_n == beam_size:
for _n in range(sample_n):
seq_len = self.done_beams[k][_n]['seq'].shape[0]
seq[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['seq']
seqLogprobs[k*sample_n+_n, :seq_len] = self.done_beams[k][_n]['logps']
else:
seq_len = self.done_beams[k][0]['seq'].shape[0]
seq[k, :seq_len] = self.done_beams[k][0]['seq'] # the first beam has highest cumulative score
seqLogprobs[k, :seq_len] = self.done_beams[k][0]['logps']
# return the samples and their log likelihoods
return seq, seqLogprobs
def _sample(self, fc_feats, att_feats, trace_feats, box_feats, att_masks=None, trace_masks=None, show_gate_labels=None, task=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
sample_n = int(opt.get('sample_n', 1))
group_size = opt.get('group_size', 1)
output_logsoftmax = opt.get('output_logsoftmax', 1)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
if beam_size > 1 and sample_method in ['greedy', 'beam_search']:
return self._sample_beam(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks, show_gate_labels, task, opt)
if group_size > 1:
return self._diverse_sample(fc_feats, att_feats, att_masks, opt)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size*sample_n)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder = \
self._prepare_feature(fc_feats, att_feats, trace_feats, box_feats, att_masks, trace_masks)
if sample_n > 1:
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = utils.repeat_tensors(sample_n,
[p_fc_feats, p_att_feats, pp_att_feats, p_att_masks]
)
trigrams = [] # will be a list of batch_size dictionaries
seq = fc_feats.new_full((batch_size*sample_n, self.seq_length), self.pad_idx, dtype=torch.long)
seqLogprobs = fc_feats.new_zeros(batch_size*sample_n, self.seq_length, self.vocab_size + 1)
if task == 'both':
tmp_trace_feats = torch.zeros([trace_feats_to_decoder.shape[0], 1, trace_feats_to_decoder.shape[2]]).to(trace_masks.device)
for t in range(self.seq_length + 1):
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size*sample_n], self.bos_idx, dtype=torch.long)
if task == 'caption' or task == 'show':
logprobs, state = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, trace_feats_to_decoder, trace_masks, show_gate_labels, task, state, output_logsoftmax=output_logsoftmax)
elif task == 'both':
logprobs, state, output_trace = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks,
tmp_trace_feats, trace_masks, show_gate_labels, task, state,
output_logsoftmax=output_logsoftmax)
output_trace = output_trace[:, t]
output_trace[:, 4] = (output_trace[:, 2] - output_trace[:, 0]) * (output_trace[:, 3] - output_trace[:, 1])
tmp_trace_feats = torch.cat([tmp_trace_feats, output_trace.unsqueeze(1)], 1)
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('bool')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
# Copy from https://github.com/lukemelas/image-paragraph-captioning
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).to(logprobs.device) # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
# sample the next word
if t == self.seq_length: # skip if we achieve maximum length
break
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, temperature)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
it[~unfinished] = self.pad_idx # This allows eos_idx not being overwritten to 0
logprobs = logprobs * unfinished.unsqueeze(1).to(logprobs)
unfinished = unfinished & (it != self.eos_idx)
seq[:,t] = it
seqLogprobs[:,t] = logprobs
# quit loop if all sequences have finished
if unfinished.sum() == 0:
break
### for decoder evaluation: cut at the ground-truth caption length, since in controlled caption generation we assume the caption length is known
# if task != 'both':
# for i in range(trace_masks.shape[0]):
# tmp_num = trace_masks[i].sum().long()
# seq[i, tmp_num:] = 0
# seqLogprobs[i, tmp_num:, :] = 0
if task != 'both':
return seq, seqLogprobs
else:
tmp_trace_feats = tmp_trace_feats[:, 1:-1]
return seq, seqLogprobs, torch.cat([tmp_trace_feats,
torch.zeros([seq.shape[0], seq.shape[1]-tmp_trace_feats.shape[1], tmp_trace_feats.shape[2]]).to(seq.device)], 1)
def _diverse_sample(self, fc_feats, att_feats, att_masks=None, opt={}):
sample_method = opt.get('sample_method', 'greedy')
beam_size = opt.get('beam_size', 1)
temperature = opt.get('temperature', 1.0)
group_size = opt.get('group_size', 1)
diversity_lambda = opt.get('diversity_lambda', 0.5)
decoding_constraint = opt.get('decoding_constraint', 0)
block_trigrams = opt.get('block_trigrams', 0)
remove_bad_endings = opt.get('remove_bad_endings', 0)
batch_size = fc_feats.size(0)
state = self.init_hidden(batch_size)
p_fc_feats, p_att_feats, pp_att_feats, p_att_masks = self._prepare_feature(fc_feats, att_feats, att_masks)
trigrams_table = [[] for _ in range(group_size)] # will be a list of batch_size dictionaries
seq_table = [fc_feats.new_full((batch_size, self.seq_length), self.pad_idx, dtype=torch.long) for _ in range(group_size)]
seqLogprobs_table = [fc_feats.new_zeros(batch_size, self.seq_length) for _ in range(group_size)]
state_table = [self.init_hidden(batch_size) for _ in range(group_size)]
for tt in range(self.seq_length + group_size):
for divm in range(group_size):
t = tt - divm
seq = seq_table[divm]
seqLogprobs = seqLogprobs_table[divm]
trigrams = trigrams_table[divm]
if t >= 0 and t <= self.seq_length-1:
if t == 0: # input <bos>
it = fc_feats.new_full([batch_size], self.bos_idx, dtype=torch.long)
else:
it = seq[:, t-1] # changed
logprobs, state_table[divm] = self.get_logprobs_state(it, p_fc_feats, p_att_feats, pp_att_feats, p_att_masks, state_table[divm]) # changed
logprobs = F.log_softmax(logprobs / temperature, dim=-1)
# Add diversity
if divm > 0:
unaug_logprobs = logprobs.clone()
for prev_choice in range(divm):
prev_decisions = seq_table[prev_choice][:, t]
logprobs[:, prev_decisions] = logprobs[:, prev_decisions] - diversity_lambda
if decoding_constraint and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
tmp.scatter_(1, seq[:,t-1].data.unsqueeze(1), float('-inf'))
logprobs = logprobs + tmp
if remove_bad_endings and t > 0:
tmp = logprobs.new_zeros(logprobs.size())
prev_bad = np.isin(seq[:,t-1].data.cpu().numpy(), self.bad_endings_ix)
# Make it impossible to generate bad_endings
tmp[torch.from_numpy(prev_bad.astype('bool')), 0] = float('-inf')
logprobs = logprobs + tmp
# Mess with trigrams
if block_trigrams and t >= 3:
# Store trigram generated at last step
prev_two_batch = seq[:,t-3:t-1]
for i in range(batch_size): # = seq.size(0)
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
current = seq[i][t-1]
if t == 3: # initialize
trigrams.append({prev_two: [current]}) # {LongTensor: list containing 1 int}
elif t > 3:
if prev_two in trigrams[i]: # add to list
trigrams[i][prev_two].append(current)
else: # create list
trigrams[i][prev_two] = [current]
# Block used trigrams at next step
prev_two_batch = seq[:,t-2:t]
mask = torch.zeros(logprobs.size(), requires_grad=False).cuda() # batch_size x vocab_size
for i in range(batch_size):
prev_two = (prev_two_batch[i][0].item(), prev_two_batch[i][1].item())
if prev_two in trigrams[i]:
for j in trigrams[i][prev_two]:
mask[i,j] += 1
# Apply mask to log probs
#logprobs = logprobs - (mask * 1e9)
alpha = 2.0 # = 4
logprobs = logprobs + (mask * -0.693 * alpha) # ln(1/2) * alpha (alpha -> infty works best)
it, sampleLogprobs = self.sample_next_word(logprobs, sample_method, 1)
# stop when all finished
if t == 0:
unfinished = it != self.eos_idx
else:
unfinished = (seq[:,t-1] != self.pad_idx) & (seq[:,t-1] != self.eos_idx) # unfinished if previous token is neither pad nor eos
it[~unfinished] = self.pad_idx
unfinished = unfinished & (it != self.eos_idx) # changed
seq[:,t] = it
seqLogprobs[:,t] = sampleLogprobs.view(-1)
return torch.stack(seq_table, 1).reshape(batch_size * group_size, -1), torch.stack(seqLogprobs_table, 1).reshape(batch_size * group_size, -1)
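# --- Illustrative sketch, not part of the original repo: the scheduled-sampling
# --- mix used in AttModel._forward above. With probability ss_prob, the input
# --- token at step i is sampled from the model's previous output distribution
# --- instead of taken from the ground truth. Toy tensors are hypothetical.
def _demo_scheduled_sampling(ss_prob=0.25):
    import torch
    seq = torch.tensor([[5, 7, 9], [2, 4, 6]])                 # toy ground-truth tokens
    prev_logprobs = torch.log_softmax(torch.randn(2, 10), dim=-1)
    i = 1
    sample_prob = torch.rand(seq.size(0))
    sample_mask = sample_prob < ss_prob
    it = seq[:, i].clone()
    if sample_mask.any():
        sample_ind = sample_mask.nonzero().view(-1)
        prob_prev = torch.exp(prev_logprobs.detach())          # previous step's distribution
        it.index_copy_(0, sample_ind,
                       torch.multinomial(prob_prev, 1).view(-1).index_select(0, sample_ind))
    return it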
class AdaAtt_lstm(nn.Module):
def __init__(self, opt, use_maxout=True):
super(AdaAtt_lstm, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
self.use_maxout = use_maxout
# Build a LSTM
self.w2h = nn.Linear(self.input_encoding_size, (4+(use_maxout==True)) * self.rnn_size)
self.v2h = nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size)
self.i2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers - 1)])
self.h2h = nn.ModuleList([nn.Linear(self.rnn_size, (4+(use_maxout==True)) * self.rnn_size) for _ in range(self.num_layers)])
# Layers for getting the fake region
if self.num_layers == 1:
self.r_w2h = nn.Linear(self.input_encoding_size, self.rnn_size)
self.r_v2h = nn.Linear(self.rnn_size, self.rnn_size)
else:
self.r_i2h = nn.Linear(self.rnn_size, self.rnn_size)
self.r_h2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, xt, img_fc, state):
hs = []
cs = []
for L in range(self.num_layers):
# c,h from previous timesteps
prev_h = state[0][L]
prev_c = state[1][L]
# the input to this layer
if L == 0:
x = xt
i2h = self.w2h(x) + self.v2h(img_fc)
else:
x = hs[-1]
x = F.dropout(x, self.drop_prob_lm, self.training)
i2h = self.i2h[L-1](x)
all_input_sums = i2h+self.h2h[L](prev_h)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
# decode the gates
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
# decode the write inputs
if not self.use_maxout:
in_transform = torch.tanh(all_input_sums.narrow(1, 3 * self.rnn_size, self.rnn_size))
else:
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
# perform the LSTM update
next_c = forget_gate * prev_c + in_gate * in_transform
# gated cells form the output
tanh_next_c = torch.tanh(next_c)
next_h = out_gate * tanh_next_c
if L == self.num_layers-1:
if L == 0:
i2h = self.r_w2h(x) + self.r_v2h(img_fc)
else:
i2h = self.r_i2h(x)
n5 = i2h+self.r_h2h(prev_h)
fake_region = torch.sigmoid(n5) * tanh_next_c
cs.append(next_c)
hs.append(next_h)
# set up the decoder
top_h = hs[-1]
top_h = F.dropout(top_h, self.drop_prob_lm, self.training)
fake_region = F.dropout(fake_region, self.drop_prob_lm, self.training)
state = (torch.cat([_.unsqueeze(0) for _ in hs], 0),
torch.cat([_.unsqueeze(0) for _ in cs], 0))
return top_h, fake_region, state
class AdaAtt_attention(nn.Module):
def __init__(self, opt):
super(AdaAtt_attention, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
self.drop_prob_lm = opt.drop_prob_lm
self.att_hid_size = opt.att_hid_size
# fake region embed
self.fr_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.ReLU(),
nn.Dropout(self.drop_prob_lm))
self.fr_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
# h out embed
self.ho_linear = nn.Sequential(
nn.Linear(self.rnn_size, self.input_encoding_size),
nn.Tanh(),
nn.Dropout(self.drop_prob_lm))
self.ho_embed = nn.Linear(self.input_encoding_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
self.att2h = nn.Linear(self.rnn_size, self.rnn_size)
def forward(self, h_out, fake_region, conv_feat, conv_feat_embed, att_masks=None):
# View into three dimensions
att_size = conv_feat.numel() // conv_feat.size(0) // self.rnn_size
conv_feat = conv_feat.view(-1, att_size, self.rnn_size)
conv_feat_embed = conv_feat_embed.view(-1, att_size, self.att_hid_size)
# view neighbor from batch_size * neighbor_num x rnn_size to batch_size x rnn_size * neighbor_num
fake_region = self.fr_linear(fake_region)
fake_region_embed = self.fr_embed(fake_region)
h_out_linear = self.ho_linear(h_out)
h_out_embed = self.ho_embed(h_out_linear)
txt_replicate = h_out_embed.unsqueeze(1).expand(h_out_embed.size(0), att_size + 1, h_out_embed.size(1))
img_all = torch.cat([fake_region.view(-1,1,self.input_encoding_size), conv_feat], 1)
img_all_embed = torch.cat([fake_region_embed.view(-1,1,self.input_encoding_size), conv_feat_embed], 1)
hA = torch.tanh(img_all_embed + txt_replicate)
hA = F.dropout(hA,self.drop_prob_lm, self.training)
hAflat = self.alpha_net(hA.view(-1, self.att_hid_size))
PI = F.softmax(hAflat.view(-1, att_size + 1), dim=1)
if att_masks is not None:
att_masks = att_masks.view(-1, att_size)
PI = PI * torch.cat([att_masks[:,:1], att_masks], 1) # assumes att_masks has a 1 in its first column, so the sentinel slot stays unmasked.
PI = PI / PI.sum(1, keepdim=True)
visAtt = torch.bmm(PI.unsqueeze(1), img_all)
visAttdim = visAtt.squeeze(1)
atten_out = visAttdim + h_out_linear
h = torch.tanh(self.att2h(atten_out))
h = F.dropout(h, self.drop_prob_lm, self.training)
return h
class AdaAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(AdaAttCore, self).__init__()
self.lstm = AdaAtt_lstm(opt, use_maxout)
self.attention = AdaAtt_attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
h_out, p_out, state = self.lstm(xt, fc_feats, state)
atten_out = self.attention(h_out, p_out, att_feats, p_att_feats, att_masks)
return atten_out, state
class UpDownCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(UpDownCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
self.att_lstm = nn.LSTMCell(opt.input_encoding_size + opt.rnn_size * 2, opt.rnn_size) # we, fc, h^2_t-1
self.lang_lstm = nn.LSTMCell(opt.rnn_size * 2, opt.rnn_size) # h^1_t, \hat v
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
prev_h = state[0][-1]
att_lstm_input = torch.cat([prev_h, fc_feats, xt], 1)
h_att, c_att = self.att_lstm(att_lstm_input, (state[0][0], state[1][0]))
att = self.attention(h_att, att_feats, p_att_feats, att_masks)
lang_lstm_input = torch.cat([att, h_att], 1)
# lang_lstm_input = torch.cat([att, F.dropout(h_att, self.drop_prob_lm, self.training)], 1) ?????
h_lang, c_lang = self.lang_lstm(lang_lstm_input, (state[0][1], state[1][1]))
output = F.dropout(h_lang, self.drop_prob_lm, self.training)
state = (torch.stack([h_att, h_lang]), torch.stack([c_att, c_lang]))
return output, state
############################################################################
# Notice:
# StackAtt and DenseAtt are models that I randomly designed.
# They are not related to any paper.
############################################################################
from .FCModel import LSTMCore
class StackAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(StackAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([h_1,att_res_2],1), [state[0][2:3], state[1][2:3]])
return h_2, [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class DenseAttCore(nn.Module):
def __init__(self, opt, use_maxout=False):
super(DenseAttCore, self).__init__()
self.drop_prob_lm = opt.drop_prob_lm
# self.att0 = Attention(opt)
self.att1 = Attention(opt)
self.att2 = Attention(opt)
opt_input_encoding_size = opt.input_encoding_size
opt.input_encoding_size = opt.input_encoding_size + opt.rnn_size
self.lstm0 = LSTMCore(opt) # att_feat + word_embedding
opt.input_encoding_size = opt.rnn_size * 2
self.lstm1 = LSTMCore(opt)
self.lstm2 = LSTMCore(opt)
opt.input_encoding_size = opt_input_encoding_size
# self.emb1 = nn.Linear(opt.rnn_size, opt.rnn_size)
self.emb2 = nn.Linear(opt.rnn_size, opt.rnn_size)
# fuse h_0 and h_1
self.fusion1 = nn.Sequential(nn.Linear(opt.rnn_size*2, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
# fuse h_0, h_1 and h_2
self.fusion2 = nn.Sequential(nn.Linear(opt.rnn_size*3, opt.rnn_size),
nn.ReLU(),
nn.Dropout(opt.drop_prob_lm))
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
# att_res_0 = self.att0(state[0][-1], att_feats, p_att_feats, att_masks)
h_0, state_0 = self.lstm0(torch.cat([xt,fc_feats],1), [state[0][0:1], state[1][0:1]])
att_res_1 = self.att1(h_0, att_feats, p_att_feats, att_masks)
h_1, state_1 = self.lstm1(torch.cat([h_0,att_res_1],1), [state[0][1:2], state[1][1:2]])
att_res_2 = self.att2(h_1 + self.emb2(att_res_1), att_feats, p_att_feats, att_masks)
h_2, state_2 = self.lstm2(torch.cat([self.fusion1(torch.cat([h_0, h_1], 1)),att_res_2],1), [state[0][2:3], state[1][2:3]])
return self.fusion2(torch.cat([h_0, h_1, h_2], 1)), [torch.cat(_, 0) for _ in zip(state_0, state_1, state_2)]
class Attention(nn.Module):
def __init__(self, opt):
super(Attention, self).__init__()
self.rnn_size = opt.rnn_size
self.att_hid_size = opt.att_hid_size
self.h2att = nn.Linear(self.rnn_size, self.att_hid_size)
self.alpha_net = nn.Linear(self.att_hid_size, 1)
def forward(self, h, att_feats, p_att_feats, att_masks=None):
# The p_att_feats here is already projected
att_size = att_feats.numel() // att_feats.size(0) // att_feats.size(-1)
att = p_att_feats.view(-1, att_size, self.att_hid_size)
att_h = self.h2att(h) # batch * att_hid_size
att_h = att_h.unsqueeze(1).expand_as(att) # batch * att_size * att_hid_size
dot = att + att_h # batch * att_size * att_hid_size
dot = torch.tanh(dot) # batch * att_size * att_hid_size
dot = dot.view(-1, self.att_hid_size) # (batch * att_size) * att_hid_size
dot = self.alpha_net(dot) # (batch * att_size) * 1
dot = dot.view(-1, att_size) # batch * att_size
weight = F.softmax(dot, dim=1) # batch * att_size
if att_masks is not None:
weight = weight * att_masks.view(-1, att_size).to(weight)
weight = weight / weight.sum(1, keepdim=True) # normalize to 1
att_feats_ = att_feats.view(-1, att_size, att_feats.size(-1)) # batch * att_size * att_feat_size
att_res = torch.bmm(weight.unsqueeze(1), att_feats_).squeeze(1) # batch * att_feat_size
return att_res
class Att2in2Core(nn.Module):
def __init__(self, opt):
super(Att2in2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2c = nn.Linear(self.rnn_size, 2 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1])
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size) + \
self.a2c(att_res)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
class Att2inCore(Att2in2Core):
def __init__(self, opt):
super(Att2inCore, self).__init__(opt)
del self.a2c
self.a2c = nn.Linear(self.att_feat_size, 2 * self.rnn_size)
"""
Note this is my attempt to replicate att2all model in self-critical paper.
However, this is not a correct replication actually. Will fix it.
"""
class Att2all2Core(nn.Module):
def __init__(self, opt):
super(Att2all2Core, self).__init__()
self.input_encoding_size = opt.input_encoding_size
#self.rnn_type = opt.rnn_type
self.rnn_size = opt.rnn_size
#self.num_layers = opt.num_layers
self.drop_prob_lm = opt.drop_prob_lm
self.fc_feat_size = opt.fc_feat_size
self.att_feat_size = opt.att_feat_size
self.att_hid_size = opt.att_hid_size
# Build a LSTM
self.a2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.i2h = nn.Linear(self.input_encoding_size, 5 * self.rnn_size)
self.h2h = nn.Linear(self.rnn_size, 5 * self.rnn_size)
self.dropout = nn.Dropout(self.drop_prob_lm)
self.attention = Attention(opt)
def forward(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks=None):
att_res = self.attention(state[0][-1], att_feats, p_att_feats, att_masks)
all_input_sums = self.i2h(xt) + self.h2h(state[0][-1]) + self.a2h(att_res)
sigmoid_chunk = all_input_sums.narrow(1, 0, 3 * self.rnn_size)
sigmoid_chunk = torch.sigmoid(sigmoid_chunk)
in_gate = sigmoid_chunk.narrow(1, 0, self.rnn_size)
forget_gate = sigmoid_chunk.narrow(1, self.rnn_size, self.rnn_size)
out_gate = sigmoid_chunk.narrow(1, self.rnn_size * 2, self.rnn_size)
in_transform = all_input_sums.narrow(1, 3 * self.rnn_size, 2 * self.rnn_size)
in_transform = torch.max(\
in_transform.narrow(1, 0, self.rnn_size),
in_transform.narrow(1, self.rnn_size, self.rnn_size))
next_c = forget_gate * state[1][-1] + in_gate * in_transform
next_h = out_gate * torch.tanh(next_c)
output = self.dropout(next_h)
state = (next_h.unsqueeze(0), next_c.unsqueeze(0))
return output, state
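# Added note: the only difference from Att2in2Core is where the attended
# feature enters the LSTM -- Att2in2Core adds a2c(att_res) to the candidate
# ("in") chunk only, while Att2all2Core adds a2h(att_res) to all five gate
# pre-activations.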
class AdaAttModel(AttModel):
def __init__(self, opt):
super(AdaAttModel, self).__init__(opt)
self.core = AdaAttCore(opt)
# AdaAtt with maxout lstm
class AdaAttMOModel(AttModel):
def __init__(self, opt):
super(AdaAttMOModel, self).__init__(opt)
self.core = AdaAttCore(opt, True)
class Att2in2Model(AttModel):
def __init__(self, opt):
super(Att2in2Model, self).__init__(opt)
self.core = Att2in2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class Att2all2Model(AttModel):
def __init__(self, opt):
super(Att2all2Model, self).__init__(opt)
self.core = Att2all2Core(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x : x
class UpDownModel(AttModel):
def __init__(self, opt):
super(UpDownModel, self).__init__(opt)
self.num_layers = 2
self.core = UpDownCore(opt)
class StackAttModel(AttModel):
def __init__(self, opt):
super(StackAttModel, self).__init__(opt)
self.num_layers = 3
self.core = StackAttCore(opt)
class DenseAttModel(AttModel):
def __init__(self, opt):
super(DenseAttModel, self).__init__(opt)
self.num_layers = 3
self.core = DenseAttCore(opt)
class Att2inModel(AttModel):
def __init__(self, opt):
super(Att2inModel, self).__init__(opt)
del self.embed, self.fc_embed, self.att_embed
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self.fc_embed = self.att_embed = lambda x: x
del self.ctx2att
self.ctx2att = nn.Linear(self.att_feat_size, self.att_hid_size)
self.core = Att2inCore(opt)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.embed.weight.data.uniform_(-initrange, initrange)
self.logit.bias.data.fill_(0)
self.logit.weight.data.uniform_(-initrange, initrange)
class NewFCModel(AttModel):
def __init__(self, opt):
super(NewFCModel, self).__init__(opt)
self.fc_embed = nn.Linear(self.fc_feat_size, self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
# Step 0, feed the input image
# if (self.training and state[0].is_leaf) or \
# (not self.training and state[0].sum() == 0):
# _, state = self._core(fc_feats, state)
        # Cases that need this step-0 handling:
        #   - normal MLE training
        #   - sampling
        #   - beam search (including diverse beam search)
        #   - a fixed captioning module
is_first_step = (state[0]==0).all(2).all(0) # size: B
if is_first_step.all():
_, state = self._core(fc_feats, state)
elif is_first_step.any():
# This is mostly for diverse beam search I think
new_state = [torch.zeros_like(_) for _ in state]
new_state[0][:, ~is_first_step] = state[0][:, ~is_first_step]
new_state[1][:, ~is_first_step] = state[1][:, ~is_first_step]
_, state = self._core(fc_feats, state)
new_state[0][:, is_first_step] = state[0][:, is_first_step]
new_state[1][:, is_first_step] = state[1][:, is_first_step]
state = new_state
# if (state[0]==0).all():
# # Let's forget about diverse beam search first
# _, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, att_feats, att_feats, att_masks
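# Added note: NewFCModel feeds the embedded fc feature through the same
# LSTMCore as a step-0 input; the is_first_step bookkeeping in core() only
# matters when a batch mixes rows whose state has already been advanced
# (e.g. during diverse beam search) with rows that still need that step.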
class LMModel(AttModel):
def __init__(self, opt):
super(LMModel, self).__init__(opt)
delattr(self, 'fc_embed')
self.fc_embed = lambda x: x.new_zeros(x.shape[0], self.input_encoding_size)
self.embed = nn.Embedding(self.vocab_size + 1, self.input_encoding_size)
self._core = LSTMCore(opt)
delattr(self, 'att_embed')
self.att_embed = lambda x : x
delattr(self, 'ctx2att')
self.ctx2att = lambda x: x
def core(self, xt, fc_feats, att_feats, p_att_feats, state, att_masks):
if (state[0]==0).all():
# Let's forget about diverse beam search first
_, state = self._core(fc_feats, state)
return self._core(xt, state)
def _prepare_feature(self, fc_feats, att_feats, att_masks):
fc_feats = self.fc_embed(fc_feats)
return fc_feats, None, None, None
|
connect-caption-and-trace-main
|
captioning/models/AttModel_for_coco_caption_task.py
|
import torch
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit = losses.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit = losses.LanguageModelCriterion()
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks, gts, gt_indices,
sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
lm_loss = self.crit(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
loss = self.crit(self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks), labels[..., 1:], masks[..., 1:])
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
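# Added summary: forward() above dispatches between three training modes --
# structure losses when struc_flag is set, plain cross-entropy / label
# smoothing when neither flag is set, and self-critical REINFORCE with a
# greedy-decoding baseline when sc_flag is set -- mirroring the flags passed
# in by the training loop.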
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_caption_generation.py
|
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit_caption = losses.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit_caption = losses.LanguageModelCriterion()
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
self.show_gate_crit = torch.nn.CrossEntropyLoss()
# regression loss for trace generation
self.crit_trace = torch.nn.L1Loss()
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks,
show_labels, show_trace_feats, show_trace_masks, show_masks, show_gate_labels,
gts, gt_indices, sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
                lm_loss = self.crit_caption(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
# train generating both caption and trace
# caption_outputs_both, trace_outputs_both = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='both')
# loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
# loss_both_trace = (torch.abs(trace_outputs_both[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
# loss_mask.sum() * 4)
# loss_both_caption = self.crit_caption(caption_outputs_both, labels[..., 1:], masks[..., 1:])
# loss_both = loss_both_caption + loss_both_trace
# # #
# # # # for caption generation
# caption_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# loss_caption = self.crit_caption(caption_outputs, labels[..., 1:], masks[..., 1:])
# #
# # # for trace generation - regression
# trace_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='trace')
# loss_mask = ((trace_masks!=0) * (trace_feats[:,:,4]!=1)).unsqueeze(2) # for those words without labels ([0,0,1,1,1]), don't calculate the loss
# loss_trace = (torch.abs(trace_outputs[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# # for show-control-tell 2 layer with gate prediction
# show_caption_outputs, show_gate_outputs = self.model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1],
# att_masks, show_trace_masks, show_gate_labels=None, task='show')
# loss_show_caption = self.crit_caption(show_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# loss_show_gate = self.show_gate_crit(show_gate_outputs.reshape(-1, show_gate_outputs.shape[-1]),
# show_gate_labels[..., 1:].reshape(-1))
# # for show control tell 1 layer, without gate prediction
show_caption_outputs = self.model(fc_feats, att_feats, show_trace_feats, box_feats,
show_labels[..., :-1],
att_masks, show_trace_masks,
task='show')
loss_show_caption = self.crit_caption(show_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# # # for cycle trace and caption
# # trace_outputs_both = trace_outputs_both.detach()
# # caption_outputs_cycle = self.model(fc_feats, att_feats, trace_outputs_both, box_feats, labels[..., :-1],
# # att_masks, trace_masks, task='caption')
#
# caption_outputs_cycle_1 = torch.exp(caption_outputs) # get the logits before log (only after softmax)
# trace_outputs_cycle_1 = self.model(fc_feats, att_feats, trace_feats, box_feats, caption_outputs_cycle_1,
# att_masks, trace_masks, task='cycle_trace')
# loss_cycle_trace = (torch.abs(trace_outputs_cycle_1[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# trace_outputs_cycle_2 = trace_outputs
# caption_outputs_cycle_2 = self.model(fc_feats, att_feats, trace_outputs_cycle_2, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# loss_cycle_caption = self.crit_caption(caption_outputs_cycle_2, labels[..., 1:], masks[..., 1:])
# sum the loss of caption and trace generation
loss = loss_show_caption # loss_caption + loss_trace + loss_both # + (loss_cycle_caption + loss_cycle_trace) * 0.5 + loss_caption + loss_trace
# for trace generation - classification
# model_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
# model_outputs = F.log_softmax(model_outputs, dim=-1)
# model_outputs = model_outputs.view(-1, model_outputs.shape[2])
# trace_class_label = trace_feats[:,:,5] - 1
# trace_class_label = trace_class_label.view(-1).long()
# loss = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1)
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_show_control_tell.py
|
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
from ..utils.local_optimal_transport import local_OT
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
        # language-model criterion (needed by the struc_flag branch below)
        if opt.label_smoothing > 0:
            self.crit = losses.LabelSmoothing(smoothing=opt.label_smoothing)
        else:
            self.crit = losses.LanguageModelCriterion()
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
# regression loss for trace generation
self.crit_trace = torch.nn.L1Loss()
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks, gts, gt_indices,
sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
lm_loss = self.crit(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
# for caption generation
# loss = self.crit(self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks), labels[..., 1:], masks[..., 1:])
# for trace generation - regression
# outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
# loss_mask = ((trace_masks!=0) * (trace_feats[:,:,4]!=1)).unsqueeze(2) # for those words without labels ([0,0,1,1,1]), don't calculate the loss
# loss = (torch.abs(outputs[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
# construct the localized optimal transport
# D = torch.abs(outputs[:,:,:4].unsqueeze(2) - trace_feats[:,:,:4].unsqueeze(1)).mean(dim=-1)
# T = local_OT(D).to(outputs.device)
# loss = (torch.abs(torch.matmul(outputs[:, :, :4].transpose(1,2), T).transpose(1,2) -
# trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)
# for trace generation - classification
trace_class_label = trace_feats[:, :, 0] * (trace_feats[:, :, 5] != 1).float() - 1
trace_class_label = trace_class_label.view(-1).long()
model_outputs = self.model(fc_feats, att_feats, trace_feats[:,:,1:], box_feats, labels[..., :-1], att_masks, trace_masks)
model_outputs = F.log_softmax(model_outputs, dim=-1)
model_outputs = model_outputs.view(-1, model_outputs.shape[2])
loss = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1)
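            # Added note: the integer class label stored in trace_feats[..., 0]
            # is shifted to be 0-based, positions flagged at index 5 are mapped
            # to -1, and F.nll_loss(..., ignore_index=-1) skips those positions.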
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_trace_generation.py
|
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
import numpy as np
import random
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit_caption = losses.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit_caption = losses.LanguageModelCriterion()
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
# regression loss for trace generation
self.crit_trace = torch.nn.L1Loss()
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks, gts, gt_indices,
sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
                lm_loss = self.crit_caption(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
if self.opt.task == 'pred_both':
# train generating both caption and trace
caption_outputs_both, trace_outputs_both = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
att_masks, trace_masks, task='both')
loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
loss_both_trace = (torch.abs(trace_outputs_both[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
loss_mask.sum() * 4)
loss_both_caption = self.crit_caption(caption_outputs_both, labels[..., 1:], masks[..., 1:])
loss_both = loss_both_caption + loss_both_trace # for baseline training
if self.opt.task in ['caption', 'c_joint_t']:
# for caption generation
caption_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
att_masks, trace_masks, task='caption')
loss_caption = self.crit_caption(caption_outputs, labels[..., 1:], masks[..., 1:])
if self.opt.task in ['trace', 'c_joint_t']:
# for trace generation - regression
trace_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
att_masks, trace_masks, task='trace')
loss_mask = ((trace_masks!=0) * (trace_feats[:,:,4]!=1)).unsqueeze(2) # for those words without labels ([0,0,1,1,1]), don't calculate the loss
loss_trace = (torch.abs(trace_outputs[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
# # for cycle trace and caption
# trace_outputs_both = trace_outputs_both.detach()
# caption_outputs_cycle = self.model(fc_feats, att_feats, trace_outputs_both, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# caption_outputs_cycle_1 = torch.exp(caption_outputs) # get the logits before log (only after softmax)
# trace_outputs_cycle_1 = self.model(fc_feats, att_feats, trace_feats, box_feats, caption_outputs_cycle_1,
# att_masks, trace_masks, task='cycle_trace')
# loss_cycle_trace = (torch.abs(trace_outputs_cycle_1[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# trace_outputs_cycle_2 = trace_outputs
# caption_outputs_cycle_2 = self.model(fc_feats, att_feats, trace_outputs_cycle_2, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# loss_cycle_caption = self.crit_caption(caption_outputs_cycle_2, labels[..., 1:], masks[..., 1:])
################ random permute cycle loss ###################
### random permute trace within its segments
# permute_trace_list = []
# for i in range(trace_feats.shape[0]):
# tmp_gt_length = trace_masks[i].sum().long().item()
# tmp_trace = trace_feats[i, :tmp_gt_length]
# segment_list = []
# tmp_const = np.ceil(tmp_gt_length / 5).astype(int)
# for j in range(5):
# segment_list.append(tmp_trace[j * tmp_const: (j + 1) * tmp_const])
# random.shuffle(segment_list)
# tmp_permute_trace = torch.cat(segment_list, 0)
# if tmp_permute_trace.shape[0] < trace_masks.shape[1]:
# tmp_permute_trace = torch.cat([tmp_permute_trace,
# torch.zeros([trace_masks.shape[1]-tmp_permute_trace.shape[0], tmp_permute_trace.shape[1]]).to(trace_masks.device)])
# permute_trace_list.append(tmp_permute_trace)
# permute_trace_feats = torch.stack(permute_trace_list, 0)
#
if self.opt.task == 'c_joint_t':
#### random exchange trace within batch
random_idx = np.arange(trace_feats.shape[0])
np.random.shuffle(random_idx)
rnd_trace_feats = trace_feats[random_idx]
# construct the loss
rnd_caption_outputs = self.model(fc_feats, att_feats, rnd_trace_feats, box_feats, labels[..., :-1],
att_masks, trace_masks, task='caption')
caption_outputs_cycle_1 = torch.exp(rnd_caption_outputs)
## caption_outputs_cycle_1 = torch.exp(caption_outputs) # get the logits before log (only after softmax)
trace_outputs_cycle_1 = self.model(fc_feats, att_feats, trace_feats, box_feats, caption_outputs_cycle_1,
att_masks, trace_masks, task='cycle_trace')
loss_cycle_trace = (torch.abs(
trace_outputs_cycle_1[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (loss_mask.sum() * 4)
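                # Added note on the cycle term: captions are predicted from
                # traces randomly exchanged within the batch, the resulting
                # soft caption distribution is fed back through the
                # 'cycle_trace' task, and the reconstructed boxes are regressed
                # against the ground-truth trace with the same masked L1 loss
                # used for loss_trace above.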
if self.opt.task == 'pred_both':
loss = loss_both
elif self.opt.task == 'caption':
loss = loss_caption
            elif self.opt.task == 'trace':
loss = loss_trace
elif self.opt.task == 'c_joint_t':
loss = loss_trace + 0.3 * (loss_caption) + 0.1 * (loss_cycle_trace)
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_joint.py
|
import torch
import torch.nn.functional as F
from . import losses
from ..utils.rewards import init_scorer, get_self_critical_reward
class LossWrapper(torch.nn.Module):
def __init__(self, model, opt):
super(LossWrapper, self).__init__()
self.opt = opt
self.model = model
if opt.label_smoothing > 0:
self.crit_caption = losses.LabelSmoothing(smoothing=opt.label_smoothing)
else:
self.crit_caption = losses.LanguageModelCriterion()
self.rl_crit = losses.RewardCriterion()
self.struc_crit = losses.StructureLosses(opt)
self.show_gate_crit = torch.nn.CrossEntropyLoss()
# regression loss for trace generation
self.crit_trace = torch.nn.L1Loss()
def forward(self, fc_feats, att_feats, trace_feats, box_feats, labels, masks, att_masks, trace_masks,
show_labels, show_trace_feats, show_trace_masks, show_masks, show_gate_labels,
gts, gt_indices, sc_flag, struc_flag):
opt = self.opt
out = {}
if struc_flag:
if opt.structure_loss_weight < 1:
                lm_loss = self.crit_caption(self.model(fc_feats, att_feats, labels[..., :-1], att_masks), labels[..., 1:], masks[..., 1:])
else:
lm_loss = torch.tensor(0).type_as(fc_feats)
if opt.structure_loss_weight > 0:
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'output_logsoftmax': opt.struc_use_logsoftmax or opt.structure_loss_type == 'softmax_margin'\
or not 'margin' in opt.structure_loss_type,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
struc_loss = self.struc_crit(sample_logprobs, gen_result, gts)
else:
struc_loss = {'loss': torch.tensor(0).type_as(fc_feats),
'reward': torch.tensor(0).type_as(fc_feats)}
loss = (1-opt.structure_loss_weight) * lm_loss + opt.structure_loss_weight * struc_loss['loss']
out['lm_loss'] = lm_loss
out['struc_loss'] = struc_loss['loss']
out['reward'] = struc_loss['reward']
elif not sc_flag:
# train generating both caption and trace
# caption_outputs_both, trace_outputs_both = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='both')
# loss_mask = ((trace_masks != 0) * (trace_feats[:, :, 4] != 1)).unsqueeze(2)
# loss_both_trace = (torch.abs(trace_outputs_both[:, :, :4] - trace_feats[:, :, :4]) * loss_mask).sum() / (
# loss_mask.sum() * 4)
# loss_both_caption = self.crit_caption(caption_outputs_both, labels[..., 1:], masks[..., 1:])
# loss_both = loss_both_caption + loss_both_trace
# # # #
# # # # for caption generation
# # caption_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# # att_masks, trace_masks, task='caption')
# # loss_caption = self.crit_caption(caption_outputs, labels[..., 1:], masks[..., 1:])
# #
# # # for trace generation - regression
# trace_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='trace')[:, :-1]
# loss_mask = ((trace_masks!=0) * (trace_feats[:,:,4]!=1)).unsqueeze(2) # for those words without labels ([0,0,1,1,1]), don't calculate the loss
# loss_trace = (torch.abs(trace_outputs[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# for coco-caption training
### inference to get with coco trace
with torch.no_grad():
tmp_trace_feats = show_trace_feats[:, :1]
for i in range(show_labels.shape[2]-2):
# for regression
tmp_trace_feats_input = torch.cat(
[tmp_trace_feats, torch.zeros(tmp_trace_feats.shape[0], 1, tmp_trace_feats.shape[2]).to(tmp_trace_feats.device)], 1)
_, curr_out = self.model(fc_feats, att_feats, tmp_trace_feats_input, box_feats,
show_labels[..., :-1].squeeze(1),
att_masks, show_masks.squeeze(1)[:, :tmp_trace_feats_input.shape[1]], task='both')
curr_out = curr_out[:, i]
curr_out[:, 4] = (curr_out[:, 2] - curr_out[:, 0]) * (curr_out[:, 3] - curr_out[:, 1])
if i == 0:
tmp_trace_feats = curr_out.unsqueeze(1)
else:
tmp_trace_feats = torch.cat([tmp_trace_feats, curr_out.unsqueeze(1)], 1)
coco_trace_outputs = tmp_trace_feats.detach()
coco_caption_outputs, coco_trace_outputs_both = self.model(fc_feats, att_feats, coco_trace_outputs, box_feats,
show_labels[..., :-1].squeeze(1),
att_masks, show_masks.squeeze(1)[:, :coco_trace_outputs.shape[1]], task='both')
loss_coco_caption = self.crit_caption(coco_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# # for coco-caption-baseline
# baseline_caption_outputs = self.model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1],
# att_masks, show_masks, task='caption')
# loss_coco_caption_baseline = self.crit_caption(baseline_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# # # for show-control-tell
# show_caption_outputs, show_gate_outputs = self.model(fc_feats, att_feats, show_trace_feats, box_feats, show_labels[..., :-1],
# att_masks, show_trace_masks, show_gate_labels=show_gate_labels, task='show')
# loss_show_caption = self.crit_caption(show_caption_outputs, show_labels[..., 1:], show_masks[..., 1:])
# loss_show_gate = self.show_gate_crit(show_gate_outputs.reshape(-1, show_gate_outputs.shape[-1]),
# show_gate_labels[..., 1:].reshape(-1))
# # # for cycle trace and caption
# # trace_outputs_both = trace_outputs_both.detach()
# # caption_outputs_cycle = self.model(fc_feats, att_feats, trace_outputs_both, box_feats, labels[..., :-1],
# # att_masks, trace_masks, task='caption')
#
# caption_outputs_cycle_1 = torch.exp(caption_outputs) # get the logits before log (only after softmax)
# trace_outputs_cycle_1 = self.model(fc_feats, att_feats, trace_feats, box_feats, caption_outputs_cycle_1,
# att_masks, trace_masks, task='cycle_trace')
# loss_cycle_trace = (torch.abs(trace_outputs_cycle_1[:,:,:4] - trace_feats[:,:,:4]) * loss_mask).sum() / (loss_mask.sum() * 4)
#
# trace_outputs_cycle_2 = trace_outputs
# caption_outputs_cycle_2 = self.model(fc_feats, att_feats, trace_outputs_cycle_2, box_feats, labels[..., :-1],
# att_masks, trace_masks, task='caption')
# loss_cycle_caption = self.crit_caption(caption_outputs_cycle_2, labels[..., 1:], masks[..., 1:])
# sum the loss of caption and trace generation
loss = loss_coco_caption #loss_both + loss_trace #+ loss_caption # loss_coco_caption # loss_caption + loss_trace + loss_both # + (loss_cycle_caption + loss_cycle_trace) * 0.5 + loss_caption + loss_trace
# for trace generation - classification
# model_outputs = self.model(fc_feats, att_feats, trace_feats, box_feats, labels[..., :-1], att_masks, trace_masks)
# model_outputs = F.log_softmax(model_outputs, dim=-1)
# model_outputs = model_outputs.view(-1, model_outputs.shape[2])
# trace_class_label = trace_feats[:,:,5] - 1
# trace_class_label = trace_class_label.view(-1).long()
# loss = F.nll_loss(model_outputs, trace_class_label, ignore_index=-1)
else:
self.model.eval()
with torch.no_grad():
greedy_res, _ = self.model(fc_feats, att_feats, att_masks,
mode='sample',
opt={'sample_method': opt.sc_sample_method,
'beam_size': opt.sc_beam_size})
self.model.train()
gen_result, sample_logprobs = self.model(fc_feats, att_feats, att_masks,
opt={'sample_method':opt.train_sample_method,
'beam_size':opt.train_beam_size,
'sample_n': opt.train_sample_n},
mode='sample')
gts = [gts[_] for _ in gt_indices.tolist()]
reward = get_self_critical_reward(greedy_res, gts, gen_result, self.opt)
reward = torch.from_numpy(reward).to(sample_logprobs)
loss = self.rl_crit(sample_logprobs, gen_result.data, reward)
out['reward'] = reward[:,0].mean()
out['loss'] = loss
return out
|
connect-caption-and-trace-main
|
captioning/modules/loss_wrapper_for_coco_caption.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..utils.rewards import get_scores, get_self_cider_scores
class RewardCriterion(nn.Module):
def __init__(self):
super(RewardCriterion, self).__init__()
def forward(self, input, seq, reward):
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
input = input.reshape(-1)
reward = reward.reshape(-1)
mask = (seq>0).to(input)
mask = torch.cat([mask.new(mask.size(0), 1).fill_(1), mask[:, :-1]], 1).reshape(-1)
output = - input * reward * mask
output = torch.sum(output) / torch.sum(mask)
return output
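# --- Added illustration (not part of the original file) ----------------------
# RewardCriterion masks out padding but shifts the mask right by one so that
# the first generated token and the end-of-sequence position are still
# rewarded. A minimal sketch, purely to exercise the shapes, assuming a toy
# vocabulary of size 1 and an all-padding sequence:
def _reward_criterion_sketch():
    crit = RewardCriterion()
    logprobs = torch.zeros(2, 3, 1)              # (batch, time, vocab) log-probs
    seq = torch.zeros(2, 3, dtype=torch.long)    # only step 0 ends up counted
    reward = torch.ones(2, 3)
    return crit(logprobs, seq, reward)           # scalar loss tensor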
class StructureLosses(nn.Module):
"""
This loss is inspired by Classical Structured Prediction Losses for Sequence to Sequence Learning (Edunov et al., 2018).
"""
def __init__(self, opt):
super(StructureLosses, self).__init__()
self.opt = opt
self.loss_type = opt.structure_loss_type
def forward(self, input, seq, data_gts):
"""
Input is either logits or log softmax
"""
out = {}
batch_size = input.size(0)# batch_size = sample_size * seq_per_img
seq_per_img = batch_size // len(data_gts)
assert seq_per_img == self.opt.train_sample_n, seq_per_img
mask = (seq>0).to(input)
mask = torch.cat([mask.new_full((mask.size(0), 1), 1), mask[:, :-1]], 1)
scores = get_scores(data_gts, seq, self.opt)
scores = torch.from_numpy(scores).type_as(input).view(-1, seq_per_img)
out['reward'] = scores #.mean()
if self.opt.entropy_reward_weight > 0:
entropy = - (F.softmax(input, dim=2) * F.log_softmax(input, dim=2)).sum(2).data
entropy = (entropy * mask).sum(1) / mask.sum(1)
print('entropy', entropy.mean().item())
scores = scores + self.opt.entropy_reward_weight * entropy.view(-1, seq_per_img)
# rescale cost to [0,1]
costs = - scores
if self.loss_type == 'risk' or self.loss_type == 'softmax_margin':
costs = costs - costs.min(1, keepdim=True)[0]
costs = costs / costs.max(1, keepdim=True)[0]
        # In principle only the 'risk' loss needs this rescaling;
        # the margin-based losses should be fine without it.
# Gather input: BxTxD -> BxT
input = input.gather(2, seq.unsqueeze(2)).squeeze(2)
if self.loss_type == 'seqnll':
# input is logsoftmax
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
target = costs.min(1)[1]
output = F.cross_entropy(input, target)
elif self.loss_type == 'risk':
# input is logsoftmax
input = input * mask
input = input.sum(1)
input = input.view(-1, seq_per_img)
            output = (F.softmax(input.exp(), dim=1) * costs).sum(1).mean()
# test
# avg_scores = input
# probs = F.softmax(avg_scores.exp_())
# loss = (probs * costs.type_as(probs)).sum() / input.size(0)
# print(output.item(), loss.item())
elif self.loss_type == 'max_margin':
# input is logits
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
_, __ = costs.min(1, keepdim=True)
costs_star = _
input_star = input.gather(1, __)
output = F.relu(costs - costs_star - input_star + input).max(1)[0] / 2
output = output.mean()
# sanity test
# avg_scores = input + costs
# scores_with_high_target = avg_scores.clone()
# scores_with_high_target.scatter_(1, costs.min(1)[1].view(-1, 1), 1e10)
# target_and_offender_index = scores_with_high_target.sort(1, True)[1][:, 0:2]
# avg_scores = avg_scores.gather(1, target_and_offender_index)
# target_index = avg_scores.new_zeros(avg_scores.size(0), dtype=torch.long)
# loss = F.multi_margin_loss(avg_scores, target_index, size_average=True, margin=0)
# print(loss.item() * 2, output.item())
elif self.loss_type == 'multi_margin':
# input is logits
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
_, __ = costs.min(1, keepdim=True)
costs_star = _
input_star = input.gather(1, __)
output = F.relu(costs - costs_star - input_star + input)
output = output.mean()
# sanity test
# avg_scores = input + costs
# loss = F.multi_margin_loss(avg_scores, costs.min(1)[1], margin=0)
# print(output, loss)
elif self.loss_type == 'softmax_margin':
# input is logsoftmax
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
input = input + costs
target = costs.min(1)[1]
output = F.cross_entropy(input, target)
elif self.loss_type == 'real_softmax_margin':
# input is logits
            # This is what was originally defined in Kevin's paper
# The result should be equivalent to softmax_margin
input = input * mask
input = input.sum(1) / mask.sum(1)
input = input.view(-1, seq_per_img)
input = input + costs
target = costs.min(1)[1]
output = F.cross_entropy(input, target)
elif self.loss_type == 'new_self_critical':
"""
A different self critical
Self critical uses greedy decoding score as baseline;
This setting uses the average score of the rest samples as baseline
(suppose c1...cn n samples, reward1 = score1 - 1/(n-1)(score2+..+scoren) )
"""
baseline = (scores.sum(1, keepdim=True) - scores) / (scores.shape[1] - 1)
scores = scores - baseline
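            # Added worked example: with n = 3 samples per image and scores
            # [1.0, 2.0, 3.0], the leave-one-out baselines are
            # [(2+3)/2, (1+3)/2, (1+2)/2] = [2.5, 2.0, 1.5], so the centred
            # rewards become [-1.5, 0.0, 1.5].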
# self cider used as reward to promote diversity (not working that much in this way)
if getattr(self.opt, 'self_cider_reward_weight', 0) > 0:
_scores = get_self_cider_scores(data_gts, seq, self.opt)
_scores = torch.from_numpy(_scores).type_as(scores).view(-1, 1)
_scores = _scores.expand_as(scores - 1)
scores += self.opt.self_cider_reward_weight * _scores
output = - input * mask * scores.view(-1, 1)
output = torch.sum(output) / torch.sum(mask)
out['loss'] = output
return out
class LanguageModelCriterion(nn.Module):
def __init__(self):
super(LanguageModelCriterion, self).__init__()
def forward(self, input, target, mask):
if target.ndim == 3:
target = target.reshape(-1, target.shape[2])
mask = mask.reshape(-1, mask.shape[2])
# truncate to the same size
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)].to(input)
output = -input.gather(2, target.unsqueeze(2)).squeeze(2) * mask
# Average over each token
output = torch.sum(output) / torch.sum(mask)
return output
class LabelSmoothing(nn.Module):
"Implement label smoothing."
def __init__(self, size=0, padding_idx=0, smoothing=0.0):
super(LabelSmoothing, self).__init__()
        self.criterion = nn.KLDivLoss(reduction='none')
# self.padding_idx = padding_idx
self.confidence = 1.0 - smoothing
self.smoothing = smoothing
# self.size = size
self.true_dist = None
def forward(self, input, target, mask):
if target.ndim == 3:
target = target.reshape(-1, target.shape[2])
mask = mask.reshape(-1, mask.shape[2])
# truncate to the same size
target = target[:, :input.size(1)]
mask = mask[:, :input.size(1)]
input = input.reshape(-1, input.size(-1))
target = target.reshape(-1)
mask = mask.reshape(-1).to(input)
# assert x.size(1) == self.size
self.size = input.size(1)
# true_dist = x.data.clone()
true_dist = input.data.clone()
# true_dist.fill_(self.smoothing / (self.size - 2))
true_dist.fill_(self.smoothing / (self.size - 1))
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
# true_dist[:, self.padding_idx] = 0
# mask = torch.nonzero(target.data == self.padding_idx)
# self.true_dist = true_dist
return (self.criterion(input, true_dist).sum(1) * mask).sum() / mask.sum()
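# --- Added illustration (not part of the original file) ----------------------
# The smoothed target row built in LabelSmoothing.forward puts
# smoothing / (V - 1) on every entry and 1 - smoothing on the gold index, so
# each row still sums to 1. A minimal sketch with a hypothetical tiny
# vocabulary:
def _label_smoothing_target_sketch(V=5, smoothing=0.1, gold=2):
    row = torch.full((V,), smoothing / (V - 1))
    row[gold] = 1.0 - smoothing
    return row                                    # e.g. [0.025, 0.025, 0.9, 0.025, 0.025]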
|
connect-caption-and-trace-main
|
captioning/modules/losses.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import lmdb
import os
import numpy as np
import numpy.random as npr
import random
import torch
import torch.utils.data as data
import multiprocessing
import six
class HybridLoader:
"""
    If db_path is a directory, use normal file loading.
    If it is an lmdb, load from lmdb.
    The loading method depends on the file extension.
    in_memory: if True, all features are cached in memory.
        For individual .npy/.npz files this is unnecessary because the OS
        file cache already does it for us; it is mainly useful for lmdb or h5.
        (Copied this idea from vilbert)
"""
def __init__(self, db_path, ext, in_memory=False):
self.db_path = db_path
self.ext = ext
if self.ext == '.npy':
self.loader = lambda x: np.load(six.BytesIO(x))
else:
self.loader = lambda x: np.load(six.BytesIO(x))['feat']
if db_path.endswith('.lmdb'):
self.db_type = 'lmdb'
env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False,
map_size=1099511627776 * 2,)
self.db_txn = env.begin(write=False)
elif db_path.endswith('.pth'): # Assume a key,value dictionary
self.db_type = 'pth'
self.feat_file = torch.load(db_path)
self.loader = lambda x: x
print('HybridLoader: ext is ignored')
elif db_path.endswith('h5'):
self.db_type = 'h5'
self.loader = lambda x: np.array(x).astype('float32')
else:
self.db_type = 'dir'
self.in_memory = in_memory
if self.in_memory:
self.features = {}
def __getstate__(self):
state = self.__dict__
if self.db_type == 'lmdb':
state["db_txn"] = None
return state
def __setstate__(self, state):
self.__dict__ = state
if self.db_type == 'lmdb':
env = lmdb.open(self.db_path, subdir=os.path.isdir(self.db_path),
readonly=True, lock=False,
readahead=False, meminit=False,
map_size=1099511627776 * 2,)
self.db_txn = env.begin(write=False)
def get(self, key):
if self.in_memory and key in self.features:
# We save f_input because we want to save the
# compressed bytes to save memory
f_input = self.features[key]
elif self.db_type == 'lmdb':
            byteflow = self.db_txn.get(key.encode())
            f_input = byteflow
elif self.db_type == 'pth':
f_input = self.feat_file[key]
elif self.db_type == 'h5':
f_input = h5py.File(self.db_path, 'r')[key]
else:
f_input = open(os.path.join(self.db_path, key + self.ext), 'rb').read()
if self.in_memory and key not in self.features:
self.features[key] = f_input
# load image
feat = self.loader(f_input)
return feat
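# Added usage sketch (hypothetical directory and image id):
#   att_loader = HybridLoader('data/cocobu_att', '.npz', in_memory=False)
#   att_feat = att_loader.get('391895')   # -> numpy array of region features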
class CaptionDataset(data.Dataset):
def get_vocab_size(self):
return self.vocab_size
def get_vocab(self):
return self.ix_to_word
def get_seq_length(self):
return self.seq_length
def __init__(self, opt):
self.opt = opt
self.seq_per_img = opt.seq_per_img
# feature related options
self.use_fc = getattr(opt, 'use_fc', True)
self.use_att = getattr(opt, 'use_att', True)
self.use_box = getattr(opt, 'use_box', 0)
self.norm_att_feat = getattr(opt, 'norm_att_feat', 0)
self.norm_box_feat = getattr(opt, 'norm_box_feat', 0)
# load the json file which contains additional information about the dataset
print('DataLoader loading json file: ', opt.input_json)
self.info = json.load(open(self.opt.input_json))
if 'ix_to_word' in self.info:
self.ix_to_word = self.info['ix_to_word']
self.vocab_size = len(self.ix_to_word)
print('vocab size is ', self.vocab_size)
# open the hdf5 file
print('DataLoader loading h5 file: ', opt.input_fc_dir, opt.input_att_dir, opt.input_box_dir, opt.input_label_h5)
"""
Setting input_label_h5 to none is used when only doing generation.
For example, when you need to test on coco test set.
"""
if self.opt.input_label_h5 != 'none':
self.h5_label_file = h5py.File(self.opt.input_label_h5, 'r', driver='core')
# load in the sequence data
seq_size = self.h5_label_file['labels'].shape
self.label = self.h5_label_file['labels'][:]
self.seq_length = seq_size[1]
print('max sequence length in data is', self.seq_length)
# load the pointers in full to RAM (should be small enough)
self.label_start_ix = self.h5_label_file['label_start_ix'][:]
self.label_end_ix = self.h5_label_file['label_end_ix'][:]
else:
self.seq_length = 1
self.data_in_memory = getattr(opt, 'data_in_memory', False)
self.fc_loader = HybridLoader(self.opt.input_fc_dir, '.npy', in_memory=self.data_in_memory)
self.att_loader = HybridLoader(self.opt.input_att_dir, '.npz', in_memory=self.data_in_memory)
self.box_loader = HybridLoader(self.opt.input_box_dir, '.npy', in_memory=self.data_in_memory)
self.num_images = len(self.info['images']) # self.label_start_ix.shape[0]
print('read %d image features' %(self.num_images))
# separate out indexes for each of the provided splits
self.split_ix = {'train': [], 'val': [], 'test': []}
for ix in range(len(self.info['images'])):
img = self.info['images'][ix]
if not 'split' in img:
self.split_ix['train'].append(ix)
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif img['split'] == 'train':
self.split_ix['train'].append(ix)
elif img['split'] == 'val':
self.split_ix['val'].append(ix)
elif img['split'] == 'test':
self.split_ix['test'].append(ix)
elif opt.train_only == 0: # restval
self.split_ix['train'].append(ix)
print('assigned %d images to split train' %len(self.split_ix['train']))
print('assigned %d images to split val' %len(self.split_ix['val']))
print('assigned %d images to split test' %len(self.split_ix['test']))
def get_captions(self, ix, seq_per_img):
# fetch the sequence labels
ix1 = self.label_start_ix[ix] - 1 #label_start_ix starts from 1
ix2 = self.label_end_ix[ix] - 1
ncap = ix2 - ix1 + 1 # number of captions available for this image
assert ncap > 0, 'an image does not have any label. this can be handled but right now isn\'t'
if ncap < seq_per_img:
# we need to subsample (with replacement)
seq = np.zeros([seq_per_img, self.seq_length], dtype = 'int')
for q in range(seq_per_img):
ixl = random.randint(ix1,ix2)
seq[q, :] = self.label[ixl, :self.seq_length]
else:
ixl = random.randint(ix1, ix2 - seq_per_img + 1)
seq = self.label[ixl: ixl + seq_per_img, :self.seq_length]
return seq
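    # Added note: when an image has fewer than seq_per_img captions, captions
    # are re-sampled with replacement so every image still yields a label
    # block of shape (seq_per_img, seq_length).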
def collate_func(self, batch):
seq_per_img = self.seq_per_img
fc_batch = []
att_batch = []
label_batch = []
wrapped = False
infos = []
gts = []
for sample in batch:
# fetch image
tmp_fc, tmp_att, tmp_seq, \
ix = sample
fc_batch.append(tmp_fc)
att_batch.append(tmp_att)
tmp_label = np.zeros([seq_per_img, self.seq_length + 2], dtype = 'int')
if hasattr(self, 'h5_label_file'):
# if there is ground truth
tmp_label[:, 1 : self.seq_length + 1] = tmp_seq
label_batch.append(tmp_label)
# Used for reward evaluation
if hasattr(self, 'h5_label_file'):
# if there is ground truth
gts.append(self.label[self.label_start_ix[ix] - 1: self.label_end_ix[ix]])
else:
gts.append([])
# record associated info as well
info_dict = {}
info_dict['ix'] = ix
info_dict['id'] = self.info['images'][ix]['id']
info_dict['file_path'] = self.info['images'][ix].get('file_path', '')
infos.append(info_dict)
# #sort by att_feat length
# fc_batch, att_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, np.vsplit(label_batch, batch_size), gts, infos), key=lambda x: len(x[1]), reverse=True))
fc_batch, att_batch, label_batch, gts, infos = \
zip(*sorted(zip(fc_batch, att_batch, label_batch, gts, infos), key=lambda x: 0, reverse=True))
data = {}
data['fc_feats'] = np.stack(fc_batch)
# merge att_feats
max_att_len = max([_.shape[0] for _ in att_batch])
data['att_feats'] = np.zeros([len(att_batch), max_att_len, att_batch[0].shape[1]], dtype = 'float32')
for i in range(len(att_batch)):
data['att_feats'][i, :att_batch[i].shape[0]] = att_batch[i]
data['att_masks'] = np.zeros(data['att_feats'].shape[:2], dtype='float32')
for i in range(len(att_batch)):
data['att_masks'][i, :att_batch[i].shape[0]] = 1
# set att_masks to None if attention features have same length
if data['att_masks'].sum() == data['att_masks'].size:
data['att_masks'] = None
data['labels'] = np.vstack(label_batch)
# generate mask
nonzeros = np.array(list(map(lambda x: (x != 0).sum()+2, data['labels'])))
mask_batch = np.zeros([data['labels'].shape[0], self.seq_length + 2], dtype = 'float32')
for ix, row in enumerate(mask_batch):
row[:nonzeros[ix]] = 1
data['masks'] = mask_batch
data['labels'] = data['labels'].reshape(len(batch), seq_per_img, -1)
data['masks'] = data['masks'].reshape(len(batch), seq_per_img, -1)
data['gts'] = gts # all ground truth captions of each images
data['infos'] = infos
data = {k:torch.from_numpy(v) if type(v) is np.ndarray else v for k,v in data.items()} # Turn all ndarray to torch tensor
return data
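    # Added note: in collate_func above, the "+2" in the nonzeros computation
    # reserves the BOS slot (column 0) and one EOS slot after the last word,
    # so the mask covers <bos> w1 ... wn <eos>.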
def __getitem__(self, ix):
"""This function returns a tuple that is further passed to collate_fn
"""
if self.use_att:
att_feat = self.att_loader.get(str(self.info['images'][ix]['id']))
# Reshape to K x C
att_feat = att_feat.reshape(-1, att_feat.shape[-1])
if self.norm_att_feat:
att_feat = att_feat / np.linalg.norm(att_feat, 2, 1, keepdims=True)
if self.use_box:
box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
            # divided by image width and height
x1,y1,x2,y2 = np.hsplit(box_feat, 4)
h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
if self.norm_box_feat:
box_feat = box_feat / np.linalg.norm(box_feat, 2, 1, keepdims=True)
att_feat = np.hstack([att_feat, box_feat])
# sort the features by the size of boxes
att_feat = np.stack(sorted(att_feat, key=lambda x:x[-1], reverse=True))
else:
att_feat = np.zeros((0,0), dtype='float32')
if self.use_fc:
try:
fc_feat = self.fc_loader.get(str(self.info['images'][ix]['id']))
except:
# Use average of attention when there is no fc provided (For bottomup feature)
fc_feat = att_feat.mean(0)
else:
fc_feat = np.zeros((0), dtype='float32')
if hasattr(self, 'h5_label_file'):
seq = self.get_captions(ix, self.seq_per_img)
else:
seq = None
return (fc_feat,
att_feat, seq,
ix)
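    # Added note: when use_box is enabled, each region feature is extended
    # with (x1/w, y1/h, x2/w, y2/h, relative_area) and the regions are sorted
    # by that relative area in descending order before being returned.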
def __len__(self):
return len(self.info['images'])
if __name__ == '__main__':
from captioning.utils.misc import pickle_load
x = pickle_load(open('log_trans/infos_trans.pkl', 'rb'))
    dataset = CaptionDataset(x['opt'])
ds = torch.utils.data.Subset(dataset, dataset.split_ix['train'])
import pudb;pu.db
|
connect-caption-and-trace-main
|
captioning/data/pth_loader.py
|
connect-caption-and-trace-main
|
captioning/data/__init__.py
|
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import os
import numpy as np
import random
import torch
import skimage
import skimage.io
import scipy.misc
from torchvision import transforms as trn
preprocess = trn.Compose([
#trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
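# Added note: trn.ToTensor() is left commented out above because the image is
# converted to a [0, 1] float tensor manually in DataLoaderRaw.get_batch()
# before normalization.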
from ..utils.resnet_utils import myResnet
from ..utils import resnet
class DataLoaderRaw():
def __init__(self, opt):
self.opt = opt
self.coco_json = opt.get('coco_json', '')
self.folder_path = opt.get('folder_path', '')
self.batch_size = opt.get('batch_size', 1)
self.seq_per_img = 1
# Load resnet
self.cnn_model = opt.get('cnn_model', 'resnet101')
self.my_resnet = getattr(resnet, self.cnn_model)()
self.my_resnet.load_state_dict(torch.load('./data/imagenet_weights/'+self.cnn_model+'.pth'))
self.my_resnet = myResnet(self.my_resnet)
self.my_resnet.cuda()
self.my_resnet.eval()
# load the json file which contains additional information about the dataset
print('DataLoaderRaw loading images from folder: ', self.folder_path)
self.files = []
self.ids = []
print(len(self.coco_json))
if len(self.coco_json) > 0:
print('reading from ' + opt.coco_json)
# read in filenames from the coco-style json file
self.coco_annotation = json.load(open(self.coco_json))
for k,v in enumerate(self.coco_annotation['images']):
fullpath = os.path.join(self.folder_path, v['file_name'])
self.files.append(fullpath)
self.ids.append(v['id'])
else:
# read in all the filenames from the folder
print('listing all images in directory ' + self.folder_path)
def isImage(f):
supportedExt = ['.jpg','.JPG','.jpeg','.JPEG','.png','.PNG','.ppm','.PPM']
for ext in supportedExt:
start_idx = f.rfind(ext)
if start_idx >= 0 and start_idx + len(ext) == len(f):
return True
return False
n = 1
for root, dirs, files in os.walk(self.folder_path, topdown=False):
for file in files:
fullpath = os.path.join(self.folder_path, file)
if isImage(fullpath):
self.files.append(fullpath)
self.ids.append(str(n)) # just order them sequentially
n = n + 1
self.N = len(self.files)
print('DataLoaderRaw found ', self.N, ' images')
self.iterator = 0
# Nasty
self.dataset = self # to fix the bug in eval
def get_batch(self, split, batch_size=None):
batch_size = batch_size or self.batch_size
# pick an index of the datapoint to load next
fc_batch = np.ndarray((batch_size, 2048), dtype = 'float32')
att_batch = np.ndarray((batch_size, 14, 14, 2048), dtype = 'float32')
max_index = self.N
wrapped = False
infos = []
for i in range(batch_size):
ri = self.iterator
ri_next = ri + 1
if ri_next >= max_index:
ri_next = 0
wrapped = True
# wrap back around
self.iterator = ri_next
img = skimage.io.imread(self.files[ri])
if len(img.shape) == 2:
img = img[:,:,np.newaxis]
img = np.concatenate((img, img, img), axis=2)
img = img[:,:,:3].astype('float32')/255.0
img = torch.from_numpy(img.transpose([2,0,1])).cuda()
img = preprocess(img)
with torch.no_grad():
tmp_fc, tmp_att = self.my_resnet(img)
fc_batch[i] = tmp_fc.data.cpu().float().numpy()
att_batch[i] = tmp_att.data.cpu().float().numpy()
info_struct = {}
info_struct['id'] = self.ids[ri]
info_struct['file_path'] = self.files[ri]
infos.append(info_struct)
data = {}
data['fc_feats'] = fc_batch
data['att_feats'] = att_batch.reshape(batch_size, -1, 2048)
data['labels'] = np.zeros([batch_size, 0])
data['masks'] = None
data['att_masks'] = None
data['bounds'] = {'it_pos_now': self.iterator, 'it_max': self.N, 'wrapped': wrapped}
data['infos'] = infos
data = {k:torch.from_numpy(v) if type(v) is np.ndarray else v for k,v in data.items()} # Turn all ndarray to torch tensor
return data
def reset_iterator(self, split):
self.iterator = 0
def get_vocab_size(self):
return len(self.ix_to_word)
def get_vocab(self):
return self.ix_to_word
|
connect-caption-and-trace-main
|
captioning/data/dataloaderraw.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import lmdb
import os
import numpy as np
import numpy.random as npr
import random
import torch
import torch.utils.data as data
import multiprocessing
import six
class HybridLoader:
"""
    If db_path is a directory, use normal file loading.
    If it is an lmdb, load from lmdb.
    The loading method depends on the file extension.
    in_memory: if True, all features are cached in memory.
        For individual .npy/.npz files this is unnecessary because the OS
        file cache already does it for us; it is mainly useful for lmdb or h5.
        (Copied this idea from vilbert)
"""
def __init__(self, db_path, ext, in_memory=False):
self.db_path = db_path
self.ext = ext
if self.ext == '.npy':
self.loader = lambda x: np.load(six.BytesIO(x))
else:
def load_npz(x):
x = np.load(six.BytesIO(x))
                return x['feat'] if 'feat' in x else x['arr_0']  # normally the key is 'feat'; some archives (e.g. cocotest_bu) were saved without a named key, so fall back to the default 'arr_0'.
self.loader = load_npz
if db_path.endswith('.lmdb'):
self.db_type = 'lmdb'
self.env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
elif db_path.endswith('.pth'): # Assume a key,value dictionary
self.db_type = 'pth'
self.feat_file = torch.load(db_path)
self.loader = lambda x: x
print('HybridLoader: ext is ignored')
elif db_path.endswith('h5'):
self.db_type = 'h5'
self.loader = lambda x: np.array(x).astype('float32')
else:
self.db_type = 'dir'
self.in_memory = in_memory
if self.in_memory:
self.features = {}
def get(self, key):
if self.in_memory and key in self.features:
# We save f_input because we want to save the
# compressed bytes to save memory
f_input = self.features[key]
elif self.db_type == 'lmdb':
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(key.encode())
f_input = byteflow
elif self.db_type == 'pth':
f_input = self.feat_file[key]
elif self.db_type == 'h5':
f_input = h5py.File(self.db_path, 'r')[key]
else:
f_input = open(os.path.join(self.db_path, key + self.ext), 'rb').read()
if self.in_memory and key not in self.features:
self.features[key] = f_input
# load image
feat = self.loader(f_input)
return feat
class Dataset(data.Dataset):
def get_vocab_size(self):
return self.vocab_size
def get_vocab(self):
return self.ix_to_word
def get_seq_length(self):
return self.seq_length
def __init__(self, opt):
self.opt = opt
self.seq_per_img = opt.seq_per_img
# feature related options
self.use_fc = getattr(opt, 'use_fc', True)
self.use_att = getattr(opt, 'use_att', True)
self.use_box = getattr(opt, 'use_box', 0)
self.use_trace = getattr(opt, 'use_trace', 0)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
self.norm_att_feat = getattr(opt, 'norm_att_feat', 0)
self.norm_box_feat = getattr(opt, 'norm_box_feat', 0)
self.dataset_choice = getattr(opt, 'dataset_choice', 'coco')
self.trace_max_length = getattr(opt, 'trace_max_length', 225)
# load the json file which contains additional information about the dataset
print('DataLoader loading json file: ', opt.input_json)
self.info = json.load(open(self.opt.input_json))
if 'ix_to_word' in self.info:
self.ix_to_word = self.info['ix_to_word']
self.vocab_size = len(self.ix_to_word)
print('vocab size is ', self.vocab_size)
# open the hdf5 file
print('DataLoader loading h5 file: ', opt.input_fc_dir, opt.input_att_dir, opt.input_box_dir, opt.input_label_h5)
"""
Setting input_label_h5 to none is used when only doing generation.
For example, when you need to test on coco test set.
"""
if self.opt.input_label_h5 != 'none':
self.h5_label_file = h5py.File(self.opt.input_label_h5, 'r', driver='core')
# load in the sequence data
seq_size = self.h5_label_file['labels'].shape
self.label = self.h5_label_file['labels'][:]
self.seq_length = seq_size[1]
print('max sequence length in data is', self.seq_length)
# load the pointers in full to RAM (should be small enough)
self.label_start_ix = self.h5_label_file['label_start_ix'][:]
self.label_end_ix = self.h5_label_file['label_end_ix'][:]
else:
self.seq_length = 1
self.data_in_memory = getattr(opt, 'data_in_memory', False)
self.fc_loader = HybridLoader(self.opt.input_fc_dir, '.npy', in_memory=self.data_in_memory)
self.att_loader = HybridLoader(self.opt.input_att_dir, '.npz', in_memory=self.data_in_memory)
self.box_loader = HybridLoader(self.opt.input_box_dir, '.npy', in_memory=self.data_in_memory)
self.trace_loader = HybridLoader(self.opt.input_trace_dir, '.npy', in_memory=self.data_in_memory)
self.trace_class_label_loader = HybridLoader(self.opt.input_trace_class_label_dir, '.npy', in_memory=self.data_in_memory)
self.trace_feat_loader = HybridLoader(self.opt.input_trace_feat_dir, '.npy', in_memory=self.data_in_memory)
self.dataset_choice = getattr(opt, 'dataset_choice', 'coco')
self.num_images = len(self.info['images']) # self.label_start_ix.shape[0]
print('read %d image features' %(self.num_images))
# separate out indexes for each of the provided splits
self.split_ix = {'train': [], 'val': [], 'test': []}
for ix in range(len(self.info['images'])):
img = self.info['images'][ix]
if not 'split' in img:
self.split_ix['train'].append(ix)
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif img['split'] == 'train':
self.split_ix['train'].append(ix)
elif img['split'] == 'val' and (self.dataset_choice in ['coco', 'ade20k']): #
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif img['split'] == 'test' and (self.dataset_choice in ['coco', 'ade20k']):
pass
elif img['split'] == 'val' and self.dataset_choice in ['flk30k', 'openimg']: #
pass
elif img['split'] == 'test' and self.dataset_choice in ['flk30k', 'openimg']: #
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif opt.train_only == 0: # restval
self.split_ix['train'].append(ix)
print('assigned %d images to split train' %len(self.split_ix['train']))
print('assigned %d images to split val' %len(self.split_ix['val']))
print('assigned %d images to split test' %len(self.split_ix['test']))
def get_captions(self, ix, seq_per_img):
# fetch the sequence labels
ix1 = self.label_start_ix[ix] - 1 #label_start_ix starts from 1
ix2 = self.label_end_ix[ix] - 1
ncap = ix2 - ix1 + 1 # number of captions available for this image
assert ncap > 0, 'an image does not have any label. this can be handled but right now isn\'t'
if ncap < seq_per_img:
# we need to subsample (with replacement)
seq = np.zeros([seq_per_img, self.seq_length], dtype = 'int')
for q in range(seq_per_img):
ixl = random.randint(ix1,ix2)
seq[q, :] = self.label[ixl, :self.seq_length]
else:
ixl = random.randint(ix1, ix2 - seq_per_img + 1)
seq = self.label[ixl: ixl + seq_per_img, :self.seq_length]
return seq
def collate_func(self, batch, split):
seq_per_img = self.seq_per_img
fc_batch = []
att_batch = []
label_batch = []
trace_batch = []
box_batch = []
wrapped = False
infos = []
gts = []
for sample in batch:
# fetch image
tmp_fc, tmp_att, tmp_trace, tmp_box, tmp_seq, \
ix, it_pos_now, tmp_wrapped = sample
if tmp_wrapped:
wrapped = True
fc_batch.append(tmp_fc)
att_batch.append(tmp_att)
trace_batch.append(tmp_trace)
box_batch.append(tmp_box)
tmp_label = np.zeros([seq_per_img, self.seq_length + 2], dtype = 'int')
if hasattr(self, 'h5_label_file'):
# if there is ground truth
tmp_label[:, 1 : self.seq_length + 1] = tmp_seq
label_batch.append(tmp_label)
# Used for reward evaluation
if hasattr(self, 'h5_label_file'):
# if there is ground truth
gts.append(self.label[self.label_start_ix[ix] - 1: self.label_end_ix[ix]])
else:
gts.append([])
# record associated info as well
info_dict = {}
info_dict['ix'] = ix
info_dict['id'] = self.info['images'][ix]['id']
info_dict['file_path'] = self.info['images'][ix].get('file_path', '')
infos.append(info_dict)
# #sort by att_feat length
# fc_batch, att_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, np.vsplit(label_batch, batch_size), gts, infos), key=lambda x: len(x[1]), reverse=True))
# commented for classification
# fc_batch, att_batch, trace_batch, box_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, trace_batch, box_batch, label_batch, gts, infos), key=lambda x: 0, reverse=True))
data = {}
data['fc_feats'] = np.stack(fc_batch)
# merge att_feats
max_att_len = max([_.shape[0] for _ in att_batch])
data['att_feats'] = np.zeros([len(att_batch), max_att_len, att_batch[0].shape[1]], dtype = 'float32')
data['box_feats'] = np.zeros([len(box_batch), max_att_len, box_batch[0].shape[1]], dtype='float32')
assert att_batch[0].shape[0] == box_batch[0].shape[0], 'box should have same shape[0] with att'
for i in range(len(att_batch)):
data['att_feats'][i, :att_batch[i].shape[0]] = att_batch[i]
data['box_feats'][i, :box_batch[i].shape[0]] = box_batch[i]
data['att_masks'] = np.zeros(data['att_feats'].shape[:2], dtype='float32')
for i in range(len(att_batch)):
data['att_masks'][i, :att_batch[i].shape[0]] = 1
# merge trace_feats
max_trace_len = max([_.shape[0] for _ in trace_batch])
data['trace_feats'] = np.zeros([len(trace_batch), max_trace_len, trace_batch[0].shape[1]], dtype='float32')
for i in range(len(trace_batch)):
data['trace_feats'][i, :trace_batch[i].shape[0]] = trace_batch[i]
data['trace_masks'] = np.zeros(data['trace_feats'].shape[:2], dtype='float32')
for i in range(len(trace_batch)):
data['trace_masks'][i, :trace_batch[i].shape[0]] = 1
data['labels'] = np.vstack(label_batch)
# generate mask
nonzeros = np.array(list(map(lambda x: (x != 0).sum()+2, data['labels'])))
mask_batch = np.zeros([data['labels'].shape[0], self.seq_length + 2], dtype = 'float32')
for ix, row in enumerate(mask_batch):
row[:nonzeros[ix]] = 1
data['masks'] = mask_batch
data['labels'] = data['labels'].reshape(len(batch), seq_per_img, -1)
data['masks'] = data['masks'].reshape(len(batch), seq_per_img, -1)
data['gts'] = gts # all ground truth captions of each images
data['bounds'] = {'it_pos_now': it_pos_now, # the it_pos_now of the last sample
'it_max': len(self.split_ix[split]), 'wrapped': wrapped}
data['infos'] = infos
data = {k:torch.from_numpy(v) if type(v) is np.ndarray else v for k,v in data.items()} # Turn all ndarrays into torch tensors
return data
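# Added shape summary for the batch produced by collate_func (B = len(batch)):
#   fc_feats:    (B, fc_feat_dim)
#   att_feats:   (B, max_att_len, att_dim), zero-padded; att_masks marks the valid rows
#   box_feats:   (B, max_att_len, box_dim), aligned with att_feats (typically x1, y1, x2, y2, area)
#   trace_feats: (B, max_trace_len, trace_dim), zero-padded; trace_masks marks the valid rows
#   labels:      (B, seq_per_img, seq_length + 2), with an extra slot at each end for BOS/EOS;
#                masks has the same shape
#   gts, infos, bounds: Python objects, left as-is (not converted to tensors)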
def __getitem__(self, index):
"""This function returns a tuple that is further passed to collate_fn
"""
ix, it_pos_now, wrapped = index #self.split_ix[index]
if self.use_trace:
trace_feat = self.trace_loader.get(str(self.info['images'][ix]['id'])).astype('float32')[:self.trace_max_length]
# for classification
# trace_class_label = self.trace_class_label_loader.get(str(self.info['images'][ix]['id'])).astype('float32') + 1 # do -1 when using in loss
# trace_feat = np.concatenate([np.reshape(trace_class_label, [-1,1]), trace_feat], 1)
### for using grid level feature, commented for using trace box feature
if self.use_trace_feat:
trace_grid_feat = self.trace_feat_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# for grid feature with 14*14, 2048+2
# trace_grid_feat = np.reshape(trace_grid_feat, [14*14, 2048])
# grid_resolution = 14
# grid_x = torch.arange(grid_resolution).unsqueeze(0).repeat(grid_resolution, 1).view(-1).unsqueeze(1)
# grid_y = torch.arange(grid_resolution).unsqueeze(1).repeat(1, grid_resolution).view(-1).unsqueeze(1)
# trace_grid_feat = np.concatenate([trace_grid_feat, grid_x, grid_y], 1)
# if self.use_trace_feat: # then concat trace_feat
# grid_resolution = 14 # currently it's 14*14 grid
# trace_grid_feat = self.trace_feat_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# trace_grid_feat = np.transpose(np.reshape(trace_grid_feat, [2048, -1]))
# tmp_trace_center_xy = np.stack([(trace_feat[:, 0]+trace_feat[:, 2])/2,
# (trace_feat[:, 1]+trace_feat[:, 3])/2], 1)
# tmp_trace_center_xy = np.clip(tmp_trace_center_xy, 0., 1.)
# tmp_trace_grid_idx = np.clip(np.floor(tmp_trace_center_xy[:,1]*grid_resolution), 0, grid_resolution-1)*grid_resolution + \
# np.clip(np.floor(tmp_trace_center_xy[:,0]*grid_resolution), 0, grid_resolution-1)
# trace_grid_feat = trace_grid_feat[tmp_trace_grid_idx.astype(int), :] # [T, 1024/2048]
# extend the trace_feat by sigma=0.1
# sigma = 0.1
# trace_feat[:, 0] = trace_feat[:, 0] - sigma
# trace_feat[:, 1] = trace_feat[:, 1] - sigma
# trace_feat[:, 2] = trace_feat[:, 2] + sigma
# trace_feat[:, 3] = trace_feat[:, 3] + sigma
# trace_feat = np.clip(trace_feat, 0., 1.)
# trace_feat[:, 4] = (trace_feat[:,2] - trace_feat[:,0]) * (trace_feat[:,3] - trace_feat[:,1])
if self.use_trace_feat:
# concat location and grid feat
trace_feat = np.concatenate([trace_feat, trace_grid_feat], 1)
else:
trace_feat = np.zeros((0, 0), dtype='float32')
if self.use_att:
att_feat = self.att_loader.get(str(self.info['images'][ix]['id']))
# Reshape to K x C
att_feat = att_feat.reshape(-1, att_feat.shape[-1])
if self.norm_att_feat:
att_feat = att_feat / np.linalg.norm(att_feat, 2, 1, keepdims=True)
if self.use_box:
box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
if self.dataset_choice == 'flk30k':
x1, y1, x2, y2, w, h = np.hsplit(box_feat, 6)
box_feat = np.hstack((x1, y1, x2, y2, (x2 - x1) * (y2 - y1)))
elif self.dataset_choice == 'ade20k' or self.dataset_choice == 'openimg': # 4-d
x1, y1, x2, y2 = np.hsplit(box_feat, 4)
box_feat = np.hstack((x1, y1, x2, y2, (x2 - x1) * (y2 - y1)))
else:
# divided by image width and height
x1,y1,x2,y2 = np.hsplit(box_feat, 4)
h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
else:
box_feat = np.zeros((0, 0), dtype='float32')
# if self.use_box:
# box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
# # divided by image width and height
# x1,y1,x2,y2 = np.hsplit(box_feat, 4)
# h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
# box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
# if self.norm_box_feat:
# box_feat = box_feat / np.linalg.norm(box_feat, 2, 1, keepdims=True)
# att_feat = np.hstack([att_feat, box_feat])
# # sort the features by the size of boxes
# att_feat = np.stack(sorted(att_feat, key=lambda x:x[-1], reverse=True))
else:
att_feat = np.zeros((0,0), dtype='float32')
if self.use_fc:
try:
fc_feat = self.fc_loader.get(str(self.info['images'][ix]['id']))
except:
# Use average of attention when there is no fc provided (For bottomup feature)
fc_feat = att_feat.mean(0)
else:
fc_feat = np.zeros((0), dtype='float32')
if hasattr(self, 'h5_label_file'):
seq = self.get_captions(ix, self.seq_per_img)
else:
seq = None
return (fc_feat,
att_feat, trace_feat, box_feat, seq,
ix, it_pos_now, wrapped)
def __len__(self):
return len(self.info['images'])
class DataLoader:
def __init__(self, opt):
self.opt = opt
self.batch_size = self.opt.batch_size
self.dataset = Dataset(opt)
# Initialize loaders and iters
self.loaders, self.iters = {}, {}
for split in ['train', 'val', 'test']:
if split == 'train':
sampler = MySampler(self.dataset.split_ix[split], shuffle=True, wrap=True)
else:
sampler = MySampler(self.dataset.split_ix[split], shuffle=False, wrap=False)
self.loaders[split] = data.DataLoader(dataset=self.dataset,
batch_size=self.batch_size,
sampler=sampler,
pin_memory=True,
num_workers=4, # 4 is usually enough
collate_fn=lambda x: self.dataset.collate_func(x, split),
drop_last=False)
self.iters[split] = iter(self.loaders[split])
def get_batch(self, split):
try:
data = next(self.iters[split])
except StopIteration:
self.iters[split] = iter(self.loaders[split])
data = next(self.iters[split])
return data
def reset_iterator(self, split):
self.loaders[split].sampler._reset_iter()
self.iters[split] = iter(self.loaders[split])
def get_vocab_size(self):
return self.dataset.get_vocab_size()
@property
def vocab_size(self):
return self.get_vocab_size()
def get_vocab(self):
return self.dataset.get_vocab()
def get_seq_length(self):
return self.dataset.get_seq_length()
@property
def seq_length(self):
return self.get_seq_length()
def state_dict(self):
def get_prefetch_num(split):
if self.loaders[split].num_workers > 0:
return (self.iters[split]._send_idx - self.iters[split]._rcvd_idx) * self.batch_size
else:
return 0
return {split: loader.sampler.state_dict(get_prefetch_num(split)) \
for split, loader in self.loaders.items()}
def load_state_dict(self, state_dict=None):
if state_dict is None:
return
for split in self.loaders.keys():
self.loaders[split].sampler.load_state_dict(state_dict[split])
class MySampler(data.sampler.Sampler):
def __init__(self, index_list, shuffle, wrap):
self.index_list = index_list
self.shuffle = shuffle
self.wrap = wrap
# if wrap is True, StopIteration is never raised
# wrap=True is used during training; wrap=False during val/test.
self._reset_iter()
def __iter__(self):
return self
def __next__(self):
wrapped = False
if self.iter_counter == len(self._index_list):
self._reset_iter()
if self.wrap:
wrapped = True
else:
raise StopIteration()
if len(self._index_list) == 0: # overflow when 0 samples
return None
elem = (self._index_list[self.iter_counter], self.iter_counter+1, wrapped)
self.iter_counter += 1
return elem
def next(self):
return self.__next__()
def _reset_iter(self):
if self.shuffle:
rand_perm = npr.permutation(len(self.index_list))
self._index_list = [self.index_list[_] for _ in rand_perm]
else:
self._index_list = self.index_list
self.iter_counter = 0
def __len__(self):
return len(self.index_list)
def load_state_dict(self, state_dict=None):
if state_dict is None:
return
self._index_list = state_dict['index_list']
self.iter_counter = state_dict['iter_counter']
def state_dict(self, prefetched_num=None):
prefetched_num = prefetched_num or 0
return {
'index_list': self._index_list,
'iter_counter': self.iter_counter - prefetched_num
}
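# Usage sketch (added; a hedged sketch, not part of the original file): DataLoader wraps one
# torch DataLoader per split around a resumable MySampler, e.g.
#
#   loader = DataLoader(opt)            # opt must carry the input_* paths used above
#   batch = loader.get_batch('train')   # dict of feats/labels/masks/infos as built in collate_func
#   state = loader.state_dict()         # per-split sampler position, for checkpoint/resume
#   loader.load_state_dict(state)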
|
connect-caption-and-trace-main
|
captioning/data/dataloader.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import h5py
import lmdb
import os
import numpy as np
import numpy.random as npr
import random
import torch
import torch.utils.data as data
import multiprocessing
import six
class HybridLoader:
"""
If db_path is a directory, then use normal file loading
If lmdb, then load from lmdb
The loading method depends on the extension.
in_memory: if in_memory is True, we cache all the features in memory
For individual .npy/.npz files we don't need to do that, because the system (OS cache) will do this for us.
Should be useful for lmdb or h5.
(Copied this idea from vilbert)
"""
def __init__(self, db_path, ext, in_memory=False):
self.db_path = db_path
self.ext = ext
if self.ext == '.npy':
self.loader = lambda x: np.load(six.BytesIO(x))
else:
def load_npz(x):
x = np.load(six.BytesIO(x))
return x['feat'] if 'feat' in x else x['z'] # normally it should be 'feat', but under cocotest_bu, the key is saved to be 'z' mistakenly.
self.loader = load_npz
if db_path.endswith('.lmdb'):
self.db_type = 'lmdb'
self.env = lmdb.open(db_path, subdir=os.path.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
elif db_path.endswith('.pth'): # Assume a key,value dictionary
self.db_type = 'pth'
self.feat_file = torch.load(db_path)
self.loader = lambda x: x
print('HybridLoader: ext is ignored')
elif db_path.endswith('h5'):
self.db_type = 'h5'
self.loader = lambda x: np.array(x).astype('float32')
else:
self.db_type = 'dir'
self.in_memory = in_memory
if self.in_memory:
self.features = {}
def get(self, key):
if self.in_memory and key in self.features:
# We cache f_input (the still-compressed bytes) rather than
# the decoded feature, to save memory
f_input = self.features[key]
elif self.db_type == 'lmdb':
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(key.encode())
f_input = byteflow
elif self.db_type == 'pth':
f_input = self.feat_file[key]
elif self.db_type == 'h5':
f_input = h5py.File(self.db_path, 'r')[key]
else:
f_input = open(os.path.join(self.db_path, key + self.ext), 'rb').read()
if self.in_memory and key not in self.features:
self.features[key] = f_input
# load image
feat = self.loader(f_input)
return feat
class Dataset(data.Dataset):
def get_vocab_size(self):
return self.vocab_size
def get_vocab(self):
return self.ix_to_word
def get_seq_length(self):
return self.seq_length
def __init__(self, opt):
self.opt = opt
self.seq_per_img = opt.seq_per_img
# feature related options
self.use_fc = getattr(opt, 'use_fc', True)
self.use_att = getattr(opt, 'use_att', True)
self.use_box = getattr(opt, 'use_box', 0)
self.use_trace = getattr(opt, 'use_trace', 0)
self.use_trace_feat = getattr(opt, 'use_trace_feat', 0)
self.norm_att_feat = getattr(opt, 'norm_att_feat', 0)
self.norm_box_feat = getattr(opt, 'norm_box_feat', 0)
# load the json file which contains additional information about the dataset
print('DataLoader loading json file: ', opt.input_json)
self.info = json.load(open(self.opt.input_json))
self.union_vocab_json = json.load(open('./data/coco_LN_union_vocab.json'))
if 'ix_to_word' in self.info:
self.ix_to_word = self.union_vocab_json['ix_to_word'] #self.info['ix_to_word']
self.vocab_size = len(self.ix_to_word)
print('vocab size is ', self.vocab_size)
# open the hdf5 file
print('DataLoader loading h5 file: ', opt.input_fc_dir, opt.input_att_dir, opt.input_box_dir, opt.input_label_h5)
"""
Setting input_label_h5 to none is used when only doing generation.
For example, when you need to test on coco test set.
"""
if self.opt.input_label_h5 != 'none':
### for show-control-tell dataset on coco caption
self.show_coco_h5_label_file = h5py.File('data/coco_in_LN_vocab_labels.h5', 'r', driver='core')
show_seq_size = self.show_coco_h5_label_file['labels'].shape
self.show_seq_length = show_seq_size[1]
self.show_label = self.show_coco_h5_label_file['labels'][:]
self.show_label_start_ix = self.show_coco_h5_label_file['label_start_ix'][:]
self.show_label_end_ix = self.show_coco_h5_label_file['label_end_ix'][:]
##############################################
self.h5_label_file = h5py.File(self.opt.input_label_h5, 'r', driver='core')
# load in the sequence data
seq_size = self.h5_label_file['labels'].shape
self.label = self.h5_label_file['labels'][:]
self.seq_length = seq_size[1]
print('max sequence length in data is', self.seq_length)
# load the pointers in full to RAM (should be small enough)
self.label_start_ix = self.h5_label_file['label_start_ix'][:]
self.label_end_ix = self.h5_label_file['label_end_ix'][:]
else:
self.seq_length = 1
self.data_in_memory = getattr(opt, 'data_in_memory', False)
self.fc_loader = HybridLoader(self.opt.input_fc_dir, '.npy', in_memory=self.data_in_memory)
self.att_loader = HybridLoader(self.opt.input_att_dir, '.npz', in_memory=self.data_in_memory)
self.box_loader = HybridLoader(self.opt.input_box_dir, '.npy', in_memory=self.data_in_memory)
self.trace_loader = HybridLoader(self.opt.input_trace_dir, '.npy', in_memory=self.data_in_memory)
self.show_trace_loader = HybridLoader('./data/show_control_tell_box_seq', '.npy', in_memory=self.data_in_memory)
self.show_gate_label_loader = HybridLoader('./data/show_control_tell_box_gate_label', '.npy', in_memory=self.data_in_memory)
self.trace_class_label_loader = HybridLoader(self.opt.input_trace_class_label_dir, '.npy', in_memory=self.data_in_memory)
self.trace_feat_loader = HybridLoader(self.opt.input_trace_feat_dir, '.npy', in_memory=self.data_in_memory)
self.num_images = len(self.info['images']) # self.label_start_ix.shape[0]
print('read %d image features' %(self.num_images))
# separate out indexes for each of the provided splits
self.split_ix = {'train': [], 'val': [], 'test': []}
### load the Kaparthy split
tmp_cocotalk_json = json.load(open('/home/zihang/Research/Localized_Narratives/ImageCaptioning.pytorch/data/cocotalk.json'))
for ix in range(len(tmp_cocotalk_json['images'])):
img = tmp_cocotalk_json['images'][ix]
if not 'split' in img:
self.split_ix['train'].append(ix)
self.split_ix['val'].append(ix)
self.split_ix['test'].append(ix)
elif img['split'] == 'train':
self.split_ix['train'].append(ix)
elif img['split'] == 'val': #
pass
elif img['split'] == 'test':
self.split_ix['val'].append(ix) ###zihang
self.split_ix['test'].append(ix)
#self.split_ix['test'].append(ix) ###zihang
#self.split_ix['test'].append(ix) ###zihang
elif opt.train_only == 0: # restval
self.split_ix['train'].append(ix)
# for ix in range(len(self.info['images'])):
# img = self.info['images'][ix]
# if not 'split' in img:
# self.split_ix['train'].append(ix)
# self.split_ix['val'].append(ix)
# self.split_ix['test'].append(ix)
# elif img['split'] == 'train':
# self.split_ix['train'].append(ix)
# elif img['split'] == 'val': #
# pass
# elif img['split'] == 'test':
# self.split_ix['val'].append(ix) ###zihang
# self.split_ix['test'].append(ix)
# #self.split_ix['test'].append(ix) ###zihang
# #self.split_ix['test'].append(ix) ###zihang
# elif opt.train_only == 0: # restval
# self.split_ix['train'].append(ix)
print('assigned %d images to split train' %len(self.split_ix['train']))
print('assigned %d images to split val' %len(self.split_ix['val']))
print('assigned %d images to split test' %len(self.split_ix['test']))
def get_captions(self, ix, seq_per_img):
# fetch the sequence labels
ix1 = self.label_start_ix[ix] - 1 #label_start_ix starts from 1
ix2 = self.label_end_ix[ix] - 1
ncap = ix2 - ix1 + 1 # number of captions available for this image
assert ncap > 0, 'an image does not have any label. this can be handled but right now isn\'t'
if ncap < seq_per_img:
# we need to subsample (with replacement)
seq = np.zeros([seq_per_img, self.seq_length], dtype = 'int')
for q in range(seq_per_img):
ixl = random.randint(ix1,ix2)
seq[q, :] = self.label[ixl, :self.seq_length]
else:
ixl = random.randint(ix1, ix2 - seq_per_img + 1)
seq = self.label[ixl: ixl + seq_per_img, :self.seq_length]
return seq
def get_captions_show(self, ix, seq_per_img):
# fetch the sequence labels
ix1 = self.show_label_start_ix[ix] - 1 # label_start_ix starts from 1
ix2 = self.show_label_end_ix[ix] - 1
ncap = ix2 - ix1 + 1 # number of captions available for this image
assert ncap > 0, 'an image does not have any label. this can be handled but right now isn\'t'
# if ncap < seq_per_img:
# # we need to subsample (with replacement)
# seq = np.zeros([seq_per_img, self.show_seq_length], dtype='int')
# for q in range(seq_per_img):
# ixl = random.randint(ix1, ix2)
# seq[q, :] = self.show_label[ixl, :self.show_seq_length]
# else:
# ixl = random.randint(ix1, ix2 - seq_per_img + 1)
# seq = self.show_label[ixl: ixl + seq_per_img, :self.show_seq_length]
### zihang: temporarily load all captions for the image
ixl = ix1 # get first 5 instead of random
seq = self.show_label[ixl: ixl + seq_per_img, :self.show_seq_length]
# seq = self.show_label[ix1:ix2+1, :self.show_seq_length]
return seq
def collate_func(self, batch, split):
seq_per_img = self.seq_per_img
fc_batch = []
att_batch = []
label_batch = []
trace_batch = []
box_batch = []
show_trace_feat_batch = []
show_label_batch = []
show_gate_label_batch = []
wrapped = False
infos = []
gts = []
for sample in batch:
# fetch image
tmp_fc, tmp_att, tmp_trace, tmp_box, tmp_seq, \
ix, it_pos_now, tmp_wrapped, tmp_show_seq, tmp_show_trace_feat, tmp_show_gate_label_orig = sample
if tmp_wrapped:
wrapped = True
fc_batch.append(tmp_fc)
att_batch.append(tmp_att)
trace_batch.append(tmp_trace)
box_batch.append(tmp_box)
# show-control-tell
for tmp_i in range(tmp_show_trace_feat.shape[0]):
show_trace_feat_batch.append(tmp_show_trace_feat[tmp_i]) # append the trace feats of one caption sentence
tmp_label = np.zeros([seq_per_img, self.seq_length + 2], dtype = 'int')
if hasattr(self, 'h5_label_file'):
# if there is ground truth
tmp_label[:, 1 : self.seq_length + 1] = tmp_seq
label_batch.append(tmp_label)
tmp_show_label = np.zeros([5, self.show_seq_length + 2], dtype='int')
tmp_show_label[:, 1: self.show_seq_length + 1] = tmp_show_seq
show_label_batch.append(tmp_show_label)
# for gate
tmp_show_gate_label = np.zeros([5, self.show_seq_length + 2], dtype='int')
tmp_show_gate_label[:, 1: self.show_seq_length + 1] = tmp_show_gate_label_orig[:5, :self.show_seq_length]
show_gate_label_batch.append(tmp_show_gate_label)
# Used for reward evaluation
if hasattr(self, 'h5_label_file'):
# if there is ground truth
gts.append(self.label[self.label_start_ix[ix] - 1: self.label_end_ix[ix]])
else:
gts.append([])
# record associated info as well
info_dict = {}
info_dict['ix'] = ix
info_dict['id'] = self.info['images'][ix]['id']
info_dict['file_path'] = self.info['images'][ix].get('file_path', '')
infos.append(info_dict)
# #sort by att_feat length
# fc_batch, att_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, np.vsplit(label_batch, batch_size), gts, infos), key=lambda x: len(x[1]), reverse=True))
# commented for classification
# fc_batch, att_batch, trace_batch, box_batch, label_batch, gts, infos = \
# zip(*sorted(zip(fc_batch, att_batch, trace_batch, box_batch, label_batch, gts, infos), key=lambda x: 0, reverse=True))
data = {}
data['fc_feats'] = np.stack(fc_batch)
# merge att_feats
max_att_len = max([_.shape[0] for _ in att_batch])
data['att_feats'] = np.zeros([len(att_batch), max_att_len, att_batch[0].shape[1]], dtype = 'float32')
data['box_feats'] = np.zeros([len(box_batch), max_att_len, box_batch[0].shape[1]], dtype='float32')
assert att_batch[0].shape[0] == box_batch[0].shape[0], 'box should have same shape[0] with att'
for i in range(len(att_batch)):
data['att_feats'][i, :att_batch[i].shape[0]] = att_batch[i]
data['box_feats'][i, :box_batch[i].shape[0]] = box_batch[i]
data['att_masks'] = np.zeros(data['att_feats'].shape[:2], dtype='float32')
for i in range(len(att_batch)):
data['att_masks'][i, :att_batch[i].shape[0]] = 1
# set att_masks to None if attention features have same length #commented by zihang
# if data['att_masks'].sum() == data['att_masks'].size:
# data['att_masks'] = None
# merge trace_feats
max_trace_len = max([_.shape[0] for _ in trace_batch])
data['trace_feats'] = np.zeros([len(trace_batch), max_trace_len, trace_batch[0].shape[1]], dtype='float32')
for i in range(len(trace_batch)):
data['trace_feats'][i, :trace_batch[i].shape[0]] = trace_batch[i]
data['trace_masks'] = np.zeros(data['trace_feats'].shape[:2], dtype='float32')
for i in range(len(trace_batch)):
data['trace_masks'][i, :trace_batch[i].shape[0]] = 1
# set trace_masks to None if attention features have same length #commented by zihang
# if data['trace_masks'].sum() == data['trace_masks'].size:
# data['trace_masks'] = None
# merge show-control-tell trace feats
max_trace_len = max([_.shape[0] for _ in show_trace_feat_batch])
data['show_trace_feats'] = np.zeros([len(show_trace_feat_batch), max_trace_len, show_trace_feat_batch[0].shape[1]], dtype='float32')
for i in range(len(show_trace_feat_batch)):
data['show_trace_feats'][i, :show_trace_feat_batch[i].shape[0]] = show_trace_feat_batch[i]
data['show_trace_masks'] = np.zeros(data['show_trace_feats'].shape[:2], dtype='float32')
for i in range(len(show_trace_feat_batch)):
data['show_trace_masks'][i, :show_trace_feat_batch[i].shape[0]] = 1
for i in range(data['show_trace_feats'].shape[0]):
for j in range(data['show_trace_feats'].shape[1]):
if data['show_trace_feats'][i,j,0] < 0:
data['show_trace_masks'][i, j] = 0
data['show_trace_feats'] = np.clip(data['show_trace_feats'], 0., 1.)
data['labels'] = np.vstack(label_batch)
data['show_labels'] = np.expand_dims(np.vstack(show_label_batch), 1)
data['show_gate_labels'] = np.expand_dims(np.vstack(show_gate_label_batch), 1)
# generate mask
nonzeros = np.array(list(map(lambda x: (x != 0).sum()+2, data['labels'])))
mask_batch = np.zeros([data['labels'].shape[0], self.seq_length + 2], dtype = 'float32')
for ix, row in enumerate(mask_batch):
row[:nonzeros[ix]] = 1
data['masks'] = mask_batch
data['labels'] = data['labels'].reshape(len(batch), seq_per_img, -1)
data['masks'] = data['masks'].reshape(len(batch), seq_per_img, -1)
# generate mask for show-control-tell
nonzeros = np.array(list(map(lambda x: (x != 0).sum() + 2, data['show_labels'])))
mask_batch = np.zeros([data['show_labels'].shape[0], self.show_seq_length + 2], dtype='float32')
for ix, row in enumerate(mask_batch):
row[:nonzeros[ix]] = 1
data['show_masks'] = np.expand_dims(mask_batch, 1)
data['gts'] = gts # all ground truth captions of each images
data['bounds'] = {'it_pos_now': it_pos_now, # the it_pos_now of the last sample
'it_max': len(self.split_ix[split]), 'wrapped': wrapped}
# print('In dataloader', len(self.split_ix[split]), split, infos)###zihang
data['infos'] = infos
data = {k:torch.from_numpy(v) if type(v) is np.ndarray else v for k,v in data.items()} # Turn all ndarrays into torch tensors
return data
def __getitem__(self, index):
"""This function returns a tuple that is further passed to collate_fn
"""
ix, it_pos_now, wrapped = index #self.split_ix[index]
show_trace_feat = self.show_trace_loader.get(str(self.info['images'][ix]['id'])).astype('float32')[:5]
show_gate_label = self.show_gate_label_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# print(show_trace_feat.shape, ix)
try:
assert show_trace_feat.shape[2] == 5
except:
print(show_trace_feat.shape, ix)
if self.use_trace:
trace_feat = self.trace_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# for classification
# trace_class_label = self.trace_class_label_loader.get(str(self.info['images'][ix]['id'])).astype('float32') + 1 # do -1 when using in loss
# trace_feat = np.concatenate([np.reshape(trace_class_label, [-1,1]), trace_feat], 1)
### for using grid level feature, commented for using trace box feature
if self.use_trace_feat:
trace_grid_feat = self.trace_feat_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# for grid feature with 14*14, 2048+2
# trace_grid_feat = np.reshape(trace_grid_feat, [14*14, 2048])
# grid_resolution = 14
# grid_x = torch.arange(grid_resolution).unsqueeze(0).repeat(grid_resolution, 1).view(-1).unsqueeze(1)
# grid_y = torch.arange(grid_resolution).unsqueeze(1).repeat(1, grid_resolution).view(-1).unsqueeze(1)
# trace_grid_feat = np.concatenate([trace_grid_feat, grid_x, grid_y], 1)
# if self.use_trace_feat: # then concat trace_feat
# grid_resolution = 14 # currently it's 14*14 grid
# trace_grid_feat = self.trace_feat_loader.get(str(self.info['images'][ix]['id'])).astype('float32')
# trace_grid_feat = np.transpose(np.reshape(trace_grid_feat, [2048, -1]))
# tmp_trace_center_xy = np.stack([(trace_feat[:, 0]+trace_feat[:, 2])/2,
# (trace_feat[:, 1]+trace_feat[:, 3])/2], 1)
# tmp_trace_center_xy = np.clip(tmp_trace_center_xy, 0., 1.)
# tmp_trace_grid_idx = np.clip(np.floor(tmp_trace_center_xy[:,1]*grid_resolution), 0, grid_resolution-1)*grid_resolution + \
# np.clip(np.floor(tmp_trace_center_xy[:,0]*grid_resolution), 0, grid_resolution-1)
# trace_grid_feat = trace_grid_feat[tmp_trace_grid_idx.astype(int), :] # [T, 1024/2048]
# extend the trace_feat by sigma=0.1
# sigma = 0.1
# trace_feat[:, 0] = trace_feat[:, 0] - sigma
# trace_feat[:, 1] = trace_feat[:, 1] - sigma
# trace_feat[:, 2] = trace_feat[:, 2] + sigma
# trace_feat[:, 3] = trace_feat[:, 3] + sigma
# trace_feat = np.clip(trace_feat, 0., 1.)
# trace_feat[:, 4] = (trace_feat[:,2] - trace_feat[:,0]) * (trace_feat[:,3] - trace_feat[:,1])
if self.use_trace_feat:
# concat location and grid feat
trace_feat = np.concatenate([trace_feat, trace_grid_feat], 1)
else:
trace_feat = np.zeros((0, 0), dtype='float32')
if self.use_att:
att_feat = self.att_loader.get(str(self.info['images'][ix]['id']))
# Reshape to K x C
att_feat = att_feat.reshape(-1, att_feat.shape[-1])
if self.norm_att_feat:
att_feat = att_feat / np.linalg.norm(att_feat, 2, 1, keepdims=True)
if self.use_box: # zihang updated version
box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
# divided by image width and height
x1,y1,x2,y2 = np.hsplit(box_feat, 4)
h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
else:
box_feat = np.zeros((0, 0), dtype='float32')
# if self.use_box:
# box_feat = self.box_loader.get(str(self.info['images'][ix]['id']))
# # divided by image width and height
# x1,y1,x2,y2 = np.hsplit(box_feat, 4)
# h,w = self.info['images'][ix]['height'], self.info['images'][ix]['width']
# box_feat = np.hstack((x1/w, y1/h, x2/w, y2/h, (x2-x1)*(y2-y1)/(w*h))) # question? x2-x1+1??
# if self.norm_box_feat:
# box_feat = box_feat / np.linalg.norm(box_feat, 2, 1, keepdims=True)
# att_feat = np.hstack([att_feat, box_feat])
# # sort the features by the size of boxes
# att_feat = np.stack(sorted(att_feat, key=lambda x:x[-1], reverse=True))
else:
att_feat = np.zeros((0,0), dtype='float32')
if self.use_fc:
try:
fc_feat = self.fc_loader.get(str(self.info['images'][ix]['id']))
except:
# Use average of attention when there is no fc provided (For bottomup feature)
fc_feat = att_feat.mean(0)
else:
fc_feat = np.zeros((0), dtype='float32')
if hasattr(self, 'h5_label_file'):
seq = self.get_captions(ix, self.seq_per_img)
seq_show = self.get_captions_show(ix, 5)
else:
seq = None
return (fc_feat,
att_feat, trace_feat, box_feat, seq,
ix, it_pos_now, wrapped, seq_show, show_trace_feat, show_gate_label)
def __len__(self):
return len(self.info['images'])
class DataLoader:
def __init__(self, opt):
self.opt = opt
self.batch_size = self.opt.batch_size
self.dataset = Dataset(opt)
# Initialize loaders and iters
self.loaders, self.iters = {}, {}
for split in ['train', 'val', 'test']:
if split == 'train':
sampler = MySampler(self.dataset.split_ix[split], shuffle=True, wrap=True)
else:
sampler = MySampler(self.dataset.split_ix[split], shuffle=False, wrap=False)
self.loaders[split] = data.DataLoader(dataset=self.dataset,
batch_size=self.batch_size,
sampler=sampler,
pin_memory=True,
num_workers=4, # 4 is usually enough
collate_fn=lambda x: self.dataset.collate_func(x, split),
drop_last=False)
self.iters[split] = iter(self.loaders[split])
def get_batch(self, split):
# print('In Dataloader, get_batch', split)###zihang
try:
data = next(self.iters[split])
except StopIteration:
self.iters[split] = iter(self.loaders[split])
data = next(self.iters[split])
return data
def reset_iterator(self, split):
self.loaders[split].sampler._reset_iter()
self.iters[split] = iter(self.loaders[split])
def get_vocab_size(self):
return self.dataset.get_vocab_size()
@property
def vocab_size(self):
return self.get_vocab_size()
def get_vocab(self):
return self.dataset.get_vocab()
def get_seq_length(self):
return self.dataset.get_seq_length()
@property
def seq_length(self):
return self.get_seq_length()
def state_dict(self):
def get_prefetch_num(split):
if self.loaders[split].num_workers > 0:
return (self.iters[split]._send_idx - self.iters[split]._rcvd_idx) * self.batch_size
else:
return 0
return {split: loader.sampler.state_dict(get_prefetch_num(split)) \
for split, loader in self.loaders.items()}
def load_state_dict(self, state_dict=None):
if state_dict is None:
return
for split in self.loaders.keys():
self.loaders[split].sampler.load_state_dict(state_dict[split])
class MySampler(data.sampler.Sampler):
def __init__(self, index_list, shuffle, wrap):
self.index_list = index_list
self.shuffle = shuffle
self.wrap = wrap
# if wrap is True, StopIteration is never raised
# wrap=True is used during training; wrap=False during val/test.
self._reset_iter()
def __iter__(self):
return self
def __next__(self):
wrapped = False
if self.iter_counter == len(self._index_list):
self._reset_iter()
if self.wrap:
wrapped = True
else:
raise StopIteration()
if len(self._index_list) == 0: # overflow when 0 samples
return None
elem = (self._index_list[self.iter_counter], self.iter_counter+1, wrapped)
self.iter_counter += 1
return elem
def next(self):
return self.__next__()
def _reset_iter(self):
if self.shuffle:
rand_perm = npr.permutation(len(self.index_list))
self._index_list = [self.index_list[_] for _ in rand_perm]
else:
self._index_list = self.index_list
self.iter_counter = 0
def __len__(self):
return len(self.index_list)
def load_state_dict(self, state_dict=None):
if state_dict is None:
return
self._index_list = state_dict['index_list']
self.iter_counter = state_dict['iter_counter']
def state_dict(self, prefetched_num=None):
prefetched_num = prefetched_num or 0
return {
'index_list': self._index_list,
'iter_counter': self.iter_counter - prefetched_num
}
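# Added note: this variant extends dataloader.py for the show-control-tell setting. Each
# __getitem__ additionally returns 5 controlled captions per image together with their box
# sequences and gate labels, and collate_func packs them into the extra batch entries
# 'show_trace_feats', 'show_trace_masks', 'show_labels', 'show_gate_labels' and 'show_masks'
# alongside the regular caption/trace tensors.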
|
connect-caption-and-trace-main
|
captioning/data/dataloader_show_control_tell.py
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import base64
import numpy as np
import csv
import sys
import zlib
import time
import mmap
import argparse
parser = argparse.ArgumentParser()
# output_dir
parser.add_argument('--downloaded_feats', default='data/bu_data', help='downloaded feature directory')
parser.add_argument('--output_dir', default='data/cocobu', help='output feature files')
args = parser.parse_args()
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'image_w','image_h','num_boxes', 'boxes', 'features']
infiles = ['trainval/karpathy_test_resnet101_faster_rcnn_genome.tsv',
'trainval/karpathy_val_resnet101_faster_rcnn_genome.tsv',\
'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.0', \
'trainval/karpathy_train_resnet101_faster_rcnn_genome.tsv.1']
os.makedirs(args.output_dir+'_att')
os.makedirs(args.output_dir+'_fc')
os.makedirs(args.output_dir+'_box')
for infile in infiles:
print('Reading ' + infile)
with open(os.path.join(args.downloaded_feats, infile), "r") as tsv_in_file:
reader = csv.DictReader(tsv_in_file, delimiter='\t', fieldnames = FIELDNAMES)
for item in reader:
item['image_id'] = int(item['image_id'])
item['num_boxes'] = int(item['num_boxes'])
for field in ['boxes', 'features']:
item[field] = np.frombuffer(base64.b64decode(item[field].encode('ascii')),  # b64decode: decodestring was removed in Python 3.9
dtype=np.float32).reshape((item['num_boxes'],-1))
np.savez_compressed(os.path.join(args.output_dir+'_att', str(item['image_id'])), feat=item['features'])
np.save(os.path.join(args.output_dir+'_fc', str(item['image_id'])), item['features'].mean(0))
np.save(os.path.join(args.output_dir+'_box', str(item['image_id'])), item['boxes'])
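# Example invocation (added; uses the argparse defaults above):
#   python scripts/make_bu_data.py --downloaded_feats data/bu_data --output_dir data/cocobu
# For every image in the bottom-up TSV files this writes <image_id>.npz into data/cocobu_att,
# a mean-pooled <image_id>.npy into data/cocobu_fc, and the raw boxes into data/cocobu_box.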
|
connect-caption-and-trace-main
|
scripts/make_bu_data.py
|
"""
Preprocess a raw json dataset into feature files for use in data_loader.py
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}
This script reads this json, does some basic preprocessing on the captions
(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays
Output: two folders of features
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
from six.moves import cPickle
import numpy as np
import torch
import torchvision.models as models
import skimage.io
from torchvision import transforms as trn
preprocess = trn.Compose([
#trn.ToTensor(),
trn.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
])
from captioning.utils.resnet_utils import myResnet
import captioning.utils.resnet as resnet
def main(params):
net = getattr(resnet, params['model'])()
net.load_state_dict(torch.load(os.path.join(params['model_root'],params['model']+'.pth')))
my_resnet = myResnet(net)
my_resnet.cuda()
my_resnet.eval()
imgs = json.load(open(params['input_json'], 'r'))
imgs = imgs['images']
N = len(imgs)
seed(123) # make reproducible
dir_fc = params['output_dir']+'_fc'
dir_att = params['output_dir']+'_att'
if not os.path.isdir(dir_fc):
os.mkdir(dir_fc)
if not os.path.isdir(dir_att):
os.mkdir(dir_att)
for i,img in enumerate(imgs):
# load the image
I = skimage.io.imread(os.path.join(params['images_root'], img['filepath'], img['filename']))
# handle grayscale input images
if len(I.shape) == 2:
I = I[:,:,np.newaxis]
I = np.concatenate((I,I,I), axis=2)
I = I.astype('float32')/255.0
I = torch.from_numpy(I.transpose([2,0,1])).cuda()
I = preprocess(I)
with torch.no_grad():
tmp_fc, tmp_att = my_resnet(I, params['att_size'])
# write to pkl
np.save(os.path.join(dir_fc, str(img['cocoid'])), tmp_fc.data.cpu().float().numpy())
np.savez_compressed(os.path.join(dir_att, str(img['cocoid'])), feat=tmp_att.data.cpu().float().numpy())
if i % 1000 == 0:
print('processing %d/%d (%.2f%% done)' % (i, N, i*100.0/N))
print('wrote ', params['output_dir'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--output_dir', default='data', help='output h5 file')
# options
parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json')
parser.add_argument('--att_size', default=14, type=int, help='14x14 or 7x7')
parser.add_argument('--model', default='resnet101', type=str, help='resnet101, resnet152')
parser.add_argument('--model_root', default='./data/imagenet_weights', type=str, help='model root')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent = 2))
main(params)
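# Example invocation (added; a sketch, file names are placeholders):
#   python scripts/prepro_feats.py --input_json data/dataset_coco.json \
#       --output_dir data/cocotalk --images_root /path/to/coco/images
# This extracts ResNet fc/att features per image into <output_dir>_fc/<cocoid>.npy and
# <output_dir>_att/<cocoid>.npz.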
|
connect-caption-and-trace-main
|
scripts/prepro_feats.py
|
# coding: utf-8
"""
Create a reference json file used for evaluation with the `coco-caption` repo.
Used when a reference json is not provided (e.g., flickr30k, or when you have your own train/val/test split).
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
import sys
import hashlib
from random import shuffle, seed
def main(params):
imgs = json.load(open(params['input_json'][0], 'r'))['images']
# tmp = []
# for k in imgs.keys():
# for img in imgs[k]:
# img['filename'] = img['image_id'] # k+'/'+img['image_id']
# img['image_id'] = int(
# int(hashlib.sha256(img['image_id']).hexdigest(), 16) % sys.maxint)
# tmp.append(img)
# imgs = tmp
# create output json file
out = {'info': {'description': 'This is stable 1.0 version of the 2014 MS COCO dataset.', 'url': 'http://mscoco.org', 'version': '1.0', 'year': 2014, 'contributor': 'Microsoft COCO group', 'date_created': '2015-01-27 09:11:52.357475'}, 'licenses': [{'url': 'http://creativecommons.org/licenses/by-nc-sa/2.0/', 'id': 1, 'name': 'Attribution-NonCommercial-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nc/2.0/', 'id': 2, 'name': 'Attribution-NonCommercial License'}, {'url': 'http://creativecommons.org/licenses/by-nc-nd/2.0/', 'id': 3, 'name': 'Attribution-NonCommercial-NoDerivs License'}, {'url': 'http://creativecommons.org/licenses/by/2.0/', 'id': 4, 'name': 'Attribution License'}, {'url': 'http://creativecommons.org/licenses/by-sa/2.0/', 'id': 5, 'name': 'Attribution-ShareAlike License'}, {'url': 'http://creativecommons.org/licenses/by-nd/2.0/', 'id': 6, 'name': 'Attribution-NoDerivs License'}, {'url': 'http://flickr.com/commons/usage/', 'id': 7, 'name': 'No known copyright restrictions'}, {'url': 'http://www.usa.gov/copyright.shtml', 'id': 8, 'name': 'United States Government Work'}], 'type': 'captions'}
out.update({'images': [], 'annotations': []})
cnt = 0
empty_cnt = 0
for i, img in enumerate(imgs):
if img['split'] == 'train':
continue
out['images'].append(
{'id': img.get('cocoid', img['imgid'])})
for j, s in enumerate(img['sentences']):
if len(s['tokens']) == 0: # skip empty sentences
continue
s = ' '.join(s['tokens'])
out['annotations'].append(
{'image_id': out['images'][-1]['id'], 'caption': s, 'id': cnt})
cnt += 1
json.dump(out, open(params['output_json'], 'w'))
print('wrote ', params['output_json'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', nargs='+', required=True,
help='input json file to process into hdf5')
parser.add_argument('--output_json', default='data.json',
help='output json file')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent=2))
main(params)
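# Example invocation (added; a sketch, file names are placeholders):
#   python scripts/prepro_reference_json.py --input_json data/dataset_flickr30k.json \
#       --output_json data/flickr30k_captions4eval.json
# The output json follows the coco-caption annotation format ('images' + 'annotations'),
# covering every image whose split is not 'train'.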
|
connect-caption-and-trace-main
|
scripts/prepro_reference_json.py
|
"""
Precompute ngram counts of captions, to accelerate cider computation during training time.
"""
import os
import json
import argparse
from six.moves import cPickle
import captioning.utils.misc as utils
from collections import defaultdict
import sys
sys.path.append("cider")
from pyciderevalcap.ciderD.ciderD_scorer import CiderScorer
def get_doc_freq(refs, params):
tmp = CiderScorer(df_mode="corpus")
for ref in refs:
tmp.cook_append(None, ref)
tmp.compute_doc_freq()
return tmp.document_frequency, len(tmp.crefs)
def build_dict(imgs, wtoi, params):
wtoi['<eos>'] = 0
count_imgs = 0
refs_words = []
refs_idxs = []
for img in imgs:
if (params['split'] == img['split']) or \
(params['split'] == 'train' and img['split'] == 'restval') or \
(params['split'] == 'all'):
#(params['split'] == 'val' and img['split'] == 'restval') or \
ref_words = []
ref_idxs = []
for sent in img['sentences']:
if 'bpe' in params: # params is vars(args), a plain dict, so test membership rather than hasattr
sent['tokens'] = params['bpe'].segment(' '.join(sent['tokens'])).strip().split(' ')
tmp_tokens = sent['tokens'] + ['<eos>']
tmp_tokens = [_ if _ in wtoi else 'UNK' for _ in tmp_tokens]
ref_words.append(' '.join(tmp_tokens))
ref_idxs.append(' '.join([str(wtoi[_]) for _ in tmp_tokens]))
refs_words.append(ref_words)
refs_idxs.append(ref_idxs)
count_imgs += 1
print('total imgs:', count_imgs)
ngram_words, count_refs = get_doc_freq(refs_words, params)
ngram_idxs, count_refs = get_doc_freq(refs_idxs, params)
print('count_refs:', count_refs)
return ngram_words, ngram_idxs, count_refs
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
dict_json = json.load(open(params['dict_json'], 'r'))
itow = dict_json['ix_to_word']
wtoi = {w:i for i,w in itow.items()}
# Load bpe
if 'bpe' in dict_json:
import tempfile
import codecs
from subword_nmt import apply_bpe # apply_bpe is used below but was not imported
codes_f = tempfile.NamedTemporaryFile(delete=False)
codes_f.close()
with open(codes_f.name, 'w') as f:
f.write(dict_json['bpe'])
with codecs.open(codes_f.name, encoding='UTF-8') as codes:
bpe = apply_bpe.BPE(codes)
params['bpe'] = bpe
imgs = imgs['images']
ngram_words, ngram_idxs, ref_len = build_dict(imgs, wtoi, params)
utils.pickle_dump({'document_frequency': ngram_words, 'ref_len': ref_len}, open(params['output_pkl']+'-words.p','wb'))
utils.pickle_dump({'document_frequency': ngram_idxs, 'ref_len': ref_len}, open(params['output_pkl']+'-idxs.p','wb'))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', default='data/dataset_coco.json', help='input json file to process into hdf5')
parser.add_argument('--dict_json', default='data/cocotalk.json', help='output json file')
parser.add_argument('--output_pkl', default='data/coco-all', help='output pickle file')
parser.add_argument('--split', default='all', help='test, val, train, all')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
main(params)
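# Example invocation (added; the defaults above already point at the COCO files):
#   python scripts/prepro_ngrams.py --input_json data/dataset_coco.json \
#       --dict_json data/cocotalk.json --output_pkl data/coco-all --split all
# This writes data/coco-all-words.p and data/coco-all-idxs.p, the cached document
# frequencies used to speed up CIDEr computation during training.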
|
connect-caption-and-trace-main
|
scripts/prepro_ngrams.py
|
import argparse
import h5py
import os
import numpy as np
import json
from tqdm import tqdm
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
imgs = imgs['images']
N = len(imgs)
if params['fc_input_dir'] is not None:
print('processing fc')
with h5py.File(params['fc_output'], 'a') as file_fc: # open read/write (create if missing); newer h5py defaults to read-only
for i, img in enumerate(tqdm(imgs)):
npy_fc_path = os.path.join(
params['fc_input_dir'],
str(img['cocoid']) + '.npy')
d_set_fc = file_fc.create_dataset(
str(img['cocoid']), data=np.load(npy_fc_path))
file_fc.close()
if params['att_input_dir'] is not None:
print('processing att')
with h5py.File(params['att_output'], 'a') as file_att: # open read/write (create if missing); newer h5py defaults to read-only
for i, img in enumerate(tqdm(imgs)):
npy_att_path = os.path.join(
params['att_input_dir'],
str(img['cocoid']) + '.npz')
d_set_att = file_att.create_dataset(
str(img['cocoid']),
data=np.load(npy_att_path)['feat'])
file_att.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--fc_output', default='data', help='output h5 filename for fc')
parser.add_argument('--att_output', default='data', help='output h5 file for att')
parser.add_argument('--fc_input_dir', default=None, help='input directory for numpy fc files')
parser.add_argument('--att_input_dir', default=None, help='input directory for numpy att files')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent=2))
main(params)
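# Example invocation (added; a sketch, paths are placeholders):
#   python scripts/dump_to_h5df.py --input_json data/dataset_coco.json \
#       --fc_input_dir data/cocotalk_fc --fc_output data/cocotalk_fc.h5 \
#       --att_input_dir data/cocotalk_att --att_output data/cocotalk_att.h5
# Each per-image .npy/.npz file becomes one dataset (keyed by cocoid) in the output h5 file.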
|
connect-caption-and-trace-main
|
scripts/dump_to_h5df.py
|
"""
Preprocess a raw json dataset into hdf5/json files for use in data_loader.py
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}
This script reads this json, does some basic preprocessing on the captions
(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays
Output: a json file and an hdf5 file
The hdf5 file contains several fields:
/labels is (M,max_length) uint32 array of encoded labels, zero padded
/label_start_ix and /label_end_ix are (N,) uint32 arrays of pointers to the
first and last indices (in range 1..M) of labels for each image
/label_length stores the length of the sequence for each of the M sequences
The json file has a dict that contains:
- an 'ix_to_word' field storing the vocab in form {ix:'word'}, where ix is 1-indexed
- an 'images' field that is a list holding auxiliary information for each image,
such as in particular the 'split' it was assigned to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
import numpy as np
import torch
import torchvision.models as models
import skimage.io
from PIL import Image
def build_vocab(imgs, params):
count_thr = params['word_count_threshold']
# count up the number of words
counts = {}
for img in imgs:
for sent in img['sentences']:
for w in sent['tokens']:
counts[w] = counts.get(w, 0) + 1
cw = sorted([(count,w) for w,count in counts.items()], reverse=True)
print('top words and their counts:')
print('\n'.join(map(str,cw[:20])))
# print some stats
total_words = sum(counts.values())
print('total words:', total_words)
bad_words = [w for w,n in counts.items() if n <= count_thr]
vocab = [w for w,n in counts.items() if n > count_thr]
bad_count = sum(counts[w] for w in bad_words)
print('number of bad words: %d/%d = %.2f%%' % (len(bad_words), len(counts), len(bad_words)*100.0/len(counts)))
print('number of words in vocab would be %d' % (len(vocab), ))
print('number of UNKs: %d/%d = %.2f%%' % (bad_count, total_words, bad_count*100.0/total_words))
# lets look at the distribution of lengths as well
sent_lengths = {}
for img in imgs:
for sent in img['sentences']:
txt = sent['tokens']
nw = len(txt)
sent_lengths[nw] = sent_lengths.get(nw, 0) + 1
max_len = max(sent_lengths.keys())
print('max length sentence in raw data: ', max_len)
print('sentence length distribution (count, number of words):')
sum_len = sum(sent_lengths.values())
for i in range(max_len+1):
print('%2d: %10d %f%%' % (i, sent_lengths.get(i,0), sent_lengths.get(i,0)*100.0/sum_len))
# lets now produce the final annotations
if bad_count > 0:
# additional special UNK token we will use below to map infrequent words to
print('inserting the special UNK token')
vocab.append('UNK')
for img in imgs:
img['final_captions'] = []
for sent in img['sentences']:
txt = sent['tokens']
caption = [w if counts.get(w,0) > count_thr else 'UNK' for w in txt]
img['final_captions'].append(caption)
return vocab
def encode_captions(imgs, params, wtoi):
"""
encode all captions into one large array, which will be 1-indexed.
also produces label_start_ix and label_end_ix which store 1-indexed
and inclusive (Lua-style) pointers to the first and last caption for
each image in the dataset.
"""
max_length = params['max_length']
N = len(imgs)
M = sum(len(img['final_captions']) for img in imgs) # total number of captions
label_arrays = []
label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed
label_end_ix = np.zeros(N, dtype='uint32')
label_length = np.zeros(M, dtype='uint32')
caption_counter = 0
counter = 1
for i,img in enumerate(imgs):
n = len(img['final_captions'])
assert n > 0, 'error: some image has no captions'
Li = np.zeros((n, max_length), dtype='uint32')
for j,s in enumerate(img['final_captions']):
label_length[caption_counter] = min(max_length, len(s)) # record the length of this sequence
caption_counter += 1
for k,w in enumerate(s):
if k < max_length:
Li[j,k] = wtoi[w]
# note: word indices are 1-indexed, and captions are padded with zeros
label_arrays.append(Li)
label_start_ix[i] = counter
label_end_ix[i] = counter + n - 1
counter += n
L = np.concatenate(label_arrays, axis=0) # put all the labels together
assert L.shape[0] == M, 'lengths don\'t match? that\'s weird'
assert np.all(label_length > 0), 'error: some caption had no words?'
print('encoded captions to array of size ', L.shape)
return L, label_start_ix, label_end_ix, label_length
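# Added note: label_start_ix / label_end_ix are 1-indexed and inclusive, so image i owns
# rows L[label_start_ix[i]-1 : label_end_ix[i]] of the big label array. This is exactly the
# slicing convention the dataloaders' get_captions() relies on when it subtracts 1 from the
# stored pointers.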
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
imgs = imgs['images']
seed(123) # make reproducible
# create the vocab
vocab = build_vocab(imgs, params)
itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
# encode captions in large arrays, ready to ship to hdf5 file
L, label_start_ix, label_end_ix, label_length = encode_captions(imgs, params, wtoi)
# create output h5 file
N = len(imgs)
f_lb = h5py.File(params['output_h5']+'_label.h5', "w")
f_lb.create_dataset("labels", dtype='uint32', data=L)
f_lb.create_dataset("label_start_ix", dtype='uint32', data=label_start_ix)
f_lb.create_dataset("label_end_ix", dtype='uint32', data=label_end_ix)
f_lb.create_dataset("label_length", dtype='uint32', data=label_length)
f_lb.close()
# create output json file
out = {}
out['ix_to_word'] = itow # encode the (1-indexed) vocab
out['images'] = []
for i,img in enumerate(imgs):
jimg = {}
jimg['split'] = img['split']
if 'filename' in img: jimg['file_path'] = os.path.join(img.get('filepath', ''), img['filename']) # copy it over, might need
if 'cocoid' in img:
jimg['id'] = img['cocoid'] # copy over & maintain an id, if present (e.g. coco ids, useful)
elif 'imgid' in img:
jimg['id'] = img['imgid']
if params['images_root'] != '':
with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
jimg['width'], jimg['height'] = _img.size
out['images'].append(jimg)
json.dump(out, open(params['output_json'], 'w'))
print('wrote ', params['output_json'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--output_json', default='data.json', help='output json file')
parser.add_argument('--output_h5', default='data', help='output h5 file')
parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json')
# options
parser.add_argument('--max_length', default=16, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.')
parser.add_argument('--word_count_threshold', default=5, type=int, help='only words that occur more than this number of times will be put in vocab')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent = 2))
main(params)
|
connect-caption-and-trace-main
|
scripts/prepro_labels.py
|
import torch
import scipy.optimize
import numpy as np
m = 10
pred = torch.rand([10, m, 4])
label = torch.rand([10, m, 4])
def local_OT(D):
    """Solve a 'local' optimal-transport LP for each cost matrix in the batch D.

    D has shape [batch, p, m]; the transport plan is a flattened p x m vector x
    that minimizes <c, x> subject to A x = b. The first p rows of A force each
    row of the plan to sum to 1, the next m rows force each column to sum to 1,
    and the A_local rows forbid any mass outside the +/-1 band around the
    diagonal (b_local = 0). The code assumes p == m.
    """
    p = D.shape[1]; m = D.shape[2]
    # construct the c x, A x = b linear program
    x = torch.rand([10, m*m])  # (unused)
    A = torch.zeros([m+m, m*m])
    b = torch.ones([m+m])
    # row-sum constraints: each row i of the plan sums to 1
    for i in range(p):
        A[i, i*m:(i+1)*m] = 1
    # column-sum constraints: each column i of the plan sums to 1
    for i in range(m):
        for j in range(p):
            A[p+i, j*m+i] = 1
    # locality constraints: row i may only send mass to columns i-1, i, i+1
    A_local = torch.zeros([m, m, m])
    for i in range(m):
        if i+2 <= m-1:
            A_local[i, i, i+2:] = 1
        if i-2 >= 0:
            A_local[i, i, :i-1] = 1
    A_local = A_local.view([m, m*m])
    b_local = torch.zeros([m])
    A = torch.cat([A, A_local], 0).numpy()
    b = torch.cat([b, b_local], 0).numpy()
    # solve one LP per batch element
    T_list = []
    for i in range(D.shape[0]):
        c = D[i].view(-1).detach().cpu().numpy()
        sol = scipy.optimize.linprog(c, A_eq=A, b_eq=b, bounds=(0, None))
        sol_x = torch.from_numpy(sol.x).view([p, m])
        T_list.append(sol_x)
    T = torch.stack(T_list, 0)
    return (T > 0.5).float()  # binarize it
T = local_OT(torch.rand([10,20,20]))
print('finish')
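# Sanity-check sketch (ours, not part of the original script): a valid local
# transport plan should have every row and column summing to 1 and should put
# no mass outside the +/-1 band that A_local enforces above.
def check_local_plan(T):
    row_ok = torch.allclose(T.sum(-1), torch.ones_like(T.sum(-1)))
    col_ok = torch.allclose(T.sum(-2), torch.ones_like(T.sum(-2)))
    band = torch.ones_like(T[0])
    band = torch.triu(band, diagonal=-1) * torch.tril(band, diagonal=1)  # keep |i-j| <= 1
    band_ok = bool((T * (1 - band)).sum() == 0)
    return row_ok, col_ok, band_ok
# e.g. print(check_local_plan(T))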
|
connect-caption-and-trace-main
|
scripts/my_local_optimal_transport.py
|
# copy from https://github.com/Lyken17/Efficient-PyTorch/tools
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path as osp
import os, sys
import os.path as osp
from PIL import Image
import six
import string
import lmdb
import pickle
import tqdm
import numpy as np
import argparse
import json
import torch
import torch.utils.data as data
from torch.utils.data import DataLoader
import csv
csv.field_size_limit(sys.maxsize)
FIELDNAMES = ['image_id', 'status']
class FolderLMDB(data.Dataset):
def __init__(self, db_path, fn_list=None):
self.db_path = db_path
self.env = lmdb.open(db_path, subdir=osp.isdir(db_path),
readonly=True, lock=False,
readahead=False, meminit=False)
if fn_list is not None:
self.length = len(fn_list)
self.keys = fn_list
else:
            raise ValueError('fn_list must be provided')
def __getitem__(self, index):
env = self.env
with env.begin(write=False) as txn:
byteflow = txn.get(self.keys[index].encode())
# load image
imgbuf = byteflow
buf = six.BytesIO()
buf.write(imgbuf)
buf.seek(0)
try:
if args.extension == '.npz':
feat = np.load(buf)['feat']
else:
feat = np.load(buf)
except Exception as e:
print(self.keys[index], e)
return None
return feat
def __len__(self):
return self.length
def __repr__(self):
return self.__class__.__name__ + ' (' + self.db_path + ')'
def make_dataset(dir, extension):
images = []
dir = os.path.expanduser(dir)
for root, _, fnames in sorted(os.walk(dir)):
for fname in sorted(fnames):
            if fname.lower().endswith(extension):  # keep only files with the requested extension
path = os.path.join(root, fname)
images.append(path)
return images
def raw_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
return bin_data
def raw_npz_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
try:
npz_data = np.load(six.BytesIO(bin_data))['feat']
except Exception as e:
print(path)
npz_data = None
print(e)
return bin_data, npz_data
def raw_npy_reader(path):
with open(path, 'rb') as f:
bin_data = f.read()
try:
npy_data = np.load(six.BytesIO(bin_data))
except Exception as e:
print(path)
npy_data = None
print(e)
return bin_data, npy_data
class Folder(data.Dataset):
def __init__(self, root, loader, extension, fn_list=None):
super(Folder, self).__init__()
self.root = root
if fn_list:
samples = [os.path.join(root, str(_)+extension) for _ in fn_list]
else:
samples = make_dataset(self.root, extension)
self.loader = loader
self.extension = extension
self.samples = samples
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
            tuple: (file_id, raw_bytes, decoded_array), i.e. the file name stem followed by the loader output.
"""
path = self.samples[index]
sample = self.loader(path)
return (path.split('/')[-1].split('.')[0],) + sample
def __len__(self):
return len(self.samples)
def folder2lmdb(dpath, fn_list, write_frequency=5000):
directory = osp.expanduser(osp.join(dpath))
print("Loading dataset from %s" % directory)
if args.extension == '.npz':
dataset = Folder(directory, loader=raw_npz_reader, extension='.npz',
fn_list=fn_list)
else:
dataset = Folder(directory, loader=raw_npy_reader, extension='.npy',
fn_list=fn_list)
data_loader = DataLoader(dataset, num_workers=16, collate_fn=lambda x: x)
# lmdb_path = osp.join(dpath, "%s.lmdb" % (directory.split('/')[-1]))
lmdb_path = osp.join("%s.lmdb" % (directory))
isdir = os.path.isdir(lmdb_path)
print("Generate LMDB to %s" % lmdb_path)
db = lmdb.open(lmdb_path, subdir=isdir,
map_size=1099511627776 * 2, readonly=False,
meminit=False, map_async=True)
txn = db.begin(write=True)
tsvfile = open(args.output_file, 'a')
writer = csv.DictWriter(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
names = []
all_keys = []
for idx, data in enumerate(tqdm.tqdm(data_loader)):
# print(type(data), data)
name, byte, npz = data[0]
if npz is not None:
txn.put(name.encode(), byte)
all_keys.append(name)
names.append({'image_id': name, 'status': str(npz is not None)})
if idx % write_frequency == 0:
print("[%d/%d]" % (idx, len(data_loader)))
print('writing')
txn.commit()
txn = db.begin(write=True)
# write in tsv
for name in names:
writer.writerow(name)
names = []
tsvfile.flush()
print('writing finished')
# write all keys
txn.put("keys".encode(), pickle.dumps(all_keys))
# finish iterating through dataset
txn.commit()
for name in names:
writer.writerow(name)
tsvfile.flush()
tsvfile.close()
print("Flushing database ...")
db.sync()
db.close()
def parse_args():
"""
Parse input arguments
"""
parser = argparse.ArgumentParser(description='Generate bbox output from a Fast R-CNN network')
# parser.add_argument('--json)
parser.add_argument('--input_json', default='./data/dataset_coco.json', type=str)
parser.add_argument('--output_file', default='.dump_cache.tsv', type=str)
parser.add_argument('--folder', default='./data/cocobu_att', type=str)
parser.add_argument('--extension', default='.npz', type=str)
args = parser.parse_args()
return args
if __name__ == "__main__":
global args
args = parse_args()
args.output_file += args.folder.split('/')[-1]
if args.folder.find('/') > 0:
args.output_file = args.folder[:args.folder.rfind('/')+1]+args.output_file
print(args.output_file)
img_list = json.load(open(args.input_json, 'r'))['images']
fn_list = [str(_['cocoid']) for _ in img_list]
found_ids = set()
try:
with open(args.output_file, 'r') as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
if item['status'] == 'True':
found_ids.add(item['image_id'])
except:
pass
fn_list = [_ for _ in fn_list if _ not in found_ids]
folder2lmdb(args.folder, fn_list)
# Test existing.
found_ids = set()
with open(args.output_file, 'r') as tsvfile:
reader = csv.DictReader(tsvfile, delimiter='\t', fieldnames=FIELDNAMES)
for item in reader:
if item['status'] == 'True':
found_ids.add(item['image_id'])
folder_dataset = FolderLMDB(args.folder+'.lmdb', list(found_ids))
data_loader = DataLoader(folder_dataset, num_workers=16, collate_fn=lambda x: x)
for data in tqdm.tqdm(data_loader):
assert data[0] is not None
|
connect-caption-and-trace-main
|
scripts/dump_to_lmdb.py
|
import numpy as np
import os
import h5py
import numpy as np
import jsonlines
import re
import json
# The first directory should point to the feature files extracted by Detectron; box_only and feats_only are new folders in which the bounding boxes and features (used during training) are saved.
i = 0
for f in os.listdir('/mnt/m2/Datasets/ADE20k/full_images_feats/features/'):
i += 1
item = np.load('/mnt/m2/Datasets/ADE20k/full_images_feats/features/'+f)
id = f.split('.jpg')[0]
np.save('/mnt/m2/Datasets/ADE20k/full_images_feats/box_only/'+str(id), item['norm_bb'])
np.savez('/mnt/m2/Datasets/ADE20k/full_images_feats/feats_only/'+str(id), item['box_feats'])
if i % 1000 == 0:
print('Processing #', i)
print('finish!')
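# Read-back sketch (illustrative only; same machine-specific paths as above):
# the boxes are plain .npy arrays, while np.savez with a positional argument
# stores the features under the default key 'arr_0'.
def load_box_and_feat(image_id,
                      box_dir='/mnt/m2/Datasets/ADE20k/full_images_feats/box_only/',
                      feat_dir='/mnt/m2/Datasets/ADE20k/full_images_feats/feats_only/'):
    boxes = np.load(box_dir + str(image_id) + '.npy')
    feats = np.load(feat_dir + str(image_id) + '.npz')['arr_0']
    return boxes, feats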
|
connect-caption-and-trace-main
|
scripts/prepare_feats_boxes_from_npz.py
|
"""
Preprocess a raw json dataset into hdf5/json files for use in data_loader.lua
Input: json file that has the form
[{ file_path: 'path/img.jpg', captions: ['a caption', ...] }, ...]
example element in this list would look like
{'captions': [u'A man with a red helmet on a small moped on a dirt road. ', u'Man riding a motor bike on a dirt road on the countryside.', u'A man riding on the back of a motorcycle.', u'A dirt path with a young person on a motor bike rests to the foreground of a verdant area with a bridge and a background of cloud-wreathed mountains. ', u'A man in a red shirt and a red hat is on a motorcycle on a hill side.'], 'file_path': u'val2014/COCO_val2014_000000391895.jpg', 'id': 391895}
This script reads this json, does some basic preprocessing on the captions
(e.g. lowercase, etc.), creates a special UNK token, and encodes everything to arrays
Output: a json file and an hdf5 file
The hdf5 file contains several fields:
/labels is (M,max_length) uint32 array of encoded labels, zero padded
/label_start_ix and /label_end_ix are (N,) uint32 arrays of pointers to the
first and last indices (in range 1..M) of labels for each image
/label_length stores the length of the sequence for each of the M sequences
The json file has a dict that contains:
- an 'ix_to_word' field storing the vocab in form {ix:'word'}, where ix is 1-indexed
- an 'images' field that is a list holding auxiliary information for each image,
such as in particular the 'split' it was assigned to.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import json
import argparse
from random import shuffle, seed
import string
# non-standard dependencies:
import h5py
import numpy as np
import torch
import torchvision.models as models
import skimage.io
from PIL import Image
import codecs
import tempfile
from subword_nmt import learn_bpe, apply_bpe
# python scripts/build_bpe_subword_nmt.py --input_json data/dataset_coco.json --output_json data/cocotalkbpe.json --output_h5 data/cocotalkbpe
def build_vocab(imgs, params):
# count up the number of words
captions = []
for img in imgs:
for sent in img['sentences']:
captions.append(' '.join(sent['tokens']))
captions='\n'.join(captions)
all_captions = tempfile.NamedTemporaryFile(delete=False)
all_captions.close()
with open(all_captions.name, 'w') as txt_file:
txt_file.write(captions)
#
codecs_output = tempfile.NamedTemporaryFile(delete=False)
codecs_output.close()
with codecs.open(codecs_output.name, 'w', encoding='UTF-8') as output:
learn_bpe.learn_bpe(codecs.open(all_captions.name, encoding='UTF-8'), output, params['symbol_count'])
with codecs.open(codecs_output.name, encoding='UTF-8') as codes:
bpe = apply_bpe.BPE(codes)
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.close()
tmpout = codecs.open(tmp.name, 'w', encoding='UTF-8')
for _, img in enumerate(imgs):
img['final_captions'] = []
for sent in img['sentences']:
txt = ' '.join(sent['tokens'])
txt = bpe.segment(txt).strip()
img['final_captions'].append(txt.split(' '))
tmpout.write(txt)
tmpout.write('\n')
if _ < 20:
print(txt)
tmpout.close()
tmpin = codecs.open(tmp.name, encoding='UTF-8')
vocab = learn_bpe.get_vocabulary(tmpin)
vocab = sorted(vocab.keys(), key=lambda x: vocab[x], reverse=True)
# Always insert UNK
print('inserting the special UNK token')
vocab.append('UNK')
print('Vocab size:', len(vocab))
os.remove(all_captions.name)
with open(codecs_output.name, 'r') as codes:
bpe = codes.read()
os.remove(codecs_output.name)
os.remove(tmp.name)
return vocab, bpe
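# Illustration (not used by this script): the codes string returned above is also
# written into the output json as out['bpe'], so it can be re-loaded later to
# segment new text. A minimal sketch assuming the subword_nmt API already used above.
def segment_with_saved_codes(bpe_codes_str, sentence):
    import io
    bpe = apply_bpe.BPE(io.StringIO(bpe_codes_str))
    return bpe.segment(sentence).strip().split(' ')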
def encode_captions(imgs, params, wtoi):
"""
encode all captions into one large array, which will be 1-indexed.
also produces label_start_ix and label_end_ix which store 1-indexed
and inclusive (Lua-style) pointers to the first and last caption for
each image in the dataset.
"""
max_length = params['max_length']
N = len(imgs)
M = sum(len(img['final_captions']) for img in imgs) # total number of captions
label_arrays = []
label_start_ix = np.zeros(N, dtype='uint32') # note: these will be one-indexed
label_end_ix = np.zeros(N, dtype='uint32')
label_length = np.zeros(M, dtype='uint32')
caption_counter = 0
counter = 1
for i,img in enumerate(imgs):
n = len(img['final_captions'])
assert n > 0, 'error: some image has no captions'
Li = np.zeros((n, max_length), dtype='uint32')
for j,s in enumerate(img['final_captions']):
label_length[caption_counter] = min(max_length, len(s)) # record the length of this sequence
caption_counter += 1
for k,w in enumerate(s):
if k < max_length:
Li[j,k] = wtoi[w]
# note: word indices are 1-indexed, and captions are padded with zeros
label_arrays.append(Li)
label_start_ix[i] = counter
label_end_ix[i] = counter + n - 1
counter += n
L = np.concatenate(label_arrays, axis=0) # put all the labels together
assert L.shape[0] == M, 'lengths don\'t match? that\'s weird'
assert np.all(label_length > 0), 'error: some caption had no words?'
print('encoded captions to array of size ', L.shape)
return L, label_start_ix, label_end_ix, label_length
def main(params):
imgs = json.load(open(params['input_json'], 'r'))
imgs = imgs['images']
seed(123) # make reproducible
# create the vocab
vocab, bpe = build_vocab(imgs, params)
itow = {i+1:w for i,w in enumerate(vocab)} # a 1-indexed vocab translation table
wtoi = {w:i+1 for i,w in enumerate(vocab)} # inverse table
# encode captions in large arrays, ready to ship to hdf5 file
L, label_start_ix, label_end_ix, label_length = encode_captions(imgs, params, wtoi)
# create output h5 file
N = len(imgs)
f_lb = h5py.File(params['output_h5']+'_label.h5', "w")
f_lb.create_dataset("labels", dtype='uint32', data=L)
f_lb.create_dataset("label_start_ix", dtype='uint32', data=label_start_ix)
f_lb.create_dataset("label_end_ix", dtype='uint32', data=label_end_ix)
f_lb.create_dataset("label_length", dtype='uint32', data=label_length)
f_lb.close()
# create output json file
out = {}
out['ix_to_word'] = itow # encode the (1-indexed) vocab
out['images'] = []
out['bpe'] = bpe
for i,img in enumerate(imgs):
jimg = {}
jimg['split'] = img['split']
if 'filename' in img: jimg['file_path'] = os.path.join(img['filepath'], img['filename']) # copy it over, might need
        if 'cocoid' in img: jimg['id'] = img['cocoid'] # copy over & maintain an id, if present (e.g. coco ids, useful)
if params['images_root'] != '':
with Image.open(os.path.join(params['images_root'], img['filepath'], img['filename'])) as _img:
jimg['width'], jimg['height'] = _img.size
out['images'].append(jimg)
json.dump(out, open(params['output_json'], 'w'))
print('wrote ', params['output_json'])
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# input json
parser.add_argument('--input_json', required=True, help='input json file to process into hdf5')
parser.add_argument('--output_json', default='data.json', help='output json file')
parser.add_argument('--output_h5', default='data', help='output h5 file')
parser.add_argument('--images_root', default='', help='root location in which images are stored, to be prepended to file_path in input json')
# options
parser.add_argument('--max_length', default=16, type=int, help='max length of a caption, in number of words. captions longer than this get clipped.')
    parser.add_argument('--symbol_count', default=10000, type=int, help='number of BPE merge operations (symbols) to learn for the subword vocabulary')
args = parser.parse_args()
params = vars(args) # convert to ordinary dict
print('parsed input parameters:')
print(json.dumps(params, indent = 2))
main(params)
|
connect-caption-and-trace-main
|
scripts/build_bpe_subword_nmt.py
|
"""
Preprocess PubTator snapshot for use with Snorkel v6.2
1) Download snapshot
2) Split snapshot into blocks
3) Parse blocks and load into database
"""
import os
import sys
import glob
import shutil
import argparse
from time import time
def split_pubtator_corpus(file_path, split_size=500000):
"""
Split PubTator snapshot into blocks of size num_docs
:return:
"""
try:
fp = file_path
nd = split_size
ns = None
# Create directory for the splits
SPLIT_DIR = "%s.splits_%s/" % (fp, nd)
if os.path.exists(SPLIT_DIR):
shutil.rmtree(SPLIT_DIR)
os.makedirs(SPLIT_DIR)
ns_print = ns if ns else ""
print "Splitting %s into %s splits of %s docs each, saving splits in %s" % (fp, ns_print, nd, SPLIT_DIR)
except:
print "USAGE: python split_pubtator_file.py FPATH NDOCS_PER_SPLIT MAX_SPLITS"
sys.exit(1)
with open(fp, 'rb') as f:
s = 0
d = 0
f_out = open(SPLIT_DIR + 'split_%s' % s, 'wb')
for line in f:
f_out.write(line)
if len(line.strip()) == 0:
d += 1
if d % nd == 0:
f_out.close()
s += 1
if ns is None or s < ns:
f_out = open(SPLIT_DIR + 'split_%s' % s, 'wb')
else:
break
f_out.close()
print "Split %s." % d
def main(args):
session = SnorkelSession()
# ---------------------------------------
# 1: Split into blocks
# ---------------------------------------
split_pubtator_corpus(args.input_file, split_size=args.split_size)
# ---------------------------------------
# 2: Parse documents
# ---------------------------------------
filelist = glob.glob("{}.splits_{}/*".format(args.input_file,args.split_size))
# Iterate through the splits
start_ts = time()
for fp in filelist:
doc_preprocessor = PubTatorDocPreprocessor(fp)
parser = Spacy() if args.parser == "spacy" else StanfordCoreNLPServer()
corpus_parser = CorpusParser(parser=parser)
corpus_parser.apply(doc_preprocessor, parallelism=args.num_procs, clear=False)
end_ts = time()
print "Split completed in [%s]" % (time() - end_ts,)
# pubtator_tags = PubTatorTagProcessor()
# for fp in filelist:
# # load entity tags
# pubtator_tags.load_data(session, fp)
print "\nDONE in [%s]" % (time() - start_ts,)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("-d", "--dbname", type=str, default="postgresql:///biocorpus", help="SNORKELDB enviorn variable")
argparser.add_argument("-i", "--input_file", type=str, default="data/bioconcepts2pubtator_offsets.sample",
help="PubTator snapshot")
argparser.add_argument("-s", "--split_size", type=int, default=50000, help="Number of documents per split")
argparser.add_argument("-n", "--num_procs", type=int, default=1, help="Number of processes")
argparser.add_argument("-p", "--parser", action='store', choices=['spacy', 'corenlp'],
default='spacy', help="parser choice")
args = argparser.parse_args()
os.environ['SNORKELDB'] = args.dbname
os.environ['TIKA_LOG_PATH'] = "."
from snorkel import SnorkelSession
from snorkel.parser import CorpusParser, Spacy, StanfordCoreNLPServer
from pubtator import PubTatorDocPreprocessor, PubTatorTagProcessor
main(args)
|
snorkel-biocorpus-master
|
parse_pubtator.py
|
import re
import sys
from itertools import product
from sqlalchemy.sql import select
from collections import defaultdict
from snorkel.udf import UDF, UDFRunner
from snorkel.models import TemporarySpan, Sentence, Document, SequenceTag, Candidate
class SequenceTagCandidateExtractor(UDFRunner):
"""UDFRunner for SequenceTagCandidateExtractorUDF"""
def __init__(self, candidate_class, entity_types, tag_sources=['.*'], self_relations=False,
nested_relations=False, symmetric_relations=True):
super(SequenceTagCandidateExtractor, self).__init__(
SequenceTagCandidateExtractorUDF, candidate_class=candidate_class,
entity_types=entity_types, tag_sources=tag_sources, self_relations=self_relations,
nested_relations=nested_relations, symmetric_relations=symmetric_relations,
)
def apply(self, xs, split=0, **kwargs):
super(SequenceTagCandidateExtractor, self).apply(xs, split=split, **kwargs)
def clear(self, session, split, **kwargs):
session.query(Candidate).filter(Candidate.split == split).delete()
class SequenceTagCandidateExtractorUDF(UDF):
"""
An extractor for pre-tagged entities, stored as SequenceTag objects
"""
def __init__(self, candidate_class, entity_types, tag_sources=['.*'], self_relations=False,
nested_relations=False, symmetric_relations=False, **kwargs):
self.candidate_class = candidate_class
self.entity_types = entity_types
self.tag_sources = [re.compile(p) for p in tag_sources]
self.arity = len(entity_types)
self.self_relations = self_relations
self.nested_relations = nested_relations
self.symmetric_relations = symmetric_relations
self.cache = {}
super(SequenceTagCandidateExtractorUDF, self).__init__(**kwargs)
def _map_annotations(self, doc, tags):
"""
Take sequence tags, defined by absolute char offsets, and map to sentence/span objects
:param:
:param:
:return tuple of sentence index and tag, (int, SequenceTag)
"""
spans = []
char_index = [s.abs_char_offsets[0] for s in doc.sentences]
for t in tags:
position = None
for i in range(len(char_index) - 1):
if t.abs_char_start >= char_index[i] and t.abs_char_end <= char_index[i+1]:
position = i
break
            if position is None and t.abs_char_start >= char_index[-1]:
                position = len(char_index) - 1
            if position is None:
values = (doc.name, doc.id, t.abs_char_start, t.abs_char_end)
sys.stderr.write("Warning! Skipping cross-sentence mention [{}] {} {}:{} \n".format(*values))
continue
try:
shift = doc.sentences[position].abs_char_offsets[0]
span = doc.sentences[position].text[t.abs_char_start-shift:t.abs_char_end-shift]
spans.append((position, t))
except Exception as e:
print "Error!",e
return spans
def apply(self, context, clear, split, check_for_existing=True, **kwargs):
"""Extract Candidates from a Context"""
# For now, just handle Sentences
if not isinstance(context, Sentence):
            raise NotImplementedError("%s is currently only implemented for Sentence contexts." % self.__class__.__name__)
# Load and remap this entire parent document's tag set
if context.document.id not in self.cache:
tags = self.session.query(SequenceTag).filter(SequenceTag.document_id==context.document.id).all()
# filter to 1) target concept/entity types and 2) target sources (e.g., PutTator, TaggerOne)
tags = [t for t in tags if t.concept_type in set(self.entity_types)]
            tags = [t for t in tags if any(rgx.search(t.source) for rgx in self.tag_sources)]
tags = self._map_annotations(context.document, tags)
self.cache[context.document.id] = defaultdict(list)
for position, tag in tags:
self.cache[context.document.id][position].append(tag)
# no tags for this Sentence
if context.position not in self.cache[context.document.id]:
return
spans = self.cache[context.document.id][context.position]
#del self.cache[context.document.id][context.position]
entity_spans = defaultdict(list)
entity_cids = {}
# create temp spans
offsets = [context.document.sentences[i].abs_char_offsets[0] for i in range(len(context.document.sentences))]
i = context.position
for tag in spans:
char_start, char_end = tag.abs_char_start - offsets[i], tag.abs_char_end - offsets[i]
tc = TemporarySpan(char_start=char_start, char_end=char_end - 1, sentence=context.document.sentences[i])
tc.load_id_or_insert(self.session)
entity_cids[tc.id] = tag.concept_uid
entity_spans[tag.concept_type].append(tc)
# Generates and persists candidates
candidate_args = {'split' : split}
for args in product(*[enumerate(entity_spans[et]) for et in self.entity_types]):
if self.arity == 2:
ai, a = args[0]
bi, b = args[1]
# Check for self-joins, "nested" joins (joins from span to its subspan),
# and flipped duplicate "symmetric" relations
if not self.self_relations and a == b:
continue
elif not self.nested_relations and (a in b or b in a):
continue
elif not self.symmetric_relations and ai > bi:
continue
# Assemble candidate arguments
for i, arg_name in enumerate(self.candidate_class.__argnames__):
candidate_args[arg_name + '_id'] = args[i][1].id
candidate_args[arg_name + '_cid'] = entity_cids[args[i][1].id]
# Checking for existence
if check_for_existing:
q = select([self.candidate_class.id])
for key, value in candidate_args.items():
q = q.where(getattr(self.candidate_class, key) == value)
candidate_id = self.session.execute(q).first()
if candidate_id is not None:
continue
# Add Candidate to session
yield self.candidate_class(**candidate_args)
|
snorkel-biocorpus-master
|
custom_cand_generator.py
|
class Tagger(object):
pass
|
snorkel-biocorpus-master
|
pubtator/tags.py
|
import re
import sys
import codecs
import lxml.etree as et
from collections import namedtuple
from snorkel.models import Document, SequenceTag
Tag = namedtuple('Tag', 'document_id abs_char_start abs_char_end concept_type concept_uid source')
class MetadataProcessor(object):
"""
Load external information
"""
def __init__(self, name, encoding="utf-8"):
self.name = name
self.encoding = encoding
class PubMedMetadataProcessor(MetadataProcessor):
"""
Load external information
"""
def __init__(self, name="medline_pubmed", encoding="utf-8"):
"""
:param name:
:param encoding:
"""
super(PubMedMetadataProcessor, self).__init__(name, encoding)
self.mappings = {"issn": {}, "mesh_heading": {}}
def load_data(self, session, file_path):
"""
:param session:
:param file_path:
:return:
"""
# target fields for manually extracted data
doc_xpath = './/PubmedArticle'
date_xpath = './MedlineCitation/DateCompleted'
id_xpath = './MedlineCitation/PMID/text()'
journal_issn_xpath = "./MedlineCitation/Article/Journal/ISSN/text()"
journal_title_xpath = "./MedlineCitation/Article/Journal/Title/text()"
mesh_xpath = "./MedlineCitation/MeshHeadingList/MeshHeading"
doc_metadata = {}
for i, doc in enumerate(et.parse(file_path).xpath(doc_xpath)):
fields = []
try:
pmid = doc.xpath(id_xpath)[0]
# MEDLINE/PubMed date (later than actual publication?)
for elem in doc.xpath(date_xpath):
ts = [(child.tag, child.text) for child in elem.getchildren()]
fields.extend(ts)
# The ISSN (International Standard Serial Number) identifies resources (e.g., journals)
issn = doc.xpath(journal_issn_xpath)[0] if doc.xpath(journal_issn_xpath) else None
title = doc.xpath(journal_title_xpath)[0] if doc.xpath(journal_title_xpath) else None
if issn:
fields.append(('ISSN', issn))
self.mappings['issn'][issn] = title
# Medical Subject Headings
for elem in doc.xpath(mesh_xpath):
for child in elem.getchildren():
ui = child.xpath("./@UI")[0]
major_topic = child.xpath("@MajorTopicYN")[0]
name = "MeshMinor" if major_topic == 'N' else "MeshMajor"
fields.append((name, ui))
self.mappings['mesh_heading'][ui] = child.xpath("text()")[0]
# Store the rest as the raw XML
# TODO
doc_metadata[pmid] = fields
except Exception as e:
print "Error! {}".format(e)
print et.tostring(doc)
print "-" * 100
# build doc_name index
name2id = dict(session.query(Document.name, Document.id).all())
# create ContextAttribute
attributes = []
for doc_id in doc_metadata:
if doc_id not in name2id:
sys.stderr.write("{} not in database, skipping labels...".format(doc_id))
continue
            for name, value in doc_metadata[doc_id]:
                # NOTE: ContextAttribute is not imported in this file; it is assumed to be available alongside the other snorkel models.
                attributes.append(ContextAttribute(document_id=name2id[doc_id], name=name, value=value))
# commit to database
try:
session.bulk_save_objects(attributes)
session.commit()
print("Loaded {} tags...".format(len(attributes)))
except Exception as e:
print "ERROR! {}".format(e)
class PubTatorTagProcessor(MetadataProcessor):
"""
Load PubTator tags
"""
def __init__(self, name="PubTator", encoding="utf-8"):
super(PubTatorTagProcessor, self).__init__(name, encoding)
self.concept_types = {
"Gene": "GNormPlus",
"Disease": "DNorm",
"Chemical": "tmChem",
"Species": "SR4GN",
"DNAMutation": "tmVar",
"ProteinMutation": "tmVar",
"SNP": "tmVar"
}
self.concept_types = {c_type:self.name + "_" + src for c_type, src in self.concept_types.items()}
def load_data(self, session, file_path, name2id = None):
# build doc_name index
if not name2id:
name2id = dict(session.query(Document.name, Document.id).all())
tags = set()
for content in self._doc_generator(file_path, self.encoding):
doc_name, annotations = self._parse(content)
if doc_name not in name2id:
sys.stderr.write("{} not in database, skipping labels...\n".format(doc_name))
continue
for anno in annotations:
doc_name, start, end, mention, concept_type, concept_uid = anno
src = self.concept_types[concept_type]
t = Tag(document_id=name2id[doc_name], abs_char_start=start, abs_char_end=end,
concept_type=concept_type, concept_uid=concept_uid, source=src)
tags.add(t)
return tags
def commit(self, session, file_path):
"""
:param session:
:param file_path:
:return:
"""
tags = self.load_data(session, file_path)
try:
seq_tags = []
while tags:
t = tags.pop()
seq_tags.append(SequenceTag(**t))
del t
session.bulk_save_objects(seq_tags)
session.commit()
print("Loaded {} tags...".format(len(seq_tags)))
except Exception as e:
print "ERROR! {}".format(e)
def _parse(self, content):
"""
:param content:
:return:
"""
# First line is the title
split = re.split(r'\|', content[0].rstrip(), maxsplit=2)
doc_id = split[0]
# Rest of the lines are annotations
annos = []
for line in content[2:]:
anno = line.rstrip('\n').rstrip('\r').split('\t')
if anno[3] == 'NO ABSTRACT':
continue
else:
# Handle cases where no CID is provided...
if len(anno) == 5:
anno.append("")
# Handle leading / trailing whitespace
if anno[3].lstrip() != anno[3]:
d = len(anno[3]) - len(anno[3].lstrip())
anno[1] = int(anno[1]) + d
anno[3] = anno[3].lstrip()
if anno[3].rstrip() != anno[3]:
d = len(anno[3]) - len(anno[3].rstrip())
anno[2] = int(anno[2]) - d
anno[3] = anno[3].rstrip()
annos.append(anno)
return doc_id, annos
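    # Worked example of the whitespace handling above (values illustrative only):
    # a mention ' aspirin ' tagged with offsets 10..19 is trimmed to 'aspirin'
    # and its offsets are tightened to 11..18, so the stored span always matches
    # the stripped mention exactly.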
def _doc_generator(self, file_path, encoding="utf-8"):
"""
PubTator docs are of the form:
UID|TITLE|TEXT
UID|ABSTRACT|TEXT
UID SPAN MENTION ENTITY_TYPE MESH_ID
...
See -- data/bioconcepts2pubtator_offsets.sample
"""
with codecs.open(file_path, "rU", encoding=encoding) as fp:
lines = []
for line in fp:
if len(line.rstrip()) == 0:
if len(lines) > 0:
yield lines
lines = []
else:
lines.append(line)
if len(lines) > 0:
print "**",lines
yield lines
|
snorkel-biocorpus-master
|
pubtator/metadata.py
|
from .parsers import *
from .metadata import *
|
snorkel-biocorpus-master
|
pubtator/__init__.py
|
import re
import codecs
from snorkel.parser import DocPreprocessor
from snorkel.models import Document, split_stable_id
from snorkel.parser import Parser, ParserConnection, Spacy, Sentence
class PubTatorParser(Parser):
"""
Parser wrapper for PubTator annotations. Annotations require some
    data munging to map from PubTator to whatever parse is generated by
our input parser.
"""
ENTITY_SEP = '~@~'
STD_SPLITS_RGX = r'[\s\t\-\/\.]*'
def __init__(self, parser=Spacy(lang='en'), stop_on_err=True, encoding="utf-8"):
super(PubTatorParser, self).__init__(name="PubTatorParser", encoding=encoding)
self.parser = parser
self.stop_on_err = stop_on_err
def _scrub(self, mention):
m = re.sub(r'\'\'|``', '"', mention)
m = re.sub(r'`', "'", m)
return m
def _check_match(self, mention, toks):
"""Check if a string mention matches a list of tokens, without knowledge of token splits"""
return re.match(PubTatorParser.STD_SPLITS_RGX.join(re.escape(self._scrub(t)) for t in toks) +
PubTatorParser.STD_SPLITS_RGX + r'$', self._scrub(mention)) is not None
def _throw_error(self, sentence_parts, mention, toks, msg="Couldn't find match!"):
print sentence_parts
print "Annotation:\t'%s'" % mention
print "Tagged:\t'%s'" % ' '.join(toks)
if self.stop_on_err:
raise ValueError(msg)
else:
print 'WARNING:', msg
def _mark_matched_annotation(self, wi, we, sentence_parts, cid, cid_type):
for j in range(wi, we):
if sentence_parts['entity_cids'][j] == 'O':
sentence_parts['entity_cids'][j] = cid
sentence_parts['entity_types'][j] = cid_type
# Pipe-concatenate multiple labels!
else:
sentence_parts['entity_cids'][j] += PubTatorParser.ENTITY_SEP + cid
sentence_parts['entity_types'][j] += PubTatorParser.ENTITY_SEP + cid_type
def _split_token(self, sentence_parts, abs_offsets, tok_idx, char_idx, mention, toks, left_tok=True):
"""
Split a token, splitting the rest of the parse appropriately as well
Note that this may not result in a correct pos tag split, and dep tree will no longer be a tree...
        If left_tok=True, do not include the split character with the left split; vice versa for False
"""
split_word = sentence_parts['words'][tok_idx]
split_pt = char_idx - abs_offsets[tok_idx]
split_char = split_word[split_pt]
# Decide whether to preserve split or not...
keep_split = re.match(PubTatorParser.STD_SPLITS_RGX + r'$', split_char) is None
lsplit_pt = split_pt if not keep_split or left_tok else split_pt + 1
rsplit_pt = split_pt if keep_split and left_tok else split_pt + 1
# Split CoreNLP token
N = len(sentence_parts['words'])
for k, v in sentence_parts.iteritems():
if isinstance(v, list) and len(v) == N:
token = v[tok_idx]
# If words or lemmas, split the word/lemma
                # Note that we're assuming (and checking) that lemmatization does not
# affect the split point
if k in ['words', 'lemmas']:
# Note: don't enforce splitting for lemmas if index is not in range
# Essentially, this boils down to assuming that the split will either be correct,
# or lemmatization will have chopped the split portion off already
if k == 'lemmas' and split_pt > len(token):
sentence_parts[k][tok_idx] = ''
sentence_parts[k].insert(tok_idx, token)
else:
sentence_parts[k][tok_idx] = token[rsplit_pt:]
sentence_parts[k].insert(tok_idx, token[:lsplit_pt])
elif k == 'char_offsets':
sentence_parts[k][tok_idx] = token + rsplit_pt
sentence_parts[k].insert(tok_idx, token)
# Otherwise, just duplicate the split token's value
else:
sentence_parts[k].insert(tok_idx, token)
def connect(self):
return ParserConnection(self)
def parse(self, doc, text):
annotations = doc.meta["annotations"]
# Track how many annotations are correctly matches
sents = []
matched_annos = []
# Parse the document, iterating over dictionary-form Sentences
for sentence_parts in self.parser.parse(doc, text):
try:
_, _, start, end = split_stable_id(sentence_parts['stable_id'])
# Try to match with annotations
# If we don't get a start / end match, AND there is a split character between, we split the
# token and *modify the CoreNLP parse* here!
for i, anno in enumerate(annotations):
_, s, e, mention, cid_type, cid = anno
si = int(s)
ei = int(e)
# Consider annotations that are in this sentence
if si >= start and si < end:
# We assume mentions are contained within a single sentence, otherwise we skip
# NOTE: This is the one type of annotation we do *not* include!
if ei > end + 1:
print "Skipping cross-sentence mention '%s'" % mention
matched_annos.append(i)
continue
# Get absolute char offsets, i.e. relative to document start
# Note: this needs to be re-calculated each time in case we split the sentence!
abs_offsets = [co + start for co in sentence_parts['char_offsets']]
# Get closest end match; note we assume that the end of the tagged span may be
# *shorter* than the end of a token
we = 0
while we < len(abs_offsets) and abs_offsets[we] < ei:
we += 1
# Handle cases where we *do not* match the start token first by splitting start token
if si not in abs_offsets:
wi = 0
                            while wi + 1 < len(abs_offsets) and abs_offsets[wi + 1] < si:
wi += 1
words = [sentence_parts['words'][j] for j in range(wi, we)]
# Split the start token
try:
self._split_token(sentence_parts, abs_offsets, wi, si - 1, mention, words, left_tok=False)
except IndexError:
self._throw_error(sentence_parts, mention, words, msg="Token split error")
matched_annos.append(i)
continue
# Adjust abs_offsets, wi and we appropriately
abs_offsets = [co + start for co in sentence_parts['char_offsets']]
wi += 1
we += 1
wi = abs_offsets.index(si)
words = [sentence_parts['words'][j] for j in range(wi, we)]
# Full exact match- mark and continue
if self._check_match(mention, words):
matched_annos.append(i)
self._mark_matched_annotation(wi, we, sentence_parts, cid, cid_type)
continue
# Truncated ending
else:
try:
self._split_token(sentence_parts, abs_offsets, we - 1, ei, mention, words)
except IndexError:
self._throw_error(sentence_parts, mention, words, msg="Token split error")
matched_annos.append(i)
continue
# Register and confirm match
words = [sentence_parts['words'][j] for j in range(wi, we)]
if self._check_match(mention, words):
matched_annos.append(i)
self._mark_matched_annotation(wi, we, sentence_parts, cid, cid_type)
else:
self._throw_error(sentence_parts, mention, words)
matched_annos.append(i)
continue
yield sentence_parts
sents.append(sentence_parts)
except Exception as e:
print "WARNING: parsing exception {} in document {}".format(e, doc.name)
# Check if we got everything
if len(annotations) != len(matched_annos):
print "Annotations:"
print annotations
print "Matched annotations:"
print matched_annos
print "\n"
for i in set(range(len(annotations))).difference(matched_annos):
print annotations[i]
print "\n"
for sent in sents:
print sent["stable_id"], sent["words"], sent["char_offsets"]
print "\n"
if self.stop_on_err:
raise Exception("Annotations missed!")
else:
print "WARNING: Annotations missed!"
class PubTatorDocPreprocessor(DocPreprocessor):
"""
Load PubTator annotation snapshot
Entity Tags:
* Genes -- GNormPlus http://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/GNormPlus/
* Diseases -- DNorm http://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/DNorm/
* Chemicals -- tmChem http://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/tmChem/
* Species -- SR4GN http://www.ncbi.nlm.nih.gov/CBBresearch/Lu/downloads/SR4GN/
* Mutations -- tmVar http://www.ncbi.nlm.nih.gov/CBBresearch/Lu/pub/tmVar/
Live demo:
https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/PubTator/
Latest snapshot available at:
ftp.ncbi.nlm.nih.gov/pub/lu/PubTator/bioconcepts2pubtator_offsets.gz
"""
def __init__(self, path, encoding="utf-8", max_docs=float('inf'), filter_id_set=None, annotations=False):
"""
:param path:
:param encoding:
:param max_docs:
:param filter_id_set:
"""
super(PubTatorDocPreprocessor,self).__init__(path, encoding="utf-8", max_docs=float('inf'))
self.filter_id_set = filter_id_set
self.annotations = annotations
def _pubtator_parser(self, content):
"""
:param content:
:return:
"""
# First line is the title
split = re.split(r'\|', content[0].rstrip(), maxsplit=2)
doc_id = int(split[0])
stable_id = self.get_stable_id(doc_id)
doc_text = split[2]
# Second line is the abstract
# Assume these are newline-separated; is this true?
# Note: some articles do not have abstracts, however they still have this line
doc_text += ' ' + re.split(r'\|', content[1].rstrip(), maxsplit=2)[2]
# Rest of the lines are annotations
annos = []
for line in content[2:]:
anno = line.rstrip('\n').rstrip('\r').split('\t')
if anno[3] == 'NO ABSTRACT':
continue
else:
# Handle cases where no CID is provided...
if len(anno) == 5:
anno.append("")
# Handle leading / trailing whitespace
if anno[3].lstrip() != anno[3]:
d = len(anno[3]) - len(anno[3].lstrip())
anno[1] = int(anno[1]) + d
anno[3] = anno[3].lstrip()
if anno[3].rstrip() != anno[3]:
d = len(anno[3]) - len(anno[3].rstrip())
anno[2] = int(anno[2]) - d
anno[3] = anno[3].rstrip()
annos.append(anno)
# Form a Document
doc = Document(
name=doc_id, stable_id=stable_id,
meta={}
)
# Return the doc
return doc, doc_text, annos
def _doc_generator(self, file_path, encoding="utf-8"):
"""
PubTator docs are of the form:
UID|TITLE|TEXT
UID|ABSTRACT|TEXT
UID SPAN MENTION ENTITY_TYPE MESH_ID
...
See -- data/bioconcepts2pubtator_offsets.sample
"""
with codecs.open(file_path, "rU", encoding=encoding) as fp:
lines = []
for line in fp:
if len(line.rstrip()) == 0:
if len(lines) > 0:
# filter docs to target set
                        doc_id = re.split(r'\|', lines[0].rstrip(), maxsplit=2)[0]
if not self.filter_id_set or (self.filter_id_set and doc_id in self.filter_id_set):
yield lines
lines = []
else:
lines.append(line)
def parse_file(self, file_path, file_name):
"""
Parse abstracts
:param file_path:
:param file_name:
:return:
"""
for content in self._doc_generator(file_path, self.encoding):
doc, txt, annos = self._pubtator_parser(content)
if self.annotations:
yield doc, txt, annos
else:
yield doc, txt
|
snorkel-biocorpus-master
|
pubtator/parsers.py
|
import codecs
import spacy
from collections import defaultdict
from snorkel.models import construct_stable_id
from spacy.tokens import Doc
from snorkel.parser import DocPreprocessor
from snorkel.models import Document, split_stable_id
from snorkel.parser import Parser, ParserConnection, Spacy, Sentence
class LineCorpusParser(Parser):
"""
Slight modification of SpaCy parser to allow whitespace tokenization and manual
sentence boundary detection (SBD). Due to the clunky way SpaCy deals with SBD,
we just implement a new class to deal with things.
"""
def __init__(self, annotators=['tagger', 'parser', 'entity'],
lang='en', num_threads=1, tokenize_on_whitespace=True, verbose=False):
super(LineCorpusParser, self).__init__(name="line_space_corpus")
self.model = spacy.load(lang, create_make_doc=WhitespaceTokenizer) if tokenize_on_whitespace else \
spacy.load(lang)
self.num_threads = num_threads
self.pipeline = []
for proc in annotators:
self.pipeline += [self.model.__dict__[proc]]
def _original_string(self, tokens, offsets):
"""
Recreate string with original char offsets
:param tokens:
:param offsets:
:return:
"""
s = ""
for t, i in zip(tokens, offsets):
diff = i - len(s)
if diff:
s += ' ' * diff
s += t
return s
def connect(self):
return ParserConnection(self)
def parse(self, document, text):
'''
Transform spaCy output to match Snorkel's default format
:param document:
:param text:
:return:
'''
offsets, sents = zip(*text)
sents = map(self.to_unicode, list(sents))
sents = map(Parser.strip_null_bytes, list(sents))
# parse each individual sentence
position = 0
sentences = []
for abs_char_offsets, text in zip(offsets, sents):
parts = defaultdict(list)
doc = self.model.make_doc(text)
for proc in self.pipeline:
proc(doc)
assert doc.is_parsed
# recreate original text (with correct offsets)
char_offsets = [i - abs_char_offsets[0] for i in abs_char_offsets]
text = self._original_string(text.split(), char_offsets)
for sent in list(doc.sents):
for i,token in enumerate(sent):
parts['words'].append(str(token))
parts['lemmas'].append(token.lemma_)
parts['pos_tags'].append(token.tag_)
parts['ner_tags'].append(token.ent_type_ if token.ent_type_ else 'O')
head_idx = 0 if token.head is token else token.head.i - sent[0].i + 1
parts['dep_parents'].append(head_idx)
parts['dep_labels'].append(token.dep_)
# Add null entity array (matching null for CoreNLP)
parts['entity_cids'] = ['O' for _ in parts['words']]
parts['entity_types'] = ['O' for _ in parts['words']]
# make char_offsets relative to start of sentence
parts['char_offsets'] = [
p - parts['char_offsets'][0] for p in parts['char_offsets']
]
parts['position'] = position
# Link the sentence to its parent document object
parts['document'] = document
parts['text'] = text
# Add null entity array (matching null for CoreNLP)
parts['entity_cids'] = ['O' for _ in parts['words']]
parts['entity_types'] = ['O' for _ in parts['words']]
parts['char_offsets'] = char_offsets
parts['abs_char_offsets'] = abs_char_offsets
# Assign the stable id as document's stable id plus absolute
# character offset
abs_sent_offset = abs_char_offsets[0]
abs_sent_offset_end = abs_sent_offset + char_offsets[-1] + len(parts['words'][-1])
if document:
parts['stable_id'] = construct_stable_id(document, 'sentence', abs_sent_offset, abs_sent_offset_end)
position += 1
yield parts
class WhitespaceTokenizer(object):
def __init__(self, nlp):
self.vocab = nlp.vocab
def __call__(self, text):
words = text.split(' ')
spaces = [True] * len(words)
return Doc(self.vocab, words=words, spaces=spaces)
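# Illustrative sketch (ours): with the whitespace tokenizer installed, as in
# LineCorpusParser above, every whitespace-separated input token maps to exactly
# one spaCy token, so token offsets line up with the pre-tokenized corpus.
def check_whitespace_alignment(nlp, text):
    doc = nlp.make_doc(text)
    return len(doc) == len(text.split(' '))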
class PretokenizedDocPreprocessor(DocPreprocessor):
"""
Assumes text has already been preprocessed for:
- Sentence Boundary Detection (SBD)
- Word tokenization
Format:
DOCUMENT_ID SENTENCE
"""
def _doc_generator(self, file_path, encoding="utf-8"):
"""
"""
with codecs.open(file_path, "rU", encoding=encoding) as fp:
curr = None
lines = []
for line in fp:
s = line.strip().split("\t")
doc_name = s[0]
s[1] = int(s[1])
s[2] = map(int, s[2].split(","))
if curr == doc_name or curr == None:
lines.append(s)
curr = doc_name
elif curr != doc_name:
yield lines
curr = doc_name
lines = [s]
if lines:
yield lines
def _line_corpus_parser(self, content):
"""
:param content:
:return:
"""
doc_name = None
sents = []
for line in content:
doc_name = line[0]
sents.append(line[2:])
stable_id = self.get_stable_id(doc_name)
# Form a Document
doc = Document(
name=doc_name, stable_id=stable_id,
meta={}
)
# Return the doc
return doc, sents
def parse_file(self, file_path, file_name):
"""
Parse abstracts
:param file_path:
:param file_name:
:return:
"""
for content in self._doc_generator(file_path, self.encoding):
doc, txt = self._line_corpus_parser(content)
yield doc, txt
|
snorkel-biocorpus-master
|
pubtator/doc_parsers.py
|
"""An extensible library for opening URLs using a variety of protocols
The simplest way to use this module is to call the urlopen function,
which accepts a string containing a URL or a Request object (described
below). It opens the URL and returns the results as file-like
object; the returned object has some extra methods described below.
The OpenerDirector manages a collection of Handler objects that do
all the actual work. Each Handler implements a particular protocol or
option. The OpenerDirector is a composite object that invokes the
Handlers needed to open the requested URL. For example, the
HTTPHandler performs HTTP GET and POST requests and deals with
non-error returns. The HTTPRedirectHandler automatically deals with
HTTP 301, 302, 303 and 307 redirect errors, and the HTTPDigestAuthHandler
deals with digest authentication.
urlopen(url, data=None) -- Basic usage is the same as original
urllib. pass the url and optionally data to post to an HTTP URL, and
get a file-like object back. One difference is that you can also pass
a Request instance instead of URL. Raises a URLError (subclass of
IOError); for HTTP errors, raises an HTTPError, which can also be
treated as a valid response.
build_opener -- Function that creates a new OpenerDirector instance.
Will install the default handlers. Accepts one or more Handlers as
arguments, either instances or Handler classes that it will
instantiate. If one of the arguments is a subclass of the default
handler, the argument will be installed instead of the default.
install_opener -- Installs a new opener as the default opener.
objects of interest:
OpenerDirector -- Sets up the User Agent as the Python-urllib client and manages
the Handler classes, while dealing with requests and responses.
Request -- An object that encapsulates the state of a request. The
state can be as simple as the URL. It can also include extra HTTP
headers, e.g. a User-Agent.
BaseHandler --
exceptions:
URLError -- A subclass of IOError, individual protocols have their own
specific subclass.
HTTPError -- Also a valid HTTP response, so you can treat an HTTP error
as an exceptional event or valid response.
internals:
BaseHandler and parent
_call_chain conventions
Example usage:
import urllib2
# set up authentication info
authinfo = urllib2.HTTPBasicAuthHandler()
authinfo.add_password(realm='PDQ Application',
uri='https://mahler:8092/site-updates.py',
user='klem',
passwd='geheim$parole')
proxy_support = urllib2.ProxyHandler({"http" : "http://ahad-haam:3128"})
# build a new opener that adds authentication and caching FTP handlers
opener = urllib2.build_opener(proxy_support, authinfo, urllib2.CacheFTPHandler)
# install it
urllib2.install_opener(opener)
f = urllib2.urlopen('http://www.python.org/')
"""
# XXX issues:
# If an authentication error handler that tries to perform
# authentication for some reason but fails, how should the error be
# signalled? The client needs to know the HTTP error code. But if
# the handler knows that the problem was, e.g., that it didn't know
# the hash algorithm requested in the challenge, it would be good to
# pass that information along to the client, too.
# ftp errors aren't handled cleanly
# check digest against correct (i.e. non-apache) implementation
# Possible extensions:
# complex proxies XXX not sure what exactly was meant by this
# abstract factory for opener
import base64
import hashlib
import httplib
import mimetools
import os
import posixpath
import random
import re
import socket
import sys
import time
import urlparse
import bisect
import warnings
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
# check for SSL
try:
import ssl
except ImportError:
_have_ssl = False
else:
_have_ssl = True
from urllib import (unwrap, unquote, splittype, splithost, quote,
addinfourl, splitport, splittag, toBytes,
splitattr, ftpwrapper, splituser, splitpasswd, splitvalue)
# support for FileHandler, proxies via environment variables
from urllib import localhost, url2pathname, getproxies, proxy_bypass
# used in User-Agent header sent
__version__ = sys.version[:3]
_opener = None
def urlopen(url, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT,
cafile=None, capath=None, cadefault=False, context=None):
global _opener
if cafile or capath or cadefault:
if context is not None:
raise ValueError(
"You can't pass both context and any of cafile, capath, and "
"cadefault"
)
if not _have_ssl:
raise ValueError('SSL support not available')
context = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH,
cafile=cafile,
capath=capath)
https_handler = HTTPSHandler(context=context)
opener = build_opener(https_handler)
elif context:
https_handler = HTTPSHandler(context=context)
opener = build_opener(https_handler)
elif _opener is None:
_opener = opener = build_opener()
else:
opener = _opener
return opener.open(url, data, timeout)
def install_opener(opener):
global _opener
_opener = opener
# do these error classes make sense?
# make sure all of the IOError stuff is overridden. we just want to be
# subtypes.
class URLError(IOError):
# URLError is a sub-type of IOError, but it doesn't share any of
# the implementation. need to override __init__ and __str__.
# It sets self.args for compatibility with other EnvironmentError
# subclasses, but args doesn't have the typical format with errno in
# slot 0 and strerror in slot 1. This may be better than nothing.
def __init__(self, reason):
self.args = reason,
self.reason = reason
def __str__(self):
return '<urlopen error %s>' % self.reason
class HTTPError(URLError, addinfourl):
"""Raised when HTTP error occurs, but also acts like non-error return"""
__super_init = addinfourl.__init__
def __init__(self, url, code, msg, hdrs, fp):
self.code = code
self.msg = msg
self.hdrs = hdrs
self.fp = fp
self.filename = url
# The addinfourl classes depend on fp being a valid file
# object. In some cases, the HTTPError may not have a valid
# file object. If this happens, the simplest workaround is to
# not initialize the base classes.
if fp is not None:
self.__super_init(fp, hdrs, url, code)
def __str__(self):
return 'HTTP Error %s: %s' % (self.code, self.msg)
# since URLError specifies a .reason attribute, HTTPError should also
    # provide this attribute. See issue13211 for discussion.
@property
def reason(self):
return self.msg
def info(self):
return self.hdrs
# copied from cookielib.py
_cut_port_re = re.compile(r":\d+$")
def request_host(request):
"""Return request-host, as defined by RFC 2965.
Variation from RFC: returned value is lowercased, for convenient
comparison.
"""
url = request.get_full_url()
host = urlparse.urlparse(url)[1]
if host == "":
host = request.get_header("Host", "")
# remove port, if present
host = _cut_port_re.sub("", host, 1)
return host.lower()
class Request:
def __init__(self, url, data=None, headers={},
origin_req_host=None, unverifiable=False):
# unwrap('<URL:type://host/path>') --> 'type://host/path'
self.__original = unwrap(url)
self.__original, self.__fragment = splittag(self.__original)
self.type = None
# self.__r_type is what's left after doing the splittype
self.host = None
self.port = None
self._tunnel_host = None
self.data = data
self.headers = {}
for key, value in headers.items():
self.add_header(key, value)
self.unredirected_hdrs = {}
if origin_req_host is None:
origin_req_host = request_host(self)
self.origin_req_host = origin_req_host
self.unverifiable = unverifiable
def __getattr__(self, attr):
# XXX this is a fallback mechanism to guard against these
# methods getting called in a non-standard order. this may be
# too complicated and/or unnecessary.
# XXX should the __r_XXX attributes be public?
if attr[:12] == '_Request__r_':
name = attr[12:]
if hasattr(Request, 'get_' + name):
getattr(self, 'get_' + name)()
return getattr(self, attr)
raise AttributeError, attr
def get_method(self):
if self.has_data():
return "POST"
else:
return "GET"
# XXX these helper methods are lame
def add_data(self, data):
self.data = data
def has_data(self):
return self.data is not None
def get_data(self):
return self.data
def get_full_url(self):
if self.__fragment:
return '%s#%s' % (self.__original, self.__fragment)
else:
return self.__original
def get_type(self):
if self.type is None:
self.type, self.__r_type = splittype(self.__original)
if self.type is None:
raise ValueError, "unknown url type: %s" % self.__original
return self.type
def get_host(self):
if self.host is None:
self.host, self.__r_host = splithost(self.__r_type)
if self.host:
self.host = unquote(self.host)
return self.host
def get_selector(self):
return self.__r_host
def set_proxy(self, host, type):
if self.type == 'https' and not self._tunnel_host:
self._tunnel_host = self.host
else:
self.type = type
self.__r_host = self.__original
self.host = host
def has_proxy(self):
return self.__r_host == self.__original
def get_origin_req_host(self):
return self.origin_req_host
def is_unverifiable(self):
return self.unverifiable
def add_header(self, key, val):
# useful for something like authentication
self.headers[key.capitalize()] = val
def add_unredirected_header(self, key, val):
# will not be added to a redirected request
self.unredirected_hdrs[key.capitalize()] = val
def has_header(self, header_name):
return (header_name in self.headers or
header_name in self.unredirected_hdrs)
def get_header(self, header_name, default=None):
return self.headers.get(
header_name,
self.unredirected_hdrs.get(header_name, default))
def header_items(self):
hdrs = self.unredirected_hdrs.copy()
hdrs.update(self.headers)
return hdrs.items()
class OpenerDirector:
def __init__(self):
client_version = "Python-urllib/%s" % __version__
self.addheaders = [('User-agent', client_version)]
# self.handlers is retained only for backward compatibility
self.handlers = []
# manage the individual handlers
self.handle_open = {}
self.handle_error = {}
self.process_response = {}
self.process_request = {}
def add_handler(self, handler):
if not hasattr(handler, "add_parent"):
raise TypeError("expected BaseHandler instance, got %r" %
type(handler))
added = False
for meth in dir(handler):
if meth in ["redirect_request", "do_open", "proxy_open"]:
# oops, coincidental match
continue
i = meth.find("_")
protocol = meth[:i]
condition = meth[i+1:]
if condition.startswith("error"):
j = condition.find("_") + i + 1
kind = meth[j+1:]
try:
kind = int(kind)
except ValueError:
pass
lookup = self.handle_error.get(protocol, {})
self.handle_error[protocol] = lookup
elif condition == "open":
kind = protocol
lookup = self.handle_open
elif condition == "response":
kind = protocol
lookup = self.process_response
elif condition == "request":
kind = protocol
lookup = self.process_request
else:
continue
handlers = lookup.setdefault(kind, [])
if handlers:
bisect.insort(handlers, handler)
else:
handlers.append(handler)
added = True
if added:
bisect.insort(self.handlers, handler)
handler.add_parent(self)
def close(self):
# Only exists for backwards compatibility.
pass
def _call_chain(self, chain, kind, meth_name, *args):
# Handlers raise an exception if no one else should try to handle
# the request, or return None if they can't but another handler
# could. Otherwise, they return the response.
handlers = chain.get(kind, ())
for handler in handlers:
func = getattr(handler, meth_name)
result = func(*args)
if result is not None:
return result
def open(self, fullurl, data=None, timeout=socket._GLOBAL_DEFAULT_TIMEOUT):
# accept a URL or a Request object
if isinstance(fullurl, basestring):
req = Request(fullurl, data)
else:
req = fullurl
if data is not None:
req.add_data(data)
req.timeout = timeout
protocol = req.get_type()
# pre-process request
meth_name = protocol+"_request"
for processor in self.process_request.get(protocol, []):
meth = getattr(processor, meth_name)
req = meth(req)
response = self._open(req, data)
# post-process response
meth_name = protocol+"_response"
for processor in self.process_response.get(protocol, []):
meth = getattr(processor, meth_name)
response = meth(req, response)
return response
def _open(self, req, data=None):
result = self._call_chain(self.handle_open, 'default',
'default_open', req)
if result:
return result
protocol = req.get_type()
result = self._call_chain(self.handle_open, protocol, protocol +
'_open', req)
if result:
return result
return self._call_chain(self.handle_open, 'unknown',
'unknown_open', req)
def error(self, proto, *args):
if proto in ('http', 'https'):
# XXX http[s] protocols are special-cased
dict = self.handle_error['http'] # https is not different than http
proto = args[2] # YUCK!
meth_name = 'http_error_%s' % proto
http_err = 1
orig_args = args
else:
dict = self.handle_error
meth_name = proto + '_error'
http_err = 0
args = (dict, proto, meth_name) + args
result = self._call_chain(*args)
if result:
return result
if http_err:
args = (dict, 'default', 'http_error_default') + orig_args
return self._call_chain(*args)
# XXX probably also want an abstract factory that knows when it makes
# sense to skip a superclass in favor of a subclass and when it might
# make sense to include both
def build_opener(*handlers):
"""Create an opener object from a list of handlers.
The opener will use several default handlers, including support
for HTTP, FTP and when applicable, HTTPS.
If any of the handlers passed as arguments are subclasses of the
default handlers, the default handlers will not be used.
"""
import types
def isclass(obj):
return isinstance(obj, (types.ClassType, type))
opener = OpenerDirector()
default_classes = [ProxyHandler, UnknownHandler, HTTPHandler,
HTTPDefaultErrorHandler, HTTPRedirectHandler,
FTPHandler, FileHandler, HTTPErrorProcessor]
if hasattr(httplib, 'HTTPS'):
default_classes.append(HTTPSHandler)
skip = set()
for klass in default_classes:
for check in handlers:
if isclass(check):
if issubclass(check, klass):
skip.add(klass)
elif isinstance(check, klass):
skip.add(klass)
for klass in skip:
default_classes.remove(klass)
for klass in default_classes:
opener.add_handler(klass())
for h in handlers:
if isclass(h):
h = h()
opener.add_handler(h)
return opener
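# --- Illustrative sketch (not part of the original urllib2 module) ---
# Typical build_opener() usage: pass handler classes or instances; any default
# handler whose class matches an argument is skipped, per the docstring above.
# ProxyHandler({}) here replaces the default one and disables environment proxies.
def _example_build_opener():
    opener = build_opener(HTTPCookieProcessor(), ProxyHandler({}))
    opener.addheaders = [('User-agent', 'example-client/0.1')]
    response = opener.open("http://www.example.com/")
    try:
        return response.read()
    finally:
        response.close()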
class BaseHandler:
handler_order = 500
def add_parent(self, parent):
self.parent = parent
def close(self):
# Only exists for backwards compatibility
pass
def __lt__(self, other):
if not hasattr(other, "handler_order"):
# Try to preserve the old behavior of having custom classes
# inserted after default ones (works only for custom user
# classes which are not aware of handler_order).
return True
return self.handler_order < other.handler_order
class HTTPErrorProcessor(BaseHandler):
"""Process HTTP error responses."""
handler_order = 1000 # after all other processing
def http_response(self, request, response):
code, msg, hdrs = response.code, response.msg, response.info()
# According to RFC 2616, "2xx" code indicates that the client's
# request was successfully received, understood, and accepted.
if not (200 <= code < 300):
response = self.parent.error(
'http', request, response, code, msg, hdrs)
return response
https_response = http_response
class HTTPDefaultErrorHandler(BaseHandler):
def http_error_default(self, req, fp, code, msg, hdrs):
raise HTTPError(req.get_full_url(), code, msg, hdrs, fp)
class HTTPRedirectHandler(BaseHandler):
# maximum number of redirections to any single URL
# this is needed because of the state that cookies introduce
max_repeats = 4
# maximum total number of redirections (regardless of URL) before
# assuming we're in a loop
max_redirections = 10
def redirect_request(self, req, fp, code, msg, headers, newurl):
"""Return a Request or None in response to a redirect.
This is called by the http_error_30x methods when a
redirection response is received. If a redirection should
take place, return a new Request to allow http_error_30x to
perform the redirect. Otherwise, raise HTTPError if no-one
else should try to handle this url. Return None if you can't
but another Handler might.
"""
m = req.get_method()
if (code in (301, 302, 303, 307) and m in ("GET", "HEAD")
or code in (301, 302, 303) and m == "POST"):
# Strictly (according to RFC 2616), 301 or 302 in response
# to a POST MUST NOT cause a redirection without confirmation
# from the user (of urllib2, in this case). In practice,
# essentially all clients do redirect in this case, so we
# do the same.
# be conciliant with URIs containing a space
newurl = newurl.replace(' ', '%20')
newheaders = dict((k,v) for k,v in req.headers.items()
if k.lower() not in ("content-length", "content-type")
)
return Request(newurl,
headers=newheaders,
origin_req_host=req.get_origin_req_host(),
unverifiable=True)
else:
raise HTTPError(req.get_full_url(), code, msg, headers, fp)
# Implementation note: To avoid the server sending us into an
# infinite loop, the request object needs to track what URLs we
# have already seen. Do this by adding a handler-specific
# attribute to the Request object.
def http_error_302(self, req, fp, code, msg, headers):
# Some servers (incorrectly) return multiple Location headers
# (so probably same goes for URI). Use first header.
if 'location' in headers:
newurl = headers.getheaders('location')[0]
elif 'uri' in headers:
newurl = headers.getheaders('uri')[0]
else:
return
# fix a possible malformed URL
urlparts = urlparse.urlparse(newurl)
if not urlparts.path:
urlparts = list(urlparts)
urlparts[2] = "/"
newurl = urlparse.urlunparse(urlparts)
newurl = urlparse.urljoin(req.get_full_url(), newurl)
# For security reasons we do not allow redirects to protocols
# other than HTTP, HTTPS or FTP.
newurl_lower = newurl.lower()
if not (newurl_lower.startswith('http://') or
newurl_lower.startswith('https://') or
newurl_lower.startswith('ftp://')):
raise HTTPError(newurl, code,
msg + " - Redirection to url '%s' is not allowed" %
newurl,
headers, fp)
# XXX Probably want to forget about the state of the current
# request, although that might interact poorly with other
# handlers that also use handler-specific request attributes
new = self.redirect_request(req, fp, code, msg, headers, newurl)
if new is None:
return
# loop detection
# .redirect_dict has a key url if url was previously visited.
if hasattr(req, 'redirect_dict'):
visited = new.redirect_dict = req.redirect_dict
if (visited.get(newurl, 0) >= self.max_repeats or
len(visited) >= self.max_redirections):
raise HTTPError(req.get_full_url(), code,
self.inf_msg + msg, headers, fp)
else:
visited = new.redirect_dict = req.redirect_dict = {}
visited[newurl] = visited.get(newurl, 0) + 1
# Don't close the fp until we are sure that we won't use it
# with HTTPError.
fp.read()
fp.close()
return self.parent.open(new, timeout=req.timeout)
http_error_301 = http_error_303 = http_error_307 = http_error_302
inf_msg = "The HTTP server returned a redirect error that would " \
"lead to an infinite loop.\n" \
"The last 30x error message was:\n"
def _parse_proxy(proxy):
"""Return (scheme, user, password, host/port) given a URL or an authority.
If a URL is supplied, it must have an authority (host:port) component.
According to RFC 3986, having an authority component means the URL must
have two slashes after the scheme:
>>> _parse_proxy('file:/ftp.example.com/')
Traceback (most recent call last):
ValueError: proxy URL with no authority: 'file:/ftp.example.com/'
The first three items of the returned tuple may be None.
Examples of authority parsing:
>>> _parse_proxy('proxy.example.com')
(None, None, None, 'proxy.example.com')
>>> _parse_proxy('proxy.example.com:3128')
(None, None, None, 'proxy.example.com:3128')
The authority component may optionally include userinfo (assumed to be
username:password):
>>> _parse_proxy('joe:password@proxy.example.com')
(None, 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('joe:password@proxy.example.com:3128')
(None, 'joe', 'password', 'proxy.example.com:3128')
Same examples, but with URLs instead:
>>> _parse_proxy('http://proxy.example.com/')
('http', None, None, 'proxy.example.com')
>>> _parse_proxy('http://proxy.example.com:3128/')
('http', None, None, 'proxy.example.com:3128')
>>> _parse_proxy('http://joe:password@proxy.example.com/')
('http', 'joe', 'password', 'proxy.example.com')
>>> _parse_proxy('http://joe:password@proxy.example.com:3128')
('http', 'joe', 'password', 'proxy.example.com:3128')
Everything after the authority is ignored:
>>> _parse_proxy('ftp://joe:password@proxy.example.com/rubbish:3128')
('ftp', 'joe', 'password', 'proxy.example.com')
Test for no trailing '/' case:
>>> _parse_proxy('http://joe:password@proxy.example.com')
('http', 'joe', 'password', 'proxy.example.com')
"""
scheme, r_scheme = splittype(proxy)
if not r_scheme.startswith("/"):
# authority
scheme = None
authority = proxy
else:
# URL
if not r_scheme.startswith("//"):
raise ValueError("proxy URL with no authority: %r" % proxy)
        # We have an authority, so for RFC 3986-compliant URLs (by ss 3.2.2
        # and 3.3), path is empty or starts with '/'
end = r_scheme.find("/", 2)
if end == -1:
end = None
authority = r_scheme[2:end]
userinfo, hostport = splituser(authority)
if userinfo is not None:
user, password = splitpasswd(userinfo)
else:
user = password = None
return scheme, user, password, hostport
class ProxyHandler(BaseHandler):
# Proxies must be in front
handler_order = 100
def __init__(self, proxies=None):
if proxies is None:
proxies = getproxies()
assert hasattr(proxies, 'has_key'), "proxies must be a mapping"
self.proxies = proxies
for type, url in proxies.items():
setattr(self, '%s_open' % type,
lambda r, proxy=url, type=type, meth=self.proxy_open: \
meth(r, proxy, type))
def proxy_open(self, req, proxy, type):
orig_type = req.get_type()
proxy_type, user, password, hostport = _parse_proxy(proxy)
if proxy_type is None:
proxy_type = orig_type
if req.host and proxy_bypass(req.host):
return None
if user and password:
user_pass = '%s:%s' % (unquote(user), unquote(password))
creds = base64.b64encode(user_pass).strip()
req.add_header('Proxy-authorization', 'Basic ' + creds)
hostport = unquote(hostport)
req.set_proxy(hostport, proxy_type)
if orig_type == proxy_type or orig_type == 'https':
# let other handlers take care of it
return None
else:
# need to start over, because the other handlers don't
# grok the proxy's URL type
# e.g. if we have a constructor arg proxies like so:
# {'http': 'ftp://proxy.example.com'}, we may end up turning
# a request for http://acme.example.com/a into one for
# ftp://proxy.example.com/a
return self.parent.open(req, timeout=req.timeout)
class HTTPPasswordMgr:
def __init__(self):
self.passwd = {}
def add_password(self, realm, uri, user, passwd):
# uri could be a single URI or a sequence
if isinstance(uri, basestring):
uri = [uri]
if not realm in self.passwd:
self.passwd[realm] = {}
for default_port in True, False:
reduced_uri = tuple(
[self.reduce_uri(u, default_port) for u in uri])
self.passwd[realm][reduced_uri] = (user, passwd)
def find_user_password(self, realm, authuri):
domains = self.passwd.get(realm, {})
for default_port in True, False:
reduced_authuri = self.reduce_uri(authuri, default_port)
for uris, authinfo in domains.iteritems():
for uri in uris:
if self.is_suburi(uri, reduced_authuri):
return authinfo
return None, None
def reduce_uri(self, uri, default_port=True):
"""Accept authority or URI and extract only the authority and path."""
# note HTTP URLs do not have a userinfo component
parts = urlparse.urlsplit(uri)
if parts[1]:
# URI
scheme = parts[0]
authority = parts[1]
path = parts[2] or '/'
else:
# host or host:port
scheme = None
authority = uri
path = '/'
host, port = splitport(authority)
if default_port and port is None and scheme is not None:
dport = {"http": 80,
"https": 443,
}.get(scheme)
if dport is not None:
authority = "%s:%d" % (host, dport)
return authority, path
def is_suburi(self, base, test):
"""Check if test is below base in a URI tree
Both args must be URIs in reduced form.
"""
if base == test:
return True
if base[0] != test[0]:
return False
common = posixpath.commonprefix((base[1], test[1]))
if len(common) == len(base[1]):
return True
return False
class HTTPPasswordMgrWithDefaultRealm(HTTPPasswordMgr):
def find_user_password(self, realm, authuri):
user, password = HTTPPasswordMgr.find_user_password(self, realm,
authuri)
if user is not None:
return user, password
return HTTPPasswordMgr.find_user_password(self, None, authuri)
class AbstractBasicAuthHandler:
# XXX this allows for multiple auth-schemes, but will stupidly pick
# the last one with a realm specified.
# allow for double- and single-quoted realm values
# (single quotes are a violation of the RFC, but appear in the wild)
rx = re.compile('(?:.*,)*[ \t]*([^ \t]+)[ \t]+'
'realm=(["\']?)([^"\']*)\\2', re.I)
# XXX could pre-emptively send auth info already accepted (RFC 2617,
# end of section 2, and section 1.2 immediately after "credentials"
# production).
def __init__(self, password_mgr=None):
if password_mgr is None:
password_mgr = HTTPPasswordMgr()
self.passwd = password_mgr
self.add_password = self.passwd.add_password
def http_error_auth_reqed(self, authreq, host, req, headers):
# host may be an authority (without userinfo) or a URL with an
# authority
# XXX could be multiple headers
authreq = headers.get(authreq, None)
if authreq:
mo = AbstractBasicAuthHandler.rx.search(authreq)
if mo:
scheme, quote, realm = mo.groups()
if quote not in ['"', "'"]:
warnings.warn("Basic Auth Realm was unquoted",
UserWarning, 2)
if scheme.lower() == 'basic':
return self.retry_http_basic_auth(host, req, realm)
def retry_http_basic_auth(self, host, req, realm):
user, pw = self.passwd.find_user_password(realm, host)
if pw is not None:
raw = "%s:%s" % (user, pw)
auth = 'Basic %s' % base64.b64encode(raw).strip()
if req.get_header(self.auth_header, None) == auth:
return None
req.add_unredirected_header(self.auth_header, auth)
return self.parent.open(req, timeout=req.timeout)
else:
return None
class HTTPBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Authorization'
def http_error_401(self, req, fp, code, msg, headers):
url = req.get_full_url()
response = self.http_error_auth_reqed('www-authenticate',
url, req, headers)
return response
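# --- Illustrative sketch (not part of the original urllib2 module) ---
# Wiring HTTPBasicAuthHandler into an opener: the password manager maps
# (realm, uri) -> credentials, and retry_http_basic_auth() replays the request
# with an Authorization header after a 401.
def _example_basic_auth_opener():
    mgr = HTTPPasswordMgrWithDefaultRealm()
    mgr.add_password(None, "http://www.example.com/", "user", "secret")
    opener = build_opener(HTTPBasicAuthHandler(mgr))
    return opener.open("http://www.example.com/protected")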
class ProxyBasicAuthHandler(AbstractBasicAuthHandler, BaseHandler):
auth_header = 'Proxy-authorization'
def http_error_407(self, req, fp, code, msg, headers):
# http_error_auth_reqed requires that there is no userinfo component in
# authority. Assume there isn't one, since urllib2 does not (and
# should not, RFC 3986 s. 3.2.1) support requests for URLs containing
# userinfo.
authority = req.get_host()
response = self.http_error_auth_reqed('proxy-authenticate',
authority, req, headers)
return response
def randombytes(n):
"""Return n random bytes."""
# Use /dev/urandom if it is available. Fall back to random module
# if not. It might be worthwhile to extend this function to use
# other platform-specific mechanisms for getting random bytes.
if os.path.exists("/dev/urandom"):
f = open("/dev/urandom")
s = f.read(n)
f.close()
return s
else:
L = [chr(random.randrange(0, 256)) for i in range(n)]
return "".join(L)
class AbstractDigestAuthHandler:
# Digest authentication is specified in RFC 2617.
# XXX The client does not inspect the Authentication-Info header
# in a successful response.
# XXX It should be possible to test this implementation against
# a mock server that just generates a static set of challenges.
    # XXX qop="auth-int" support is shaky
def __init__(self, passwd=None):
if passwd is None:
passwd = HTTPPasswordMgr()
self.passwd = passwd
self.add_password = self.passwd.add_password
self.retried = 0
self.nonce_count = 0
self.last_nonce = None
def reset_retry_count(self):
self.retried = 0
def http_error_auth_reqed(self, auth_header, host, req, headers):
authreq = headers.get(auth_header, None)
if self.retried > 5:
# Don't fail endlessly - if we failed once, we'll probably
# fail a second time. Hm. Unless the Password Manager is
# prompting for the information. Crap. This isn't great
# but it's better than the current 'repeat until recursion
# depth exceeded' approach <wink>
raise HTTPError(req.get_full_url(), 401, "digest auth failed",
headers, None)
else:
self.retried += 1
if authreq:
scheme = authreq.split()[0]
if scheme.lower() == 'digest':
return self.retry_http_digest_auth(req, authreq)
def retry_http_digest_auth(self, req, auth):
token, challenge = auth.split(' ', 1)
chal = parse_keqv_list(parse_http_list(challenge))
auth = self.get_authorization(req, chal)
if auth:
auth_val = 'Digest %s' % auth
if req.headers.get(self.auth_header, None) == auth_val:
return None
req.add_unredirected_header(self.auth_header, auth_val)
resp = self.parent.open(req, timeout=req.timeout)
return resp
def get_cnonce(self, nonce):
# The cnonce-value is an opaque
# quoted string value provided by the client and used by both client
# and server to avoid chosen plaintext attacks, to provide mutual
# authentication, and to provide some message integrity protection.
# This isn't a fabulous effort, but it's probably Good Enough.
dig = hashlib.sha1("%s:%s:%s:%s" % (self.nonce_count, nonce, time.ctime(),
randombytes(8))).hexdigest()
return dig[:16]
def get_authorization(self, req, chal):
try:
realm = chal['realm']
nonce = chal['nonce']
qop = chal.get('qop')
algorithm = chal.get('algorithm', 'MD5')
# mod_digest doesn't send an opaque, even though it isn't
# supposed to be optional
opaque = chal.get('opaque', None)
except KeyError:
return None
H, KD = self.get_algorithm_impls(algorithm)
if H is None:
return None
user, pw = self.passwd.find_user_password(realm, req.get_full_url())
if user is None:
return None
# XXX not implemented yet
if req.has_data():
entdig = self.get_entity_digest(req.get_data(), chal)
else:
entdig = None
A1 = "%s:%s:%s" % (user, realm, pw)
A2 = "%s:%s" % (req.get_method(),
# XXX selector: what about proxies and full urls
req.get_selector())
if qop == 'auth':
if nonce == self.last_nonce:
self.nonce_count += 1
else:
self.nonce_count = 1
self.last_nonce = nonce
ncvalue = '%08x' % self.nonce_count
cnonce = self.get_cnonce(nonce)
noncebit = "%s:%s:%s:%s:%s" % (nonce, ncvalue, cnonce, qop, H(A2))
respdig = KD(H(A1), noncebit)
elif qop is None:
respdig = KD(H(A1), "%s:%s" % (nonce, H(A2)))
else:
# XXX handle auth-int.
raise URLError("qop '%s' is not supported." % qop)
# XXX should the partial digests be encoded too?
base = 'username="%s", realm="%s", nonce="%s", uri="%s", ' \
'response="%s"' % (user, realm, nonce, req.get_selector(),
respdig)
if opaque:
base += ', opaque="%s"' % opaque
if entdig:
base += ', digest="%s"' % entdig
base += ', algorithm="%s"' % algorithm
if qop:
base += ', qop=auth, nc=%s, cnonce="%s"' % (ncvalue, cnonce)
return base
def get_algorithm_impls(self, algorithm):
# algorithm should be case-insensitive according to RFC2617
algorithm = algorithm.upper()
# lambdas assume digest modules are imported at the top level
if algorithm == 'MD5':
H = lambda x: hashlib.md5(x).hexdigest()
elif algorithm == 'SHA':
H = lambda x: hashlib.sha1(x).hexdigest()
# XXX MD5-sess
KD = lambda s, d: H("%s:%s" % (s, d))
return H, KD
def get_entity_digest(self, data, chal):
# XXX not implemented yet
return None
class HTTPDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
"""An authentication protocol defined by RFC 2069
Digest authentication improves on basic authentication because it
does not transmit passwords in the clear.
"""
auth_header = 'Authorization'
handler_order = 490 # before Basic auth
def http_error_401(self, req, fp, code, msg, headers):
host = urlparse.urlparse(req.get_full_url())[1]
retry = self.http_error_auth_reqed('www-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class ProxyDigestAuthHandler(BaseHandler, AbstractDigestAuthHandler):
auth_header = 'Proxy-Authorization'
handler_order = 490 # before Basic auth
def http_error_407(self, req, fp, code, msg, headers):
host = req.get_host()
retry = self.http_error_auth_reqed('proxy-authenticate',
host, req, headers)
self.reset_retry_count()
return retry
class AbstractHTTPHandler(BaseHandler):
def __init__(self, debuglevel=0):
self._debuglevel = debuglevel
def set_http_debuglevel(self, level):
self._debuglevel = level
def do_request_(self, request):
host = request.get_host()
if not host:
raise URLError('no host given')
if request.has_data(): # POST
data = request.get_data()
if not request.has_header('Content-type'):
request.add_unredirected_header(
'Content-type',
'application/x-www-form-urlencoded')
if not request.has_header('Content-length'):
request.add_unredirected_header(
'Content-length', '%d' % len(data))
sel_host = host
if request.has_proxy():
scheme, sel = splittype(request.get_selector())
sel_host, sel_path = splithost(sel)
if not request.has_header('Host'):
request.add_unredirected_header('Host', sel_host)
for name, value in self.parent.addheaders:
name = name.capitalize()
if not request.has_header(name):
request.add_unredirected_header(name, value)
return request
def do_open(self, http_class, req, **http_conn_args):
"""Return an addinfourl object for the request, using http_class.
http_class must implement the HTTPConnection API from httplib.
The addinfourl return value is a file-like object. It also
has methods and attributes including:
- info(): return a mimetools.Message object for the headers
- geturl(): return the original request URL
- code: HTTP status code
"""
host = req.get_host()
if not host:
raise URLError('no host given')
# will parse host:port
h = http_class(host, timeout=req.timeout, **http_conn_args)
h.set_debuglevel(self._debuglevel)
headers = dict(req.unredirected_hdrs)
headers.update(dict((k, v) for k, v in req.headers.items()
if k not in headers))
# We want to make an HTTP/1.1 request, but the addinfourl
# class isn't prepared to deal with a persistent connection.
# It will try to read all remaining data from the socket,
# which will block while the server waits for the next request.
# So make sure the connection gets closed after the (only)
# request.
headers["Connection"] = "close"
headers = dict(
(name.title(), val) for name, val in headers.items())
if req._tunnel_host:
tunnel_headers = {}
proxy_auth_hdr = "Proxy-Authorization"
if proxy_auth_hdr in headers:
tunnel_headers[proxy_auth_hdr] = headers[proxy_auth_hdr]
# Proxy-Authorization should not be sent to origin
# server.
del headers[proxy_auth_hdr]
h.set_tunnel(req._tunnel_host, headers=tunnel_headers)
try:
h.request(req.get_method(), req.get_selector(), req.data, headers)
except socket.error, err: # XXX what error?
h.close()
raise URLError(err)
else:
try:
r = h.getresponse(buffering=True)
except TypeError: # buffering kw not supported
r = h.getresponse()
# Pick apart the HTTPResponse object to get the addinfourl
# object initialized properly.
# Wrap the HTTPResponse object in socket's file object adapter
# for Windows. That adapter calls recv(), so delegate recv()
# to read(). This weird wrapping allows the returned object to
# have readline() and readlines() methods.
# XXX It might be better to extract the read buffering code
# out of socket._fileobject() and into a base class.
r.recv = r.read
fp = socket._fileobject(r, close=True)
resp = addinfourl(fp, r.msg, req.get_full_url())
resp.code = r.status
resp.msg = r.reason
return resp
class HTTPHandler(AbstractHTTPHandler):
def http_open(self, req):
return self.do_open(httplib.HTTPConnection, req)
http_request = AbstractHTTPHandler.do_request_
if hasattr(httplib, 'HTTPS'):
class HTTPSHandler(AbstractHTTPHandler):
def __init__(self, debuglevel=0, context=None):
AbstractHTTPHandler.__init__(self, debuglevel)
self._context = context
def https_open(self, req):
return self.do_open(httplib.HTTPSConnection, req,
context=self._context)
https_request = AbstractHTTPHandler.do_request_
class HTTPCookieProcessor(BaseHandler):
def __init__(self, cookiejar=None):
import cookielib
if cookiejar is None:
cookiejar = cookielib.CookieJar()
self.cookiejar = cookiejar
def http_request(self, request):
self.cookiejar.add_cookie_header(request)
return request
def http_response(self, request, response):
self.cookiejar.extract_cookies(response, request)
return response
https_request = http_request
https_response = http_response
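# --- Illustrative sketch (not part of the original urllib2 module) ---
# HTTPCookieProcessor round-trips cookies: http_request() attaches stored cookies,
# http_response() extracts Set-Cookie headers back into the jar, so a shared jar
# behaves like a browser session across requests.
def _example_cookie_session():
    import cookielib
    jar = cookielib.CookieJar()
    opener = build_opener(HTTPCookieProcessor(jar))
    opener.open("http://www.example.com/login")
    # the same jar (and therefore any session cookie) is reused here
    return opener.open("http://www.example.com/account"), jar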
class UnknownHandler(BaseHandler):
def unknown_open(self, req):
type = req.get_type()
raise URLError('unknown url type: %s' % type)
def parse_keqv_list(l):
"""Parse list of key=value strings where keys are not duplicated."""
parsed = {}
for elt in l:
k, v = elt.split('=', 1)
if v[0] == '"' and v[-1] == '"':
v = v[1:-1]
parsed[k] = v
return parsed
def parse_http_list(s):
"""Parse lists as described by RFC 2068 Section 2.
In particular, parse comma-separated lists where the elements of
the list may include quoted-strings. A quoted-string could
contain a comma. A non-quoted string could have quotes in the
middle. Neither commas nor quotes count if they are escaped.
Only double-quotes count, not single-quotes.
"""
res = []
part = ''
escape = quote = False
for cur in s:
if escape:
part += cur
escape = False
continue
if quote:
if cur == '\\':
escape = True
continue
elif cur == '"':
quote = False
part += cur
continue
if cur == ',':
res.append(part)
part = ''
continue
if cur == '"':
quote = True
part += cur
# append last part
if part:
res.append(part)
return [part.strip() for part in res]
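# --- Illustrative sketch (not part of the original urllib2 module) ---
# parse_http_list() splits on commas that fall outside double quotes, and
# parse_keqv_list() then turns digest-auth style key=value items into a dict.
def _example_parse_challenge():
    challenge = 'realm="x, y", nonce="abc", qop=auth'
    items = parse_http_list(challenge)   # ['realm="x, y"', 'nonce="abc"', 'qop=auth']
    return parse_keqv_list(items)        # {'realm': 'x, y', 'nonce': 'abc', 'qop': 'auth'}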
def _safe_gethostbyname(host):
try:
return socket.gethostbyname(host)
except socket.gaierror:
return None
class FileHandler(BaseHandler):
# Use local file or FTP depending on form of URL
def file_open(self, req):
url = req.get_selector()
if url[:2] == '//' and url[2:3] != '/' and (req.host and
req.host != 'localhost'):
req.type = 'ftp'
return self.parent.open(req)
else:
return self.open_local_file(req)
# names for the localhost
names = None
def get_names(self):
if FileHandler.names is None:
try:
FileHandler.names = tuple(
socket.gethostbyname_ex('localhost')[2] +
socket.gethostbyname_ex(socket.gethostname())[2])
except socket.gaierror:
FileHandler.names = (socket.gethostbyname('localhost'),)
return FileHandler.names
# not entirely sure what the rules are here
def open_local_file(self, req):
import email.utils
import mimetypes
host = req.get_host()
filename = req.get_selector()
localfile = url2pathname(filename)
try:
stats = os.stat(localfile)
size = stats.st_size
modified = email.utils.formatdate(stats.st_mtime, usegmt=True)
mtype = mimetypes.guess_type(filename)[0]
headers = mimetools.Message(StringIO(
'Content-type: %s\nContent-length: %d\nLast-modified: %s\n' %
(mtype or 'text/plain', size, modified)))
if host:
host, port = splitport(host)
if not host or \
(not port and _safe_gethostbyname(host) in self.get_names()):
if host:
origurl = 'file://' + host + filename
else:
origurl = 'file://' + filename
return addinfourl(open(localfile, 'rb'), headers, origurl)
except OSError, msg:
# urllib2 users shouldn't expect OSErrors coming from urlopen()
raise URLError(msg)
raise URLError('file not on local host')
class FTPHandler(BaseHandler):
def ftp_open(self, req):
import ftplib
import mimetypes
host = req.get_host()
if not host:
raise URLError('ftp error: no host given')
host, port = splitport(host)
if port is None:
port = ftplib.FTP_PORT
else:
port = int(port)
# username/password handling
user, host = splituser(host)
if user:
user, passwd = splitpasswd(user)
else:
passwd = None
host = unquote(host)
user = user or ''
passwd = passwd or ''
try:
host = socket.gethostbyname(host)
except socket.error, msg:
raise URLError(msg)
path, attrs = splitattr(req.get_selector())
dirs = path.split('/')
dirs = map(unquote, dirs)
dirs, file = dirs[:-1], dirs[-1]
if dirs and not dirs[0]:
dirs = dirs[1:]
try:
fw = self.connect_ftp(user, passwd, host, port, dirs, req.timeout)
type = file and 'I' or 'D'
for attr in attrs:
attr, value = splitvalue(attr)
if attr.lower() == 'type' and \
value in ('a', 'A', 'i', 'I', 'd', 'D'):
type = value.upper()
fp, retrlen = fw.retrfile(file, type)
headers = ""
mtype = mimetypes.guess_type(req.get_full_url())[0]
if mtype:
headers += "Content-type: %s\n" % mtype
if retrlen is not None and retrlen >= 0:
headers += "Content-length: %d\n" % retrlen
sf = StringIO(headers)
headers = mimetools.Message(sf)
return addinfourl(fp, headers, req.get_full_url())
except ftplib.all_errors, msg:
raise URLError, ('ftp error: %s' % msg), sys.exc_info()[2]
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
fw = ftpwrapper(user, passwd, host, port, dirs, timeout,
persistent=False)
## fw.ftp.set_debuglevel(1)
return fw
class CacheFTPHandler(FTPHandler):
# XXX would be nice to have pluggable cache strategies
# XXX this stuff is definitely not thread safe
def __init__(self):
self.cache = {}
self.timeout = {}
self.soonest = 0
self.delay = 60
self.max_conns = 16
def setTimeout(self, t):
self.delay = t
def setMaxConns(self, m):
self.max_conns = m
def connect_ftp(self, user, passwd, host, port, dirs, timeout):
key = user, host, port, '/'.join(dirs), timeout
if key in self.cache:
self.timeout[key] = time.time() + self.delay
else:
self.cache[key] = ftpwrapper(user, passwd, host, port, dirs, timeout)
self.timeout[key] = time.time() + self.delay
self.check_cache()
return self.cache[key]
def check_cache(self):
# first check for old ones
t = time.time()
if self.soonest <= t:
for k, v in self.timeout.items():
if v < t:
self.cache[k].close()
del self.cache[k]
del self.timeout[k]
self.soonest = min(self.timeout.values())
# then check the size
if len(self.cache) == self.max_conns:
for k, v in self.timeout.items():
if v == self.soonest:
del self.cache[k]
del self.timeout[k]
break
self.soonest = min(self.timeout.values())
def clear_cache(self):
for conn in self.cache.values():
conn.close()
self.cache.clear()
self.timeout.clear()
|
snorkel-biocorpus-master
|
pubtator/api/urllib2.py
|
import urllib2
import time
import sys
import getopt
inputfile = ''
bioconcept = ''
format = ''
try:
options, remainder = getopt.getopt(sys.argv[1:], 'i:b:f:', ['inputfile=','bioconcept=','format='])
except getopt.GetoptError, err:
print "\npython RESTful.client.get.py -i [inputfile] -b [bioconcept] -f [format]\n"
print "\t bioconcept: We support five kinds of bioconcepts, i.e., Gene, Disease, Chemical, Species, Mutation. When 'BioConcept' is used, all five are included.\n"
print "\t inputfile: a file with a pmid list\n"
print "\t format: PubTator (tab-delimited text file), BioC (xml), and JSON\n\n"
sys.exit(0)
for opt, arg in options:
if opt in ('-i', '--inputfile'):
inputfile = arg
elif opt in ('-b', '--bioconcept'):
bioconcept = arg
elif opt in ('-f', '--format'):
format = arg
fh = open(inputfile)
for pmid in fh:
#Submit
pmid=pmid.rstrip('\r\n')
url_Submit = "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/RESTful/tmTool.cgi/" + bioconcept + "/" + pmid + "/" + format + "/"
urllib_result = urllib2.urlopen(url_Submit)
print urllib_result.read()
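# Illustrative usage (assuming a file "pmids.txt" with one PMID per line):
#   python RESTful.client.get.py -i pmids.txt -b BioConcept -f PubTator
# Each PMID is fetched individually from the tmTool RESTful endpoint and the
# annotated abstract is printed to stdout in the requested format.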
|
snorkel-biocorpus-master
|
pubtator/api/RESTful.client.get.py
|
import urllib2
import time
import sys
import getopt
inputfile = ''
trigger = ''
taxonomy = ''
email = ''
PubTator_username = ''
url_Submit = ''
try:
options, remainder = getopt.getopt(sys.argv[1:], 'i:t:x:e:', ['inputfile=','trigger=','taxonomy=','email='])
except getopt.GetoptError, err:
print "\npython RESTful.client.post.py -i [inputfile] -t [trigger:tmChem|DNorm|tmVar|GNormPlus] -e [E-mail](optional)\n"
print "\npython RESTful.client.post.py -i [inputfile] -t GNormPlus -x [taxonomy]\n"
sys.exit(0)
for opt, arg in options:
if opt in ('-i', '--inputfile'):
inputfile = arg
elif opt in ('-t', '--trigger'):
trigger = arg
elif opt in ('-x', '--taxonomy'):
taxonomy = arg
    elif opt in ('-e', '--email'):
email = arg
#Submit
if taxonomy != '':
url_Submit = "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/RESTful/tmTool.cgi/" + trigger + "/" + taxonomy + "/"
elif email != '':
url_Submit = "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/RESTful/tmTool.cgi/" + trigger + "/Submit:" + email + "/"
else:
url_Submit = "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/RESTful/tmTool.cgi/" + trigger + "/Submit/"
fh = open(inputfile)
InputSTR=''
for line in fh:
InputSTR = InputSTR + line
# submit once; the service returns a session number in the response body
urllib_submit = urllib2.urlopen(url_Submit, InputSTR)
SessionNumber = urllib_submit.read()
if email != '':
    print "Thanks for your submission (Session number: " + SessionNumber + ").\nThe result will be sent to your E-mail: " + email + ".\n"
else:
print "Thanks for your submission. The session number is : "+ SessionNumber + "\n"
print "\nThe request is received and processing....\n\n"
#Receive
url_Receive = "https://www.ncbi.nlm.nih.gov/CBBresearch/Lu/Demo/RESTful/tmTool.cgi/" + SessionNumber + "/Receive/"
code=404
while(code == 404 or code == 501):
time.sleep(5)
try:
urllib_result = urllib2.urlopen(url_Receive)
except urllib2.HTTPError as e:
code = e.code
    except urllib2.URLError:
        code = 404  # URLError carries no HTTP status (only .reason); keep polling
else:
code = urllib_result.getcode()
print urllib_result.read()
|
snorkel-biocorpus-master
|
pubtator/api/RESTful.client.post.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
------------------------------------------------
Learn Common Phrases
------------------------------------------------
Train PMI-based phrase models. Currently this only assumes bigrams,
but it can be extended easily.
"""
import sys
import logging
import argparse
from utils import exec_time
from corpora import TextNormalizer
from gensim.models.phrases import Phrases
from gensim.models.word2vec import LineSentence
def main(args):
sentences = TextNormalizer(LineSentence(args.infile),
args.keep_mixedcase, args.keep_digits, args.keep_punc)
# build initial bigram phrase model
model = Phrases(sentences, min_count=5, threshold=10)
model.save("%sphrase.model" % (args.outdir))
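# --- Illustrative sketch (not in the original script) ---
# The module docstring notes the bigram-only assumption. Higher-order phrases can
# be learned by re-running Phrases on the already-transformed corpus. The file
# naming ("<n>gram.phrase.model") is an assumption chosen to match what
# train_emb.py's load_phrase_models() expects, and corpora.TextNormalizer is
# assumed to accept a bare corpus with default normalization settings.
def train_ngram_phrases(infile, outdir, max_n=3):
    sentences = TextNormalizer(LineSentence(infile))
    for n in range(2, max_n + 1):
        model = Phrases(sentences, min_count=5, threshold=10)
        model.save("%s%dgram.phrase.model" % (outdir, n))
        # feed the phrase-merged sentences into the next round
        sentences = model[sentences]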
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-i","--infile", type=str, help="input sentence corpus")
parser.add_argument("-o","--outdir", type=str, help="output directory for phrase models")
parser.add_argument("-c","--keep_mixedcase", action='store_true',
help="don't apply lowercase normalization",
default=True)
parser.add_argument("-r","--keep_digits", action='store_true',
help="don't apply digit normalization",
default=True)
parser.add_argument("-b","--keep_punc", action='store_true',
help="don't remove punctuation",
default=True)
args = parser.parse_args()
# argument error, exit
if not args.infile:
parser.print_help()
sys.exit()
main(args)
|
snorkel-biocorpus-master
|
embeddings/train_pmi_phrases.py
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
------------------------------------------------
Create Word Embeddings
------------------------------------------------
Use Gensim's Word2Vec implementation to create
word (or short phrase) embeddings
'''
import re
import sys
import string
import logging
import argparse
from corpora import TextNormalizer
from gensim.models.phrases import Phrases
from gensim.models.word2vec import LineSentence, Word2Vec
punc_regex = re.compile("[%s]+" % string.punctuation)
digit_regex = re.compile("\d")
date_regex = re.compile("\d{1,2}/\d{1,2}/\d{2,4}")
time_regex = re.compile("[0-9]+:[0-9]{2,2}(:[0-9]{2,2})*\s*(AM|PM)*")
class TextNormalizer(object):
def __init__(self, corpus, keep_mixedcase=False,
keep_digits=False, keep_punctuation=False):
self.corpus = corpus
self.keep_mixedcase = keep_mixedcase
self.keep_digits = keep_digits
self.keep_punctuation = keep_punctuation
def __iter__(self):
for sentence in self.corpus:
# Mixed Case -> mixed case
if not self.keep_mixedcase:
sentence = [token.lower() for token in sentence]
if not self.keep_digits:
sentence = [digit_regex.sub("0",token).strip() for token in sentence]
if not self.keep_punctuation:
sentence = [token for token in sentence if punc_regex.sub("",token)]
yield sentence
class PhraseCorpus(object):
def __init__(self, filename, models, keep_mixedcase=False,
keep_digits=False, keep_punctuation=False):
self.filename = filename
self.models = models
self.keep_mixedcase = keep_mixedcase
self.keep_digits = keep_digits
self.keep_punctuation = keep_punctuation
def __iter__(self):
# create generator for phrase transformed sentences
sentences = TextNormalizer(LineSentence(self.filename),
self.keep_mixedcase, self.keep_digits,
self.keep_punctuation)
for s in sentences:
if self.models:
yield phrase_transform(s, self.models, 0)
else:
yield s
def load_phrase_models(indir, n):
"""
:param indir:
:param n:
:return:
"""
models = []
for _ in range(2, n + 1):
infile = "%s%sgram.phrase.model" % (indir, n)
models += [Phrases.load(infile)]
return models
def phrase_transform(sentence, models, idx):
"""
Recursively apply Phrase models to sentence
:param sentence:
:param models:
:param idx:
:return:
"""
if idx >= len(models):
return sentence
m = models[idx]
return phrase_transform(m[sentence], models, idx + 1)
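# --- Illustrative sketch (not in the original script) ---
# phrase_transform() applies each Phrases model in order, so chaining a bigram
# model with a higher-order model can merge multi-word terms step by step
# (e.g. 'renal failure' -> 'renal_failure' -> 'acute_renal_failure', if the
# training corpus supports those collocations).
def _example_phrase_transform(models):
    sentence = ['acute', 'renal', 'failure']
    return phrase_transform(sentence, models, 0)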
def main(args):
models = None if not args.modeldir else load_phrase_models(args.modeldir, args.ngrams)
corpus = PhraseCorpus(args.infile, models, args.keep_mixedcase, args.keep_digits, args.keep_punc)
embeddings = Word2Vec(corpus, size=args.dim, sg=int(args.algorithm == "skipgram"),
window=args.window, min_count=args.min_count, negative=args.negative,
iter=args.iterations, sample=1e-5, workers=args.num_procs)
embeddings.save("%swords.d%s.w%s.m%s.i%s.bin" % (args.outdir, args.dim,
args.window, args.min_count, args.iterations))
if __name__ == '__main__':
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
parser = argparse.ArgumentParser()
parser.add_argument("-i", "--infile", type=str, help="input file")
parser.add_argument("-o", "--outdir", type=str, help="output directory")
parser.add_argument("-m", "--modeldir", type=str, default=None,
help="trained discounted PMI phrase models")
parser.add_argument("-p", "--num_procs", type=int, default=1)
# normalization options
parser.add_argument("-c", "--keep_mixedcase", action='store_true',
help="don't apply lowercase normalization",
default=True)
parser.add_argument("-r", "--keep_digits", action='store_true',
help="don't apply digit normalization",
default=True)
parser.add_argument("-b", "--keep_punc", action='store_true',
help="don't remove punctuation",
default=True)
parser.add_argument("-s", "--savedict", action='store_true',
help="save dictionary to text file",
default=True)
    # word2vec parameters
parser.add_argument("-D", "--dim", type=int, default=50,
help="dimension of word embeddings")
parser.add_argument("-W", "--window", type=int, default=2,
help="context window")
parser.add_argument("-M", "--min_count", type=int, default=25,
help="minimum occurrence count")
parser.add_argument("-N", "--negative", type=int, default=10,
help="negative sampling")
parser.add_argument("-I", "--iterations", type=int, default=1,
help="iterations")
parser.add_argument("-A", "--algorithm", type=str, default="skipgram",
help="training algorithm (cbow or skipgram)")
args = parser.parse_args()
# argument error, exit
if not args.infile and not args.outdir:
parser.print_help()
sys.exit()
main(args)
|
snorkel-biocorpus-master
|
embeddings/train_emb.py
|
"""
Generate external database key/value pairs for select fields that
we want to define joins or elastic search attributes over
(e.g., publication year, journal, mesh keywords). The rest of the
content we commit as a the raw XML tree.
"""
import sys
import glob
import lxml.etree as et
# NOTE: the original snippet left the input path undefined; take it from the command line
inputdir = sys.argv[1]
filelist = glob.glob("{}/*.xml".format(inputdir))
fp = filelist[0] if filelist else inputdir
# target fields for manually extracted data
doc_xpath = './/PubmedArticle'
date_xpath = './MedlineCitation/DateCompleted'
id_xpath = './MedlineCitation/PMID/text()'
journal_issn_xpath = "./MedlineCitation/Article/Journal/ISSN/text()"
journal_title_xpath = "./MedlineCitation/Article/Journal/Title/text()"
mesh_xpath = "./MedlineCitation/MeshHeadingList/MeshHeading"
mappings = {"issn":{},"mesh_heading":{}}
doc_metadata = {}
for i, doc in enumerate(et.parse(fp).xpath(doc_xpath)):
fields = []
try:
pmid = doc.xpath(id_xpath)[0]
# MEDLINE/PubMed date (later than actual publication?)
for elem in doc.xpath(date_xpath):
ts = [(child.tag, child.text) for child in elem.getchildren()]
fields.extend(ts)
# The ISSN (International Standard Serial Number) identifies resources (e.g., journals)
issn = doc.xpath(journal_issn_xpath)[0] if doc.xpath(journal_issn_xpath) else None
title = doc.xpath(journal_title_xpath)[0] if doc.xpath(journal_title_xpath) else None
if issn:
fields.append(('ISSN',issn))
mappings['issn'][issn] = title
# Medical Subject Headings
for elem in doc.xpath(mesh_xpath):
for child in elem.getchildren():
ui = child.xpath("./@UI")[0]
major_topic = child.xpath("@MajorTopicYN")[0]
name = "MeshMinor" if major_topic =='N' else "MeshMajor"
fields.append((name,ui))
mappings['mesh_heading'][ui] = child.xpath("text()")[0]
# Store the rest as the raw XML
# TODO
doc_metadata[pmid] = fields
except Exception as e:
print "Error! {}".format(e)
print et.tostring(doc)
print "-" * 100
for pmid in doc_metadata:
for field,value in doc_metadata[pmid]:
row = [pmid, field, value]
print "\t".join(row)
for category in mappings:
for name in mappings[category]:
row = [category, name, mappings[category][name]]
print "\t".join(row)
|
snorkel-biocorpus-master
|
etl/pubmed/extract/extract_metadata.py
|
'''
Dumps PubMed standoff abstracts to a common text file format for bulk loading
'''
import os
import glob
import codecs
import argparse
import lxml.etree as et
from pubtator.parsers import PubTatorDocPreprocessor
def parse_standoff_format(filename, outputdir, source_prefix="gold_cdr"):
"""
FORMAT:
    DOC_ID CHAR_START CHAR_END CONCEPT_TYPE CONCEPT_ID SOURCE
100000 1060 1065 Chemical CHEBI:53351 PubTator_tmChem
:param filename:
:param outputdir:
:return:
"""
pubtator = PubTatorDocPreprocessor("", annotations=True)
errors = 0
outfile = os.path.basename(filename)
outfile = ".".join(outfile.split(".")[0:-1])
outfile = "{}/{}.tags.txt".format(outputdir.replace(os.path.basename(filename), ""), outfile)
with codecs.open(outfile, "w", "utf-8") as op:
for doc, text, annos in pubtator.parse_file(filename, filename):
for a in annos:
if len(a) == 4:
continue
a = a[0:6]
try:
pmid, start, end, text, ctype, cid = a
row = [pmid, start, end, ctype, cid]
op.write("\t".join(row + [source_prefix]) + u"\n")
except Exception as e:
print e
print "Wrote", outfile
return errors
def main(args):
doc_parser = parse_standoff_format
filelist = glob.glob("{}/*".format(args.inputdir)) if os.path.isdir(args.inputdir) else [args.inputdir]
filelist = [fp for fp in filelist if not os.path.isdir(fp)]
for fp in filelist:
if not os.path.exists(args.outputdir):
os.mkdir(args.outputdir)
errors = doc_parser(fp, args.outputdir)
if errors:
print "Errors: {}".format(errors)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--inputdir", type=str, default="input directory or file")
argparser.add_argument("-o", "--outputdir", type=str, default="outout directory")
args = argparser.parse_args()
main(args)
|
snorkel-biocorpus-master
|
etl/pubmed/extract/extract_annotations.py
|
'''
Dumps PubMed abstracts to a common text file format for bulk preprocessing
FORMAT:
~~_PMID_XXXXXX_~~
TEXT
..
'''
import os
import glob
import codecs
import argparse
import lxml.etree as et
from pubtator.parsers import PubTatorDocPreprocessor
def parse_xml_format(filename, outputdir):
"""
NLM XML Format
:param filename:
:param outputdir:
:return:
"""
doc_xpath = './/PubmedArticle'
id_xpath = './MedlineCitation/PMID/text()'
title_xpath = './MedlineCitation/Article/ArticleTitle/text()'
abstract_xpath = './MedlineCitation/Article/Abstract/AbstractText/text()'
errors = 0
outfile = os.path.basename(filename)
outfile = ".".join(outfile.split(".")[0:-1])
outfile = "{}/{}.txt".format(outputdir.replace(os.path.basename(filename), ""), outfile)
with codecs.open(outfile, "w", "utf-8") as op:
for i, doc in enumerate(et.parse(filename).xpath(doc_xpath)):
try:
pmid = doc.xpath(id_xpath)[0]
title = doc.xpath(title_xpath)[0] if doc.xpath(title_xpath) else ""
abstract = doc.xpath(abstract_xpath)[0] if doc.xpath(abstract_xpath) else ""
text = u"{} {}".format(title, abstract)
op.write(u"~~_PMID_{}_~~\n".format(pmid))
op.write(text + u"\n")
except:
errors += 1
print "Wrote", outfile
return errors
def parse_bioc_format(filename, outputdir):
"""
    TODO: BioC XML format
:param filename:
:param outputdir:
:return:
"""
pass
def parse_standoff_format(filename, outputdir, prefix="tmp"):
"""
:param filename:
:param outputdir:
:return:
"""
pubtator = PubTatorDocPreprocessor("")
errors = 0
outfile = os.path.basename(filename)
outfile = ".".join(outfile.split(".")[0:-1])
outfile = "{}/{}.txt".format(outputdir.replace(os.path.basename(filename), ""), outfile)
with codecs.open(outfile, "w", "utf-8") as op:
for doc, text in pubtator.parse_file(filename, filename):
op.write(u"~~_PMID_{}_~~\n".format(doc.name))
op.write(text + u"\n")
print "Wrote", outfile
return errors
def get_doc_parser(format):
"""
    Support various utilities for extracting text data
:param format:
:return:
"""
if format == "xml":
return parse_xml_format
elif format == "bioc":
return parse_bioc_format
else:
return parse_standoff_format
def main(args):
doc_parser = get_doc_parser(args.format)
filelist = glob.glob("{}/*".format(args.inputdir)) if os.path.isdir(args.inputdir) else [args.inputdir]
for fp in filelist:
if not os.path.exists(args.outputdir):
os.mkdir(args.outputdir)
errors = doc_parser(fp, args.outputdir)
if errors:
print "Errors: {}".format(errors)
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--inputdir", type=str, default="input directory or file")
argparser.add_argument("-o", "--outputdir", type=str, default="outout directory")
argparser.add_argument("-f", "--format", type=str, default="pubtator")
argparser.add_argument("-p", "--prefix", type=str, default="fixes", help="prefix")
args = argparser.parse_args()
args.outputdir = args.outputdir + "/tmp/"
main(args)
|
snorkel-biocorpus-master
|
etl/pubmed/extract/extract_text.py
|
"""
This step:
1) converts the standoff format into one sentence per line, and
2) applies rule-based sentence boundary detection/tokenization fixes.
The main goal is to ensure high-quality SBD, which often breaks in the
presence of complex chemical names.
"""
import re
import os
import glob
import codecs
import argparse
article_rgx = re.compile("~~_PMID_([0-9]+)_~~")
elements = "(Zr|Zn|Yb|Y|Xe|W|V|U|Tm|Tl|Ti|" + \
"Th|Te|Tc|Tb|Ta|Sr|Sn|Sm|Si|Sg|Se|Sc|Sb|S|Ru|Rn|Rh|Rf|Re|Rb|" + \
"Ra|Pu|Pt|Pr|Po|Pm|Pd|Pb|Pa|P|Os|O|Np|No|Ni|Ne|Nd|Nb|Na|N|Mt|" + \
"Mo|Mn|Mg|Md|Lu|Lr|Li|La|Kr|K|Ir|In|I|Hs|Ho|Hg|Hf|He|H|Ge|Gd|" + \
"Ga|Fr|Fm|Fe|F|Eu|Es|Er|Dy|Ds|Db|Cu|Cs|Cr|Co|Cm|Cl|Cf|Ce|Cd|" + \
"Ca|C|Br|Bk|Bi|Bh|Be|Ba|B|Au|At|As|Ar|Am|Al|Ag|Ac)"
# force the hyphen to be split off for suffix modifiers of these types
# e.g., morphine-like drugs -> morphine - like drugs
chemical_modifiers = ['abusing', 'adducted', 'allergic', 'altered', 'anesthetized', 'antagonistic',
'associated', 'based', 'binding', 'containing', 'controlled', 'converting',
'deficient', 'degrading', 'dependent', 'depleted', 'depleting', 'devoid',
'dose', 'drug', 'ecstasy', 'elicited', 'eluting', 'evoked', 'exposed', 'free',
'gated', 'induced', 'inhibited', 'injected', 'kindled', 'like', 'mediated',
'negative', 'nephropathy', 'positive', 'pretreated', 'primed', 'rats',
'receptor', 'redox', 'related', 'resistance', 'resistant', 'sensitive',
'sparing', 'specific', 'stained', 'stimulated', 'supplemented', 'suppressible',
'therapy', 'treated', 'untreated', 'lowering', 'encoded', "mice", "fortified",
"Induced", "Injected", "precursors", "produced", "channel", "lesioned", "response",
"activated","exposure","loaded","synthase"]
# sentence substitution rules
sentence_repairs = {}
sentence_repairs[re.compile("i\.m \.$")] = "i.m."
sentence_repairs[re.compile("i\.v \.$")] = "i.v."
sentence_repairs[re.compile("a\.c \.$")] = "a.c."
sentence_repairs[re.compile("d\.c \.$")] = "d.c."
sentence_repairs[re.compile("s\.c \.$")] = "s.c."
sentence_repairs[re.compile("i\.p \.$")] = "i.p."
sentence_repairs[re.compile("i\.v\.c \.$")] = "i.v.c."
sentence_repairs[re.compile("i\.c\.v \.$")] = "i.c.v."
sentence_repairs[re.compile("i\.c\.m \.$")] = "i.c.m."
sentence_repairs[re.compile("t\.i\.d \.$")] = "t.i.d."
sentence_repairs[re.compile("b\.i\.d \.$")] = "b.i.d."
# these fixes cause more errors than they solve
#sentence_repairs[re.compile("kg \.$")] = "kg."
#sentence_repairs[re.compile("k\.g \.$")] = "k.g."
sentence_repairs[re.compile("mol \.$")] = "mol."
sentence_repairs[re.compile("^wt\.$")] = "wt."
sentence_repairs[re.compile("wts \.$")] = "wts."
sentence_repairs[re.compile("mol\.wt \.$")] = "mol.wt."
sentence_repairs[re.compile("mol\.wts \.$")] = "mol.wts."
sentence_repairs[re.compile("approx \.$")] = "approx."
sentence_repairs[re.compile("St \.$")] = "St."
# word concatenation rules
word_concat = {}
word_concat[re.compile(" i\.v \. ")] = " i.v. "
word_concat[re.compile(" i\.p \. ")] = " i.p. "
word_concat[re.compile(" s\.c \. ")] = " s.c. "
word_concat[re.compile(" p\.o \. ")] = " p.o. "
word_concat[re.compile(" i\.c\.v \. ")] = " i.c.v. "
word_concat[re.compile(" i\.c\.m \. ")] = " i.c.m. "
word_concat[re.compile("\( \-\- \)")] = "(--)"
word_concat[re.compile(" e\.g \. ")] = " e.g. "
word_concat[re.compile(" i\.e \. ")] = " i.e. "
word_concat[re.compile(" \+ \+")] = "++"
word_concat[re.compile(" t\.i\.d \. ")] = " t.i.d. "
word_concat[re.compile(" \+ \/ \- ")] = " +/- "
word_concat[re.compile("year\- old")] = "year-old"
word_concat[re.compile("\( \+\s*\/\s*\-\)")] = "(+/-)"
word_concat[re.compile("\+ \/ \-")] = "+/-"
word_concat[re.compile("Na \+ ")] = "Na+ "
# word expansion rules
# TODO -- remove these when we move to a sequence model
word_expand = {}
word_expand[re.compile("[-]({})".format("|".join(chemical_modifiers)))] = lambda rgx,doc: re.sub(rgx, r" - \1", doc)
word_expand[re.compile("'s")] = lambda rgx,doc: re.sub(rgx, " 's", doc)
word_expand[re.compile("[-] and")] = lambda rgx,doc: re.sub(rgx, " - and", doc)
word_expand[re.compile("[-] or")] = lambda rgx,doc: re.sub(rgx, " - or", doc)
word_expand[re.compile("[-] ,")] = lambda rgx,doc: re.sub(rgx, " - ,", doc)
word_expand[re.compile(" non[-]")] = lambda rgx,doc: re.sub(rgx, " non - ", doc)
word_expand[re.compile(" \(([A-Za-z]+)\) ")] = lambda rgx,doc: re.sub(rgx, r" ( \1 )", doc)
word_expand[re.compile("([A-Za-z]{2,})- ")] = lambda rgx,doc: re.sub(rgx, r"\1 - ", doc)
# ion expansion rules
word_expand[re.compile("(\([0-9]*[+/-]+\)[-]*) ")] = lambda rgx,doc: re.sub(rgx, r" \1 ", doc)
word_expand[re.compile(" (\([0-9]*[+/-]+\)[-]*)([A-Za-z])")] = lambda rgx,doc: re.sub(rgx, r" \1 \2", doc)
word_expand[re.compile("^(\([0-9]*[+/-]+\)[-]*)([A-Za-z])")] = lambda rgx,doc: re.sub(rgx, r"\1 \2", doc)
word_expand[re.compile(elements + "([+-]) ")] = lambda rgx,doc: re.sub(rgx, r"\1 \2 ", doc)
def rgx_transform(l, patterns, lines):
"""
    :param l: current sentence string
    :param patterns: dict mapping compiled regex -> replacement string
    :param lines: remaining sentences; when a repair fires, the next line
                  (a spurious sentence break) is merged into l
    :return: the possibly repaired/merged sentence
"""
for rgx in patterns:
m = rgx.search(l)
if m:
l = l.replace(m.group(), patterns[rgx])
if lines:
l += " " + lines.pop(0)
return l
return l
def repair(doc):
"""
    :param doc: document as a list of token lists (one list per sentence)
    :return: list of repaired sentence strings
"""
lines = []
for sent in doc:
lines.append(" ".join(sent))
# apply sentence repairs
debug = False
doc = []
while lines:
line = lines.pop(0).strip()
if not line:
continue
t_line = rgx_transform(line, sentence_repairs, lines)
while line != t_line:
line = t_line
t_line = rgx_transform(line, sentence_repairs, lines)
doc += [t_line]
# apply word contractions
for i in range(len(doc)):
for rgx in word_concat:
m = rgx.search(doc[i])
if m:
doc[i] = doc[i].replace(m.group(), word_concat[rgx])
#apply word expansion
for i in range(len(doc)):
for rgx in word_expand:
doc[i] = word_expand[rgx](rgx,doc[i])
# HACK -- ensure only 1 whitespace delimiter
doc[i] = " ".join(doc[i].split())
return doc
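# --- Illustrative sketch (not in the original script) ---
# repair() takes a document as a list of token lists and undoes bad sentence
# breaks/token splits: the spurious break after "i.v ." below is merged back
# and the abbreviation is re-joined to "i.v.".
def _example_repair():
    doc = [["Morphine", "was", "given", "i.v", "."],
           ["to", "all", "rats", "."]]
    return repair(doc)
    # expected: ["Morphine was given i.v. to all rats ."]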
def main(args):
"""
:param args:
:return:
"""
filelist = glob.glob("{}/*".format(args.inputdir)) if os.path.isdir(args.inputdir) else [args.inputdir]
filelist = [fp for fp in filelist if not os.path.isdir(fp)]
outpath = "{}/fixes/".format(args.outputdir)
if not os.path.exists(outpath):
os.mkdir(outpath)
for fp in filelist:
# write files with new prefix to outputdir
outpath = ".".join(fp.split(".")[0:-1] + [args.prefix] + fp.split(".")[-1:])
outpath = "{}/{}/{}".format(args.outputdir, args.prefix, outpath.split("/")[-1])
with codecs.open(fp,"rU",'utf-8') as fp, codecs.open(outpath,"w",'utf-8') as op:
doc,sentence = [],[]
for line in fp:
if re.search(article_rgx, line) and not doc:
doc += [line.strip()]
elif re.search(article_rgx, line):
op.write(doc[0] + u'\n')
doc = repair(doc[1:])
for l in doc:
op.write(l + u'\n')
doc = [line.strip()]
elif line.strip() == "":
doc += [sentence]
sentence = []
else:
sentence += [line.strip()]
if doc:
op.write(doc[0] + u'\n')
doc = repair(doc[1:])
for l in doc:
op.write(l + u'\n')
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--inputdir", type=str, default=None, help="input directory or file")
argparser.add_argument("-o", "--outputdir", type=str, default=".", help="outout directory")
argparser.add_argument("-p", "--prefix", type=str, default="fixes", help="prefix")
args = argparser.parse_args()
main(args)
|
snorkel-biocorpus-master
|
etl/pubmed/extract/tokenization_fixes.py
|
"""
"""
import re
import os
import sys
import glob
import codecs
import argparse
article_rgx = re.compile("~~_PMID_([0-9]+)_~~")
def load_line_corpus(filename, sentences=False, encoding="utf-8"):
corpus = {}
    with codecs.open(filename, "rU", encoding) as fp:
doc = []
for line in fp:
if re.search(article_rgx, line) and not doc:
doc += [line.strip()]
elif re.search(article_rgx, line):
pmid = article_rgx.search(doc[0]).group(1)
corpus[pmid] = " ".join(doc[1:]) if not sentences else doc[1:]
doc = [line.strip()]
else:
doc += [line.strip()]
if doc:
pmid = article_rgx.search(doc[0]).group(1)
corpus[pmid] = " ".join(doc[1:]) if not sentences else doc[1:]
return corpus
def align(a, b):
j = 0
offsets = []
for i in range(len(a)):
if a[i] == ' ':
continue
while a[i] != b[j] or b[j] == ' ':
j+= 1
offsets.append((i,j))
j += 1
return offsets
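# --- Illustrative sketch (not in the original script) ---
# align() maps every non-space character of the original string onto its
# position in the retokenized string, ignoring whitespace differences.
def _example_align():
    offsets = align("morphine-like drugs", "morphine - like drugs")
    # offsets[0] == (0, 0) ('m' -> 'm'), offsets[8] == (8, 9) ('-' -> '-'),
    # so original character positions can be recovered for each new token.
    return offsets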
def main(args):
if not os.path.exists(args.outputdir):
os.mkdir(args.outputdir)
sources = glob.glob("{}/*".format(args.source)) if os.path.isdir(args.source) else [args.source]
sources = sorted([fp for fp in sources if not os.path.isdir(fp)])
transformed = glob.glob("{}/*".format(args.transformed)) if os.path.isdir(args.transformed) else [args.transformed]
transformed = sorted([fp for fp in transformed if not os.path.isdir(fp)])
if len(transformed) != len(sources):
print "Error - transformed != sources"
return
for fs,ft in zip(sources,transformed):
source = load_line_corpus(fs)
transform = load_line_corpus(ft, sentences=True)
outpath = ".".join(fs.split(".")[0:-1] + [args.prefix] + fs.split(".")[-1:])
outpath = "{}/{}".format(args.outputdir, outpath.split("/")[-1])
with codecs.open(outpath,"w", args.encoding) as fp:
print outpath
for pmid in source:
#
# 1: Create abs token offsets
#
a = source[pmid]
b = " ".join(transform[pmid])
offsets = align(a,b)
rev_offsets = dict([(j, i) for i, j in offsets])
#
# 2. Tokenize
#
splits = dict.fromkeys([i for i in range(len(b)) if b[i] == ' '])
t, tokens = [],[]
for i in range(len(b)):
if i in splits:
if t:
tokens.append(t)
t = []
else:
# sanity check
if b[i] != a[rev_offsets[i]]:
print "ERROR"
t.append((rev_offsets[i],i))
if t:
tokens.append(t)
abs_char_offset_map = [t[0] for t in tokens]
#
# 3. Create sentence breaks
#
sbd = [len(s.split()) for s in transform[pmid]]
tokens = zip(b.split(), abs_char_offset_map)
sentences = []
for i,l in enumerate(sbd):
words = tokens[0:l]
text = transform[pmid][i]
tokens = tokens[l:]
# sanity check -- do strings match?
if not " ".join(zip(*words)[0]) == text:
sys.stderr.write("[{}] Alignment ERROR\n".format(pmid))
words, abs_offsets = zip(*words)
abs_char_offsets = zip(*abs_offsets)[0]
row = [pmid, unicode(i), ",".join(map(unicode, abs_char_offsets)), " ".join(words)]
fp.write(u"\t".join(row) + u"\n")
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("-s", "--source", type=str, default=None, help="original documents")
argparser.add_argument("-t", "--transformed", type=str, default=None, help="transformed documents")
argparser.add_argument("-o", "--outputdir", type=str, default=".", help="outout directory")
argparser.add_argument("-p", "--prefix", type=str, default="processed", help="prefix")
argparser.add_argument("-e", "--encoding", type=str, default="utf-8", help="encoding")
args = argparser.parse_args()
main(args)
|
snorkel-biocorpus-master
|
etl/pubmed/extract/export_line_corpus.py
|
"""
Transform PubMed text generated by extract_pubmed.py into tokenized
standoff format. This leverages 2 external software tools that fix
tokenization errors that arise when CoreNLP or spaCy is used on
biomedical text, primarily:
1) Errors tokenizing chemical names
2) Errors identifying sentence boundaries in the presence of complex chemical entities.
I. Sentence Boundary Detection (SBD)
GENIA Sentence Splitter v1.0.0
http://www.nactem.ac.uk/y-matsu/geniass/
They report F1=99.7 test set performance
II. Word Tokenization
ChemTok v1.0.1
https://sourceforge.net/projects/oscar3-chem/files/chemtok/
From OSCAR (Open Source Chemistry Analysis Routines) toolkit:
https://www.ncbi.nlm.nih.gov/pmc/articles/PMC3117806/
"ChemicalTagger: A tool for semantic text-mining in chemistry"
NOTE: This version has been subsumed by OSCAR4. It's unclear if
the latest version provides improved tokenization.
This script assumes these binaries are installed in bin/
"""
import os
import glob
import argparse
from subprocess import check_output
def main(args):
filelist = glob.glob("{}/*".format(args.inputdir)) if os.path.isdir(args.inputdir) else [args.inputdir]
filelist = [fp for fp in filelist if not os.path.isdir(fp)]
#
# I: Sentence Boundary Detection
#
outpath = "{}/sentences/".format(args.outputdir)
if not os.path.exists(outpath):
os.mkdir(outpath)
for fp in filelist:
cwd = os.getcwd()
os.chdir("bin/geniass")
op = ".".join(fp.split(".")[0:-1] + ["sentences"] + fp.split(".")[-1:])
op = "{}/sentences/{}".format(args.outputdir, op.split("/")[-1])
cmd = "./geniass {} {}".format(fp, op)
out = check_output(cmd.split())
os.chdir(cwd)
print "SBD", fp, "DONE"
#
# II. Tokenization
#
filelist = glob.glob("{}/sentences/*.sentences.txt".format(args.outputdir)) if os.path.isdir(args.outputdir) \
else [args.outputdir]
filelist = [fp for fp in filelist if not os.path.isdir(fp)]
outpath = "{}/tokens/".format(args.outputdir)
if not os.path.exists(outpath):
os.mkdir(outpath)
for fp in filelist:
cwd = os.getcwd()
os.chdir("bin/chemtok-1.0.1")
op = ".".join(fp.split(".")[0:-1] + ["tokens"] + fp.split(".")[-1:])
op = "{}/tokens/{}".format(args.outputdir, op.split("/")[-1])
cmd = "java -jar chemtok-1.0.1.jar < {} > {}".format(fp, op)
os.system(cmd)
os.chdir(cwd)
print "Tokenization", fp, "DONE"
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("-i", "--inputdir", type=str, default=None, help="input directory or file")
argparser.add_argument("-o", "--outputdir", type=str, default=".", help="outout directory")
args = argparser.parse_args()
main(args)
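    # Editor note (usage sketch, paths are examples): this script expects the external
    # binaries under bin/ relative to the working directory (bin/geniass/ and
    # bin/chemtok-1.0.1/chemtok-1.0.1.jar) and is invoked as
    #   python tokenize.py -i <dir of extracted PubMed text files> -o <output dir>
    # Pass I writes sentence-split files into <outputdir>/sentences/, pass II writes
    # tokenized files into <outputdir>/tokens/.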
|
snorkel-biocorpus-master
|
etl/pubmed/extract/tokenize.py
|
"""
Load PubTator snapshot for use with Snorkel v6.2
This loads tags into memory, so it works best when the input PubTator
file is split into smaller blocks.
"""
import os
import glob
import codecs
import argparse
def dump2delimited(tags, outfile, write_mode, sep=u"\t", encoding="utf-8"):
with codecs.open(outfile, write_mode, encoding) as fp:
for t in tags:
row = [t.document_id, t.abs_char_start, t.abs_char_end,
t.concept_type, t.concept_uid, t.source]
row = map(unicode, row)
fp.write(sep.join(row) + u"\n")
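# Editor note (illustrative): each tag is written as one delimited row of the form
#   <document_id> <sep> <abs_char_start> <sep> <abs_char_end> <sep> <concept_type> <sep> <concept_uid> <sep> <source>
# where <sep> is a tab by default.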
def main(args):
session = SnorkelSession()
# ---------------------------------------
# 1: Load documents
# ---------------------------------------
    filelist = glob.glob("{}/*".format(args.input_file)) if os.path.isdir(args.input_file) else [args.input_file]
write_mode = "w"
pubtator_tags = PubTatorTagProcessor()
name2id = dict(session.query(Document.name, Document.id).all())
for fp in filelist:
# dump all tags to a tab-delimited text file
if args.dump:
tags = pubtator_tags.load_data(session, fp, name2id)
dump2delimited(tags, args.output_file, write_mode)
# change to append mode after first write
write_mode = "a"
# or commit tags directly to the database
else:
pubtator_tags.commit(session, fp)
print "Loaded tags from {}".format(os.path.basename(fp))
tags = session.query(SequenceTag.id).all()
print "Loaded {} PubTator tags".format(len(tags))
if __name__ == '__main__':
argparser = argparse.ArgumentParser()
argparser.add_argument("-d", "--dbname", type=str, default="postgresql:///biocorpus",
help="SNORKELDB enviorn variable")
argparser.add_argument("-i", "--input_file", type=str, default="data/bioconcepts2pubtator_offsets.sample",
help="PubTator snapshot")
argparser.add_argument("-o", "--output_file", type=str, default="tag_dump.tsv",
help="PubTator dump filename")
argparser.add_argument("-D", "--dump", action="store_true", help="Dump PubTator tags to TSV")
args = argparser.parse_args()
print args
os.environ['SNORKELDB'] = args.dbname
os.environ['TIKA_LOG_PATH'] = "."
from snorkel import SnorkelSession
from snorkel.models import SequenceTag, Document
from pubtator import PubTatorTagProcessor
main(args)
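    # Editor note (usage sketch, values are the argparse defaults above):
    #   python extract_pubtator_tags.py -d postgresql:///biocorpus \
    #       -i data/bioconcepts2pubtator_offsets.sample -D -o tag_dump.tsv
    # With -D the tags are dumped to a TSV; without it they are committed to the Snorkel database.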
|
snorkel-biocorpus-master
|
OLD/extract_pubtator_tags.py
|
model-patching-master
|
augmentation/__init__.py
|
|
import numpy as np
import tensorflow as tf
import wandb
import yaml
import subprocess
from augmentation.utilities.visualize import gallery
from augmentation.utilities.wandb import *
from augmentation.utilities.checkpoint import load_tf_optimizer_state
def rewrite_config_for_resumption(config):
config.prev_wandb_entity = config.wandb_entity
config.prev_wandb_project = config.wandb_project
config.prev_wandb_run_id = wandb.run.id
config.resume = True
yaml.dump(config.__dict__, open(config._config_path, 'w'))
# Push the change for this config
for cmd in [['git', 'add', config._config_path],
['git', 'commit', '-m', f'cfg_update_{wandb.run.id}'],
['git', 'pull'],
['git', 'push']]:
subprocess.run(cmd)
return config
def reload_run(model,
optimizer,
robust_loss_calc,
wandb_run_id,
wandb_project,
wandb_entity,
wandb_ckpt_path,
resume_epoch=-1,
continue_training=True):
# By default, we start at the beginning
start_epoch, start_step = 0, 0
# Load up the previous run
prev_run = load_wandb_run(wandb_run_id, wandb_project, wandb_entity)
step_extractor = particular_checkpoint_step_extractor(resume_epoch,
lambda fname: fname.split(".")[-2].split("_")[-1])
# If the previous run crashed, wandb_ckpt_path should be '': this is the typical use case
# but this should be changed in the future
_, loaded_epoch = load_most_recent_keras_model_weights(model, prev_run,
model_name='ckpt',
exclude='generator',
step_extractor=step_extractor,
wandb_ckpt_path=wandb_ckpt_path)
# If we're continuing training AND if we reloaded a model
# - load up the optimizer and DRO state
# - set the start epoch and start step
if continue_training and loaded_epoch is not None:
start_epoch = loaded_epoch
for line in prev_run.history():
if 'epochs' in line and line['epochs'] == start_epoch:
start_step = line['train_step/step']
break
# Reloading the optimizer states from that epoch
opt_ckpt = get_most_recent_model_file(prev_run,
wandb_ckpt_path=wandb_ckpt_path,
model_name='optimizer',
step_extractor=particular_checkpoint_step_extractor(start_epoch))
load_tf_optimizer_state(optimizer, opt_ckpt.name)
# Reloading the state of GDRO from that epoch
gdro_ckpt = get_most_recent_model_file(prev_run,
wandb_ckpt_path=wandb_ckpt_path,
model_name='gdro',
step_extractor=particular_checkpoint_step_extractor(start_epoch))
robust_loss_calc._adv_prob_logits = tf.convert_to_tensor(np.load(gdro_ckpt.name))
print(f"Loaded epoch {loaded_epoch} from {wandb_run_id}. Starting from step {start_step} and epoch {start_epoch}.",
flush=True)
return start_epoch, start_step
def log_robust_train_step_to_wandb(group_aliases, group_batches, group_targets, group_predictions, group_losses,
robust_loss, consistency_loss, consistency_penalty_weight,
irm_losses, irm_penalty_weight,
gradients, model, optimizer,
robust_loss_calc, step, log_images=False, log_weights_and_grads=False):
# Loop over the data from each group
# for i, (batch, targets, predictions, loss) in enumerate(zip(group_batches, group_targets,
for (alias, batch, targets, predictions, loss, irm) in zip(group_aliases, group_batches, group_targets,
group_predictions, group_losses, irm_losses):
# Log data generated in this train step
wandb.log({f'train_step/{alias}/targets': targets.numpy(),
f'train_step/{alias}/predictions': wandb.Histogram(predictions.numpy()),
f'train_step/{alias}/argmax_predictions': tf.argmax(predictions, axis=-1).numpy(),
f'train_step/{alias}/loss': loss.numpy(),
f'train_step/{alias}/irm': irm.numpy()},
step=step)
# Optionally, log the minibatch of images
if log_images:
wandb.log({f'train_step/{alias}/images': wandb.Image(gallery(batch.numpy()))}, step=step)
# Log all the gradients and weights: every 50 steps
if log_weights_and_grads:
wandb.log({f'gradients/{v.name}': g.numpy() for v, g in zip(model.trainable_variables, gradients)}, step=step)
wandb.log({f'weights/{v.name}': v.numpy() for v in model.trainable_variables}, step=step)
for prob, alias in zip(tf.nn.softmax(robust_loss_calc._adv_prob_logits, axis=-1).numpy().reshape(-1),
robust_loss_calc._aliases):
wandb.log({f'train_step/gdro_adv_prob.{alias}': prob}, step=step)
wandb.log({'train_step/irm_penalty_weight': irm_penalty_weight,
'train_step/consistency_penalty_weight': consistency_penalty_weight,
# 'train_step/gdro_adv_probs': tf.nn.softmax(robust_loss_calc._adv_prob_logits, axis=-1).numpy(),
'train_step/robust_loss': robust_loss.numpy(),
'train_step/consistency_loss': consistency_loss.numpy(),
'train_step/global_gradient_norm': tf.linalg.global_norm(gradients).numpy(),
'train_step/learning_rate': optimizer._decayed_lr(tf.float32).numpy(),
'train_step/step': step}, step=step)
def consistency_penalty(predictions_orig, predictions_1, predictions_2, consistency_type, scale=1.0):
# CAMEL consistency: JS-Divergence of augmentations, plus KL between original and average augmentation
if consistency_type == 'camel':
avg_predictions = (predictions_1 + predictions_2) / 2.0
return tf.reduce_mean((tf.keras.losses.KLD(predictions_orig, avg_predictions) * 0.5 +
tf.keras.losses.KLD(predictions_1, avg_predictions) * 0.25 +
tf.keras.losses.KLD(predictions_2, avg_predictions) * 0.25)) * scale
# JS-Divergence between original and both augmentations (as in AugMix)
elif consistency_type == 'triplet-js':
avg_predictions = (predictions_orig + predictions_1 + predictions_2) / 3.0
return tf.reduce_mean((tf.keras.losses.KLD(predictions_orig, avg_predictions) +
tf.keras.losses.KLD(predictions_1, avg_predictions) +
tf.keras.losses.KLD(predictions_2, avg_predictions)) / 3.0) * scale
# KL divergence between original and each augmentation
elif consistency_type == 'kl':
return tf.reduce_mean((tf.keras.losses.KLD(predictions_orig, predictions_1) +
tf.keras.losses.KLD(predictions_orig, predictions_2)) * scale * 0.5)
elif consistency_type == 'reverse-kl':
return tf.reduce_mean((tf.keras.losses.KLD(predictions_1, predictions_orig) +
tf.keras.losses.KLD(predictions_2, predictions_orig)) * scale * 0.5)
elif consistency_type == 'none':
return tf.convert_to_tensor(0.)
else:
assert False, f'consistency_type {consistency_type} not supported'
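# Minimal usage sketch (editor-added, illustrative only; the probability tensors below are
# made up): the penalties above expect softmax outputs of shape (batch, classes) and return
# a scalar tensor.
def _consistency_penalty_example():
    p_orig = tf.constant([[0.7, 0.3], [0.2, 0.8]])
    p_aug1 = tf.constant([[0.6, 0.4], [0.3, 0.7]])
    p_aug2 = tf.constant([[0.8, 0.2], [0.1, 0.9]])
    # 'camel' penalizes divergence of each prediction from the average augmented prediction
    return consistency_penalty(p_orig, p_aug1, p_aug2, consistency_type='camel', scale=1.0)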
def irm_penalty_explicit(targets, pred_logits, penalty_weight):
""" Computes the IRM penalty grad_{w} |_{w=1.0} crossent(targets, w*logits) explicitly """
if penalty_weight == 0.:
return tf.convert_to_tensor(0.)
xent = tf.keras.losses.sparse_categorical_crossentropy(targets, pred_logits, from_logits=True)
    sparse_logit = tf.reduce_logsumexp(pred_logits, axis=-1) - xent  # equivalent to grabbing the logit indexed by target
grad = sparse_logit - tf.reduce_sum(pred_logits * tf.nn.softmax(pred_logits, axis=-1), axis=-1)
return tf.reduce_sum(grad ** 2) * penalty_weight
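# Editor note (for reference): writing z = pred_logits and p = softmax(z), the derivative is
#   d/dw crossent(y, w*z) |_{w=1} = sum_k z_k * p_k - z_y,
# and the target logit can be recovered as z_y = logsumexp(z) - xent, so the penalty is the
# squared per-example derivative, summed over the batch and scaled by penalty_weight.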
def irm_penalty_gradient(targets, pred_logits, penalty_weight, tape):
""" Computes IRM penalty as formulated in the paper
Currently does not work: tf does not support second order gradients of cross entropy
"""
if penalty_weight == 0.:
return 0.
# Taken from https://github.com/facebookresearch/InvariantRiskMinimization/blob/6aad47e689913b9bdad05880833530a5edac389e/code/colored_mnist/main.py#L107
scale = tf.convert_to_tensor(1.)
tape.watch(scale)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)(targets, pred_logits * scale)
grad = tape.gradient(loss, scale)
return tf.reduce_sum(grad ** 2) * penalty_weight
def consistency_penalty_scheduler(step, n_anneal_steps, base_penalty_weight):
"""
Schedule the consistency penalty.
"""
if base_penalty_weight == 0:
return 0.
if step >= n_anneal_steps:
return base_penalty_weight
return 0.0
def irm_penalty_scheduler(step, n_anneal_steps=100, base_penalty_weight=10000.):
"""
Schedule the IRM penalty weight using a step function as done by
https://github.com/facebookresearch/InvariantRiskMinimization
If the penalty weight is 0. (IRM disabled), just return 0.
"""
if base_penalty_weight == 0.:
return 0.
if step >= n_anneal_steps:
return base_penalty_weight
# return 1.0
return 0.0 # train with no irm at first
def irm_loss_rescale(total_loss, irm_penalty_weight):
"""
Rescale the total loss by the IRM penalty weight as done by
https://github.com/facebookresearch/InvariantRiskMinimization
"""
if irm_penalty_weight > 1.0:
return total_loss / irm_penalty_weight
return total_loss
class GDROLoss:
def __init__(self, group_aliases, group_counts, superclass_ids, adj_coef, step_size):
"""
group_counts: list of integer sizes of the groups
adj_coef: scalar coefficient of the generalization gap penalty
step_size: robust learning rate for the "mixture of expert" probabilities
"""
assert len(group_aliases) == len(group_counts) == len(superclass_ids)
group_counts = tf.cast(tf.stack(group_counts), tf.float32)
print(f"GDROLoss: Group counts {group_counts}")
self._adj = adj_coef * 1. / tf.math.sqrt(group_counts)
print("adj_coef", adj_coef)
print("total adjustment", self._adj)
self._step_size = step_size
self._adv_probs = tf.ones(len(group_counts)) / len(group_counts)
# _adv_prob_logits must exist, being logged by wandb now
self._adv_prob_logits = tf.zeros_like(group_counts)
self._aliases = group_aliases
# For now, assume superclass_ids are 0, 1, -1
superclass_idxs_ = {}
for i in set(superclass_ids):
superclass_idxs_[i] = [idx for idx, j in enumerate(superclass_ids) if j == i]
superclass_freqs_ = {i: len(idxs) / len(group_aliases) for i, idxs in superclass_idxs_.items()}
self.superclass_idxs = superclass_idxs_.values()
self.superclass_freqs = superclass_freqs_.values()
print("GDROLoss: superclass indices, freqs", self.superclass_idxs, self.superclass_freqs)
def compute_loss(self, losses):
""" losses: list of losses (scalars) """
if len(losses) == 0: return tf.convert_to_tensor(0.0)
losses = tf.stack(losses, axis=-1) + self._adj
self._adv_prob_logits += self._step_size * losses
loss = tf.convert_to_tensor(0.)
for idxs, freq in zip(self.superclass_idxs, self.superclass_freqs):
adv_probs = tf.nn.softmax(tf.gather(self._adv_prob_logits, idxs), axis=-1)
loss = loss + tf.reduce_sum(adv_probs * tf.gather(losses, idxs), axis=-1) * freq
return loss
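# Minimal usage sketch (editor-added, illustrative only; aliases, counts and hyperparameters
# are made up): GDROLoss keeps exponentiated-gradient weights over groups and returns a
# weighted sum of the per-group losses, up-weighting groups with persistently high loss.
def _gdro_loss_example():
    calc = GDROLoss(group_aliases=['group_a', 'group_b'],
                    group_counts=[1000, 50],
                    superclass_ids=[0, 0],
                    adj_coef=1.0,
                    step_size=0.01)
    # Scalar per-group losses from one training step
    return calc.compute_loss([tf.constant(0.4), tf.constant(1.2)])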
|
model-patching-master
|
augmentation/methods/robust/utils.py
|
import argparse
import os
import yaml
import subprocess
import glob
import functools
from augmentation.augment.utils import create_multiple_train_eval_augmentation_pipelines
from augmentation.augment.static import create_multiple_train_eval_static_augmentation_pipelines
from augmentation.datasets.utils import *
from augmentation.methods.robust.utils import *
from augmentation.models.models import *
from augmentation.utilities.config import recursively_create_config_simple_namespace
from augmentation.utilities.eval import evaluate_model
from augmentation.utilities.losses import create_loss_fn, decay_weights
from augmentation.utilities.metrics import create_metrics, update_metrics, reset_metrics, log_metrics_to_wandb
from augmentation.utilities.optim import build_optimizer, build_lr_scheduler
from augmentation.utilities.utils import basic_setup
from augmentation.utilities.checkpoint import *
from augmentation.utilities.wandb import *
import tempfile
def train_robust_model(config):
# Do basic setup
# assert len(config.logical_gpus) > 1, 'Must specify at least 2 logical GPUs for training robust models.'
basic_setup_info = basic_setup(seed=config.seed, logical_gpu_memory_limits=config.logical_gpus)
logical_gpus, devices = basic_setup_info.logical_gpus, basic_setup_info.devices
# Calculate how many folds we're looping over
    n_folds = 1 if not config.cross_validation else int(1. // config.validation_frac)
# Training loop
for fold in range(n_folds):
# Set up weights and biases
while True:
try:
if not config.resume:
# Start a new Weights and Biases run
wandb.init(entity=config.wandb_entity,
project=config.wandb_project,
group=config.wandb_group,
job_type=config.wandb_job_type,
reinit=True,
config=config,
dir=tempfile.mkdtemp(dir=os.getcwd()))
else:
# Resume a previous Weights and Biases run
wandb.init(entity=config.prev_wandb_entity,
project=config.prev_wandb_project,
id=config.prev_wandb_run_id,
reinit=True,
resume=True)
os.makedirs(f'{wandb.run.dir}/{config.checkpoint_path}', exist_ok=True)
break
except:
continue
# Setup the augmentation pipeline we'll be using: if only a single augmentation was passed, it will be applied
# to all the datasets
with devices[0]:
train_augmentations_pipelines, eval_augmentations_pipelines = \
create_multiple_train_eval_augmentation_pipelines(
train_augmentation_pipelines=config.train_augmentation_pipelines,
train_augmentation_pipelines_args=config.train_augmentation_pipelines_args,
eval_augmentation_pipelines=config.eval_augmentation_pipelines,
eval_augmentation_pipelines_args=config.eval_augmentation_pipelines_args,
broadcast_train_to=len(config.train_datasets),
broadcast_eval_to=len(config.eval_datasets))
train_gpu_augmentations_pipelines, eval_gpu_augmentations_pipelines = \
create_multiple_train_eval_augmentation_pipelines(
train_augmentation_pipelines=config.train_gpu_augmentation_pipelines,
train_augmentation_pipelines_args=config.train_gpu_augmentation_pipelines_args,
eval_augmentation_pipelines=config.eval_gpu_augmentation_pipelines,
eval_augmentation_pipelines_args=config.eval_gpu_augmentation_pipelines_args,
broadcast_train_to=len(config.train_datasets),
broadcast_eval_to=len(config.eval_datasets))
train_static_augmentations_pipelines, eval_static_augmentations_pipelines = \
create_multiple_train_eval_static_augmentation_pipelines(
train_augmentation_pipelines=config.train_static_augmentation_pipelines,
train_augmentation_pipelines_args=config.train_static_augmentation_pipelines_args,
eval_augmentation_pipelines=config.eval_static_augmentation_pipelines,
eval_augmentation_pipelines_args=config.eval_static_augmentation_pipelines_args,
broadcast_train_to=len(config.train_datasets),
broadcast_eval_to=len(config.eval_datasets))
# Get the dataset generators
(train_generators, val_generators, test_generators), \
(input_shape, n_classes, classes, n_training_examples, group_training_examples), \
(train_dataset_aliases, val_dataset_aliases, test_dataset_aliases) = \
fetch_list_of_data_generators_for_trainer(train_dataset_names=config.train_datasets,
train_dataset_versions=config.train_dataset_versions,
train_datadirs=config.train_datadirs,
train_dataset_aliases=config.train_dataset_aliases,
eval_dataset_names=config.eval_datasets,
eval_dataset_versions=config.eval_dataset_versions,
eval_datadirs=config.eval_datadirs,
eval_dataset_aliases=config.eval_dataset_aliases,
train_augmentations=train_augmentations_pipelines,
train_gpu_augmentations=train_gpu_augmentations_pipelines,
train_static_augmentations=train_static_augmentations_pipelines,
eval_augmentations=eval_augmentations_pipelines,
eval_gpu_augmentations=eval_gpu_augmentations_pipelines,
eval_static_augmentations=eval_static_augmentations_pipelines,
cache_dir=os.path.join(config.cache_dir, wandb.run.id),
validation_frac=config.validation_frac,
batch_size=config.batch_size,
dataflow=config.dataflow,
repeat=True,
shuffle_before_repeat=config.shuffle_before_repeat,
max_shuffle_buffer=config.max_shuffle_buffer,
train_shuffle_seeds=config.train_shuffle_seeds,
cross_validation=config.cross_validation,
fold=fold)
with devices[1]:
# Create the model
model = create_keras_classification_model(config.model_source,
config.architecture,
input_shape,
n_classes,
config.pretrained)
# Set things to float32
tf.keras.backend.set_floatx(config.dtype)
# Create a scheduler for the learning rate
steps_per_epoch = n_training_examples // config.baseline_batch_size
print(f"Number of total training examples: {n_training_examples}\nSteps per epoch: {steps_per_epoch}")
# Recalculate batch size per group
learning_rate_fn = build_lr_scheduler(scheduler=config.lr_scheduler,
steps_per_epoch=steps_per_epoch,
n_epochs=config.n_epochs,
lr_start=config.lr_start,
lr_decay_steps=config.lr_decay_steps,
lr_end=config.lr_end)
# Set up the optimizer
optimizer = build_optimizer(config.optimizer, learning_rate_fn, config.momentum)
# Compile the model
compile_keras_models([model], [optimizer])
# Set up the loss function and append it to the metrics
loss_fn = create_loss_fn(config.loss_name)
# Set up more specific loss info: the consistency training info, and GDRO loss
consistency_triplets, training_groups_mask, robust_loss_calc = get_loss_info(train_dataset_aliases,
config.augmentation_training,
group_training_examples,
config.gdro_adj_coef,
config.gdro_lr,
config.gdro_mixed,
)
# Set up the metrics being tracked
aggregate_metrics = create_metrics(config.metric_names, n_classes, output_labels=classes)
metrics_by_group = [create_metrics(config.metric_names, n_classes, output_labels=classes)
for _ in range(len(train_generators))]
eval_metrics_by_group = [create_metrics(config.metric_names, n_classes, output_labels=classes)
for _ in range(len(test_generators))]
# By default, assume we're starting training from scratch
start_epoch, start_step = 0, 0
# Resume a run from Weights and Biases
# This could be for continuing training, or reloading the model for testing its invariance
if config.resume:
start_epoch, start_step = reload_run(model=model,
optimizer=optimizer,
robust_loss_calc=robust_loss_calc,
wandb_run_id=config.prev_wandb_run_id,
wandb_project=config.prev_wandb_project,
wandb_entity=config.prev_wandb_entity,
wandb_ckpt_path=config.prev_ckpt_path,
resume_epoch=config.prev_ckpt_epoch,
continue_training=config.resume # only if we're continuing training
)
with devices[0]:
# Train the end model
_train_robust_model(train_generators=train_generators,
val_generators=val_generators,
test_generators=test_generators,
train_dataset_aliases=train_dataset_aliases,
val_dataset_aliases=val_dataset_aliases,
test_dataset_aliases=test_dataset_aliases,
model=model,
optimizer=optimizer,
loss_fn=loss_fn,
aggregate_metrics=aggregate_metrics,
metrics_by_group=metrics_by_group,
eval_metrics_by_group=eval_metrics_by_group,
n_epochs=config.n_epochs,
steps_per_epoch=steps_per_epoch,
max_global_grad_norm=config.max_global_grad_norm,
weight_decay_rate=config.weight_decay_rate,
irm_anneal_steps=config.irm_anneal_steps,
irm_penalty_weight=config.irm_penalty_weight,
robust_loss_calc=robust_loss_calc,
training_groups_mask=training_groups_mask,
consistency_triplets=consistency_triplets,
consistency_type=config.consistency_type,
consistency_penalty_weight=config.consistency_penalty_weight,
checkpoint_path=config.checkpoint_path,
checkpoint_freq=config.checkpoint_freq,
devices=devices,
start_step=start_step,
start_epoch=start_epoch,
dtype=config.dtype)
# Clean up by removing all the data caches
for cache in glob.glob(os.path.join(config.cache_dir, wandb.run.id) + '*'):
os.remove(cache)
def get_loss_info(train_dataset_aliases, augmentation_training, group_training_examples, gdro_adj_coef, gdro_lr,
gdro_mixed):
consistency_triplets = []
# TODO not general. We're grabbing subsets based on an assumed alias convention
af = {alias[:-5]: i for i, alias in enumerate(train_dataset_aliases) if alias[-5:] == '(A-F)'}
ag = {alias[:-5]: i for i, alias in enumerate(train_dataset_aliases) if alias[-5:] == '(A-G)'}
a = {alias: i for i, alias in enumerate(train_dataset_aliases) if alias in af}
for alias in af:
if alias not in a:
a[alias] = -1
assert a.keys() == af.keys() == ag.keys()
consistency_triplets = list(zip(a.values(), af.values(), ag.values()))
# Create mask over datasets indicating which ones are to be used for the main training loss
training_groups_mask = [True] * len(train_dataset_aliases)
if augmentation_training == 'original':
for orig_idx, f_idx, g_idx in consistency_triplets:
training_groups_mask[f_idx] = False
training_groups_mask[g_idx] = False
elif augmentation_training == 'augmented':
for orig_idx, f_idx, g_idx in consistency_triplets:
training_groups_mask[orig_idx] = False
elif augmentation_training == 'both':
pass
else:
assert False, f"augmentation_training value {augmentation_training} should be 'original', 'augmented', or 'both'"
assert sum(
training_groups_mask) > 0, \
"No training datasets are used for main loss calculation! Check config.augmentation_training and augmentation flags" # TODO it's conceivable that the user may want this, but have to check that some other loss is used (e.g. consistency)
print("Consistency triplets: ", consistency_triplets)
print("Dataset training mask: ", training_groups_mask)
print(flush=True)
# Set up the GDRO loss calculator
group_training_examples_used = [n for n, use in zip(group_training_examples, training_groups_mask) if use]
group_training_aliases_used = [a for a, use in zip(train_dataset_aliases, training_groups_mask) if use]
if gdro_mixed:
def extract(alias):
if '(Y=0)' in alias:
return 0
elif '(Y=1)' in alias:
return 1
else:
return -1
superclass_ids = [extract(alias) for alias in group_training_aliases_used]
else:
superclass_ids = [0] * len(group_training_aliases_used)
robust_loss_calc = GDROLoss(group_training_aliases_used, group_training_examples_used, superclass_ids,
gdro_adj_coef, gdro_lr)
return consistency_triplets, training_groups_mask, robust_loss_calc
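# Editor note (illustrative, alias names are hypothetical): get_loss_info assumes every
# original group alias 'X' comes with two augmented copies named 'X(A-F)' and 'X(A-G)',
# e.g. ['celeba (Y=0)', 'celeba (Y=0)(A-F)', 'celeba (Y=0)(A-G)', ...]; when gdro_mixed is
# set, the '(Y=0)' / '(Y=1)' substring of an alias determines its superclass id (-1 otherwise).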
# IRM Train Loop
def _train_robust_model(train_generators,
val_generators,
test_generators,
train_dataset_aliases,
val_dataset_aliases,
test_dataset_aliases,
model,
optimizer,
loss_fn,
aggregate_metrics,
metrics_by_group,
eval_metrics_by_group,
# batch_size,
n_epochs,
steps_per_epoch,
max_global_grad_norm,
weight_decay_rate,
irm_anneal_steps,
irm_penalty_weight,
robust_loss_calc,
# augmentation_training,
training_groups_mask,
consistency_triplets,
consistency_type,
consistency_penalty_weight,
checkpoint_path,
checkpoint_freq,
devices,
start_step=0, start_epoch=0, dtype=tf.float32):
def eval_and_log(split_name, model, generators, dataset_aliases, aggregate_metrics, eval_metrics_by_group, step):
# Evaluate the model on each evaluation set and log to weights and biases
reset_metrics(aggregate_metrics)
for i, generator in enumerate(generators):
log_metrics_to_wandb(evaluate_model(model, generator, eval_metrics_by_group[i], aggregate_metrics),
step=step, prefix=f'{split_name}_metrics/{dataset_aliases[i]}/')
log_metrics_to_wandb(aggregate_metrics, step=step, prefix=f'{split_name}_metrics/aggregate/')
# Keep track of how many gradient steps we've taken
# For the robust train loop, we track steps instead of epochs
# This is because each group in the dataset is processed at different rates
step = start_step
# Convert generators to iterators
# NOTE group DRO uses sampling with replacement to ensure each group has enough data
# this can be emulated by ensuring that the dataset modifiers have the form
# .repeat(-1).shuffle(N).batch(bs) for a value of N large relative to the dataset size
train_iterators = list(map(iter, train_generators))
# Function to create floatX inputs to the model
make_floatx_tensor = functools.partial(tf.convert_to_tensor, dtype=dtype)
if step == 0:
with devices[1]:
eval_and_log('validation', model, val_generators, val_dataset_aliases, aggregate_metrics,
eval_metrics_by_group, step)
# Run over the epochs
for epoch in range(start_epoch, n_epochs):
# Reset the metrics for each epoch
reset_metrics(aggregate_metrics)
for metrics in metrics_by_group:
reset_metrics(metrics)
for _ in range(steps_per_epoch):
# Get batches of data from each group's training iterator
group_batches, group_targets = tuple(zip(*[tuple(map(make_floatx_tensor, next(it)))
for it in train_iterators]))
# Compute the IRM penalty weight
step_irm_penalty_weight = irm_penalty_scheduler(step, irm_anneal_steps, irm_penalty_weight)
step_consistency_penalty_weight = consistency_penalty_scheduler(step, 0,
consistency_penalty_weight)
with devices[1]:
# Train using these group's batches of data
robust_loss, consistency_loss, irm_losses, group_losses, group_predictions, gradients = \
train_step_robust(model=model,
loss_fn=loss_fn,
group_batches=group_batches,
group_targets=group_targets,
optimizer=optimizer,
max_global_grad_norm=max_global_grad_norm,
weight_decay_rate=weight_decay_rate,
irm_penalty_weight=step_irm_penalty_weight,
robust_loss_calc=robust_loss_calc,
training_groups_mask=training_groups_mask,
consistency_type=consistency_type,
consistency_triplets=consistency_triplets,
consistency_penalty_weight=step_consistency_penalty_weight,
)
# Update the metrics
for targets, predictions, metrics in zip(group_targets, group_predictions, metrics_by_group):
update_metrics(aggregate_metrics, targets, predictions)
update_metrics(metrics, targets, predictions)
# Update the step counter
step += 1
# Log to weights and biases
log_robust_train_step_to_wandb(train_dataset_aliases, group_batches, group_targets, group_predictions,
group_losses, robust_loss, consistency_loss, step_consistency_penalty_weight,
irm_losses, step_irm_penalty_weight, gradients, model, optimizer,
robust_loss_calc, step,
log_images=(epoch == 0 and step < 10),
log_weights_and_grads=False)
del robust_loss, consistency_loss, group_losses, group_predictions, gradients
# Log the training metrics to weights and biases
log_metrics_to_wandb(aggregate_metrics, step, prefix='training_metrics/aggregate/')
for i, metrics in enumerate(metrics_by_group):
log_metrics_to_wandb(metrics, step, prefix=f'training_metrics/{train_dataset_aliases[i]}/')
with devices[1]:
# Evaluate the model on each validation set and log to weights and biases
eval_and_log('validation', model, val_generators, val_dataset_aliases, aggregate_metrics,
eval_metrics_by_group, step)
eval_and_log('test', model, test_generators, test_dataset_aliases, aggregate_metrics,
eval_metrics_by_group, step)
# End of epoch, log to weights and biases
wandb.log({'epochs': epoch + 1}, step=step)
# Store the model every few epochs
if (epoch + 1) % checkpoint_freq == 0:
model.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}.h5')
save_tf_optimizer_state(optimizer, f'{wandb.run.dir}/{checkpoint_path}/optimizer_{epoch + 1}.pkl')
np.save(f'{wandb.run.dir}/{checkpoint_path}/gdro_{epoch + 1}.npy',
robust_loss_calc._adv_prob_logits.numpy())
# Sync the model to the cloud
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}.h5')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/optimizer_{epoch + 1}.pkl')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/gdro_{epoch + 1}.npy')
with devices[1]: # TODO: add flags to make the with device optional
# Evaluate the model on each test set and log to weights and biases
reset_metrics(aggregate_metrics)
for i, test_generator in enumerate(test_generators):
log_metrics_to_wandb(evaluate_model(model, test_generator, eval_metrics_by_group[i], aggregate_metrics),
step=step, prefix=f'test_metrics/{test_dataset_aliases[i]}/')
log_metrics_to_wandb(aggregate_metrics, step=step, prefix=f'test_metrics/aggregate/')
# Commits everything
wandb.log({})
# Robust Train Step
def train_step_robust(model,
loss_fn,
group_batches,
group_targets,
optimizer,
max_global_grad_norm,
weight_decay_rate,
irm_penalty_weight,
robust_loss_calc,
                      training_groups_mask,  # boolean mask over groups whose losses are used for training
consistency_type,
consistency_triplets,
consistency_penalty_weight,
):
def _train_step_robust(_group_batches, _group_targets):
group_losses, group_predictions = [], []
irm_losses = []
gdro_losses = []
loss_idxs = list(range(len(training_groups_mask)))
loss_batches = [_group_batches[i] for i in loss_idxs]
# Compute the batch sizes of each group's batch
loss_batch_sizes = [e.shape[0] for e in loss_batches]
# Concatenate into one tensor
concat_batch = tf.concat(loss_batches, axis=0)
# Open up the gradient tape
with tf.GradientTape() as tape:
# Pass through the model
loss_predictions = model(concat_batch, training=True)
# Split up the predictions
loss_group_predictions = tf.split(loss_predictions, loss_batch_sizes, axis=0)
# Scatter loss back to one list
group_predictions = [None] * len(group_batches)
for i, idx in enumerate(loss_idxs):
group_predictions[idx] = loss_group_predictions[i]
for (_use_for_training, targets, predictions) in zip(training_groups_mask, _group_targets,
group_predictions):
if _use_for_training:
# Compute the loss
loss = loss_fn(targets, predictions)
# Compute the IRM penalty
irm_penalty = irm_penalty_explicit(targets, tf.math.log(predictions + 1e-6), irm_penalty_weight)
loss = loss + irm_penalty
irm_losses.append(irm_penalty)
# Rescale the loss
# loss = irm_loss_rescale(loss, irm_penalty_weight)
gdro_losses.append(loss)
group_losses.append(loss)
else:
# Trick to ensure that all losses can be logged
group_losses.append(tf.convert_to_tensor(0.))
irm_losses.append(tf.convert_to_tensor(0.))
# Compute the robust loss
robust_loss = robust_loss_calc.compute_loss(gdro_losses)
# Compute the l2 regularizer
robust_loss = robust_loss + decay_weights(model, weight_decay_rate)
# Compute the consistency loss
consistency_loss = tf.convert_to_tensor(0.)
if consistency_triplets is not None:
for orig, f, g in consistency_triplets:
consistency_loss += consistency_penalty(group_predictions[orig],
group_predictions[f],
group_predictions[g],
consistency_type,
consistency_penalty_weight)
robust_loss = robust_loss + consistency_loss
# Compute gradients
gradients = tape.gradient(robust_loss, model.trainable_weights)
# Clip the gradients
if max_global_grad_norm > 0.:
gradients, _ = tf.clip_by_global_norm(gradients, max_global_grad_norm)
# Apply the gradients to the model
optimizer.apply_gradients(zip(gradients, model.trainable_weights))
# Delete the tape
del tape
# Return the group losses, group predictions and gradients
return robust_loss, consistency_loss, irm_losses, group_losses, group_predictions, gradients
return _train_step_robust(group_batches, group_targets)
def setup_and_train_robust_model(args):
# Load up the config
config = recursively_create_config_simple_namespace(args.config, args.template_config)
# Train the end model
train_robust_model(config)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True)
parser.add_argument('--template_config', type=str, default='augmentation/configs/template_robust_training.yaml')
# Set up the configuration and train the end model
setup_and_train_robust_model(parser.parse_args())
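    # Editor note (usage sketch, config path is an example):
    #   python augmentation/methods/robust/train.py --config augmentation/configs/my_robust_config.yaml
    # where the template config above is expected to supply defaults that --config overrides.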
|
model-patching-master
|
augmentation/methods/robust/train.py
|
import tensorflow as tf
import numpy as np
from tensorflow_examples.models.pix2pix.pix2pix import upsample, downsample, InstanceNormalization
def unet_generator(output_channels, input_shape=(256, 256, 3), norm_type='batchnorm', output_init=0.02,
residual_output=False):
"""Modified u-net generator model (https://arxiv.org/abs/1611.07004).
Args:
output_channels: Output channels
norm_type: Type of normalization. Either 'batchnorm' or 'instancenorm'.
Returns:
Generator model
"""
assert input_shape[0] <= 256 and input_shape[1] <= 256, 'Input shape must be less than (256, 256, 3).'
assert input_shape[0] == input_shape[1], 'Modify padding to handle this.'
ceil_pow2 = int(2 ** np.ceil(np.log2(input_shape[0])))
if ceil_pow2 == 256:
down_stack = [
downsample(64, 4, norm_type, apply_norm=False), # (bs, 128, 128, 64)
downsample(128, 4, norm_type), # (bs, 64, 64, 128)
downsample(256, 4, norm_type), # (bs, 32, 32, 256)
downsample(512, 4, norm_type), # (bs, 16, 16, 512)
downsample(512, 4, norm_type), # (bs, 8, 8, 512)
downsample(512, 4, norm_type), # (bs, 4, 4, 512)
downsample(512, 4, norm_type), # (bs, 2, 2, 512)
downsample(512, 4, norm_type), # (bs, 1, 1, 512)
]
up_stack = [
upsample(512, 4, norm_type, apply_dropout=True), # (bs, 2, 2, 1024)
upsample(512, 4, norm_type, apply_dropout=True), # (bs, 4, 4, 1024)
upsample(512, 4, norm_type, apply_dropout=True), # (bs, 8, 8, 1024)
upsample(512, 4, norm_type), # (bs, 16, 16, 1024)
upsample(256, 4, norm_type), # (bs, 32, 32, 512)
upsample(128, 4, norm_type), # (bs, 64, 64, 256)
upsample(64, 4, norm_type), # (bs, 128, 128, 128)
]
elif ceil_pow2 == 128:
down_stack = [
downsample(64, 4, norm_type, apply_norm=False), # (bs, 128, 128, 64)
downsample(128, 4, norm_type), # (bs, 64, 64, 128)
downsample(256, 4, norm_type), # (bs, 32, 32, 256)
downsample(512, 4, norm_type), # (bs, 16, 16, 512)
downsample(512, 4, norm_type), # (bs, 8, 8, 512)
downsample(512, 4, norm_type), # (bs, 4, 4, 512)
downsample(512, 4, norm_type), # (bs, 2, 2, 512)
]
up_stack = [
upsample(512, 4, norm_type, apply_dropout=True), # (bs, 4, 4, 1024)
upsample(512, 4, norm_type, apply_dropout=True), # (bs, 8, 8, 1024)
upsample(512, 4, norm_type), # (bs, 16, 16, 1024)
upsample(256, 4, norm_type), # (bs, 32, 32, 512)
upsample(128, 4, norm_type), # (bs, 64, 64, 256)
upsample(64, 4, norm_type), # (bs, 128, 128, 128)
]
else:
raise NotImplementedError
initializer = tf.random_normal_initializer(0., output_init)
last = tf.keras.layers.Conv2DTranspose(
output_channels, 4, strides=2,
padding='same', kernel_initializer=initializer,
activation='tanh') # (bs, 256, 256, 3)
concat = tf.keras.layers.Concatenate()
inputs = tf.keras.layers.Input(shape=input_shape)
x = inputs
padding = int((ceil_pow2 - input_shape[0]) // 2)
if padding > 0:
x = tf.keras.layers.ZeroPadding2D(padding=padding)(x)
# Downsampling through the model
skips = []
for down in down_stack:
x = down(x)
skips.append(x)
skips = reversed(skips[:-1])
# Upsampling and establishing the skip connections
for up, skip in zip(up_stack, skips):
x = up(x)
x = concat([x, skip])
x = last(x)
if padding > 0:
x = tf.keras.layers.Cropping2D(cropping=padding)(x)
outputs = x
if residual_output:
# inputs is in [-1, 1], so offset should be in [-2, 2] to flip a pixel completely
outputs = 2 * x + inputs
return tf.keras.Model(inputs=inputs, outputs=outputs)
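# Minimal usage sketch (editor-added, illustrative only): for inputs smaller than 256x256 the
# generator zero-pads up to the next power of two (e.g. 224 -> 256) and crops back on output.
def _unet_generator_example():
    gen = unet_generator(output_channels=3, input_shape=(224, 224, 3), norm_type='instancenorm')
    return gen  # gen.output_shape == (None, 224, 224, 3)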
def mnist_discriminator(norm_type='batchnorm', target=True):
"""PatchGan discriminator model (https://arxiv.org/abs/1611.07004).
Args:
norm_type: Type of normalization. Either 'batchnorm' or 'instancenorm'.
target: Bool, indicating whether target image is an input or not.
Returns:
Discriminator model
"""
initializer = tf.random_normal_initializer(0., 0.02)
inp = tf.keras.layers.Input(shape=[28, 28, 1], name='input_image')
x = inp
if target:
tar = tf.keras.layers.Input(shape=[28, 28, 1], name='target_image')
x = tf.keras.layers.concatenate([inp, tar]) # (bs, 256, 256, channels*2)
down1 = downsample(64, 4, norm_type, False)(x) # (bs, 128, 128, 64)
down2 = downsample(128, 4, norm_type)(down1) # (bs, 64, 64, 128)
down3 = downsample(256, 4, norm_type)(down2) # (bs, 32, 32, 256)
zero_pad1 = tf.keras.layers.ZeroPadding2D()(down3) # (bs, 34, 34, 256)
conv = tf.keras.layers.Conv2D(
512, 4, strides=1, kernel_initializer=initializer,
use_bias=False)(zero_pad1) # (bs, 31, 31, 512)
if norm_type.lower() == 'batchnorm':
norm1 = tf.keras.layers.BatchNormalization()(conv)
elif norm_type.lower() == 'instancenorm':
norm1 = InstanceNormalization()(conv)
leaky_relu = tf.keras.layers.LeakyReLU()(norm1)
zero_pad2 = tf.keras.layers.ZeroPadding2D()(leaky_relu) # (bs, 33, 33, 512)
last = tf.keras.layers.Conv2D(
1, 4, strides=1,
kernel_initializer=initializer)(zero_pad2) # (bs, 30, 30, 1)
if target:
return tf.keras.Model(inputs=[inp, tar], outputs=last)
else:
return tf.keras.Model(inputs=inp, outputs=last)
def mnist_unet_generator(norm_type='batchnorm'):
"""Modified u-net generator model (https://arxiv.org/abs/1611.07004).
Args:
output_channels: Output channels
norm_type: Type of normalization. Either 'batchnorm' or 'instancenorm'.
Returns:
Generator model
"""
down_stack = [
downsample(32, 4, norm_type, apply_norm=False), # (bs, 128, 128, 64)
downsample(64, 4, norm_type), # (bs, 64, 64, 128)
downsample(128, 4, norm_type), # (bs, 32, 32, 256)
downsample(256, 4, norm_type), # (bs, 16, 16, 512)
downsample(512, 4, norm_type), # (bs, 16, 16, 512)
]
up_stack = [
upsample(256, 4, norm_type), # (bs, 16, 16, 1024)
upsample(128, 4, norm_type), # (bs, 16, 16, 1024)
upsample(64, 4, norm_type), # (bs, 32, 32, 512)
upsample(32, 4, norm_type), # (bs, 64, 64, 256)
upsample(16, 4, norm_type), # (bs, 128, 128, 128)
]
initializer = tf.random_normal_initializer(0., 0.02)
last = tf.keras.layers.Conv2DTranspose(
1, 4, strides=2,
padding='same', kernel_initializer=initializer,
activation='tanh') # (bs, 256, 256, 3)
concat = tf.keras.layers.Concatenate()
inputs = tf.keras.layers.Input(shape=[28, 28, 1])
x = inputs
x = tf.keras.layers.ZeroPadding2D(padding=2)(x)
# Downsampling through the model
skips = []
for down in down_stack:
x = down(x)
skips.append(x)
skips = reversed(skips[:-1])
# Upsampling and establishing the skip connections
for up, skip in zip(up_stack, skips):
x = up(x)
x = concat([x, skip])
x = last(x)
x = tf.keras.layers.Cropping2D(cropping=2)(x)
return tf.keras.Model(inputs=inputs, outputs=x)
|
model-patching-master
|
augmentation/methods/cyclegan/models.py
|
import datetime
import tensorflow as tf
import random
import wandb
from tensorflow_examples.models.pix2pix import pix2pix
from augmentation.dataflows.utils import create_paired_direct_dataflow, \
create_paired_parallel_dataflow_via_numpy
from augmentation.methods.cyclegan.models import mnist_unet_generator, mnist_discriminator, unet_generator
from augmentation.utilities.optim import build_lr_scheduler
from augmentation.utilities.visualize import gallery
# Other places to look for training GANs
# https://github.com/eriklindernoren/Keras-GAN
def gradient_penalty(f, real, fake, mode, scale=10.0):
# https://github.com/LynnHo/CycleGAN-Tensorflow-2/blob/master/tf2gan/loss.py
def _gradient_penalty(f, real, fake=None):
def _interpolate(a, b=None):
if b is None: # interpolation in DRAGAN
beta = tf.random.uniform(shape=tf.shape(a), minval=0., maxval=1.)
b = a + 0.5 * tf.math.reduce_std(a) * beta
shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
alpha = tf.random.uniform(shape=shape, minval=0., maxval=1.)
inter = a + alpha * (b - a)
inter.set_shape(a.shape)
return inter
x = _interpolate(real, fake)
with tf.GradientTape() as t:
t.watch(x)
pred = tf.reduce_mean(tf.reshape(f(x), [tf.shape(real)[0], -1]), axis=1)
grad = t.gradient(pred, x)
norm = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=1)
gp = tf.reduce_mean((norm - 1.) ** 2)
return gp
if mode == 'none':
gp = tf.constant(0, dtype=real.dtype)
elif mode == 'dragan':
gp = _gradient_penalty(f, real)
elif mode == 'wgan-gp':
gp = _gradient_penalty(f, real, fake)
else:
raise NotImplementedError
return gp * scale
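# Editor note (usage sketch, tensor names are hypothetical): for a WGAN-GP style penalty on a
# discriminator D,
#   gp = gradient_penalty(D, real_batch, fake_batch, mode='wgan-gp', scale=10.0)
# 'dragan' perturbs the real batch instead of interpolating with fakes, and 'none' returns 0.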
class ReplayBuffer(object):
"""
Adapted from https://github.com/tensorflow/models/blob/master/research/pcl_rl/replay_buffer.py
"""
def __init__(self, max_size):
self.max_size = max_size
self.cur_size = 0
self.buffer = {}
self.oldest_idx = 0
self.init_length = 0
def __len__(self):
return self.cur_size
def add(self, images):
idx = 0
while self.cur_size < self.max_size and idx < len(images):
self.buffer[self.cur_size] = images[idx]
self.cur_size += 1
idx += 1
if idx < len(images):
remove_idxs = self.remove_n(len(images) - idx)
for remove_idx in remove_idxs:
self.buffer[remove_idx] = images[idx]
idx += 1
assert len(self.buffer) == self.cur_size
def remove_n(self, n):
return random.sample(range(self.init_length, self.cur_size), n)
def get_batch(self, n):
idxs = random.sample(range(self.cur_size), n)
return [self.buffer[idx] for idx in idxs]
def get_tf_batch(self, n):
idxs = random.sample(range(self.cur_size), n)
return tf.convert_to_tensor([self.buffer[idx] for idx in idxs])
def wgan_loss(targets, predictions):
return tf.reduce_mean((-2 * targets + 1.) * predictions)
def build_gan_loss_fn(loss_name):
if loss_name == 'bce':
return tf.keras.losses.BinaryCrossentropy(from_logits=True)
elif loss_name == 'lsgan':
return tf.keras.losses.MeanSquaredError()
elif loss_name == 'wgan':
return wgan_loss
else:
raise NotImplementedError
def discriminator_loss(real, generated, loss_fn):
# Classification loss for the discriminator, maximize log-prob of the real example
real_loss = loss_fn(tf.ones_like(real), real)
generated_loss = loss_fn(tf.zeros_like(generated), generated)
total_disc_loss = real_loss + generated_loss
return total_disc_loss * 0.5
def generator_loss(generated, loss_fn):
# The discriminator's probability (generated) for realness is maximized
return loss_fn(tf.ones_like(generated), generated)
def cycle_loss(real_image, cycled_image, scale):
    # Cycle-consistency using an L1 loss
return scale * tf.reduce_mean(tf.abs(real_image - cycled_image))
def identity_loss(real_image, same_image, scale):
# Map the image to itself and compute the L1 loss
return scale * 0.5 * tf.reduce_mean(tf.abs(real_image - same_image))
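# Editor note (illustrative; the exact weighting is set by config.cycle_loss_scale and
# config.identity_loss_scale): in the CycleGAN objective these terms are combined roughly as
#   total_gen_g_loss = generator_loss(disc_fake_y, loss_fn) \
#                      + cycle_loss(real_x, cycled_x, cycle_loss_scale) \
#                      + identity_loss(real_y, same_y, identity_loss_scale)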
def build_cyclegan_models(n_channels, norm_type):
assert norm_type in ['instancenorm', 'batchnorm']
generator_g = pix2pix.unet_generator(n_channels, norm_type=norm_type)
generator_f = pix2pix.unet_generator(n_channels, norm_type=norm_type)
discriminator_x = pix2pix.discriminator(norm_type=norm_type, target=False)
discriminator_y = pix2pix.discriminator(norm_type=norm_type, target=False)
return generator_g, generator_f, discriminator_x, discriminator_y
def build_mnist_cyclegan_models(norm_type):
assert norm_type in ['instancenorm', 'batchnorm']
generator_g = mnist_unet_generator(norm_type=norm_type)
generator_f = mnist_unet_generator(norm_type=norm_type)
discriminator_x = mnist_discriminator(norm_type=norm_type, target=False)
discriminator_y = mnist_discriminator(norm_type=norm_type, target=False)
return generator_g, generator_f, discriminator_x, discriminator_y
def get_models_from_input_shape(input_shape, norm_type, output_init=0.02, residual_output=False):
if input_shape == (28, 28, 1):
# MNIST-like data
return mnist_unet_generator(norm_type=norm_type), \
mnist_discriminator(norm_type=norm_type, target=False)
elif input_shape == (256, 256, 3):
# TODO: just use our unet_generator fn
if residual_output is True or output_init != 0.02:
raise NotImplementedError
return pix2pix.unet_generator(output_channels=3, norm_type=norm_type), \
pix2pix.discriminator(norm_type=norm_type, target=False)
else:
return unet_generator(output_channels=3, input_shape=input_shape, norm_type=norm_type,
output_init=output_init, residual_output=residual_output), \
pix2pix.discriminator(norm_type=norm_type, target=False)
def build_models(source_input_shape, target_input_shape, norm_type, output_init=0.02, residual_output=False):
assert norm_type in ['instancenorm', 'batchnorm']
generator_s_to_t, discriminator_s = get_models_from_input_shape(source_input_shape, norm_type, output_init, residual_output)
generator_t_to_s, discriminator_t = get_models_from_input_shape(target_input_shape, norm_type, output_init, residual_output)
return generator_s_to_t, generator_t_to_s, discriminator_s, discriminator_t
def build_optimizers(lr_gen=2e-4, lr_disc=2e-4,
beta_1_gen=0.5, beta_1_disc=0.5,
lr_scheduler='constant', lr_decay_steps=None):
generator_g_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_gen)
generator_f_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_gen,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_gen)
discriminator_x_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_disc)
discriminator_y_optimizer = tf.keras.optimizers.Adam(build_lr_scheduler(lr_scheduler, 0, 0, lr_disc,
lr_decay_steps=lr_decay_steps),
beta_1=beta_1_disc)
return generator_g_optimizer, generator_f_optimizer, discriminator_x_optimizer, discriminator_y_optimizer
def create_cyclegan_data_generator(source_dataset, target_dataset, batch_size, augmentations,
dataflow, cache_dir):
if dataflow == 'disk_cached':
cache_dir = cache_dir + datetime.datetime.now().strftime('%d_%m_%y__%H_%M_%S')
# Shuffle hangs sometimes (e.g. for horse2zebra)
return create_paired_direct_dataflow(source_dataset, target_dataset, batch_size,
augmentations, x_only=True,
cache_dir1=cache_dir + '1',
cache_dir2=cache_dir + '2',
shuffle=True)
elif dataflow == 'in_memory':
return create_paired_parallel_dataflow_via_numpy(source_dataset, target_dataset,
batch_size, augmentations, x_only=True)
else:
raise NotImplementedError
def generate_and_log_one_image_batch(data_generator,
generator_g,
generator_f,
step):
# Grab a batch from the dataset
for real_x, real_y in data_generator:
# Convert to tensors
real_x, real_y = tf.convert_to_tensor(real_x), tf.convert_to_tensor(real_y)
# Compute the fake examples
fake_y = generator_g(real_x, training=True)
fake_x = generator_f(real_y, training=True)
# Cycle the fake examples
cycled_x = generator_f(fake_y, training=True)
cycled_y = generator_g(fake_x, training=True)
# Compute the identity examples
same_x = generator_f(real_x, training=True)
same_y = generator_g(real_y, training=True)
# Log everything to Weights and Biases
wandb.log({'test/real_x': wandb.Image(gallery(real_x.numpy() * 0.5 + 0.5)),
'test/fake_x': wandb.Image(gallery(fake_x.numpy() * 0.5 + 0.5)),
'test/cycled_x': wandb.Image(gallery(cycled_x.numpy() * 0.5 + 0.5)),
'test/same_x': wandb.Image(gallery(same_x.numpy() * 0.5 + 0.5)),
'test/real_y': wandb.Image(gallery(real_y.numpy() * 0.5 + 0.5)),
'test/fake_y': wandb.Image(gallery(fake_y.numpy() * 0.5 + 0.5)),
'test/cycled_y': wandb.Image(gallery(cycled_y.numpy() * 0.5 + 0.5)),
'test/same_y': wandb.Image(gallery(same_y.numpy() * 0.5 + 0.5))}, step=step)
        # Break after a single batch; removing this break causes problems with wandb logging, so only one batch is visualized
break
if __name__ == '__main__':
buffer = ReplayBuffer(1)
buffer.add([1])
buffer.add([2])
buffer.add([3])
print(buffer.get_batch(1))
print(buffer.get_batch(1))
print(buffer.get_batch(1))
buffer.add([4])
print(buffer.get_batch(1))
print(buffer.buffer)
buffer = ReplayBuffer(1)
buffer.add(tf.convert_to_tensor([1]))
buffer.add(tf.convert_to_tensor([2]))
buffer.add(tf.convert_to_tensor([3]))
print(tf.convert_to_tensor(buffer.get_batch(1)))
print(buffer.get_batch(1))
print(buffer.get_batch(1))
buffer.add(tf.convert_to_tensor([4]))
print(buffer.get_batch(1))
print(buffer.buffer)
|
model-patching-master
|
augmentation/methods/cyclegan/utils.py
|
import argparse
import os
import functools
import time
import subprocess
from augmentation.utilities.config import *
from augmentation.utilities.metrics import *
from augmentation.datasets.utils import get_processed_dataset_info, apply_modifier_to_dataset_payload, load_dataset
from augmentation.dataflows.utils import dataflow_len
from augmentation.augment.utils import create_augmentation_pipelines
from augmentation.models.models import *
from augmentation.methods.cyclegan.utils import *
from augmentation.utilities.checkpoint import *
from augmentation.utilities.visualize import *
from augmentation.utilities.wandb import load_wandb_run, load_most_recent_keras_model_weights, \
get_most_recent_model_file, particular_checkpoint_step_extractor
from augmentation.utilities.utils import basic_setup
def train_cyclegan(config):
# Do basic setup
basic_setup(seed=config.seed, logical_gpu_memory_limits=(14336,))
# Set up the source dataset
source_dataset_payload = load_dataset(config.source_dataset,
config.source_dataset_version,
config.datadir,
config.validation_frac)
target_dataset_payload = load_dataset(config.target_dataset,
config.target_dataset_version,
config.datadir,
config.validation_frac)
# Get some dataset information
source_proc_dataset_info = get_processed_dataset_info(source_dataset_payload.dataset_info,
config.validation_frac, config.batch_size)
target_proc_dataset_info = get_processed_dataset_info(target_dataset_payload.dataset_info,
config.validation_frac, config.batch_size)
source_input_shape = source_proc_dataset_info.input_shape
target_input_shape = target_proc_dataset_info.input_shape
# Do selection on each dataset
source_dataset_payload = apply_modifier_to_dataset_payload(source_dataset_payload, config.source_dataset_modifier)
target_dataset_payload = apply_modifier_to_dataset_payload(target_dataset_payload, config.target_dataset_modifier)
# Setup the augmentation pipeline we'll be using
train_augmentations, val_augmentations, test_augmentations = \
create_augmentation_pipelines(config.train_daug_pipeline, config.train_daug_pipeline_args,
config.val_daug_pipeline, config.val_daug_pipeline_args,
config.test_daug_pipeline, config.test_daug_pipeline_args)
# Create the data generators
train_generator = create_cyclegan_data_generator(source_dataset_payload.train_dataset,
target_dataset_payload.train_dataset,
config.batch_size,
train_augmentations,
config.dataflow,
config.cache_dir + 'train')
test_generator = create_cyclegan_data_generator(source_dataset_payload.test_dataset,
target_dataset_payload.test_dataset,
config.batch_size,
test_augmentations,
config.dataflow,
config.cache_dir + 'test')
# Create the models
generator_g, generator_f, discriminator_x, discriminator_y = \
build_models(source_input_shape, target_input_shape, config.norm_type, config.output_init, config.residual_outputs)
generator_g.summary()
generator_f.summary()
discriminator_x.summary()
discriminator_y.summary()
# Set up the optimizers
generator_optimizer, _, discriminator_optimizer, _ = build_optimizers(lr_gen=config.lr_gen,
lr_disc=config.lr_disc,
beta_1_gen=config.beta_1_gen,
beta_1_disc=config.beta_1_disc,
lr_scheduler=config.lr_scheduler,
lr_decay_steps=config.n_epochs *
dataflow_len(train_generator))
# Compile the models
compile_keras_models([generator_g, generator_f, discriminator_x, discriminator_y],
[generator_optimizer, generator_optimizer, discriminator_optimizer, discriminator_optimizer])
# Create the replay buffers for the discriminators
disc_x_replay, disc_y_replay = ReplayBuffer(config.replay_buffer_size), ReplayBuffer(config.replay_buffer_size)
# Define the loss function to pass to the generator and discriminator
gan_loss_fn = build_gan_loss_fn(config.gan_loss)
# By default, assume we're starting training from scratch
start_epoch, start_step = 0, 0
if config.resume:
# If we're resuming a run
prev_run = load_wandb_run(config.prev_wandb_run_id, config.prev_wandb_project, config.prev_wandb_entity)
# If the previous run crashed, wandb_ckpt_path should be '': this is the typical use case
# but this should be changed in the future
step_extraction_fn = lambda fname: fname.split("_")[1].split(".")[0]
_, gen_g_ep = load_most_recent_keras_model_weights(generator_g, prev_run, model_name='generator_g',
wandb_ckpt_path=config.prev_ckpt_path,
step_extractor=step_extraction_fn)
_, gen_f_ep = load_most_recent_keras_model_weights(generator_f, prev_run, model_name='generator_f',
wandb_ckpt_path=config.prev_ckpt_path,
step_extractor=step_extraction_fn)
_, disc_x_ep = load_most_recent_keras_model_weights(discriminator_x, prev_run, model_name='discriminator_x',
wandb_ckpt_path=config.prev_ckpt_path,
step_extractor=step_extraction_fn)
_, disc_y_ep = load_most_recent_keras_model_weights(discriminator_y, prev_run, model_name='discriminator_y',
wandb_ckpt_path=config.prev_ckpt_path,
step_extractor=step_extraction_fn)
assert gen_g_ep == gen_f_ep == disc_x_ep == disc_y_ep, 'All restored models should be from the same epoch.'
if gen_g_ep is not None:
start_epoch, start_step = gen_g_ep, 0
for line in prev_run.history():
if 'epochs' in line and line['epochs'] == start_epoch:
start_step = line['steps']
break
# Reloading the optimizer states from that epoch
step_extraction_fn = lambda fname: fname.split(".")[0].split("_")[-1]
gen_opt_ckpt = get_most_recent_model_file(prev_run,
wandb_ckpt_path=config.prev_ckpt_path,
model_name='generator_optimizer',
step_extractor=
particular_checkpoint_step_extractor(start_epoch,
step_extractor=
step_extraction_fn))
load_tf_optimizer_state(generator_optimizer, gen_opt_ckpt.name)
disc_opt_ckpt = get_most_recent_model_file(prev_run,
wandb_ckpt_path=config.prev_ckpt_path,
model_name='discriminator_optimizer',
step_extractor=
particular_checkpoint_step_extractor(start_epoch,
step_extractor=
step_extraction_fn))
load_tf_optimizer_state(discriminator_optimizer, disc_opt_ckpt.name)
# Set up weights and biases
while True:
try:
if not config.resume:
# Start a new Weights and Biases run
wandb.init(entity=config.wandb_entity,
project=config.wandb_project,
group=config.wandb_group,
job_type=config.wandb_job_type,
reinit=True,
config=config)
else:
# Resume a previous Weights and Biases run
wandb.init(entity=config.prev_wandb_entity,
project=config.prev_wandb_project,
id=config.prev_wandb_run_id,
reinit=True,
resume=True)
os.makedirs(f'{wandb.run.dir}/{config.checkpoint_path}', exist_ok=True)
break
except:
continue
_train_cyclegan(train_data_generator=train_generator,
val_data_generator=test_generator,
generator_g=generator_g,
generator_f=generator_f,
discriminator_x=discriminator_x,
discriminator_y=discriminator_y,
generator_optimizer=generator_optimizer,
discriminator_optimizer=discriminator_optimizer,
discriminator_x_replay=disc_x_replay,
discriminator_y_replay=disc_y_replay,
metrics=None,
batch_size=None,
n_epochs=config.n_epochs,
gan_loss_fn=gan_loss_fn,
cycle_loss_scale_x=config.cycle_loss_scale,
cycle_loss_scale_y=config.cycle_loss_scale * (1 - config.source_cycle_loss_only),
identity_loss_scale=config.identity_loss_scale,
grad_penalty=config.grad_penalty,
grad_penalty_scale=config.grad_penalty_scale,
checkpoint_path=config.checkpoint_path,
checkpoint_freq=config.checkpoint_freq,
image_log_freq=config.image_log_freq,
start_step=start_step, start_epoch=start_epoch)
def _train_cyclegan(train_data_generator, val_data_generator,
generator_g, generator_f,
discriminator_x, discriminator_y,
generator_optimizer, discriminator_optimizer,
discriminator_x_replay, discriminator_y_replay,
metrics, batch_size, n_epochs,
gan_loss_fn, cycle_loss_scale_x, cycle_loss_scale_y, identity_loss_scale,
grad_penalty, grad_penalty_scale,
checkpoint_path, checkpoint_freq,
image_log_freq=50,
start_step=0, start_epoch=0):
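    """Inner CycleGAN training loop.

    For every batch from the two image domains this alternates a generator update and a
    discriminator update, logs losses, prediction histograms, gradient norms and sample
    images to Weights & Biases, and checkpoints model weights plus optimizer state every
    `checkpoint_freq` epochs.
    """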
# Keep track of how many gradient steps we've taken
step = start_step
# Multiple training epochs
for epoch in range(start_epoch, n_epochs):
# Iterate over the dataset
for batch_x, batch_y in train_data_generator:
# Convert to tensors
batch_x, batch_y = tf.convert_to_tensor(batch_x), tf.convert_to_tensor(batch_y)
# Train using this batch of data
gen_losses, gen_predictions, gen_gradients = train_step_generator(generator_g, generator_f,
discriminator_x, discriminator_y,
gan_loss_fn,
batch_x, batch_y,
generator_optimizer,
discriminator_x_replay,
discriminator_y_replay,
cycle_loss_scale_x, cycle_loss_scale_y,
identity_loss_scale)
disc_losses, disc_predictions, disc_gradients = train_step_discriminator(discriminator_x, discriminator_y,
gan_loss_fn,
batch_x, batch_y,
discriminator_optimizer,
discriminator_x_replay,
discriminator_y_replay,
grad_penalty,
grad_penalty_scale)
# Update the step counter
step += 1
# Unpack and log to weights and biases
(gen_g_loss, gen_f_loss, cycle_loss_x, cycle_loss_y, identity_loss_x, identity_loss_y) = gen_losses
((same_x, fake_x, cycled_x, disc_fake_x),
(same_y, fake_y, cycled_y, disc_fake_y)) = gen_predictions
(disc_x_loss, disc_y_loss, disc_x_gp, disc_y_gp) = disc_losses
((disc_real_x, disc_sampled_fake_x),
(disc_real_y, disc_sampled_fake_y)) = disc_predictions
wandb.log({'training_metrics/gen_g_loss': gen_g_loss.numpy(),
'training_metrics/gen_f_loss': gen_f_loss.numpy(),
'training_metrics/cycle_loss_x': cycle_loss_x.numpy(),
'training_metrics/cycle_loss_y': cycle_loss_y.numpy(),
'training_metrics/identity_loss_x': identity_loss_x.numpy(),
'training_metrics/identity_loss_y': identity_loss_y.numpy(),
'training_metrics/disc_x_loss': disc_x_loss.numpy(),
'training_metrics/disc_y_loss': disc_y_loss.numpy(),
'training_metrics/disc_x_gp': disc_x_gp.numpy(),
'training_metrics/disc_y_gp': disc_y_gp.numpy(),
'predictions/disc_real_x': wandb.Histogram(disc_real_x.numpy()),
'predictions/disc_real_y': wandb.Histogram(disc_real_y.numpy()),
'predictions/disc_fake_x': wandb.Histogram(disc_fake_x.numpy()),
'predictions/disc_fake_y': wandb.Histogram(disc_fake_y.numpy()),
'predictions/disc_sampled_fake_x': wandb.Histogram(disc_sampled_fake_x.numpy()),
'predictions/disc_sampled_fake_y': wandb.Histogram(disc_sampled_fake_y.numpy()),
'gradient_norms/generators': tf.linalg.global_norm(gen_gradients).numpy(),
'gradient_norms/discriminators': tf.linalg.global_norm(disc_gradients).numpy(),
'learning_rates/generators': generator_optimizer._decayed_lr(tf.float32).numpy(),
'learning_rates/discriminators': discriminator_optimizer._decayed_lr(tf.float32).numpy(),
'steps': step},
step=step)
# Log images frequently to admire
if step % image_log_freq == 0:
# Use a (* 0.5 + 0.5) offset before visualizing since the data lies in [-1, 1]
wandb.log({'real_x': wandb.Image(gallery(batch_x.numpy() * 0.5 + 0.5)),
'fake_x': wandb.Image(gallery(fake_x.numpy() * 0.5 + 0.5)),
'cycled_x': wandb.Image(gallery(cycled_x.numpy() * 0.5 + 0.5)),
'same_x': wandb.Image(gallery(same_x.numpy() * 0.5 + 0.5)),
'real_y': wandb.Image(gallery(batch_y.numpy() * 0.5 + 0.5)),
'fake_y': wandb.Image(gallery(fake_y.numpy() * 0.5 + 0.5)),
'cycled_y': wandb.Image(gallery(cycled_y.numpy() * 0.5 + 0.5)),
'same_y': wandb.Image(gallery(same_y.numpy() * 0.5 + 0.5))}, step=step)
# Visualize a batch of validation data every epoch
generate_and_log_one_image_batch(val_data_generator, generator_g, generator_f, step)
del gen_losses, disc_losses, gen_predictions, disc_predictions, gen_gradients, disc_gradients
# End of epoch, log to weights and biases
wandb.log({'epochs': epoch + 1}, step=step)
# Checkpoint every few epochs
if (epoch + 1) % checkpoint_freq == 0:
# Store the models
generator_g.save_weights(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_generator_g.h5')
generator_f.save_weights(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_generator_f.h5')
discriminator_x.save_weights(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_discriminator_x.h5')
discriminator_y.save_weights(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_discriminator_y.h5')
# Store the optimizers
save_tf_optimizer_state(generator_optimizer,
f'{wandb.run.dir}/{checkpoint_path}/generator_optimizer_{epoch + 1}.pkl')
save_tf_optimizer_state(discriminator_optimizer,
f'{wandb.run.dir}/{checkpoint_path}/discriminator_optimizer_{epoch + 1}.pkl')
# Save to Weights and Biases
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_generator_g.h5')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_generator_f.h5')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_discriminator_x.h5')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/ckpt_{epoch + 1}_discriminator_y.h5')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/generator_optimizer_{epoch + 1}.pkl')
wandb.save(f'{wandb.run.dir}/{checkpoint_path}/discriminator_optimizer_{epoch + 1}.pkl')
return generator_g, generator_f, discriminator_x, discriminator_y
def train_step_generator(generator_g, generator_f,
discriminator_x, discriminator_y,
loss_fn,
batch_x, batch_y,
generator_optimizer,
discriminator_x_replay, discriminator_y_replay,
cycle_loss_scale_x, cycle_loss_scale_y, identity_loss_scale):
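    """Run one optimization step for both generators (G: X -> Y and F: Y -> X).

    Adversarial, cycle-consistency and identity losses are computed under a single
    GradientTape, the freshly generated fakes are pushed into the discriminator replay
    buffers, and one joint gradient update is applied to G and F.
    """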
def _train_step_generator(real_x, real_y):
with tf.GradientTape() as tape:
# Generator G translates X -> Y
# Generator F translates Y -> X.
fake_y = generator_g(real_x, training=True)
cycled_x = generator_f(fake_y, training=True)
fake_x = generator_f(real_y, training=True)
cycled_y = generator_g(fake_x, training=True)
# same_x and same_y are used for identity loss.
same_x = generator_f(real_x, training=True)
same_y = generator_g(real_y, training=True)
disc_fake_x = discriminator_x(fake_x, training=True)
disc_fake_y = discriminator_y(fake_y, training=True)
# Calculate all the losses
gen_g_loss = generator_loss(disc_fake_y, loss_fn)
gen_f_loss = generator_loss(disc_fake_x, loss_fn)
cycle_loss_x = cycle_loss(real_x, cycled_x, cycle_loss_scale_x)
cycle_loss_y = cycle_loss(real_y, cycled_y, cycle_loss_scale_y)
identity_loss_x = identity_loss(real_x, same_x, identity_loss_scale)
identity_loss_y = identity_loss(real_y, same_y, identity_loss_scale)
# Total generator loss = adversarial loss + cycle loss
total_gen_loss = gen_g_loss + gen_f_loss + cycle_loss_x + cycle_loss_y + identity_loss_x + identity_loss_y
# Update the discriminator replay buffers
discriminator_x_replay.add(fake_x)
discriminator_y_replay.add(fake_y)
# Calculate the gradients for generator and discriminator
generator_gradients = tape.gradient(total_gen_loss,
generator_g.trainable_variables + generator_f.trainable_variables)
# Apply the gradients to the optimizer
generator_optimizer.apply_gradients(zip(generator_gradients,
generator_g.trainable_variables + generator_f.trainable_variables))
del tape
return (gen_g_loss, gen_f_loss, cycle_loss_x, cycle_loss_y, identity_loss_x, identity_loss_y), \
((same_x, fake_x, cycled_x, disc_fake_x),
(same_y, fake_y, cycled_y, disc_fake_y)), generator_gradients
return _train_step_generator(batch_x, batch_y)
def train_step_discriminator(discriminator_x, discriminator_y,
loss_fn,
batch_x, batch_y,
discriminator_optimizer,
discriminator_x_replay, discriminator_y_replay,
grad_penalty, grad_penalty_scale):
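    """Run one optimization step for both discriminators.

    Real images come from the current batch while fakes are sampled from the replay
    buffers; an optional gradient penalty is added before a single joint gradient
    update to discriminator_x and discriminator_y.
    """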
def _train_step_discriminator(real_x, real_y):
# Sample fake_x and fake_y from the replay buffers
sampled_fake_x = discriminator_x_replay.get_tf_batch(real_x.shape[0])
sampled_fake_y = discriminator_y_replay.get_tf_batch(real_y.shape[0])
with tf.GradientTape() as tape:
disc_real_x = discriminator_x(real_x, training=True)
disc_real_y = discriminator_y(real_y, training=True)
disc_fake_x = discriminator_x(sampled_fake_x, training=True)
disc_fake_y = discriminator_y(sampled_fake_y, training=True)
disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x, loss_fn)
disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y, loss_fn)
disc_x_gp = gradient_penalty(functools.partial(discriminator_x, training=True),
real_x, sampled_fake_x, mode=grad_penalty, scale=grad_penalty_scale)
disc_y_gp = gradient_penalty(functools.partial(discriminator_y, training=True),
real_y, sampled_fake_y, mode=grad_penalty, scale=grad_penalty_scale)
total_disc_loss = disc_x_loss + disc_y_loss + disc_x_gp + disc_y_gp
# Calculate the gradients for generator and discriminator
discriminator_gradients = tape.gradient(total_disc_loss,
discriminator_x.trainable_variables +
discriminator_y.trainable_variables)
# Apply the gradients to the optimizer
discriminator_optimizer.apply_gradients(zip(discriminator_gradients,
discriminator_x.trainable_variables +
discriminator_y.trainable_variables))
del tape
return (disc_x_loss, disc_y_loss, disc_x_gp, disc_y_gp), \
((disc_real_x, disc_fake_x),
(disc_real_y, disc_fake_y)), discriminator_gradients
return _train_step_discriminator(batch_x, batch_y)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--config', type=str, required=True, help='Path to configuration file.')
parser.add_argument('--template', type=str, default='augmentation/configs/template_cyclegan_training.yaml')
args = parser.parse_args()
# Load up the config files
config = create_config_simple_namespace(config_path=args.config, template_config_path=args.template)
# Train the end model
train_cyclegan(config)
|
model-patching-master
|
augmentation/methods/cyclegan/train.py
|
model-patching-master
|
augmentation/autoaugment/__init__.py
|
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Transforms used in the Augmentation Policies."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import numpy as np
# pylint:disable=g-multiple-import
from PIL import ImageOps, ImageEnhance, ImageFilter, Image
import tensorflow as tf
# pylint:enable=g-multiple-import
IMAGE_SIZE = 32
# Per-channel mean and std of the images in the training set (used for (de)normalization)
MEANS = [0.49139968, 0.48215841, 0.44653091]
STDS = [0.24703223, 0.24348513, 0.26158784]
PARAMETER_MAX = 10  # Maximum 'level' that a transform can be applied with
def random_flip(x):
"""Flip the input x horizontally with 50% probability."""
if np.random.rand(1)[0] > 0.5:
return np.fliplr(x)
return x
def zero_pad_and_crop(img, amount=4):
"""Zero pad by `amount` zero pixels on each side then take a random crop.
Args:
img: numpy image that will be zero padded and cropped.
    amount: amount of zeros to pad `img` with horizontally and vertically.
Returns:
The cropped zero padded img. The returned numpy array will be of the same
shape as `img`.
"""
padded_img = np.zeros((img.shape[0] + amount * 2, img.shape[1] + amount * 2,
img.shape[2]))
padded_img[amount:img.shape[0] + amount, amount: img.shape[1] + amount, :] = img
top = np.random.randint(low=0, high=2 * amount)
left = np.random.randint(low=0, high=2 * amount)
new_img = padded_img[top:top + img.shape[0], left:left + img.shape[1], :]
return new_img
def create_cutout_mask(img_height, img_width, num_channels, size):
"""Creates a zero mask used for cutout of shape `img_height` x `img_width`.
Args:
img_height: Height of image cutout mask will be applied to.
img_width: Width of image cutout mask will be applied to.
num_channels: Number of channels in the image.
size: Size of the zeros mask.
Returns:
A mask of shape `img_height` x `img_width` with all ones except for a
square of zeros of shape `size` x `size`. This mask is meant to be
elementwise multiplied with the original image. Additionally returns
the `upper_coord` and `lower_coord` which specify where the cutout mask
will be applied.
"""
assert img_height == img_width
# Sample center where cutout mask will be applied
height_loc = np.random.randint(low=0, high=img_height)
width_loc = np.random.randint(low=0, high=img_width)
  # Determine the upper-left and lower-right corners of the patch
upper_coord = (max(0, height_loc - size // 2), max(0, width_loc - size // 2))
lower_coord = (min(img_height, height_loc + size // 2),
min(img_width, width_loc + size // 2))
mask_height = lower_coord[0] - upper_coord[0]
mask_width = lower_coord[1] - upper_coord[1]
assert mask_height > 0
assert mask_width > 0
mask = np.ones((img_height, img_width, num_channels))
zeros = np.zeros((mask_height, mask_width, num_channels))
mask[upper_coord[0]:lower_coord[0], upper_coord[1]:lower_coord[1], :] = (
zeros)
return mask, upper_coord, lower_coord
def cutout_numpy(img, size=16):
"""Apply cutout with mask of shape `size` x `size` to `img`.
The cutout operation is from the paper https://arxiv.org/abs/1708.04552.
This operation applies a `size`x`size` mask of zeros to a random location
within `img`.
Args:
img: Numpy image that cutout will be applied to.
    size: Height/width of the cutout mask that will be applied to `img`.
Returns:
A numpy tensor that is the result of applying the cutout mask to `img`.
"""
img_height, img_width, num_channels = (img.shape[0], img.shape[1],
img.shape[2])
assert len(img.shape) == 3
mask, _, _ = create_cutout_mask(img_height, img_width, num_channels, size)
return img * mask
def float_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled
to level/PARAMETER_MAX.
Returns:
A float that results from scaling `maxval` according to `level`.
"""
return float(level) * maxval / PARAMETER_MAX
def int_parameter(level, maxval):
"""Helper function to scale `val` between 0 and maxval .
Args:
level: Level of the operation that will be between [0, `PARAMETER_MAX`].
maxval: Maximum value that the operation can have. This will be scaled
to level/PARAMETER_MAX.
Returns:
An int that results from scaling `maxval` according to `level`.
"""
return int(level * maxval / PARAMETER_MAX)
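# Example (illustrative): with PARAMETER_MAX = 10, int_parameter(5, 30) == 15 and
# float_parameter(5, 0.3) returns 0.15.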
def pil_wrap(img):
"""Convert the `img` numpy tensor to a PIL Image."""
return Image.fromarray(np.uint8((img * STDS + MEANS) * 255.0)).convert('RGBA')
def pil_batch_wrap(imgs):
pil_imgs = []
for img in imgs:
pil_imgs.append(pil_wrap(img))
return pil_imgs
def pil_unwrap(pil_img):
"""Converts the PIL img to a numpy array."""
w, h = pil_img.size
pic_array = (np.array(pil_img.getdata()).reshape((w, h, 4)) / 255.0)
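  # Pixels whose alpha channel was zeroed (e.g. by the Cutout transform) are located here and
  # reset to zero after de-normalization below.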
i1, i2 = np.where(pic_array[:, :, 3] == 0)
pic_array = (pic_array[:, :, :3] - MEANS) / STDS
pic_array[i1, i2] = [0, 0, 0]
return pic_array
def pil_batch_unwrap(pil_imgs):
imgs = []
for pil_img in pil_imgs:
imgs.append(pil_unwrap(pil_img))
return np.array(imgs)
def apply_policy(policy, img):
"""Apply the `policy` to the numpy `img`.
Args:
policy: A list of tuples with the form (name, probability, level) where
`name` is the name of the augmentation operation to apply, `probability`
      is the probability of applying the operation and `level` is the strength
      with which to apply the operation.
img: Numpy image that will have `policy` applied to it.
Returns:
The result of applying `policy` to `img`.
"""
def _apply_policy(img):
img = np.array(img)
pil_img = pil_wrap(img)
for xform in policy:
assert len(xform) == 3
name, probability, level = xform
xform_fn = NAME_TO_TRANSFORM[name].pil_transformer(probability, level)
pil_img = xform_fn(pil_img)
return pil_unwrap(pil_img)
  return _apply_policy(img)  # alternatively: tf.py_function(_apply_policy, [img], tf.float32)
class TransformFunction(object):
"""Wraps the Transform function for pretty printing options."""
def __init__(self, func, name):
self.f = func
self.name = name
def __repr__(self):
return '<' + self.name + '>'
def __call__(self, pil_img):
return self.f(pil_img)
class TransformT(object):
"""Each instance of this class represents a specific transform."""
def __init__(self, name, xform_fn):
self.name = name
self.xform = xform_fn
def pil_transformer(self, probability, level):
def return_function(im):
if random.random() < probability:
im = self.xform(im, level)
return im
name = self.name + '({:.1f},{})'.format(probability, level)
return TransformFunction(return_function, name)
def do_transform(self, image, level):
f = self.pil_transformer(PARAMETER_MAX, level)
return pil_unwrap(f(pil_wrap(image)))
################## Transform Functions ##################
identity = TransformT('identity', lambda pil_img, level: pil_img)
flip_lr = TransformT(
'FlipLR',
lambda pil_img, level: pil_img.transpose(Image.FLIP_LEFT_RIGHT))
flip_ud = TransformT(
'FlipUD',
lambda pil_img, level: pil_img.transpose(Image.FLIP_TOP_BOTTOM))
# pylint:disable=g-long-lambda
auto_contrast = TransformT(
'AutoContrast',
lambda pil_img, level: ImageOps.autocontrast(
pil_img.convert('RGB')).convert('RGBA'))
equalize = TransformT(
'Equalize',
lambda pil_img, level: ImageOps.equalize(
pil_img.convert('RGB')).convert('RGBA'))
invert = TransformT(
'Invert',
lambda pil_img, level: ImageOps.invert(
pil_img.convert('RGB')).convert('RGBA'))
# pylint:enable=g-long-lambda
blur = TransformT(
'Blur', lambda pil_img, level: pil_img.filter(ImageFilter.BLUR))
smooth = TransformT(
'Smooth',
lambda pil_img, level: pil_img.filter(ImageFilter.SMOOTH))
def _rotate_impl(pil_img, level):
"""Rotates `pil_img` from -30 to 30 degrees depending on `level`."""
degrees = int_parameter(level, 30)
if random.random() > 0.5:
degrees = -degrees
return pil_img.rotate(degrees)
rotate = TransformT('Rotate', _rotate_impl)
def _posterize_impl(pil_img, level):
"""Applies PIL Posterize to `pil_img`."""
level = int_parameter(level, 4)
return ImageOps.posterize(pil_img.convert('RGB'), 4 - level).convert('RGBA')
posterize = TransformT('Posterize', _posterize_impl)
def _shear_x_impl(pil_img, level):
"""Applies PIL ShearX to `pil_img`.
The ShearX operation shears the image along the horizontal axis with `level`
magnitude.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had ShearX applied to it.
"""
level = float_parameter(level, 0.3)
if random.random() > 0.5:
level = -level
return pil_img.transform((32, 32), Image.AFFINE, (1, level, 0, 0, 1, 0))
shear_x = TransformT('ShearX', _shear_x_impl)
def _shear_y_impl(pil_img, level):
"""Applies PIL ShearY to `pil_img`.
The ShearY operation shears the image along the vertical axis with `level`
magnitude.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
    A PIL Image that has had ShearY applied to it.
"""
level = float_parameter(level, 0.3)
if random.random() > 0.5:
level = -level
return pil_img.transform((32, 32), Image.AFFINE, (1, 0, 0, level, 1, 0))
shear_y = TransformT('ShearY', _shear_y_impl)
def _translate_x_impl(pil_img, level):
"""Applies PIL TranslateX to `pil_img`.
Translate the image in the horizontal direction by `level`
number of pixels.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had TranslateX applied to it.
"""
level = int_parameter(level, 10)
if random.random() > 0.5:
level = -level
return pil_img.transform((32, 32), Image.AFFINE, (1, 0, level, 0, 1, 0))
translate_x = TransformT('TranslateX', _translate_x_impl)
def _translate_y_impl(pil_img, level):
"""Applies PIL TranslateY to `pil_img`.
Translate the image in the vertical direction by `level`
number of pixels.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had TranslateY applied to it.
"""
level = int_parameter(level, 10)
if random.random() > 0.5:
level = -level
return pil_img.transform((32, 32), Image.AFFINE, (1, 0, 0, 0, 1, level))
translate_y = TransformT('TranslateY', _translate_y_impl)
def _crop_impl(pil_img, level, interpolation=Image.BILINEAR):
"""Applies a crop to `pil_img` with the size depending on the `level`."""
cropped = pil_img.crop((level, level, IMAGE_SIZE - level, IMAGE_SIZE - level))
resized = cropped.resize((IMAGE_SIZE, IMAGE_SIZE), interpolation)
return resized
crop_bilinear = TransformT('CropBilinear', _crop_impl)
def _solarize_impl(pil_img, level):
"""Applies PIL Solarize to `pil_img`.
  Inverts all pixel values above a threshold of (256 - `level`),
  so higher levels solarize more of the image.
Args:
pil_img: Image in PIL object.
level: Strength of the operation specified as an Integer from
[0, `PARAMETER_MAX`].
Returns:
A PIL Image that has had Solarize applied to it.
"""
level = int_parameter(level, 256)
return ImageOps.solarize(pil_img.convert('RGB'), 256 - level).convert('RGBA')
solarize = TransformT('Solarize', _solarize_impl)
def _cutout_pil_impl(pil_img, level):
"""Apply cutout to pil_img at the specified level."""
size = int_parameter(level, 20)
if size <= 0:
return pil_img
img_height, img_width, num_channels = (32, 32, 3)
_, upper_coord, lower_coord = (
create_cutout_mask(img_height, img_width, num_channels, size))
pixels = pil_img.load() # create the pixel map
for i in range(upper_coord[0], lower_coord[0]): # for every col:
for j in range(upper_coord[1], lower_coord[1]): # For every row
pixels[i, j] = (125, 122, 113, 0) # set the colour accordingly
return pil_img
cutout = TransformT('Cutout', _cutout_pil_impl)
def _enhancer_impl(enhancer):
"""Sets level to be between 0.1 and 1.8 for ImageEnhance transforms of PIL."""
def impl(pil_img, level):
v = float_parameter(level, 1.8) + .1 # going to 0 just destroys it
return enhancer(pil_img).enhance(v)
return impl
color = TransformT('Color', _enhancer_impl(ImageEnhance.Color))
contrast = TransformT('Contrast', _enhancer_impl(ImageEnhance.Contrast))
brightness = TransformT('Brightness', _enhancer_impl(
ImageEnhance.Brightness))
sharpness = TransformT('Sharpness', _enhancer_impl(ImageEnhance.Sharpness))
ALL_TRANSFORMS = [
flip_lr,
flip_ud,
auto_contrast,
equalize,
invert,
rotate,
posterize,
crop_bilinear,
solarize,
color,
contrast,
brightness,
sharpness,
shear_x,
shear_y,
translate_x,
translate_y,
cutout,
blur,
smooth
]
NAME_TO_TRANSFORM = {t.name: t for t in ALL_TRANSFORMS}
TRANSFORM_NAMES = NAME_TO_TRANSFORM.keys()
|
model-patching-master
|
augmentation/autoaugment/augmentation_transforms.py
|
# Copyright 2018 The TensorFlow Authors All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
def good_policies():
"""AutoAugment policies found on Cifar."""
exp0_0 = [
[('Invert', 0.1, 7), ('Contrast', 0.2, 6)],
[('Rotate', 0.7, 2), ('TranslateX', 0.3, 9)],
[('Sharpness', 0.8, 1), ('Sharpness', 0.9, 3)],
[('ShearY', 0.5, 8), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.5, 8), ('Equalize', 0.9, 2)]]
exp0_1 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 3)],
[('TranslateY', 0.9, 9), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.9, 2), ('Solarize', 0.8, 3)],
[('Equalize', 0.8, 8), ('Invert', 0.1, 3)],
[('TranslateY', 0.7, 9), ('AutoContrast', 0.9, 1)]]
exp0_2 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.0, 2)],
[('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)],
[('AutoContrast', 0.9, 0), ('Solarize', 0.4, 3)],
[('Equalize', 0.7, 5), ('Invert', 0.1, 3)],
[('TranslateY', 0.7, 9), ('TranslateY', 0.7, 9)]]
exp0_3 = [
[('Solarize', 0.4, 5), ('AutoContrast', 0.9, 1)],
[('TranslateY', 0.8, 9), ('TranslateY', 0.9, 9)],
[('AutoContrast', 0.8, 0), ('TranslateY', 0.7, 9)],
[('TranslateY', 0.2, 7), ('Color', 0.9, 6)],
[('Equalize', 0.7, 6), ('Color', 0.4, 9)]]
exp1_0 = [
[('ShearY', 0.2, 7), ('Posterize', 0.3, 7)],
[('Color', 0.4, 3), ('Brightness', 0.6, 7)],
[('Sharpness', 0.3, 9), ('Brightness', 0.7, 9)],
[('Equalize', 0.6, 5), ('Equalize', 0.5, 1)],
[('Contrast', 0.6, 7), ('Sharpness', 0.6, 5)]]
exp1_1 = [
[('Brightness', 0.3, 7), ('AutoContrast', 0.5, 8)],
[('AutoContrast', 0.9, 4), ('AutoContrast', 0.5, 6)],
[('Solarize', 0.3, 5), ('Equalize', 0.6, 5)],
[('TranslateY', 0.2, 4), ('Sharpness', 0.3, 3)],
[('Brightness', 0.0, 8), ('Color', 0.8, 8)]]
exp1_2 = [
[('Solarize', 0.2, 6), ('Color', 0.8, 6)],
[('Solarize', 0.2, 6), ('AutoContrast', 0.8, 1)],
[('Solarize', 0.4, 1), ('Equalize', 0.6, 5)],
[('Brightness', 0.0, 0), ('Solarize', 0.5, 2)],
[('AutoContrast', 0.9, 5), ('Brightness', 0.5, 3)]]
exp1_3 = [
[('Contrast', 0.7, 5), ('Brightness', 0.0, 2)],
[('Solarize', 0.2, 8), ('Solarize', 0.1, 5)],
[('Contrast', 0.5, 1), ('TranslateY', 0.2, 9)],
[('AutoContrast', 0.6, 5), ('TranslateY', 0.0, 9)],
[('AutoContrast', 0.9, 4), ('Equalize', 0.8, 4)]]
exp1_4 = [
[('Brightness', 0.0, 7), ('Equalize', 0.4, 7)],
[('Solarize', 0.2, 5), ('Equalize', 0.7, 5)],
[('Equalize', 0.6, 8), ('Color', 0.6, 2)],
[('Color', 0.3, 7), ('Color', 0.2, 4)],
[('AutoContrast', 0.5, 2), ('Solarize', 0.7, 2)]]
exp1_5 = [
[('AutoContrast', 0.2, 0), ('Equalize', 0.1, 0)],
[('ShearY', 0.6, 5), ('Equalize', 0.6, 5)],
[('Brightness', 0.9, 3), ('AutoContrast', 0.4, 1)],
[('Equalize', 0.8, 8), ('Equalize', 0.7, 7)],
[('Equalize', 0.7, 7), ('Solarize', 0.5, 0)]]
exp1_6 = [
[('Equalize', 0.8, 4), ('TranslateY', 0.8, 9)],
[('TranslateY', 0.8, 9), ('TranslateY', 0.6, 9)],
[('TranslateY', 0.9, 0), ('TranslateY', 0.5, 9)],
[('AutoContrast', 0.5, 3), ('Solarize', 0.3, 4)],
[('Solarize', 0.5, 3), ('Equalize', 0.4, 4)]]
exp2_0 = [
[('Color', 0.7, 7), ('TranslateX', 0.5, 8)],
[('Equalize', 0.3, 7), ('AutoContrast', 0.4, 8)],
[('TranslateY', 0.4, 3), ('Sharpness', 0.2, 6)],
[('Brightness', 0.9, 6), ('Color', 0.2, 8)],
[('Solarize', 0.5, 2), ('Invert', 0.0, 3)]]
exp2_1 = [
[('AutoContrast', 0.1, 5), ('Brightness', 0.0, 0)],
[('Cutout', 0.2, 4), ('Equalize', 0.1, 1)],
[('Equalize', 0.7, 7), ('AutoContrast', 0.6, 4)],
[('Color', 0.1, 8), ('ShearY', 0.2, 3)],
[('ShearY', 0.4, 2), ('Rotate', 0.7, 0)]]
exp2_2 = [
[('ShearY', 0.1, 3), ('AutoContrast', 0.9, 5)],
[('TranslateY', 0.3, 6), ('Cutout', 0.3, 3)],
[('Equalize', 0.5, 0), ('Solarize', 0.6, 6)],
[('AutoContrast', 0.3, 5), ('Rotate', 0.2, 7)],
[('Equalize', 0.8, 2), ('Invert', 0.4, 0)]]
exp2_3 = [
[('Equalize', 0.9, 5), ('Color', 0.7, 0)],
[('Equalize', 0.1, 1), ('ShearY', 0.1, 3)],
[('AutoContrast', 0.7, 3), ('Equalize', 0.7, 0)],
[('Brightness', 0.5, 1), ('Contrast', 0.1, 7)],
[('Contrast', 0.1, 4), ('Solarize', 0.6, 5)]]
exp2_4 = [
[('Solarize', 0.2, 3), ('ShearX', 0.0, 0)],
[('TranslateX', 0.3, 0), ('TranslateX', 0.6, 0)],
[('Equalize', 0.5, 9), ('TranslateY', 0.6, 7)],
[('ShearX', 0.1, 0), ('Sharpness', 0.5, 1)],
[('Equalize', 0.8, 6), ('Invert', 0.3, 6)]]
exp2_5 = [
[('AutoContrast', 0.3, 9), ('Cutout', 0.5, 3)],
[('ShearX', 0.4, 4), ('AutoContrast', 0.9, 2)],
[('ShearX', 0.0, 3), ('Posterize', 0.0, 3)],
[('Solarize', 0.4, 3), ('Color', 0.2, 4)],
[('Equalize', 0.1, 4), ('Equalize', 0.7, 6)]]
exp2_6 = [
[('Equalize', 0.3, 8), ('AutoContrast', 0.4, 3)],
[('Solarize', 0.6, 4), ('AutoContrast', 0.7, 6)],
[('AutoContrast', 0.2, 9), ('Brightness', 0.4, 8)],
[('Equalize', 0.1, 0), ('Equalize', 0.0, 6)],
[('Equalize', 0.8, 4), ('Equalize', 0.0, 4)]]
exp2_7 = [
[('Equalize', 0.5, 5), ('AutoContrast', 0.1, 2)],
[('Solarize', 0.5, 5), ('AutoContrast', 0.9, 5)],
[('AutoContrast', 0.6, 1), ('AutoContrast', 0.7, 8)],
[('Equalize', 0.2, 0), ('AutoContrast', 0.1, 2)],
[('Equalize', 0.6, 9), ('Equalize', 0.4, 4)]]
exp0s = exp0_0 + exp0_1 + exp0_2 + exp0_3
exp1s = exp1_0 + exp1_1 + exp1_2 + exp1_3 + exp1_4 + exp1_5 + exp1_6
exp2s = exp2_0 + exp2_1 + exp2_2 + exp2_3 + exp2_4 + exp2_5 + exp2_6 + exp2_7
return exp0s + exp1s + exp2s
|
model-patching-master
|
augmentation/autoaugment/policies.py
|
from augmentation.dataflows.utils import create_parallel_dataflow_via_numpy, create_direct_dataflow
import augmentation.augment.static
import tensorflow as tf
import tensorflow_datasets as tfds
import numpy as np
import matplotlib.pyplot as plt
from multiprocessing import cpu_count
from types import SimpleNamespace
import datetime
import augmentation.datasets.custom.mnist
import augmentation.datasets.custom.mnist_correlation
import augmentation.datasets.custom.waterbirds
import augmentation.datasets.custom.celeba_128
def dataset_len(tf_dataset, verbose=False):
"""
Compute the length of a TF dataset.
"""
tot = 0
for data, _ in tf_dataset.batch(128):
tot += data.shape[0]
if verbose: print('.', end='')
if verbose: print(flush=True)
return tot
def get_dataset_from_list_files_dataset(list_files_dataset, proc_batch, tfrecord_example_reader, sequential=False):
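    """Build a decoded tf.data.Dataset from a dataset of TFRecord file paths.

    The TFRecord shards are read in parallel (unless `sequential` is set), batched into
    chunks of `proc_batch` raw examples, and decoded with `tfrecord_example_reader`.
    """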
num_parallel_reads = cpu_count()
if sequential:
num_parallel_reads = 1
# Load up the TFRecord dataset with parallel reads: this will interleave unless sequential is set to True
dataset = tf.data.TFRecordDataset(list_files_dataset, num_parallel_reads=num_parallel_reads)
# Batch up the TFRecord examples
dataset = dataset.batch(proc_batch)
# Decode the examples in parallel for batches: this produces a deterministic ordering always
dataset = dataset.map(tfrecord_example_reader, num_parallel_calls=cpu_count())
return dataset
def apply_modifier_to_dataset(dataset, dataset_payload, modifier, modifier_args):
"""
Apply a modifier to a tf.data.Dataset.
"""
if modifier == 'class':
# Filter out only the examples that belong to the given class label.
class_label = int(modifier_args)
dataset = dataset.filter(lambda image, label: label == class_label)
elif modifier == 'shuffle':
# Shuffle the dataset
buffer_size, seed = [int(e) for e in modifier_args.split(",")]
dataset = dataset.shuffle(buffer_size=buffer_size, seed=seed)
elif modifier == 'take':
# Take a few examples from the dataset
n_examples = int(modifier_args)
dataset = dataset.take(n_examples)
elif modifier == 'repeat':
# Repeat the dataset a few times
n_repeats = int(modifier_args)
dataset = dataset.repeat(n_repeats)
elif modifier == 'noval':
assert dataset_payload is not None, 'dataset_payload must be a namespace with train_dataset and val_dataset.'
# Put the validation dataset into the training data and set the validation dataset to the test data
dataset_payload.train_dataset = dataset_payload.train_dataset.concatenate(dataset_payload.val_dataset)
dataset_payload.val_dataset = dataset_payload.test_dataset
elif modifier == '':
# Apply no modifier, return as is
pass
else:
raise NotImplementedError
return dataset, dataset_payload
def apply_modifier_command_to_dataset(dataset, dataset_payload, modifier_command):
"""
A modifier command is represented as a string
<modifier_1>[:modifier_1_args]/<modifier_2>[:modifier_2_args]/...
and is applied to a tf.data.Dataset.
"""
# Split up the command to grab the list of modifiers
list_of_modifiers = modifier_command.split("/")
# Apply the modifiers in sequence
for modifier in list_of_modifiers:
try:
modifier_, modifier_args = modifier.split(":")
except ValueError:
# If there's no argument, set it to None
modifier_, modifier_args = modifier.split(":")[0], None
# Apply the modifier
dataset, dataset_payload = apply_modifier_to_dataset(dataset, dataset_payload, modifier_, modifier_args)
return dataset, dataset_payload
def apply_modifier_to_dataset_payload(dataset_payload, train_dataset_modifier, eval_dataset_modifier=None):
"""
Take a dataset_payload namespace that contains train_dataset, val_dataset and test_dataset (all tf.data.Datasets)
and applies modifiers to each dataset.
"""
# Apply the modifier commands to each of the datasets in the dataset payload
dataset_payload.train_dataset, dataset_payload = apply_modifier_command_to_dataset(dataset_payload.train_dataset,
dataset_payload,
train_dataset_modifier)
# If we didn't specify an eval_dataset_modifier, just use ''
eval_dataset_modifier = '' if eval_dataset_modifier is None else eval_dataset_modifier
dataset_payload.val_dataset, dataset_payload = apply_modifier_command_to_dataset(dataset_payload.val_dataset,
dataset_payload,
eval_dataset_modifier)
dataset_payload.test_dataset, dataset_payload = apply_modifier_command_to_dataset(dataset_payload.test_dataset,
dataset_payload,
eval_dataset_modifier)
return dataset_payload
def get_processed_dataset_info(dataset_info, validation_frac, batch_size):
n_classes = dataset_info.features['label'].num_classes
try:
classes = dataset_info.classes
    except Exception:  # not all datasets define a 'classes' attribute
classes = [f'Class {i}' for i in range(n_classes)]
try:
n_domains = dataset_info.features['label'].num_domains
domains = dataset_info.domains
    except Exception:  # not all datasets define domains
n_domains = 0
domains = []
input_shape = dataset_info.features['image'].shape
try:
n_training_examples = int(dataset_info.splits['train'].num_examples * (1 - validation_frac))
    except Exception:
# Sometimes the train split isn't exactly called 'train' for a dataset
n_training_examples = 0
n_batches = int(np.ceil(n_training_examples / float(batch_size)))
return SimpleNamespace(n_classes=n_classes,
classes=classes,
n_domains=n_domains,
domains=domains,
input_shape=input_shape,
n_training_examples=n_training_examples,
n_batches=n_batches)
def load_dataset_using_tfds(dataset_name, dataset_version, data_dir) -> tfds.core.DatasetBuilder:
"""
Load up a dataset using Tensorflow Datasets.
:param dataset_name: Name of the dataset, e.g. 'cifar10'
:param dataset_version: Dataset version, e.g. '3.*.*'
:param data_dir: Path to where data should be downloaded
:return: dataset builder of type tfds.core.DatasetBuilder
"""
# Use tensorflow datasets to load up the dataset
dataset_builder = tfds.builder(name=f'{dataset_name}:{dataset_version}', data_dir=data_dir)
dataset_builder.download_and_prepare(download_dir=data_dir + '/downloads')
return dataset_builder
def show_dataset_info(dataset_builder, plot=False):
"""
Get information about the dataset using the tfds.core.DatasetBuilder.
"""
# Get information about the dataset
dataset_info = dataset_builder.info
if plot:
# Grab 9 examples from the training data and display them
dataset = dataset_builder.as_dataset(split='train[:9]')
viz = tfds.show_examples(dataset_info, dataset)
plt.show(viz)
return dataset_info
def generate_dataset_split(validation_frac, cross_validation=False, fold=None):
"""
Generate splits conforming to the tf.data split API.
"""
    # Round to avoid floating-point surprises (e.g. 100 * 0.35 == 34.999...)
    assert round(100 * validation_frac) % 5 == 0, 'Use validation fractions that are multiples of 0.05!'
    assert 0 <= validation_frac <= 1., 'Validation fractions must be in [0, 1].'
    # Convert the validation fraction to an integer percentage in [0, 100]
    validation_frac = int(round(validation_frac * 100))
if not cross_validation:
if validation_frac > 0.:
# Simply grab the last x% of the training data as the validation set
split = [f'train[:{100 - validation_frac}%]',
f'train[{100 - validation_frac}%:]',
'test']
else:
# We're training on the full dataset, monitor performance on the test dataset
split = ['train',
'test',
'test']
else:
# Check if the fold is correctly specified
max_folds = 100 // validation_frac
assert 0 <= fold < max_folds, 'Cross-validation fold is out of range.'
# Find the location of the fold in the training data and slice out the validation data
fold_loc = fold * validation_frac
split = [f'train[:{fold_loc}%]+train[{fold_loc + validation_frac}%:]',
f'train[{fold_loc}%:{fold_loc + validation_frac}%]',
'test']
assert len(split) == 3, 'Split must contain descriptions of the train, validation and test datasets.'
return split
CUSTOM_DATASET_PREFIXES = ['mnist_spurious',
'mnist_combined',
'mnist_correlation',
'celeb_a',
'waterbirds',
]
def load_dataset(dataset_name, dataset_version, data_dir, validation_frac, cross_validation=False, fold=None):
"""
The main entry point to load any dataset.
"""
# For a custom dataset, call the custom dataset loader
if np.any([dataset_name.startswith(e) for e in CUSTOM_DATASET_PREFIXES]):
assert cross_validation is False, 'Cross-validation is not supported for the custom datasets.'
return load_custom_dataset(dataset_name, dataset_version, data_dir, validation_frac)
# Set up the dataset
dataset_builder = load_dataset_using_tfds(dataset_name=dataset_name,
dataset_version=dataset_version,
data_dir=data_dir)
# Get some dataset information
dataset_info = show_dataset_info(dataset_builder=dataset_builder, plot=False)
# Generate the dataset
dataset_split = generate_dataset_split(validation_frac, cross_validation, fold)
# Put the dataset into memory and return a training, validation and test dataset
train_dataset, val_dataset, test_dataset = dataset_builder.as_dataset(split=dataset_split, as_supervised=True)
return SimpleNamespace(dataset_builder=dataset_builder,
dataset_info=dataset_info,
dataset_split=dataset_split,
train_dataset=train_dataset,
val_dataset=val_dataset,
test_dataset=test_dataset)
def decode_raw_image(raw_bytes):
return tf.image.decode_jpeg(raw_bytes, channels=3)
def fetch_datasets_for_trainer(dataset,
dataset_version,
# train_dataset_modifier,
# eval_dataset_modifier,
datadir,
validation_frac,
batch_size,
cross_validation=False,
fold=None):
# Load the dataset payload
dataset_payload = load_dataset(dataset, dataset_version, datadir, validation_frac, cross_validation, fold)
# Apply modifiers on the datasets
# dataset_payload = apply_modifier_to_dataset_payload(dataset_payload, train_dataset_modifier, eval_dataset_modifier)
# Get some dataset information
proc_dataset_info = get_processed_dataset_info(dataset_payload.dataset_info, validation_frac, batch_size)
return (dataset_payload.train_dataset, dataset_payload.val_dataset, dataset_payload.test_dataset), \
(proc_dataset_info.input_shape, proc_dataset_info.n_classes,
proc_dataset_info.classes, proc_dataset_info.n_training_examples)
def fetch_list_of_datasets(datasets,
dataset_versions,
datadirs,
validation_frac,
batch_size,
cross_validation=False,
fold=None):
dataset_splits, training_examples_by_dataset = [], []
input_shape, n_classes, classes = None, None, None
# Loop over all the datasets
for dataset, dataset_version, datadir in zip(datasets, dataset_versions, datadirs):
# Fetch the dataset
print(f"Fetching dataset {dataset} from {datadir}.")
splits, (input_shape_, n_classes_, classes_, n_training_examples_) \
= fetch_datasets_for_trainer(dataset,
dataset_version,
datadir,
validation_frac,
batch_size,
cross_validation,
fold)
dataset_splits.append(splits)
if input_shape is None:
input_shape, n_classes, classes = input_shape_, n_classes_, classes_
else:
# All the datasets should have the same schema
assert input_shape == input_shape_ and n_classes == n_classes_ and classes == classes_
# Update the n_training_examples
training_examples_by_dataset.append(n_training_examples_)
return dataset_splits, training_examples_by_dataset, input_shape, n_classes, classes
def fetch_list_of_train_datasets(train_datasets,
train_dataset_versions,
train_datadirs,
validation_frac,
batch_size,
cross_validation=False,
fold=None):
# Fetch the list of training datasets
dataset_splits, training_examples_by_dataset, input_shape, n_classes, classes = \
fetch_list_of_datasets(datasets=train_datasets,
dataset_versions=train_dataset_versions,
datadirs=train_datadirs,
validation_frac=validation_frac,
batch_size=batch_size,
cross_validation=cross_validation,
fold=fold)
# Grab the train datasets
train_datasets, _, _ = zip(*dataset_splits)
# Total number of training examples
n_training_examples = np.sum(training_examples_by_dataset)
return train_datasets, \
(training_examples_by_dataset, n_training_examples, input_shape, n_classes, classes)
def fetch_list_of_eval_datasets(eval_datasets,
eval_dataset_versions,
eval_datadirs,
validation_frac,
batch_size,
cross_validation=False,
fold=None):
    # Fetch the list of evaluation datasets
dataset_splits, _, input_shape, n_classes, classes = \
fetch_list_of_datasets(datasets=eval_datasets,
dataset_versions=eval_dataset_versions,
datadirs=eval_datadirs,
validation_frac=validation_frac,
batch_size=batch_size,
cross_validation=cross_validation,
fold=fold)
    # Grab the validation and test datasets
_, val_datasets, test_datasets = zip(*dataset_splits)
return (val_datasets, test_datasets), \
(input_shape, n_classes, classes)
def fetch_list_of_data_generators_for_trainer(train_dataset_names,
train_dataset_versions,
train_datadirs,
train_dataset_aliases,
eval_dataset_names,
eval_dataset_versions,
eval_datadirs,
eval_dataset_aliases,
# train_dataset_modifier,
# eval_dataset_modifier,
train_augmentations, train_gpu_augmentations, train_static_augmentations,
eval_augmentations, eval_gpu_augmentations, eval_static_augmentations,
cache_dir,
validation_frac,
batch_size,
dataflow,
max_shuffle_buffer=0,
train_shuffle_seeds=None,
repeat=False,
shuffle_before_repeat=False,
cross_validation=False,
fold=None):
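    """Build per-dataset train/validation/test data generators for the trainer.

    Loads every named training and evaluation dataset, applies any static augmentation
    pipelines (which may split a dataset into several variants), wires up the on-the-fly
    CPU/GPU augmentations, and returns the generators along with schema information and
    the dataset aliases.
    """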
# Fetch the list of training datasets
print("Fetching training datasets.", flush=True)
train_datasets, (training_examples_by_dataset, n_training_examples,
train_input_shape, train_n_classes, train_classes) = \
fetch_list_of_train_datasets(train_datasets=train_dataset_names,
train_dataset_versions=train_dataset_versions,
train_datadirs=train_datadirs,
validation_frac=validation_frac,
batch_size=batch_size,
cross_validation=cross_validation,
fold=fold)
# Fetch the list of evaluation datasets
print("Fetching evaluation datasets.", flush=True)
(val_datasets, test_datasets), (eval_input_shape, eval_n_classes, eval_classes) = \
fetch_list_of_eval_datasets(eval_datasets=eval_dataset_names,
eval_dataset_versions=eval_dataset_versions,
eval_datadirs=eval_datadirs,
validation_frac=validation_frac,
batch_size=batch_size,
cross_validation=cross_validation,
fold=fold)
assert train_input_shape == eval_input_shape and train_n_classes == eval_n_classes \
and train_classes == eval_classes, \
'Train and eval sets must have the same schema (input_shape, n_classes, classes).'
assert train_shuffle_seeds is None or len(train_shuffle_seeds) == len(train_datasets), \
'Either set train_shuffle_seeds to None or specify one seed per training dataset.'
print("Applying static augmentations.", flush=True)
train_dataset_identifiers = [f'[{name}].[{version}].train.{validation_frac:.3f}' for name, version in
zip(train_dataset_names, train_dataset_versions)]
val_dataset_identifiers = [f'[{name}].[{version}].val.{validation_frac:.3f}' for name, version in
zip(eval_dataset_names, eval_dataset_versions)]
test_dataset_identifiers = [f'[{name}].[{version}].test' for name, version in
zip(eval_dataset_names, eval_dataset_versions)]
train_datasets, train_dataset_aliases, training_examples_by_dataset, train_batch_sizes, train_original_idx = \
augmentation.augment.static.compose_static_augmentations(
static_augmentation_pipelines=train_static_augmentations,
datasets=train_datasets,
aliases=train_dataset_aliases,
identifiers=train_dataset_identifiers,
dataset_lens=training_examples_by_dataset,
batch_sizes=augmentation.augment.static.split_batch_size(batch_size, len(train_datasets)),
keep_datasets=False)
val_datasets, val_dataset_aliases, _, val_batch_sizes, val_original_idx = \
augmentation.augment.static.compose_static_augmentations(
static_augmentation_pipelines=eval_static_augmentations,
datasets=val_datasets,
aliases=eval_dataset_aliases,
identifiers=val_dataset_identifiers,
dataset_lens=[0] * len(val_datasets),
batch_sizes=[batch_size] * len(val_datasets),
keep_datasets=True)
test_datasets, test_dataset_aliases, _, test_batch_sizes, test_original_idx = \
augmentation.augment.static.compose_static_augmentations(
static_augmentation_pipelines=eval_static_augmentations,
datasets=test_datasets,
aliases=eval_dataset_aliases,
identifiers=test_dataset_identifiers,
dataset_lens=[0] * len(test_datasets),
batch_sizes=[batch_size] * len(test_datasets),
keep_datasets=True)
# TODO: fix this assert with np.all
assert val_dataset_aliases == test_dataset_aliases and val_batch_sizes == test_batch_sizes and \
val_original_idx == test_original_idx, \
'Currently, evaluation datasets must have the same aliases, batch_sizes and variants.'
# Make sure augmentations on the fly are applied to the appropriate datasets
train_augmentations = [train_augmentations[i] for i in train_original_idx]
train_gpu_augmentations = [train_gpu_augmentations[i] for i in train_original_idx]
val_augmentations = [eval_augmentations[i] for i in val_original_idx]
val_gpu_augmentations = [eval_gpu_augmentations[i] for i in val_original_idx]
test_augmentations = [eval_augmentations[i] for i in test_original_idx]
test_gpu_augmentations = [eval_gpu_augmentations[i] for i in test_original_idx]
print("Diagnostics...")
print("--------------------------------------------------------------")
print("Train datasets:", train_datasets)
print("Train dataset aliases:", train_dataset_aliases)
print("Train examples per group:", training_examples_by_dataset)
print("Train batch sizes:", train_batch_sizes)
print("Train original indices per new dataset:", train_original_idx)
print("--------------------------------------------------------------")
print("Val datasets:", val_datasets)
print("Val dataset aliases:", val_dataset_aliases)
print("Val batch sizes:", val_batch_sizes)
print("Val original indices per new dataset:", val_original_idx)
print("--------------------------------------------------------------")
print("Test datasets:", test_datasets)
print("Test dataset aliases:", test_dataset_aliases)
print("Test batch sizes:", test_batch_sizes)
print("Test original indices per new dataset:", test_original_idx)
print("--------------------------------------------------------------", flush=True)
# Create the generators
if max_shuffle_buffer < 0:
train_shuffle_buffers = training_examples_by_dataset
else:
        # Cap each per-dataset shuffle buffer at max_shuffle_buffer (element-wise minimum;
        # calling min() on the two lists would compare them lexicographically instead)
        train_shuffle_buffers = [min(n, max_shuffle_buffer) for n in training_examples_by_dataset]
if train_shuffle_seeds is None:
train_shuffle_seeds = train_original_idx
else:
train_shuffle_seeds = [train_shuffle_seeds[i] for i in train_original_idx]
train_generators = create_multiple_data_generators(datasets=train_datasets,
dataset_aliases=train_dataset_aliases,
augmentations_by_dataset=train_augmentations,
gpu_augmentations_by_dataset=train_gpu_augmentations,
label_augmentations_by_dataset=[()] * len(train_dataset_aliases),
batch_sizes=train_batch_sizes,
shuffle_buffers=train_shuffle_buffers,
shuffle_seeds=train_shuffle_seeds,
dataflow=dataflow,
repeat=repeat,
shuffle_before_repeat=shuffle_before_repeat,
cache_dir=cache_dir,
cache_dir_postfix='__train')
val_generators = create_multiple_data_generators(datasets=val_datasets,
dataset_aliases=val_dataset_aliases,
augmentations_by_dataset=val_augmentations,
gpu_augmentations_by_dataset=val_gpu_augmentations,
label_augmentations_by_dataset=[()] * len(val_dataset_aliases),
batch_sizes=val_batch_sizes,
shuffle_buffers=[0] * len(val_dataset_aliases),
shuffle_seeds=[0] * len(val_dataset_aliases),
dataflow=dataflow,
repeat=False,
shuffle_before_repeat=shuffle_before_repeat,
cache_dir=cache_dir,
cache_dir_postfix='__val')
test_generators = create_multiple_data_generators(datasets=test_datasets,
dataset_aliases=test_dataset_aliases,
augmentations_by_dataset=test_augmentations,
gpu_augmentations_by_dataset=test_gpu_augmentations,
label_augmentations_by_dataset=[()] * len(test_dataset_aliases),
batch_sizes=test_batch_sizes,
shuffle_buffers=[0] * len(test_dataset_aliases),
shuffle_seeds=[0] * len(test_dataset_aliases),
dataflow=dataflow,
repeat=False,
shuffle_before_repeat=shuffle_before_repeat,
cache_dir=cache_dir,
cache_dir_postfix='__test')
print("Generator lengths:", len(train_generators), len(val_generators), len(test_generators))
print("Done with creating generators for train and eval.", flush=True)
return (train_generators, val_generators, test_generators), \
(train_input_shape, train_n_classes, train_classes, n_training_examples, training_examples_by_dataset), \
(train_dataset_aliases, val_dataset_aliases, test_dataset_aliases)
def create_data_generator(dataset,
augmentations,
gpu_augmentations,
label_augmentations,
batch_size,
dataflow,
repeat=False,
shuffle_buffer=0,
shuffle_seed=0,
shuffle_before_repeat=False,
cache_dir=None,
cache_dir_postfix=''):
"""Given a single tf_dataset, construct a generator that applies augmentations to that dataset."""
if dataflow == 'in_memory':
generator = create_parallel_dataflow_via_numpy(tf_dataset=dataset,
batch_size=batch_size,
augmentations=augmentations,
gpu_augmentations=gpu_augmentations)
elif dataflow == 'disk_cached':
assert cache_dir is not None, 'You must specify a cache directory when using disk_cached.'
cache_dir = cache_dir + '_' + datetime.datetime.now().strftime('%d_%m_%y__%H_%M_%S') + cache_dir_postfix
# Cache the dataset first
dataset = dataset.cache(cache_dir)
        # Iterate over the dataset once here to build the cache up front; testing the dataflow
        # without doing so would create concurrent caching iterators (leading to a TF error).
dataset_len(dataset, verbose=True)
if shuffle_before_repeat:
if shuffle_buffer > 0:
dataset = dataset.shuffle(shuffle_buffer, seed=shuffle_seed)
if repeat:
dataset = dataset.repeat(-1)
else:
if repeat:
dataset = dataset.repeat(-1)
if shuffle_buffer > 0:
dataset = dataset.shuffle(shuffle_buffer, seed=shuffle_seed)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
try:
# Unbatch it
dataset = dataset.unbatch()
except ValueError:
pass
generator = create_direct_dataflow(tf_dataset=dataset,
batch_size=batch_size,
augmentations=augmentations,
gpu_augmentations=gpu_augmentations,
label_augmentations=label_augmentations,
test_flow=False, # (not repeat),
)
else:
raise NotImplementedError
return generator
def create_multiple_data_generators(datasets,
dataset_aliases,
augmentations_by_dataset,
gpu_augmentations_by_dataset,
label_augmentations_by_dataset,
batch_sizes,
shuffle_buffers,
shuffle_seeds,
dataflow,
repeat=False,
shuffle_before_repeat=False,
cache_dir=None,
cache_dir_postfix=''):
"""Given a list of tf_datasets, construct a list of generators, one per dataset.
batch_sizes: list of batch sizes, one for each dataset
"""
assert len(datasets) == \
len(dataset_aliases) == \
len(augmentations_by_dataset) == \
len(gpu_augmentations_by_dataset) == \
len(label_augmentations_by_dataset) == \
len(batch_sizes) == \
len(shuffle_buffers) == \
len(shuffle_seeds), \
"All lengths passed in must be identical."
generators = []
for i, (dataset, alias, augmentations,
gpu_augmentations, label_augmentations, batch_size,
shuffle_buffer, shuffle_seed) in enumerate(zip(datasets,
dataset_aliases,
augmentations_by_dataset,
gpu_augmentations_by_dataset,
label_augmentations_by_dataset,
batch_sizes,
shuffle_buffers, shuffle_seeds)):
# Create a data generator for this dataset
print(f"Creating {alias} data generator: shuffle with {shuffle_buffer}, {shuffle_seed}")
generators.append(create_data_generator(dataset=dataset,
augmentations=augmentations,
gpu_augmentations=gpu_augmentations,
label_augmentations=label_augmentations,
batch_size=batch_size,
dataflow=dataflow,
repeat=repeat,
shuffle_buffer=shuffle_buffer,
shuffle_seed=shuffle_seed,
shuffle_before_repeat=shuffle_before_repeat,
cache_dir=cache_dir,
cache_dir_postfix=cache_dir_postfix + "_" + alias.replace("/", "")
+ str(i)))
return generators
def get_dataset_aliases(dataset_aliases, datasets):
if len(dataset_aliases) == len(datasets):
return dataset_aliases
else:
return datasets
def load_custom_dataset(dataset_name, dataset_version, data_dir, validation_frac):
"""
Load up a custom dataset.
"""
assert np.any([dataset_name.startswith(e) for e in CUSTOM_DATASET_PREFIXES]), 'Dataset specified is not supported.'
if dataset_name.startswith('mnist_spurious'):
return augmentation.datasets.custom.mnist.load_mnist_spurious(dataset_name,
dataset_version,
data_dir,
validation_frac)
elif dataset_name.startswith('mnist_combined'):
return augmentation.datasets.custom.mnist.load_mnist_combined(dataset_name,
dataset_version,
data_dir,
validation_frac)
elif dataset_name.startswith('mnist_correlation_yz_multihead'):
return augmentation.datasets.custom.mnist_correlation.load_mnist_correlation_yz_multihead(dataset_name,
dataset_version,
data_dir,
validation_frac)
elif dataset_name.startswith('mnist_correlation_yz'):
return augmentation.datasets.custom.mnist_correlation.load_mnist_correlation_yz(dataset_name,
dataset_version,
data_dir,
validation_frac)
elif dataset_name.startswith('mnist_correlation_y'):
return augmentation.datasets.custom.mnist_correlation.load_mnist_correlation_y(dataset_name,
dataset_version,
data_dir,
validation_frac)
elif dataset_name.startswith('mnist_correlation_partial'):
return augmentation.datasets.custom.mnist_correlation.load_mnist_correlation_partial(dataset_name,
dataset_version,
data_dir,
validation_frac)
elif dataset_name.startswith('mnist_correlation_multihead'):
return augmentation.datasets.custom.mnist_correlation.load_mnist_correlation_multihead(dataset_name,
dataset_version,
data_dir,
validation_frac)
elif dataset_name.startswith('mnist_correlation_'):
return augmentation.datasets.custom.mnist_correlation.load_mnist_correlation_(dataset_name,
dataset_version,
data_dir,
validation_frac)
elif dataset_name.startswith('mnist_correlation'):
return augmentation.datasets.custom.mnist_correlation.load_mnist_correlation(dataset_name,
dataset_version,
data_dir,
validation_frac)
elif dataset_name.startswith('waterbirds'):
return augmentation.datasets.custom.waterbirds.load_waterbirds(dataset_name,
dataset_version,
data_dir)
elif dataset_name.startswith('celeb_a_128'):
return augmentation.datasets.custom.celeba_128.load_celeba_128(dataset_name,
dataset_version,
data_dir)
else:
raise NotImplementedError
|
model-patching-master
|
augmentation/datasets/utils.py
|
from types import SimpleNamespace
import tensorflow as tf
import augmentation.datasets.utils
CELEBA_BASE_VARIANTS = ['5_o_Clock_Shadow',
'Arched_Eyebrows',
'Attractive',
'Bags_Under_Eyes',
'Bald',
'Bangs',
'Big_Lips',
'Big_Nose',
'Black_Hair',
'Blond_Hair',
'Blurry',
'Brown_Hair',
'Bushy_Eyebrows',
'Chubby',
'Double_Chin',
'Eyeglasses',
'Goatee',
'Gray_Hair',
'Heavy_Makeup',
'High_Cheekbones',
'Male',
'Mouth_Slightly_Open',
'Mustache',
'Narrow_Eyes',
'No_Beard',
'Oval_Face',
'Pale_Skin',
'Pointy_Nose',
'Receding_Hairline',
'Rosy_Cheeks',
'Sideburns',
'Smiling',
'Straight_Hair',
'Wavy_Hair',
'Wearing_Earrings',
'Wearing_Hat',
'Wearing_Lipstick',
'Wearing_Necklace',
'Wearing_Necktie',
'Young']
train_group_sizes = {'Blond_Hair':
{'Male':
{(0, 0): 71629, (0, 1): 66874, (1, 0): 22880, (1, 1): 1387} # 4054
}
}
val_group_sizes = {'Blond_Hair':
{'Male':
{(0, 0): 8535, (0, 1): 8276, (1, 0): 2874, (1, 1): 182}
}
}
test_group_sizes = {'Blond_Hair':
{'Male':
{(0, 0): 9767, (0, 1): 7535, (1, 0): 2480, (1, 1): 180}
}
}
def get_celeba_dataset_len(y_variant, z_variant, y_label, z_label):
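    """Return the (train, val, test) example counts for the selected CelebA group(s).

    A label of -1 means "aggregate over both values" of that attribute.
    """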
if y_label == -1:
if z_label == -1:
entries_to_sum = [(0, 0), (0, 1), (1, 0), (1, 1)]
else:
entries_to_sum = [(0, z_label), (1, z_label)]
else:
if z_label == -1:
entries_to_sum = [(y_label, 0), (y_label, 1)]
else:
entries_to_sum = [(y_label, z_label)]
return sum([train_group_sizes[y_variant][z_variant][k] for k in entries_to_sum]), \
sum([val_group_sizes[y_variant][z_variant][k] for k in entries_to_sum]), \
sum([test_group_sizes[y_variant][z_variant][k] for k in entries_to_sum])
def read_celeba_tfrecord(example, batched=True, parallelism=8):
features = {"image": tf.io.FixedLenFeature([], tf.string)}
features.update({CELEBA_BASE_VARIANTS[i]: tf.io.FixedLenFeature([], tf.int64)
for i in range(len(CELEBA_BASE_VARIANTS))})
if batched:
# Parse the TFRecord
example = tf.io.parse_example(example, features)
# Decode the image
image = tf.map_fn(augmentation.datasets.utils.decode_raw_image, example['image'],
dtype=tf.uint8, back_prop=False, parallel_iterations=parallelism)
else:
# Parse the TFRecord
example = tf.io.parse_single_example(example, features)
# Decode the image
image = augmentation.datasets.utils.decode_raw_image(example['image'])
# Get all the other tags
tags = {tag: example[tag] for tag in CELEBA_BASE_VARIANTS}
return image, tags
def get_label_selection_function(label_type):
if label_type == 'y':
# Keep only the y labels
return lambda image, y_label, z_label: (image, y_label)
elif label_type == 'z':
# Keep only the z labels
return lambda image, y_label, z_label: (image, z_label)
else:
raise NotImplementedError
def load_celeba_128(dataset_name, dataset_version, data_dir):
assert dataset_name.startswith('celeb_a_128'), \
f'Dataset name is {dataset_name}, ' \
f'should be celeb_a_128/<y_task>/<z_task>/<z_frac>/<which_y>/<which_z>/<label_type>/<optional_take_from_Y0Z0>'
# Grab the names of the variants, the fraction of labeled z's available and the label type
split_name = dataset_name.split("/")[1:]
if len(split_name) == 6:
y_variant, z_variant, z_frac, y_label, z_label, label_type = split_name
n_y0z0_examples = -1
elif len(split_name) == 7:
y_variant, z_variant, z_frac, y_label, z_label, label_type, n_y0z0_examples = split_name
n_y0z0_examples = int(n_y0z0_examples)
else:
raise NotImplementedError
z_frac, y_label, z_label = float(z_frac), int(y_label), int(z_label)
assert y_variant in CELEBA_BASE_VARIANTS, f'Dataset variant {y_variant} is not available.'
assert z_variant in CELEBA_BASE_VARIANTS, f'Dataset variant {z_variant} is not available.'
assert 0 <= z_frac <= 1, f'z_frac should be in [0, 1], not {z_frac}.'
assert y_label in [-1, 0, 1], f'y_label should be in {-1, 0, 1}, not {y_label}.'
assert z_label in [-1, 0, 1], f'z_label should be in {-1, 0, 1}, not {z_label}.'
assert label_type in ['y', 'z'], f'Label types must be either y or z, not {label_type}.'
    assert n_y0z0_examples > 0 or n_y0z0_examples == -1, \
        f'n_y0z0_examples should be a positive integer or -1, not {n_y0z0_examples}.'
    assert z_frac == 1.0, 'Only z_frac == 1.0 is currently supported.'
# Load up the list of .tfrec files for the train/val/test sets
train_dataset = tf.data.Dataset.list_files(f'{data_dir}/train/*.tfrec', shuffle=False)
val_dataset = tf.data.Dataset.list_files(f'{data_dir}/val/*.tfrec', shuffle=False)
test_dataset = tf.data.Dataset.list_files(f'{data_dir}/test/*.tfrec', shuffle=False)
# Construct the TF Dataset from the list of .tfrec files
train_dataset = augmentation.datasets.utils. \
get_dataset_from_list_files_dataset(train_dataset, proc_batch=128,
tfrecord_example_reader=read_celeba_tfrecord).unbatch()
val_dataset = augmentation.datasets.utils. \
get_dataset_from_list_files_dataset(val_dataset, proc_batch=128,
tfrecord_example_reader=read_celeba_tfrecord).unbatch()
test_dataset = augmentation.datasets.utils. \
get_dataset_from_list_files_dataset(test_dataset, proc_batch=128,
tfrecord_example_reader=read_celeba_tfrecord).unbatch()
# Map to grab the y and z labels for the attributes picked
    selection_fn = lambda image, tags: (image, tags[y_variant], tags[z_variant])  # tags are already int64 tensors
train_dataset = train_dataset.map(selection_fn, num_parallel_calls=16)
val_dataset = val_dataset.map(selection_fn, num_parallel_calls=16)
test_dataset = test_dataset.map(selection_fn, num_parallel_calls=16)
if y_label == 0 or y_label == 1:
# Keep only one of the y_labels
train_dataset = train_dataset.filter(lambda image, y, z: (y == y_label))
val_dataset = val_dataset.filter(lambda image, y, z: (y == y_label))
test_dataset = test_dataset.filter(lambda image, y, z: (y == y_label))
if z_label == 0 or z_label == 1:
# Keep only one of the z_labels
train_dataset = train_dataset.filter(lambda image, y, z: (z == z_label))
val_dataset = val_dataset.filter(lambda image, y, z: (z == z_label))
test_dataset = test_dataset.filter(lambda image, y, z: (z == z_label))
# Filter out the Y0Z0 examples and then add a subset of them back in
if n_y0z0_examples > 0:
# Take out examples from Y = 0, Z = 0
        train_dataset_y0z0 = train_dataset.filter(lambda image, y, z: tf.logical_and(y == 0, z == 0)).take(n_y0z0_examples)
        # Keep only examples from groups other than Y = 0, Z = 0
        train_dataset = train_dataset.filter(lambda image, y, z: tf.logical_or(y != 0, z != 0))
# Add the subset of Y = 0, Z = 0 examples back into the train dataset
train_dataset = train_dataset.concatenate(train_dataset_y0z0)
# Get the label selection function and apply it
label_selection_fn = get_label_selection_function(label_type)
train_dataset = train_dataset.map(label_selection_fn, num_parallel_calls=16)
val_dataset = val_dataset.map(label_selection_fn, num_parallel_calls=16)
test_dataset = test_dataset.map(label_selection_fn, num_parallel_calls=16)
# Compute the length of the training dataset
train_dataset_len, val_dataset_len, test_dataset_len = get_celeba_dataset_len(y_variant,
z_variant,
y_label,
z_label)
# Make a dataset info namespace to ensure downstream compatibility
num_classes = 2
classes = [f'Not {z_variant}', f'{z_variant}'] if label_type == 'z' else [f'Not {y_variant}', f'{y_variant}']
dataset_info = SimpleNamespace(features={'label': SimpleNamespace(num_classes=num_classes),
'image': SimpleNamespace(shape=(128, 128, 3))},
splits={'train': SimpleNamespace(num_examples=train_dataset_len),
'val': SimpleNamespace(num_examples=val_dataset_len),
'test': SimpleNamespace(num_examples=test_dataset_len)},
                                   # The two class names are ordered to match the binary labels in the dataset
classes=classes)
# Return the data sets
return SimpleNamespace(dataset_info=dataset_info,
train_dataset=train_dataset,
val_dataset=val_dataset,
test_dataset=test_dataset)
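# Editor's usage sketch, not part of the original file. The data_dir argument is illustrative
# and must contain the train/val/test .tfrec shards expected above.
def _example_load_celeba_128(data_dir):
    # celeb_a_128/<y_task>/<z_task>/<z_frac>/<which_y>/<which_z>/<label_type>:
    # full Blond_Hair (y) vs. Male (z) split, keeping every group, labeled by y
    payload = load_celeba_128('celeb_a_128/Blond_Hair/Male/1.0/-1/-1/y',
                              dataset_version='1.*.*', data_dir=data_dir)
    # train split size = 71629 + 66874 + 22880 + 1387 (see train_group_sizes above)
    assert payload.dataset_info.features['label'].num_classes == 2
    return payload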
|
model-patching-master
|
augmentation/datasets/custom/celeba_128.py
|
import tensorflow as tf
import os
import augmentation.datasets.utils
# Basic feature construction, taken from the tutorial on TFRecords
def _bytestring_feature(list_of_bytestrings):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=list_of_bytestrings))
def _int_feature(list_of_ints):
return tf.train.Feature(int64_list=tf.train.Int64List(value=list_of_ints))
def _float_feature(list_of_floats):
return tf.train.Feature(float_list=tf.train.FloatList(value=list_of_floats))
def image_label_to_tfrecord(img_bytes, label):
# Construct a TFRecord Example using an (image, label) pair
feature = {"image": _bytestring_feature([img_bytes]),
"label": _int_feature([int(label)])}
return tf.train.Example(features=tf.train.Features(feature=feature))
def read_image_label_tfrecord(example, batched=True, parallelism=8):
# Read a TFRecord Example that contains an (image, label) pair
features = {"image": tf.io.FixedLenFeature([], tf.string),
"label": tf.io.FixedLenFeature([], tf.int64)}
if batched:
# Parse the TFRecord
example = tf.io.parse_example(example, features)
# Decode the image
image = tf.map_fn(augmentation.datasets.utils.decode_raw_image, example['image'],
dtype=tf.uint8, back_prop=False, parallel_iterations=parallelism)
else:
# Parse the TFRecord
example = tf.io.parse_single_example(example, features)
# Decode the image
image = augmentation.datasets.utils.decode_raw_image(example['image'])
# Get all the other tags
label = example['label']
return image, label
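# Editor's sketch of the intended round trip, not part of the original file; the file path
# and the 8x8 dummy image are illustrative only.
def _example_image_label_roundtrip(path='/tmp/example.tfrec'):
    img_bytes = tf.io.encode_jpeg(tf.zeros((8, 8, 3), dtype=tf.uint8)).numpy()
    # Serialize one (image, label) pair ...
    with tf.io.TFRecordWriter(path) as writer:
        writer.write(image_label_to_tfrecord(img_bytes, label=3).SerializeToString())
    # ... and parse it back in unbatched mode
    dataset = tf.data.TFRecordDataset([path])
    return dataset.map(lambda example: read_image_label_tfrecord(example, batched=False))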
|
model-patching-master
|
augmentation/datasets/custom/tfrecords.py
|
from types import SimpleNamespace
import tensorflow as tf
import augmentation.datasets.utils
WATERBIRDS_CLASSES = ['landbird', 'waterbird']
WATERBIRDS_DOMAINS = ['land', 'water']
# Group Sizes
# ------------------------------------
# [y, z] = [[0, 0], [0, 1], [1, 0], [1, 1]]
#
# Training Set (split = 0)
# [3498, 184, 56, 1057]
#
# Validation Set (split = 1)
# [467, 466, 133, 133]
#
# Test Set (split = 2)
# [2255, 2255, 642, 642]
# ------------------------------------------
train_group_sizes = {(0, 0): 3498, (0, 1): 184, (1, 0): 56, (1, 1): 1057}
val_group_sizes = {(0, 0): 467, (0, 1): 466, (1, 0): 133, (1, 1): 133}
test_group_sizes = {(0, 0): 2255, (0, 1): 2255, (1, 0): 642, (1, 1): 642}
def read_waterbirds_tfrecord(example, batched=True, parallelism=8):
features = {"image": tf.io.FixedLenFeature([], tf.string),
"img_id": tf.io.FixedLenFeature([], tf.int64),
"img_filename": tf.io.FixedLenFeature([], tf.string),
"place_filename": tf.io.FixedLenFeature([], tf.string),
"y": tf.io.FixedLenFeature([], tf.int64),
"split": tf.io.FixedLenFeature([], tf.int64),
"place": tf.io.FixedLenFeature([], tf.int64)}
if batched:
# Parse the TFRecord
example = tf.io.parse_example(example, features)
# Decode the image
image = tf.map_fn(augmentation.datasets.utils.decode_raw_image, example['image'],
dtype=tf.uint8, back_prop=False, parallel_iterations=parallelism)
else:
# Parse the TFRecord
example = tf.io.parse_single_example(example, features)
# Decode the image
image = augmentation.datasets.utils.decode_raw_image(example['image'])
# Get all the other tags
img_id = example['img_id']
img_filename = example["img_filename"]
place_filename = example['place_filename']
y = example['y']
split = example['split']
place = example['place']
return image, img_id, img_filename, place_filename, y, split, place
def get_label_selection_function(label_type):
if label_type == 'y':
# Keep only the y labels
return lambda image, img_id, img_filename, place_filename, y, split, place: \
(image, y)
elif label_type == 'z':
# Keep only the z labels
return lambda image, img_id, img_filename, place_filename, y, split, place: \
(image, place)
elif label_type == 'full':
# Keep everything
return lambda image, img_id, img_filename, place_filename, y, split, place: \
(image, img_id, img_filename, place_filename, y, split, place)
else:
raise NotImplementedError
def load_base_variant(data_dir, y_label, z_label, label_type, proc_batch=128):
# Load up the list of .tfrec files for the train/val/test sets
waterbirds_dataset = tf.data.Dataset.list_files(f'{data_dir}/*.tfrec', shuffle=False)
# Construct the TF Dataset from the list of .tfrec files
waterbirds_dataset = augmentation.datasets.utils. \
get_dataset_from_list_files_dataset(waterbirds_dataset, proc_batch=proc_batch,
tfrecord_example_reader=read_waterbirds_tfrecord).unbatch()
# Split the data into train, validation and test
waterbirds_train = waterbirds_dataset.filter(lambda image, img_id, img_filename, place_filename, y, split, place:
(split == 0))
waterbirds_val = waterbirds_dataset.filter(lambda image, img_id, img_filename, place_filename, y, split, place:
(split == 1))
waterbirds_test = waterbirds_dataset.filter(lambda image, img_id, img_filename, place_filename, y, split, place:
(split == 2))
if y_label == 0 or y_label == 1:
# Keep only one of the y_labels
waterbirds_train = waterbirds_train.filter(lambda image, img_id, img_filename, place_filename, y, split, place:
(y == y_label))
waterbirds_val = waterbirds_val.filter(lambda image, img_id, img_filename, place_filename, y, split, place:
(y == y_label))
waterbirds_test = waterbirds_test.filter(lambda image, img_id, img_filename, place_filename, y, split, place:
(y == y_label))
if z_label == 0 or z_label == 1:
# Keep only one of the z_labels
waterbirds_train = waterbirds_train.filter(lambda image, img_id, img_filename, place_filename, y, split, place:
(place == z_label))
waterbirds_val = waterbirds_val.filter(lambda image, img_id, img_filename, place_filename, y, split, place:
(place == z_label))
waterbirds_test = waterbirds_test.filter(lambda image, img_id, img_filename, place_filename, y, split, place:
(place == z_label))
# Get the label selection function
label_selection_fn = get_label_selection_function(label_type)
# Apply the label selection function and cache the dataset into memory since it's quite small
# \approx 11000 * (224 * 224 * 3)/(1024 * 1024) < 2 GiB
waterbirds_train = waterbirds_train.map(label_selection_fn).cache()
waterbirds_val = waterbirds_val.map(label_selection_fn).cache()
waterbirds_test = waterbirds_test.map(label_selection_fn).cache()
return waterbirds_train, waterbirds_val, waterbirds_test
def get_waterbirds_dataset_len(y_label, z_label):
if y_label == -1:
if z_label == -1:
entries_to_sum = [(0, 0), (0, 1), (1, 0), (1, 1)]
else:
entries_to_sum = [(0, z_label), (1, z_label)]
else:
if z_label == -1:
entries_to_sum = [(y_label, 0), (y_label, 1)]
else:
entries_to_sum = [(y_label, z_label)]
return sum([train_group_sizes[k] for k in entries_to_sum]), \
sum([val_group_sizes[k] for k in entries_to_sum]), \
sum([test_group_sizes[k] for k in entries_to_sum])
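# Editor's sketch, not part of the original file: the same marginalization convention as the
# CelebA helper. y_label=1, z_label=-1 selects all waterbirds over both backgrounds.
def _example_waterbirds_len():
    assert get_waterbirds_dataset_len(1, -1) == (56 + 1057, 133 + 133, 642 + 642)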
def load_waterbirds(dataset_name, dataset_version, data_dir):
assert dataset_name.startswith(
'waterbirds'), f'Dataset name is {dataset_name}, should be waterbirds/<which_y>/<which_z>/<y or z>'
# Grab the name of the variant and label type
y_label, z_label, label_type = dataset_name.split("/")[1:]
y_label, z_label = int(y_label), int(z_label)
assert y_label in [-1, 0, 1], f'y_label should be in {-1, 0, 1}, not {y_label}.'
assert z_label in [-1, 0, 1], f'z_label should be in {-1, 0, 1}, not {z_label}.'
assert label_type in ['y', 'z', 'full'], 'Label types must be in {y, z, full}.'
if dataset_version == '1.*.*':
# Load up the basic dataset
waterbirds_train, waterbirds_val, waterbirds_test = load_base_variant(data_dir, y_label, z_label, label_type)
# Compute the lengths of the dataset
train_dataset_len, val_dataset_len, test_dataset_len = get_waterbirds_dataset_len(y_label, z_label)
# Make a dataset info namespace to ensure downstream compatibility
num_classes = 2
classes = WATERBIRDS_DOMAINS if label_type == 'z' else WATERBIRDS_CLASSES
dataset_info = SimpleNamespace(features={'label': SimpleNamespace(num_classes=num_classes),
'image': SimpleNamespace(shape=(224, 224, 3))},
splits={'train': SimpleNamespace(num_examples=train_dataset_len),
'val': SimpleNamespace(num_examples=val_dataset_len),
'test': SimpleNamespace(num_examples=test_dataset_len)},
classes=classes)
# Return the data sets
return SimpleNamespace(dataset_info=dataset_info,
train_dataset=waterbirds_train,
val_dataset=waterbirds_val,
test_dataset=waterbirds_test)
else:
raise NotImplementedError
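# Editor's usage sketch, not part of the original file. The data_dir argument is illustrative
# and must point at the directory of Waterbirds .tfrec shards.
def _example_load_waterbirds(data_dir):
    # waterbirds/<which_y>/<which_z>/<label_type>: keep every group, label by bird class
    payload = load_waterbirds('waterbirds/-1/-1/y', dataset_version='1.*.*', data_dir=data_dir)
    assert payload.dataset_info.classes == WATERBIRDS_CLASSES
    return payload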
|
model-patching-master
|
augmentation/datasets/custom/waterbirds.py
|
from types import SimpleNamespace
import augmentation.datasets.utils
from augmentation.datasets.custom.mnist import MNIST_CORRUPTED_VARIANTS
import tensorflow as tf
# TODO multihead should be specified as an option to the dataset instead of a separate one
def load_mnist_correlation_yz_multihead(dataset_name, dataset_version, data_dir, validation_frac):
"""
Dataset of the form 'mnist_correlation_yz_multihead/zigzag/{a}/{b}/{size}/{test_size}/[z]'
This loads a training set with Y=a and Z=b, of total size {size},
    where Y is the digit parity and Z indicates whether the spurious corruption is applied
If the last option "z" is included, dataset is labeled by z instead of y
"""
params = dataset_name.split("/")
assert params[0] == 'mnist_correlation_yz_multihead', f'Dataset name is {dataset_name}, ' \
f'should be mnist_correlation_yz_multihead/<variant>/<y_class>/<z_class>/<size>/<test_size>/<label>.'
variant = params[1]
y = int(params[2])
z = int(params[3])
size = int(params[4])
test_size = int(params[5])
label_var = params[6] if len(params) > 6 else 'y'
assert variant in MNIST_CORRUPTED_VARIANTS, f'Dataset variant {variant} is not available.'
assert y in [0, 1] and z in [0, 1], f'Classes Y={y} and Z={z} must be in {0, 1}.'
assert label_var == 'y' or label_var == 'z'
if z == 0:
# Load up the standard MNIST dataset
mnists_dataset_payload = augmentation.datasets.utils.load_dataset(dataset_name='mnist',
dataset_version='3.*.*',
data_dir=data_dir,
validation_frac=validation_frac)
else:
# Load up the corrupted MNIST dataset
mnists_dataset_payload = augmentation.datasets.utils.load_dataset(dataset_name=f'mnist_corrupted/{variant}',
dataset_version='1.*.*',
data_dir=data_dir,
validation_frac=validation_frac)
mnists_train = mnists_dataset_payload.train_dataset. \
filter(lambda image, label: label % 2 == y)
mnists_val = mnists_dataset_payload.val_dataset. \
filter(lambda image, label: label % 2 == y)
mnists_test = mnists_dataset_payload.test_dataset. \
filter(lambda image, label: label % 2 == y)
train_sz_ = augmentation.datasets.utils.dataset_len(mnists_train)
val_sz_ = augmentation.datasets.utils.dataset_len(mnists_val)
test_sz_ = augmentation.datasets.utils.dataset_len(mnists_test)
size = train_sz_ if size == -1 else size
test_size = test_sz_ if test_size == -1 else min(test_size, test_sz_)
assert size <= train_sz_ + val_sz_, f'Dataset size {size} for {dataset_name} should be at most {train_sz_ + val_sz_}.'
val_size = int(size * validation_frac)
if z == 0:
mnists_train = mnists_train.take(size - val_size)
mnists_val = mnists_val.take(val_size)
mnists_test = mnists_test.take(test_size)
else:
mnists_train = mnists_train.skip(train_sz_ - (size - val_size))
mnists_val = mnists_val.skip(val_sz_ - val_size)
mnists_test = mnists_test.skip(test_sz_ - test_size)
# relabel labels to 0/1
if label_var == 'y':
mnists_train = mnists_train.map(lambda image, label: (image, y))
mnists_val = mnists_val.map(lambda image, label: (image, y))
mnists_test = mnists_test.map(lambda image, label: (image, y))
if label_var == 'z':
mnists_train = mnists_train.map(lambda image, label: (image, z))
mnists_val = mnists_val.map(lambda image, label: (image, z))
mnists_test = mnists_test.map(lambda image, label: (image, z))
print(
f'{dataset_name} splits: {augmentation.datasets.utils.dataset_len(mnists_train)}, '
f'{augmentation.datasets.utils.dataset_len(mnists_val)}, {augmentation.datasets.utils.dataset_len(mnists_test)}')
# Make a dataset info namespace to ensure downstream compatibility
# num_classes = mnists_dataset_payload.dataset_info.features['label'].num_classes
num_classes = 2
shape = mnists_dataset_payload.dataset_info.features['image'].shape
num_examples = size
# Change to multihead binary classification
num_classes = 1
mnists_train = mnists_train.map(lambda x, y: (x, tf.convert_to_tensor(y)[..., None]))
mnists_val = mnists_val.map(lambda x, y: (x, tf.convert_to_tensor(y)[..., None]))
mnists_test = mnists_test.map(lambda x, y: (x, tf.convert_to_tensor(y)[..., None]))
dataset_info = SimpleNamespace(features={'label': SimpleNamespace(num_classes=num_classes),
'image': SimpleNamespace(shape=shape)},
splits={'train': SimpleNamespace(num_examples=num_examples)})
if label_var == 'y':
dataset_info.classes = ['parity']
else:
dataset_info.classes = ['corruption']
return SimpleNamespace(dataset_info=dataset_info,
train_dataset=mnists_train,
val_dataset=mnists_val,
test_dataset=mnists_test)
def load_mnist_correlation_yz(dataset_name, dataset_version, data_dir, validation_frac):
"""
Dataset of the form 'mnist_correlation_yz/zigzag/{a}/{b}/{size}/{test_size}/[z]'
This loads a training set with Y=a and Z=b, of total size {size},
    where Y is the digit parity and Z indicates whether the spurious corruption is applied
If the last option "z" is included, dataset is labeled by z instead of y
"""
params = dataset_name.split("/")
assert params[0] == 'mnist_correlation_yz', f'Dataset name is {dataset_name}, ' \
f'should be mnist_correlation_yz/<variant>/<y_class>/<z_class>/<size>/<test_size>/<label>.'
variant = params[1]
y = int(params[2])
z = int(params[3])
size = int(params[4])
test_size = int(params[5])
label_var = params[6] if len(params) > 6 else 'y'
assert variant in MNIST_CORRUPTED_VARIANTS, f'Dataset variant {variant} is not available.'
assert y in [0, 1] and z in [0, 1], f'Classes Y={y} and Z={z} must be in {0, 1}.'
assert label_var == 'y' or label_var == 'z'
if z == 0:
# Load up the standard MNIST dataset
mnists_dataset_payload = augmentation.datasets.utils.load_dataset(dataset_name='mnist',
dataset_version='3.*.*',
data_dir=data_dir,
validation_frac=validation_frac)
else:
# Load up the corrupted MNIST dataset
mnists_dataset_payload = augmentation.datasets.utils.load_dataset(dataset_name=f'mnist_corrupted/{variant}',
dataset_version='1.*.*',
data_dir=data_dir,
validation_frac=validation_frac)
mnists_train = mnists_dataset_payload.train_dataset. \
filter(lambda image, label: label % 2 == y). \
cache()
mnists_val = mnists_dataset_payload.val_dataset. \
filter(lambda image, label: label % 2 == y). \
cache()
mnists_test = mnists_dataset_payload.test_dataset. \
filter(lambda image, label: label % 2 == y). \
cache()
train_sz_ = augmentation.datasets.utils.dataset_len(mnists_train)
val_sz_ = augmentation.datasets.utils.dataset_len(mnists_val)
test_sz_ = augmentation.datasets.utils.dataset_len(mnists_test)
    size = train_sz_ + val_sz_ if size == -1 else size
test_size = test_sz_ if test_size == -1 else min(test_size, test_sz_)
assert size <= train_sz_ + val_sz_, f'Dataset size {size} for {dataset_name} should be at most {train_sz_ + val_sz_}.'
val_size = int(size * validation_frac)
if z == 0:
mnists_train = mnists_train.take(size - val_size)
mnists_val = mnists_val.take(val_size)
mnists_test = mnists_test.take(test_size)
else:
mnists_train = mnists_train.skip(train_sz_ - (size - val_size))
mnists_val = mnists_val.skip(val_sz_ - val_size)
mnists_test = mnists_test.skip(test_sz_ - test_size)
# relabel labels to 0/1
if label_var == 'y':
mnists_train = mnists_train.map(lambda image, label: (image, y))
mnists_val = mnists_val.map(lambda image, label: (image, y))
mnists_test = mnists_test.map(lambda image, label: (image, y))
if label_var == 'z':
mnists_train = mnists_train.map(lambda image, label: (image, z))
mnists_val = mnists_val.map(lambda image, label: (image, z))
mnists_test = mnists_test.map(lambda image, label: (image, z))
print(
f'{dataset_name} splits: {augmentation.datasets.utils.dataset_len(mnists_train)}, '
f'{augmentation.datasets.utils.dataset_len(mnists_val)}, {augmentation.datasets.utils.dataset_len(mnists_test)}')
# Make a dataset info namespace to ensure downstream compatibility
num_classes = 2
shape = mnists_dataset_payload.dataset_info.features['image'].shape
num_examples = size
dataset_info = SimpleNamespace(features={'label': SimpleNamespace(num_classes=num_classes),
'image': SimpleNamespace(shape=shape)},
splits={'train': SimpleNamespace(num_examples=num_examples)})
return SimpleNamespace(dataset_info=dataset_info,
train_dataset=mnists_train,
val_dataset=mnists_val,
test_dataset=mnists_test)
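# Editor's usage sketch, not part of the original file. The name encodes
# mnist_correlation_yz/<variant>/<y_class>/<z_class>/<size>/<test_size>/<label>; data_dir is
# wherever TFDS should cache MNIST / MNIST-C.
def _example_load_mnist_correlation_yz(data_dir):
    # 1000 odd digits (Y=1) with the zigzag corruption applied (Z=1), labeled by y
    return load_mnist_correlation_yz('mnist_correlation_yz/zigzag/1/1/1000/500/y',
                                     dataset_version='1.*.*', data_dir=data_dir,
                                     validation_frac=0.2)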
def load_mnist_correlation_y(dataset_name, dataset_version, data_dir, validation_frac):
"""
Dataset of the form 'mnist_correlation_y/zigzag/{a}/{p}/{size}/[z]'
This loads a training set with Y=a and p(Z=a) = p, of total size {size},
    where Y is the digit parity and Z indicates whether the spurious corruption is applied
"""
params = dataset_name.split("/")
assert params[0] == 'mnist_correlation_y', f'Dataset name is {dataset_name}, ' \
f'should be mnist_correlation_y/<variant>/<y_class>/<z_prob>/<size>/<label>.'
variant = params[1]
y = int(params[2])
p_z = float(params[3])
size = int(params[4])
label_var = params[5] if len(params) > 5 else 'y'
if size == -1: size = 30000 # TODO FIX THIS - WHY ISN'T MNIST CLASS BALANCED
assert variant in MNIST_CORRUPTED_VARIANTS, f'Dataset variant {variant} is not available.'
assert y in [0, 1], f'Class Y={y} must be in {0, 1}.'
assert 0. <= p_z <= 1., f'Probability p(Z=y)={p_z} should be in [0.0, 1.0].'
assert size <= 30000, f'Dataset size {size} should be at most 30000.'
assert label_var == 'y' or label_var == 'z'
size_z = int(size * p_z)
test_size_z = int(5000 * p_z)
dataset_z = load_mnist_correlation_yz(
f'mnist_correlation_yz/{variant}/{y}/{y}/{size_z}/{test_size_z}/{label_var}',
dataset_version, data_dir, validation_frac)
dataset_z_ = load_mnist_correlation_yz(
f'mnist_correlation_yz/{variant}/{y}/{1 - y}/{size - size_z}/{5000 - test_size_z}/{label_var}',
dataset_version, data_dir, validation_frac)
dataset_z.train_dataset = dataset_z.train_dataset.concatenate(dataset_z_.train_dataset)
dataset_z.val_dataset = dataset_z.val_dataset.concatenate(dataset_z_.val_dataset)
dataset_z.test_dataset = dataset_z.test_dataset.concatenate(dataset_z_.test_dataset)
dataset_z.dataset_info.splits['train'].num_examples += dataset_z_.dataset_info.splits['train'].num_examples
return dataset_z
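# Editor's note, not part of the original file: for 'mnist_correlation_y/zigzag/1/0.9/10000/y'
# the split above gives size_z = int(10000 * 0.9) = 9000 corrupted odd digits (Z = Y = 1) and
# 10000 - 9000 = 1000 clean odd digits (Z = 0); the 5000 test examples are divided in the same
# proportion, int(5000 * 0.9) = 4500 vs. 500.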
def load_mnist_correlation_partial(dataset_name, dataset_version, data_dir, validation_frac):
"""
Dataset of the form 'mnist_correlation_partial/{variant}/{z}/{size}/[label_var]'.
Creates a balanced dataset with Pr(Y = 1) = Pr(Y = 0) = 1/2 and Pr(Z = z) = 1.
Use this for training CycleGANs:
E.g. for 'mnist_correlation/zigzag/p/size/y' as your main dataset
you can create source and target datasets using
'mnist_correlation_partial/zigzag/0/some_size/y' and
'mnist_correlation_partial/zigzag/1/some_size/y'
"""
params = dataset_name.split("/")
assert params[0] == 'mnist_correlation_partial', \
f'Dataset name is {dataset_name}, should be mnist_correlation_partial/<variant>/<z_class>/<size>/<label>.'
variant = params[1]
z = int(params[2])
size = int(params[3])
label_var = params[4] if len(params) > 4 else 'y'
size = 30000 if size == -1 else size
assert variant in MNIST_CORRUPTED_VARIANTS, f'Dataset variant {variant} is not available.'
    assert z == 1 or z == 0, f'z should be 0 or 1, not {z}.'
    assert size <= 30000, f'Dataset size {size} should be at most 30000.'
    assert size % 2 == 0, f'Dataset size {size} should be even.'
assert label_var == 'y' or label_var == 'z'
dataset_evens = load_mnist_correlation_yz(
f'mnist_correlation_yz/{variant}/{0}/{z}/{size // 2}/{5000}/{label_var}',
dataset_version, data_dir, validation_frac)
dataset_odds = load_mnist_correlation_yz(
f'mnist_correlation_yz/{variant}/{1}/{z}/{size // 2}/{5000}/{label_var}',
dataset_version, data_dir, validation_frac)
dataset_evens.train_dataset = dataset_evens.train_dataset.concatenate(dataset_odds.train_dataset)
dataset_evens.val_dataset = dataset_evens.val_dataset.concatenate(dataset_odds.val_dataset)
dataset_evens.test_dataset = dataset_evens.test_dataset.concatenate(dataset_odds.test_dataset)
dataset_evens.dataset_info.splits['train'].num_examples += dataset_odds.dataset_info.splits['train'].num_examples
return dataset_evens
def load_mnist_correlation(dataset_name, dataset_version, data_dir, validation_frac):
"""
Dataset of the form 'mnist_correlation/{variant}/{p}/{size}/[label_var]'
This loads a training+val set of total size ~{size}, where the spurious feature {variant} and digit parity are correlated
More precisely:
- Y=parity and Z=variant are marginally balanced [p(Y=0) = p(Y=1) = P(Z=0) = P(Z=1) = 1/2]
- P(Y=a | Z=a) = P(Z=a | Y=a) = p
- Alternatively, Y and Z are correlated with strength (2p-1)
"""
params = dataset_name.split("/")
assert params[0] == 'mnist_correlation', \
f'Dataset name is {dataset_name}, should be mnist_correlation/<variant>/<prob>/<size>/<label>.'
variant = params[1]
p = float(params[2])
size = int(params[3])
label_var = params[4] if len(params) > 4 else 'y'
size = 60000 if size == -1 else size
assert variant in MNIST_CORRUPTED_VARIANTS, f'Dataset variant {variant} is not available.'
assert 0. <= p <= 1., f'Probability p(Z=y)={p} should be in [0.0, 1.0].'
assert size <= 60000, f'Dataset size {size} should be at most 60000.'
    assert size % 2 == 0, f'Dataset size {size} should be even.'
assert label_var == 'y' or label_var == 'z'
dataset_evens = load_mnist_correlation_y(
f'mnist_correlation_y/{variant}/{0}/{p}/{size // 2}/{label_var}',
dataset_version, data_dir, validation_frac)
dataset_odds = load_mnist_correlation_y(
f'mnist_correlation_y/{variant}/{1}/{p}/{size // 2}/{label_var}',
dataset_version, data_dir, validation_frac)
dataset_evens.train_dataset = dataset_evens.train_dataset.concatenate(dataset_odds.train_dataset)
dataset_evens.val_dataset = dataset_evens.val_dataset.concatenate(dataset_odds.val_dataset)
dataset_evens.test_dataset = dataset_evens.test_dataset.concatenate(dataset_odds.test_dataset)
dataset_evens.dataset_info.splits['train'].num_examples += dataset_odds.dataset_info.splits['train'].num_examples
return dataset_evens
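# Editor's note, not part of the original file: a concrete reading of the correlation parameter.
# For 'mnist_correlation/zigzag/0.95/20000/y', each parity class gets 10000 examples; within each
# class int(10000 * 0.95) = 9500 carry the matching corruption value (Z = Y) and 500 do not, so
# P(Z = Y) = 0.95 and the correlation strength is 2p - 1 = 0.9.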
def load_mnist_correlation_multihead(dataset_name, dataset_version, data_dir, validation_frac):
"""
Dataset of the form 'mnist_correlation_multihead/{variant}/{p}/{size}/[label_var]'
This loads a training+val set of total size ~{size}, where the spurious feature {variant} and digit parity are correlated
More precisely:
- Y=parity and Z=variant are marginally balanced [p(Y=0) = p(Y=1) = P(Z=0) = P(Z=1) = 1/2]
- P(Y=a | Z=a) = P(Z=a | Y=a) = p
- Alternatively, Y and Z are correlated with strength (2p-1)
"""
params = dataset_name.split("/")
assert params[0] == 'mnist_correlation_multihead', \
f'Dataset name is {dataset_name}, should be mnist_correlation_multihead/<variant>/<prob>/<size>/<label>.'
variant = params[1]
p = float(params[2])
size = int(params[3])
label_var = params[4] if len(params) > 4 else 'y'
size = 60000 if size == -1 else size
assert variant in MNIST_CORRUPTED_VARIANTS, f'Dataset variant {variant} is not available.'
assert 0. <= p <= 1., f'Probability p(Z=y)={p} should be in [0.0, 1.0].'
    assert size <= 60000, f'Dataset size {size} should be at most 60000.'
    assert size % 2 == 0, f'Dataset size {size} should be even.'
assert label_var == 'y' or label_var == 'z'
dataset_evens = load_mnist_correlation_y(
f'mnist_correlation_y/{variant}/{0}/{p}/{size // 2}/{label_var}',
dataset_version, data_dir, validation_frac)
dataset_odds = load_mnist_correlation_y(
f'mnist_correlation_y/{variant}/{1}/{p}/{size // 2}/{label_var}',
dataset_version, data_dir, validation_frac)
dataset_evens.train_dataset = dataset_evens.train_dataset.concatenate(dataset_odds.train_dataset)
dataset_evens.val_dataset = dataset_evens.val_dataset.concatenate(dataset_odds.val_dataset)
dataset_evens.test_dataset = dataset_evens.test_dataset.concatenate(dataset_odds.test_dataset)
dataset_evens.dataset_info.splits['train'].num_examples += dataset_odds.dataset_info.splits['train'].num_examples
# To turn this into a "multihead" method: pretend there is 1 binary head
dataset_evens.dataset_info.num_classes = 1
if label_var == 'y':
dataset_evens.dataset_info.classes = ['parity']
else:
dataset_evens.dataset_info.classes = ['corruption']
dataset_evens.train_dataset = dataset_evens.train_dataset.map(lambda x, y: (x, tf.convert_to_tensor(y)[..., None]))
dataset_evens.val_dataset = dataset_evens.val_dataset.map(lambda x, y: (x, tf.convert_to_tensor(y)[..., None]))
dataset_evens.test_dataset = dataset_evens.test_dataset.map(lambda x, y: (x, tf.convert_to_tensor(y)[..., None]))
return dataset_evens
|
model-patching-master
|
augmentation/datasets/custom/mnist_correlation.py
|
from types import SimpleNamespace
import augmentation.datasets.utils
MNIST_CORRUPTED_VARIANTS = ['identity',
'shot_noise',
'impulse_noise',
'glass_blur',
'motion_blur',
'shear',
'scale',
'rotate',
'brightness',
'translate',
'stripe',
'fog',
'spatter',
'dotted_line',
'zigzag',
'canny_edges']
def load_mnist_spurious_variants(dataset_variant, data_dir, modified_class=7, validation_frac=0.):
assert dataset_variant in MNIST_CORRUPTED_VARIANTS, f'The requested variant _{dataset_variant}_ does not exist.'
# Load up the standard MNIST dataset
mnist_dataset_payload = augmentation.datasets.utils.load_dataset('mnist', '3.*.*', data_dir,
validation_frac=validation_frac)
# Load up the corrupted MNIST dataset
mnistc_dataset_payload = augmentation.datasets.utils.load_dataset(f'mnist_corrupted/{dataset_variant}', '1.*.*',
data_dir,
validation_frac=validation_frac)
# Grab the training and test sets
mnist_train = mnist_dataset_payload.train_dataset
mnistc_train = mnistc_dataset_payload.train_dataset
mnist_test = mnist_dataset_payload.test_dataset
mnistc_test = mnistc_dataset_payload.test_dataset
# Construct a dataset -- MNIST spurious,
# where a class in MNIST is replaced with a corrupted variant of it from MNIST-C
mnists_train = mnist_train.filter(lambda image, label: label != modified_class).concatenate(
mnistc_train.filter(lambda image, label: label == modified_class))
mnists_test = mnist_test.filter(lambda image, label: label != modified_class).concatenate(
mnistc_test.filter(lambda image, label: label == modified_class))
# Construct a dataset -- MNIST combined,
# where each class has digits from both MNIST and MNIST-C (for the chosen corruption)
mnistcom_train = mnist_train.concatenate(mnistc_train)
mnistcom_test = mnist_test.concatenate(mnistc_test)
return mnist_train, mnist_test, mnistc_train, mnistc_test, mnists_train, mnists_test, mnistcom_train, mnistcom_test
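# Editor's usage sketch, not part of the original file; data_dir is wherever TFDS caches MNIST.
def _example_spurious_variants(data_dir):
    # Replace the digit 7 with its zigzag-corrupted counterpart in both train and test
    (mnist_train, mnist_test, mnistc_train, mnistc_test,
     mnists_train, mnists_test, mnistcom_train, mnistcom_test) = \
        load_mnist_spurious_variants('zigzag', data_dir, modified_class=7)
    return mnists_train, mnists_test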
def load_mnist_spurious(dataset_name, dataset_version, data_dir, validation_frac):
assert dataset_name.startswith('mnist_spurious'), f'Dataset name is {dataset_name}, ' \
f'should be mnist_spurious/<variant>/<modified_class>.'
# Grab the name of the variant requested
variant = dataset_name.split("/")[1]
modified_class = int(dataset_name.split("/")[2])
assert variant in MNIST_CORRUPTED_VARIANTS, f'Dataset variant {variant} is not available.'
assert modified_class in range(10), f'Cannot modify class {modified_class}. Pick a class between 0-9.'
if dataset_version == '1.*.*':
# Load up the standard MNIST dataset
mnist_dataset_payload = augmentation.datasets.utils.load_dataset(dataset_name='mnist',
dataset_version='3.*.*',
data_dir=data_dir,
validation_frac=validation_frac)
# Load up the corrupted MNIST dataset
mnistc_dataset_payload = augmentation.datasets.utils.load_dataset(dataset_name=f'mnist_corrupted/{variant}',
dataset_version='1.*.*',
data_dir=data_dir,
validation_frac=validation_frac)
# Construct the dataset by replacing a class in MNIST with a corrupted variant of it from MNIST-C
mnists_train = mnist_dataset_payload.train_dataset.filter(lambda image, label: label != modified_class). \
concatenate(mnistc_dataset_payload.train_dataset.filter(lambda image, label: label == modified_class))
mnists_val = mnist_dataset_payload.val_dataset.filter(lambda image, label: label != modified_class). \
concatenate(mnistc_dataset_payload.val_dataset.filter(lambda image, label: label == modified_class))
mnists_test = mnist_dataset_payload.test_dataset.filter(lambda image, label: label != modified_class). \
concatenate(mnistc_dataset_payload.test_dataset.filter(lambda image, label: label == modified_class))
# Make a dataset info namespace to ensure downstream compatibility
num_classes = mnist_dataset_payload.dataset_info.features['label'].num_classes
shape = mnist_dataset_payload.dataset_info.features['image'].shape
num_examples = mnist_dataset_payload.dataset_info.splits['train'].num_examples
dataset_info = SimpleNamespace(features={'label': SimpleNamespace(num_classes=num_classes),
'image': SimpleNamespace(shape=shape)},
splits={'train': SimpleNamespace(num_examples=num_examples)})
return SimpleNamespace(dataset_info=dataset_info,
train_dataset=mnists_train,
val_dataset=mnists_val,
test_dataset=mnists_test)
else:
raise NotImplementedError
def load_mnist_combined(dataset_name, dataset_version, data_dir, validation_frac):
assert dataset_name.startswith('mnist_combined'), f'Dataset name is {dataset_name}, ' \
f'should be mnist_combined/<variant>.'
# Grab the name of the variant requested
variant = dataset_name.split("/")[1]
assert variant in MNIST_CORRUPTED_VARIANTS, f'Dataset variant {variant} is not available.'
if dataset_version == '1.*.*':
# Load up the standard MNIST dataset
mnist_dataset_payload = augmentation.datasets.utils.load_dataset(dataset_name='mnist',
dataset_version='3.*.*',
data_dir=data_dir,
validation_frac=validation_frac)
# Load up the corrupted MNIST dataset
mnistc_dataset_payload = augmentation.datasets.utils.load_dataset(dataset_name=f'mnist_corrupted/{variant}',
dataset_version='1.*.*',
data_dir=data_dir,
validation_frac=validation_frac)
# Construct the dataset by combining MNIST and MNIST-Corrupted (for the chosen corruption)
mnistcom_train = mnist_dataset_payload.train_dataset.concatenate(mnistc_dataset_payload.train_dataset)
mnistcom_val = mnist_dataset_payload.val_dataset.concatenate(mnistc_dataset_payload.val_dataset)
mnistcom_test = mnist_dataset_payload.test_dataset.concatenate(mnistc_dataset_payload.test_dataset)
# Make a dataset info namespace to ensure downstream compatibility
num_classes = mnist_dataset_payload.dataset_info.features['label'].num_classes
shape = mnist_dataset_payload.dataset_info.features['image'].shape
num_examples = mnist_dataset_payload.dataset_info.splits['train'].num_examples + \
mnistc_dataset_payload.dataset_info.splits['train'].num_examples
dataset_info = SimpleNamespace(features={'label': SimpleNamespace(num_classes=num_classes),
'image': SimpleNamespace(shape=shape)},
splits={'train': SimpleNamespace(num_examples=num_examples)})
return SimpleNamespace(dataset_info=dataset_info,
train_dataset=mnistcom_train,
val_dataset=mnistcom_val,
test_dataset=mnistcom_test)
else:
raise NotImplementedError
|
model-patching-master
|
augmentation/datasets/custom/mnist.py
|
import numpy as np
import imgaug.augmenters as iaa
from imgaug.augmenters import *
from augmentation.methods.cyclegan.models import *
from augmentation.autoaugment import augmentation_transforms
from augmentation.autoaugment.augmentation_transforms import MEANS, STDS
from augmentation.autoaugment.policies import good_policies
from augmentation.utilities.wandb import *
from scipy import ndimage
def compose_augmentations(x, augmentations):
for f in augmentations:
x = f(x)
return x
def create_augmentation_pipeline(daug_pipeline, daug_pipeline_args, broadcast_to=1):
"""Takes as input an augmentation pipeline: a list of strings where each string is an augmentation. Their
corresponding arguments are in daug_pipeline_args."""
# Setup the augmentation pipeline we'll be using
if broadcast_to > 1:
# If broadcasting, return a list of augmentation pipelines (rather than a single augmentation pipeline)
# by replication
return [[globals()[daug](*daug_args) for daug, daug_args in zip(daug_pipeline, daug_pipeline_args)]] \
* broadcast_to
# By default, just return a single augmentation pipeline
return [globals()[daug](*daug_args) for daug, daug_args in zip(daug_pipeline, daug_pipeline_args)]
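# Editor's usage sketch, not part of the original file. Pipeline entries are resolved by name
# through globals(), so any augmentation class defined or star-imported in this module can be
# named as a string together with a tuple of its positional constructor arguments.
def _example_create_pipeline():
    # ResizeImage and BasicImagePreprocessingPipeline are defined later in this module
    return create_augmentation_pipeline(['ResizeImage', 'BasicImagePreprocessingPipeline'],
                                        [(224,), ('zero-one',)])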
def create_augmentation_pipelines(train_daug_pipeline, train_daug_pipeline_args,
val_daug_pipeline, val_daug_pipeline_args,
test_daug_pipeline, test_daug_pipeline_args):
# Setup the augmentation pipeline we'll be using
train_augmentations = create_augmentation_pipeline(train_daug_pipeline, train_daug_pipeline_args)
val_augmentations = create_augmentation_pipeline(val_daug_pipeline, val_daug_pipeline_args)
test_augmentations = create_augmentation_pipeline(test_daug_pipeline, test_daug_pipeline_args)
return train_augmentations, val_augmentations, test_augmentations
def create_multiple_train_eval_augmentation_pipelines(train_augmentation_pipelines,
train_augmentation_pipelines_args,
eval_augmentation_pipelines,
eval_augmentation_pipelines_args,
broadcast_train_to=1,
broadcast_eval_to=1):
assert len(train_augmentation_pipelines) == len(train_augmentation_pipelines_args) and \
len(eval_augmentation_pipelines) == len(eval_augmentation_pipelines_args), \
'Number of pipelines and args must be the same.'
# Find the number of pipelines
n_train_pipelines = len(train_augmentation_pipelines)
n_eval_pipelines = len(eval_augmentation_pipelines)
if n_train_pipelines == 0:
# No train augmentation, push in an empty list to handle this properly
train_augmentation_pipelines, train_augmentation_pipelines_args = [[]], [[]]
if n_eval_pipelines == 0:
# No eval augmentation, push in an empty list to handle this properly
eval_augmentation_pipelines, eval_augmentation_pipelines_args = [[]], [[]]
# 'Broadcast' the single pipeline and replicate it broadcast_to times (otherwise don't)
broadcast_train_to = broadcast_train_to if (n_train_pipelines <= 1 and broadcast_train_to > 1) else 1
broadcast_eval_to = broadcast_eval_to if (n_eval_pipelines <= 1 and broadcast_eval_to > 1) else 1
# Standard stuff, just create the pipelines and return them
train_augmentations = [
(create_augmentation_pipeline(*z))
for z in zip(train_augmentation_pipelines * broadcast_train_to,
train_augmentation_pipelines_args * broadcast_train_to)
]
eval_augmentations = [
(create_augmentation_pipeline(*z))
for z in zip(eval_augmentation_pipelines * broadcast_eval_to,
eval_augmentation_pipelines_args * broadcast_eval_to)
]
return train_augmentations, eval_augmentations
def create_multiple_augmentation_pipelines(train_daug_pipelines, train_daug_pipelines_args,
val_daug_pipelines, val_daug_pipelines_args,
test_daug_pipelines, test_daug_pipelines_args,
broadcast_to=1):
"""
Same as create_augmentation_pipelines but takes list of pipelines each
and returns lists of same length.
'Broadcast' to pass in a single pipeline and get k replicates.
"""
assert len(train_daug_pipelines) == len(train_daug_pipelines_args) and \
len(val_daug_pipelines) == len(val_daug_pipelines_args) and \
len(test_daug_pipelines) == len(test_daug_pipelines_args), 'Number of pipelines and args must be the same.'
# Find the number of pipelines
n_train_pipelines = len(train_daug_pipelines)
n_val_pipelines = len(val_daug_pipelines)
n_test_pipelines = len(test_daug_pipelines)
if n_train_pipelines == 0:
# No augmentation, push in an empty list to handle this properly
train_daug_pipelines, train_daug_pipelines_args = [[]], [[]]
val_daug_pipelines, val_daug_pipelines_args = [[]], [[]]
test_daug_pipelines, test_daug_pipelines_args = [[]], [[]]
# 'Broadcast' the single pipeline and replicate it broadcast_to times (otherwise don't)
broadcast_train_to = broadcast_to if (n_train_pipelines <= 1 and broadcast_to > 1) else 1
broadcast_val_to = broadcast_to if (n_val_pipelines <= 1 and broadcast_to > 1) else 1
broadcast_test_to = broadcast_to if (n_test_pipelines <= 1 and broadcast_to > 1) else 1
# Standard stuff, just create the pipelines and return them
augmentations = [
create_augmentation_pipelines(*z)
for z in zip(train_daug_pipelines * broadcast_train_to,
train_daug_pipelines_args * broadcast_train_to,
val_daug_pipelines * broadcast_val_to,
val_daug_pipelines_args * broadcast_val_to,
test_daug_pipelines * broadcast_test_to,
test_daug_pipelines_args * broadcast_test_to)
]
return tuple(zip(*augmentations))
class AugmentationPipeline:
"""
Base class for performing augmentations.
"""
def __init__(self, *args, **kwargs):
pass
def __call__(self, data, *args, **kwargs):
if len(data.shape) == 4:
return np.array([self.transform(e) for e in data])
elif len(data.shape) == 3:
return np.array(self.transform(data))
else:
raise NotImplementedError
def improve(self, *args, **kwargs):
pass
def transform(self, data, *args, **kwargs):
pass
class NoAugmentationPipeline(AugmentationPipeline):
"""
An empty augmentation pipeline that returns the data as-is.
"""
def __init__(self, *args, **kwargs):
super(NoAugmentationPipeline, self).__init__(*args, **kwargs)
def transform(self, data, *args, **kwargs):
return data
class ResizeImage(AugmentationPipeline):
def __init__(self, size, *args, **kwargs):
super(ResizeImage, self).__init__(*args, **kwargs)
self.resizer = iaa.Sequential([iaa.Resize(size=size)])
def transform(self, data, *args, **kwargs):
return self.resizer(images=data)
def __call__(self, data, *args, **kwargs):
if len(data.shape) == 4 or len(data.shape) == 3:
return np.array(self.transform(data))
else:
raise NotImplementedError
class ImgAugAugmentationPipeline(AugmentationPipeline):
def __init__(self, pipeline, *args, **kwargs):
super(ImgAugAugmentationPipeline, self).__init__(*args, **kwargs)
self.iaa_pipeline = iaa.Sequential([])
if pipeline == 'fliplr:crop':
self.iaa_pipeline.append(iaa.Fliplr(0.5))
self.iaa_pipeline.append(iaa.Crop(percent=(0, 0.10), keep_size=True, sample_independently=True))
elif pipeline == 'heavy':
self.iaa_pipeline.append(self.create_heavy_augmentation_pipeline())
else:
raise NotImplementedError
def create_heavy_augmentation_pipeline(self):
# Adapting most of what AugMix/AutoAugment/RandAugment uses
# -----------------
# Shear (-30, 30): this is simplified from the shear_x and shear_y ops used
shear = iaa.Affine(shear=(-30, 30))
# Translation (-150 pixels, 150 pixels): this is simplified from the translate_x and translate_y ops used
# We translate 20% of the image independently in either direction
translate = iaa.Affine(translate_percent={"x": (-0.2, 0.2), "y": (-0.2, 0.2)})
# Rotation (-30 degrees, 30 degrees)
rotate = iaa.Affine(rotate=(-30, 30))
# Auto Contrast: can't find this in imgaug
# auto_contrast = iaa.Identity()
# Invert
invert = iaa.Invert()
# Equalize
equalize = iaa.HistogramEqualization()
# Solarize (0, 255)
solarize = iaa.Invert(threshold=(0, 255))
# Posterize (4, 8) bits
posterize = iaa.Posterize(nb_bits=(4, 8))
# Contrast
contrast = iaa.GammaContrast(gamma=(0.1, 1.9))
# Color
color = iaa.MultiplyHue()
# Brightness
brightness = iaa.Multiply((0.1, 1.9))
# Sharpness
sharpness = iaa.Sharpen(alpha=(0.1, 1.0), lightness=1.0)
# Cutout: approximates Cutout
cutout = iaa.CoarseDropout(p=0.1, size_percent=0.02)
# Sample Pairing: linearly mixes images (by convex combination)
mixup = iaa.Lambda(self.linear_mixup)
# Flip
flip = iaa.Fliplr(0.5)
# Sample between 1 and 3 of these augmentations and chain them
return iaa.SomeOf((1, 3), [shear,
translate,
rotate,
invert,
equalize,
solarize,
posterize,
contrast,
color,
brightness,
sharpness,
cutout,
mixup,
flip], random_order=True)
def linear_mixup(self, images, random_state, parents, hooks):
randomized_images = images[random_state.permutation(images.shape[0])]
scale = random_state.uniform(0.5, 1.0, size=images.shape[0]).reshape(images.shape[0], 1, 1, 1)
return (scale * images + (1 - scale) * randomized_images).astype(np.uint8)
def transform(self, data, *args, **kwargs):
return self.iaa_pipeline(images=data)
def __call__(self, data, *args, **kwargs):
if len(data.shape) == 4 or len(data.shape) == 3:
return np.array(self.transform(data))
else:
raise NotImplementedError
class BasicImagePreprocessingPipeline(AugmentationPipeline):
"""
A basic image preprocessing pipeline that
(1) casts an image to tf.float32,
(2) normalizes pixel values to lie in [0, 1] or [-1, 1].
"""
def __init__(self, type='zero-one', *args, **kwargs):
super(BasicImagePreprocessingPipeline, self).__init__(*args, **kwargs)
if type == 'zero-one':
self.transform = self.zero_one_normalization
self.zero_one_conversion = lambda x: x
elif type == 'minusone-one':
self.transform = self.minusone_one_normalization
self.zero_one_conversion = self.minuseone_one_to_zero_one_normalization
elif type == 'minusone-one-to-zero-one':
self.transform = self.minuseone_one_to_zero_one_normalization
elif type == 'grayscale':
self.transform = self.grayscale
elif type == 'none':
self.transform = lambda x: x
else:
raise NotImplementedError
def zero_one_normalization(self, image):
return image.astype(np.float32) / 255.
def inverse_zero_one_normalization(self, image):
return (image * 255.).astype(np.uint8)
def minusone_one_normalization(self, image):
return (image.astype(np.float32) / 127.5) - 1.
def minuseone_one_to_zero_one_normalization(self, image):
return image * 0.5 + 0.5
def grayscale(self, image):
# See https://stackoverflow.com/questions/12201577/how-can-i-convert-an-rgb-image-into-grayscale-in-python
return np.dot(image[..., :3], [0.2989, 0.5870, 0.1140])
class CIFAR10PreprocessingPipeline(BasicImagePreprocessingPipeline):
"""
A basic image preprocessing pipeline for the CIFAR10 dataset. It first calls the BasicImagePreprocessingPipeline,
followed by standardizing the images using a precomputed mean and standard deviation.
The mean and std values are taken from the AutoAugment repository.
"""
def __init__(self, *args, **kwargs):
super(CIFAR10PreprocessingPipeline, self).__init__(*args, **kwargs)
def transform(self, image, *args, **kwargs):
# First do basic preprocessing
image = BasicImagePreprocessingPipeline.transform(self, image, *args, **kwargs)
# Then subtract off the mean and std
return (image - MEANS) / STDS
class OnlyImageNetPreprocessingPipeline(AugmentationPipeline):
"""
A basic image preprocessing pipeline for ImageNet.
"""
MEANS = [0.485, 0.456, 0.406]
STDS = [0.229, 0.224, 0.225]
def __init__(self, *args, **kwargs):
super(OnlyImageNetPreprocessingPipeline, self).__init__(*args, **kwargs)
def transform(self, image, *args, **kwargs):
# Subtract off the mean and std
return (image - self.MEANS) / self.STDS
def __call__(self, data, *args, **kwargs):
if len(data.shape) == 4 or len(data.shape) == 3:
return np.array(self.transform(data))
else:
raise NotImplementedError
class ImageNetPreprocessingPipeline(AugmentationPipeline):
"""
    A basic image preprocessing pipeline for ImageNet. It first applies the BasicImagePreprocessingPipeline,
    followed by standardizing the images using the standard ImageNet channel means and standard deviations.
"""
MEANS = [0.485, 0.456, 0.406]
STDS = [0.229, 0.224, 0.225]
def __init__(self, *args, **kwargs):
super(ImageNetPreprocessingPipeline, self).__init__(*args, **kwargs)
self.basic_preprocessor = BasicImagePreprocessingPipeline()
def transform(self, image, *args, **kwargs):
# First do basic preprocessing
image = self.basic_preprocessor(image)
# Then subtract off the mean and std
return (image - self.MEANS) / self.STDS
def __call__(self, data, *args, **kwargs):
if len(data.shape) == 4 or len(data.shape) == 3:
return np.array(self.transform(data))
else:
raise NotImplementedError
class HeuristicImageAugmentationPipeline(AugmentationPipeline):
"""
A variety of heuristic pipelines for data augmentation.
"""
def __init__(self, heuristic, *args, **kwargs):
super(HeuristicImageAugmentationPipeline, self).__init__(*args, **kwargs)
if heuristic == 'pad:crop:flip':
self.transform = self.pad_crop_flip
elif heuristic == 'pad:crop':
self.transform = self.pad_crop
elif heuristic == 'cutout':
self.transform = self.cutout
elif heuristic == 'pad:crop:flip:cutout':
self.transform = self.pad_crop_flip_cutout
elif heuristic == 'pad16:crop:flip:cutout':
self.transform = lambda x: self.pad_crop_flip_cutout(x, padding=16)
elif heuristic == 'rotr':
self.transform = self.rotate_random
else:
raise NotImplementedError
def pad_crop_flip(self, image, padding=4):
return augmentation_transforms.random_flip(augmentation_transforms.zero_pad_and_crop(image, padding))
def pad_crop(self, image, padding=4):
return augmentation_transforms.zero_pad_and_crop(image, padding)
def cutout(self, image, size=16):
return augmentation_transforms.cutout_numpy(image, size)
def pad_crop_flip_cutout(self, image, padding=4, cutout_size=16):
image = self.pad_crop_flip(image, padding)
return self.cutout(image, cutout_size)
def rotate_random(self, image, max_angle=45):
return ndimage.rotate(image, np.random.uniform(-max_angle, max_angle), reshape=False)
class AutoAugmentPipeline(AugmentationPipeline):
"""
Implements the augmentation pipeline learned by AutoAugment.
Code for AutoAugment is taken from
https://github.com/tensorflow/models/tree/048f5a9541c1400c0345bab4e3d9b5c9eb234989/research/autoaugment
"""
def __init__(self, dataset, *args, **kwargs):
super(AutoAugmentPipeline, self).__init__(*args, **kwargs)
if dataset == 'cifar10':
self.policy = self._cifar_policy()
elif dataset == 'imagenet':
self.policy = self._imagenet_policy()
elif dataset == 'svhn':
self.policy = self._svhn_policy()
else:
raise NotImplementedError('AutoAugment only supports (\'cifar10\', \'imagenet\', \'svhn\') policies.')
def transform(self, image, *args, **kwargs):
# Implementation is borrowed from
# lines 152-162 in
# https://github.com/tensorflow/models/blob/048f5a9541c1400c0345bab4e3d9b5c9eb234989/research/autoaugment/data_utils.py
# Convert tensor to a numpy array
image = np.array(image)
# Randomly sample one of the AutoAugment policies
epoch_policy = self.policy[np.random.choice(len(self.policy))]
# Apply the policy transformation to the image
image = augmentation_transforms.apply_policy(epoch_policy, image)
# Zero-pad, crop and flip the image randomly
image = augmentation_transforms.random_flip(augmentation_transforms.zero_pad_and_crop(image, 4))
# Apply cutout to the image
image = augmentation_transforms.cutout_numpy(image)
return image
def _cifar_policy(self):
return good_policies()
def _imagenet_policy(self):
raise NotImplementedError
def _svhn_policy(self):
raise NotImplementedError
class AutoAugmentCIFAR10Pipeline(CIFAR10PreprocessingPipeline, AutoAugmentPipeline):
def __init__(self, *args, **kwargs):
super(AutoAugmentCIFAR10Pipeline, self).__init__(dataset='cifar10', *args, **kwargs)
def transform(self, image, *args, **kwargs):
image = CIFAR10PreprocessingPipeline.transform(self, image, *args, **kwargs)
image = AutoAugmentPipeline.transform(self, image, *args, **kwargs)
return image
class RandomPolicyImageAugmentationPipeline(AugmentationPipeline):
def __init__(self, policy, *args, **kwargs):
super(RandomPolicyImageAugmentationPipeline, self).__init__()
if policy == 'basic':
pass
else:
raise NotImplementedError
def _basic_policy(self):
pass
def transform(self, image, *args, **kwargs):
return image
class TandaPipeline(AugmentationPipeline):
def __init__(self, *args, **kwargs):
super(TandaPipeline, self).__init__(*args, **kwargs)
pass
def improve(self):
pass
class WandbModelPseudoLabelingPipeline(AugmentationPipeline):
LABELING_METHODS = ['argmax', 'sigmoid_argmax', 'sigmoid_threshold']
def __init__(self,
wandb_entity,
wandb_project,
wandb_run_id,
input_shape,
n_classes,
checkpoint_path='checkpoints/',
labeling_method='argmax',
placeholder_labels=(),
*args, **kwargs):
super(WandbModelPseudoLabelingPipeline, self).__init__(*args, **kwargs)
# Load up the Weights and Biases run, get information about the model source and architecture and
# create the model.
wandb_run = load_wandb_run(wandb_run_id, wandb_project, wandb_entity)
self.keras_model = \
load_pretrained_keras_classification_model(source=wandb_run.cfg['model_source']['value'],
architecture=wandb_run.cfg['architecture']['value'],
input_shape=input_shape,
n_classes=n_classes,
imagenet_pretrained=False,
pretraining_source='wandb',
pretraining_info=f'{wandb_run_id}:{wandb_project}:{wandb_entity}',
checkpoint_path=checkpoint_path)
# Assume we only need to normalize to [0, 1] to run the Keras model
self.basic_preprocessor = BasicImagePreprocessingPipeline()
# What labels need to be pseudo-labeled? These are the placeholder labels we're replacing
# If empty, pseudolabel all the data
# TODO: add support for nonempty placeholder labels (only those labels are pseudolabeled)
assert len(placeholder_labels) == 0
self.placeholder_labels = np.array(placeholder_labels)
assert labeling_method in self.LABELING_METHODS, f'Labeling method {labeling_method} is invalid.'
self.labeling_method = labeling_method
def pseudolabel(self, outputs):
if self.labeling_method == 'argmax':
return np.argmax(outputs, axis=-1)
else:
raise NotImplementedError
def __call__(self, data, *args, **kwargs):
return self.transform(data)
def transform(self, data, *args, **kwargs):
# The data consists of inputs and labels
inputs, labels = data
# Transform the inputs using the model
outputs = self.keras_model(self.basic_preprocessor(inputs))
# Create pseudolabels
pseudolabels = self.pseudolabel(outputs)
# Return the data along with the pseudolabels
return inputs, pseudolabels
class BinaryMNISTWandbModelPseudoLabelingPipeline(WandbModelPseudoLabelingPipeline):
def __init__(self, wandb_entity, wandb_project, wandb_run_id, *args, **kwargs):
# Initialize the pseudolabeler: we just use the argmax labeling method since this is MNIST and
# pseudolabel everything
super(BinaryMNISTWandbModelPseudoLabelingPipeline, self).__init__(wandb_entity=wandb_entity,
wandb_project=wandb_project,
wandb_run_id=wandb_run_id,
input_shape=(28, 28, 1),
n_classes=2, *args, **kwargs)
def shuffle_and_split_data(data, proportion):
"""
Shuffle the data, split the data and return the shuffled data splits along with the permutation applied to the data.
"""
perm = np.random.permutation(len(data))
shuffled = data[perm]
return shuffled[:int(proportion * len(data))], shuffled[int(proportion * len(data)):], perm
def unshuffle_data(data, permutation):
"""
Unshuffle data that was shuffled using a permutation.
"""
return data[np.argsort(permutation)]
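# Editor's sketch, not part of the original file: the permutation returned by
# shuffle_and_split_data lets unshuffle_data restore the original ordering after the two
# splits have been processed and concatenated again.
def _example_shuffle_roundtrip():
    data = np.arange(10)
    first, rest, perm = shuffle_and_split_data(data, proportion=0.3)
    restored = unshuffle_data(np.concatenate([first, rest]), perm)
    assert np.array_equal(restored, data)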
class PretrainedGenerativeModelAugmentationPipeline(AugmentationPipeline):
def __init__(self,
wandb_entity,
wandb_project,
wandb_run_id,
model_name,
keras_model_creation_fn,
keras_model_creation_fn_args,
basic_preprocessing='minusone-one',
step_extractor=None,
aug_proportion=0.5,
run_in_eval_mode=True,
*args, **kwargs):
super(PretrainedGenerativeModelAugmentationPipeline, self).__init__(*args, **kwargs)
self.keras_model, _ = load_pretrained_keras_model_from_wandb(wandb_run_id=wandb_run_id,
wandb_project=wandb_project,
wandb_entity=wandb_entity,
keras_model_creation_fn=keras_model_creation_fn,
keras_model_creation_fn_args=
keras_model_creation_fn_args,
model_name=model_name,
step_extractor=step_extractor)
self.basic_preprocessor = BasicImagePreprocessingPipeline(type=basic_preprocessing)
self.aug_proportion = aug_proportion
self.training = not run_in_eval_mode
def __call__(self, data, *args, **kwargs):
if len(data.shape) == 4:
return np.array(self.transform(data))
else:
raise NotImplementedError
def transform(self, data, *args, **kwargs):
# Rescale the data
data = self.basic_preprocessor(data)
# Get splits of the data
split_1, split_2, permutation = shuffle_and_split_data(data, self.aug_proportion)
# Pass it through the generator
split_1 = self.keras_model(split_1, training=self.training)
# Combine the data
data = np.concatenate([split_1, split_2], axis=0)
# Unshuffle the data
data = unshuffle_data(data, permutation)
# Rescale output to [0, 1]
return self.basic_preprocessor.zero_one_conversion(data)
class PretrainedMNISTCycleGANAugmentationPipeline(PretrainedGenerativeModelAugmentationPipeline):
def __init__(self,
wandb_entity,
wandb_project,
wandb_run_id,
model_name,
aug_proportion=1.0,
run_in_eval_mode=False,
norm_type='batchnorm',
checkpoint_step=-1,
*args, **kwargs):
assert model_name in ['generator_g', 'generator_f'], 'model_name must be {generator_g, generator_f}.'
super(PretrainedMNISTCycleGANAugmentationPipeline,
self).__init__(wandb_entity=wandb_entity,
wandb_project=wandb_project,
wandb_run_id=wandb_run_id,
model_name=model_name,
keras_model_creation_fn='mnist_unet_generator',
keras_model_creation_fn_args={'norm_type': norm_type},
step_extractor=particular_checkpoint_step_extractor(checkpoint_step),
aug_proportion=aug_proportion,
run_in_eval_mode=run_in_eval_mode,
*args, **kwargs)
class PretrainedCycleGANAugmentationPipeline(AugmentationPipeline):
def __init__(self,
wandb_entity,
wandb_project,
wandb_run_id,
model_name,
keras_model_creation_fn,
keras_model_creation_fn_args,
step_extractor=None,
aug_proportion=0.5,
*args, **kwargs):
super(PretrainedCycleGANAugmentationPipeline, self).__init__(*args, **kwargs)
raise DeprecationWarning("Please use PretrainedGenerativeModelAugmentationPipeline "
"instead as a drop-in replacement (with an optional argument for the preprocessor).")
# Load the run
wandb_run = load_wandb_run(wandb_run_id, wandb_project, wandb_entity)
# Create the model architecture
self.keras_model = globals()[keras_model_creation_fn](**keras_model_creation_fn_args)
# Load up the model weights
if step_extractor is None:
load_most_recent_keras_model_weights(self.keras_model, wandb_run, model_name=model_name)
else:
load_most_recent_keras_model_weights(self.keras_model, wandb_run,
model_name=model_name,
step_extractor=step_extractor)
self.basic_preprocessor = BasicImagePreprocessingPipeline(type='minusone-one')
self.aug_proportion = aug_proportion
def __call__(self, data, *args, **kwargs):
if len(data.shape) == 4:
return np.array(self.transform(data))
else:
raise NotImplementedError
def transform(self, data, *args, **kwargs):
# Rescale the data to [-1, 1]
data = self.basic_preprocessor(data)
# Get splits of the data
split_1, split_2, permutation = shuffle_and_split_data(data, self.aug_proportion)
# Pass it through the generator
split_1 = self.keras_model(split_1, training=False)
# Combine the data
data = np.concatenate([split_1, split_2], axis=0)
# Unshuffle the data
data = unshuffle_data(data, permutation)
# Rescale output to [0, 1]
return data * 0.5 + 0.5
class PretrainedCycleGANBatchBalancingAugmentationPipeline(AugmentationPipeline):
def __init__(self,
wandb_entity,
wandb_project,
wandb_run_id,
generator_1_name,
generator_2_name,
keras_model_creation_fn,
keras_model_creation_fn_args,
step_extractor=None,
basic_preprocessing='minusone-one',
aug_proportion=1.0,
generator_1_balance=0.5,
run_in_eval_mode=True,
*args, **kwargs):
super(PretrainedCycleGANBatchBalancingAugmentationPipeline, self).__init__(*args, **kwargs)
# Load up the generators for both domains
self.generator_1, _ = load_pretrained_keras_model_from_wandb(wandb_run_id=wandb_run_id,
wandb_project=wandb_project,
wandb_entity=wandb_entity,
keras_model_creation_fn=keras_model_creation_fn,
keras_model_creation_fn_args=
keras_model_creation_fn_args,
model_name=generator_1_name,
step_extractor=step_extractor)
self.generator_2, _ = load_pretrained_keras_model_from_wandb(wandb_run_id=wandb_run_id,
wandb_project=wandb_project,
wandb_entity=wandb_entity,
keras_model_creation_fn=keras_model_creation_fn,
keras_model_creation_fn_args=
keras_model_creation_fn_args,
model_name=generator_2_name,
step_extractor=step_extractor)
# Set up the preprocessing
self.basic_preprocessor = BasicImagePreprocessingPipeline(type=basic_preprocessing)
# The proportion of examples that are augmented in a data batch
self.aug_proportion = aug_proportion
# The proportion of augmented examples that are augmented by generator_1
self.generator_1_balance = generator_1_balance
# The mode to run the Keras model in
self.training = not run_in_eval_mode
def __call__(self, data, *args, **kwargs):
if len(data.shape) == 4:
return np.array(self.transform(data))
else:
raise NotImplementedError
def transform(self, data, *args, **kwargs):
# Rescale the data to [-1, 1]
data = self.basic_preprocessor(data)
# Get splits of the data
aug_split, unchanged_split, permutation = shuffle_and_split_data(data, self.aug_proportion)
# Get splits of the data to be augmented
gen1_split, gen2_split, aug_permutation = shuffle_and_split_data(aug_split, self.generator_1_balance)
# Pass the splits through the generators
gen1_split = self.generator_1(gen1_split, training=self.training)
gen2_split = self.generator_2(gen2_split, training=self.training)
# Combine to recover the augmented data split
aug_split = np.concatenate([gen1_split, gen2_split], axis=0)
# Unshuffle the augmented data split
aug_split = unshuffle_data(aug_split, aug_permutation)
# Combine to recover the data
data = np.concatenate([aug_split, unchanged_split], axis=0)
# Unshuffle to recover the original data
data = unshuffle_data(data, permutation)
# Rescale output to [0, 1]
return self.basic_preprocessor.zero_one_conversion(data)
class GenerativeAugmentationPipeline(AugmentationPipeline):
def __init__(self, *args, **kwargs):
super(GenerativeAugmentationPipeline, self).__init__()
pass
def improve(self):
pass
|
model-patching-master
|
augmentation/augment/utils.py
|
from augmentation.augment.utils import WandbModelPseudoLabelingPipeline, BinaryMNISTWandbModelPseudoLabelingPipeline, \
PretrainedMNISTCycleGANAugmentationPipeline, ResizeImage
from augmentation.utilities.wandb import load_pretrained_keras_model_from_wandb, particular_checkpoint_step_extractor, \
load_wandb_run, get_most_recent_model_file
from augmentation.datasets.custom.tfrecords import image_label_to_tfrecord, read_image_label_tfrecord
import augmentation.datasets.utils
import numpy as np
import tensorflow as tf
import os
from copy import copy
def split_batch_size(total_batch_size, n_groups):
return [total_batch_size // n_groups] * (n_groups - 1) + [
total_batch_size // n_groups + total_batch_size % n_groups]
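# Illustrative sketch (not part of the original file): the remainder of the division goes to the
# last group, so the group sizes always sum back to the total batch size.
def _example_split_batch_size():
    assert split_batch_size(32, 3) == [10, 10, 12]
    assert sum(split_batch_size(100, 7)) == 100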
def compose_static_augmentations(static_augmentation_pipelines, datasets, aliases, identifiers, dataset_lens,
batch_sizes,
keep_datasets=False):
print(
f"Composing static augmentations:\ndatasets - {datasets}\naliases - {aliases}\nlengths - {dataset_lens}\nbatch sizes - {batch_sizes}\nidentifiers - {identifiers}\nstatic_aug_pipelines - {static_augmentation_pipelines}",
flush=True)
assert len(static_augmentation_pipelines) == len(datasets) == len(aliases) == len(dataset_lens) == len(
batch_sizes) == len(identifiers), "compose_static_augmentations: lengths of arguments should be equal"
datasets = list(datasets)
original_idx = list(range(len(datasets)))
all_datasets = []
all_aliases = []
all_dataset_lens = []
all_batch_sizes = []
all_original_idx = []
# Loop over all the datasets and their corresponding static augmentations
for dataset, alias, ident, dlen, batch_size, idx, aug_pipeline \
in zip(datasets, aliases, identifiers, dataset_lens, batch_sizes, original_idx,
static_augmentation_pipelines):
dataset, alias, dlen, batch_size, idx = [dataset], [alias], [dlen], [batch_size], [idx]
# Run the dataset through the augmentation pipeline
for augmentation in aug_pipeline:
# Append the augmented datasets
if keep_datasets:
all_datasets += list(dataset)
all_aliases += alias
all_dataset_lens += dlen
all_batch_sizes += batch_size
all_original_idx += idx
# Call the static augmentation
dataset, alias, dlen, batch_size, idx = augmentation(dataset, alias, dlen, batch_size, idx,
**{'dataset_identifier': ident})
all_datasets += list(dataset)
all_aliases += alias
all_dataset_lens += dlen
all_batch_sizes += batch_size
all_original_idx += idx
return all_datasets, all_aliases, all_dataset_lens, all_batch_sizes, all_original_idx
def create_static_augmentation_pipeline(daug_pipeline, daug_pipeline_args, broadcast_to=1):
"""Takes as input an augmentation pipeline: a list of strings where each string is an augmentation. Their
corresponding arguments are in daug_pipeline_args."""
# Setup the augmentation pipeline we'll be using
if broadcast_to > 1:
# If broadcasting, return a list of augmentation pipelines (rather than a single augmentation pipeline)
# by replication
return [[globals()[daug](*daug_args) for daug, daug_args in zip(daug_pipeline, daug_pipeline_args)]] \
* broadcast_to
# By default, just return a single augmentation pipeline
return [globals()[daug](*daug_args) for daug, daug_args in zip(daug_pipeline, daug_pipeline_args)]
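# Illustrative sketch (not part of the original file): each name in daug_pipeline is looked up via
# globals(), so it must match a class defined in this module. Empty pipelines are used here because
# instantiating the real augmentations requires wandb artifacts.
def _example_create_static_augmentation_pipeline():
    # No augmentations: an empty pipeline is returned as-is
    assert create_static_augmentation_pipeline([], []) == []
    # Broadcasting replicates the (single) pipeline
    assert create_static_augmentation_pipeline([], [], broadcast_to=3) == [[], [], []]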
def create_static_augmentation_pipelines(train_daug_pipeline, train_daug_pipeline_args,
val_daug_pipeline, val_daug_pipeline_args,
test_daug_pipeline, test_daug_pipeline_args):
# Setup the augmentation pipeline we'll be using
train_augmentations = create_static_augmentation_pipeline(train_daug_pipeline, train_daug_pipeline_args)
val_augmentations = create_static_augmentation_pipeline(val_daug_pipeline, val_daug_pipeline_args)
test_augmentations = create_static_augmentation_pipeline(test_daug_pipeline, test_daug_pipeline_args)
return train_augmentations, val_augmentations, test_augmentations
def create_multiple_train_eval_static_augmentation_pipelines(train_augmentation_pipelines,
train_augmentation_pipelines_args,
eval_augmentation_pipelines,
eval_augmentation_pipelines_args,
broadcast_train_to=1,
broadcast_eval_to=1):
assert len(train_augmentation_pipelines) == len(train_augmentation_pipelines_args) and \
len(eval_augmentation_pipelines) == len(eval_augmentation_pipelines_args), \
'Number of pipelines and args must be the same.'
# Find the number of pipelines
n_train_pipelines = len(train_augmentation_pipelines)
n_eval_pipelines = len(eval_augmentation_pipelines)
if n_train_pipelines == 0:
# No train augmentation, push in an empty list to handle this properly
train_augmentation_pipelines, train_augmentation_pipelines_args = [[]], [[]]
if n_eval_pipelines == 0:
# No eval augmentation, push in an empty list to handle this properly
eval_augmentation_pipelines, eval_augmentation_pipelines_args = [[]], [[]]
# 'Broadcast' the single pipeline and replicate it broadcast_to times (otherwise don't)
broadcast_train_to = broadcast_train_to if (n_train_pipelines <= 1 and broadcast_train_to > 1) else 1
broadcast_eval_to = broadcast_eval_to if (n_eval_pipelines <= 1 and broadcast_eval_to > 1) else 1
# Standard stuff, just create the pipelines and return them
train_augmentations = [
(create_static_augmentation_pipeline(*z))
for z in zip(train_augmentation_pipelines * broadcast_train_to,
train_augmentation_pipelines_args * broadcast_train_to)
]
eval_augmentations = [
(create_static_augmentation_pipeline(*z))
for z in zip(eval_augmentation_pipelines * broadcast_eval_to,
eval_augmentation_pipelines_args * broadcast_eval_to)
]
return train_augmentations, eval_augmentations
def create_multiple_static_augmentation_pipelines(train_daug_pipelines, train_daug_pipelines_args,
val_daug_pipelines, val_daug_pipelines_args,
test_daug_pipelines, test_daug_pipelines_args,
broadcast_to=1):
"""
    Same as create_static_augmentation_pipelines, but takes a list of pipelines for each split
    and returns lists of the same length.
    'Broadcast' to pass in a single pipeline and get k replicates.
"""
assert len(train_daug_pipelines) == len(train_daug_pipelines_args) and \
len(val_daug_pipelines) == len(val_daug_pipelines_args) and \
len(test_daug_pipelines) == len(test_daug_pipelines_args), 'Number of pipelines and args must be the same.'
# Find the number of pipelines
n_train_pipelines = len(train_daug_pipelines)
n_val_pipelines = len(val_daug_pipelines)
n_test_pipelines = len(test_daug_pipelines)
if n_train_pipelines == 0:
# No augmentation, push in an empty list to handle this properly
train_daug_pipelines, train_daug_pipelines_args = [[]], [[]]
val_daug_pipelines, val_daug_pipelines_args = [[]], [[]]
test_daug_pipelines, test_daug_pipelines_args = [[]], [[]]
# 'Broadcast' the single pipeline and replicate it broadcast_to times (otherwise don't)
broadcast_train_to = broadcast_to if (n_train_pipelines <= 1 and broadcast_to > 1) else 1
broadcast_val_to = broadcast_to if (n_val_pipelines <= 1 and broadcast_to > 1) else 1
broadcast_test_to = broadcast_to if (n_test_pipelines <= 1 and broadcast_to > 1) else 1
# Standard stuff, just create the pipelines and return them
augmentations = [
create_static_augmentation_pipelines(*z)
for z in zip(train_daug_pipelines * broadcast_train_to,
train_daug_pipelines_args * broadcast_train_to,
val_daug_pipelines * broadcast_val_to,
val_daug_pipelines_args * broadcast_val_to,
test_daug_pipelines * broadcast_test_to,
test_daug_pipelines_args * broadcast_test_to)
]
return tuple(zip(*augmentations))
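# Illustrative sketch (not part of the original file): with no pipelines specified, an empty pipeline
# is broadcast to every split, giving `broadcast_to` replicates per split.
def _example_create_multiple_static_augmentation_pipelines():
    train, val, test = create_multiple_static_augmentation_pipelines([], [], [], [], [], [],
                                                                     broadcast_to=2)
    assert train == ([], []) and val == ([], []) and test == ([], [])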
class StaticAugmentation:
def __init__(self, *args, **kwargs):
pass
def __call__(self, datasets, aliases, dataset_lens, batch_sizes, original_idx, *args, **kwargs):
"""
Returns list of lists, one copy for each pseudolabel.
original_idx: for each dataset, specifies which of the "original" datasets (in the config)
batch_sizes: batch size for each dataset
it was generated from. Useful for broadcasting other arguments
"""
updated_datasets, updated_aliases, updated_lens, updated_batch_sizes, updated_idx = [], [], [], [], []
for dataset, alias, dataset_len, batch_size, idx \
in zip(datasets, aliases, dataset_lens, batch_sizes, original_idx):
# Apply the transform to the dataset
datasets_, aliases_, lens_, batch_sizes_ = self.transform(dataset, alias, dataset_len, batch_size,
*args, **kwargs)
# Keep track of things
updated_datasets += datasets_
updated_aliases += aliases_
updated_lens += lens_
updated_batch_sizes += batch_sizes_
k = len(datasets_)
updated_idx += [idx] * k
return updated_datasets, updated_aliases, updated_lens, updated_batch_sizes, updated_idx
def transform(self, dataset, alias, dataset_len, batch_size, *args, **kwargs):
""" Takes a dataset and returns a list of datasets and alias [suffixes] """
# """ This must take a list of datasets and aliases and returns updated lists """
raise NotImplementedError
class PretrainedExternalGANStaticAugmentationTFRecordPipeline(StaticAugmentation):
def __init__(self,
store_path,
gan_name,
version,
relabel=False,
keep_original=False,
shard_size=10240,
overwrite=False,
*args, **kwargs):
super(PretrainedExternalGANStaticAugmentationTFRecordPipeline, self).__init__(*args, **kwargs)
assert relabel is False, 'Relabeling not supported.'
# Base path for location of the TFRecords
self.base_store_path = store_path
# Name of the GAN model that was used to dump augmentations
self.gan_name = gan_name
# Version of the dump from the GAN model that we're using
self.version = version
# Prefix for the folder where the TFRecords will be stored
self.filename_prefix = f'gan[{gan_name}].version[{version}]'
# Size of the TFRecord shard
self.shard_size = shard_size
# Keep the original dataset
self.keep_original = keep_original
# If the TFRecords were previously dumped, overwrite them
assert not overwrite, 'Overwriting is not yet implemented.'
self.overwrite = overwrite
# Set the batch size to 1: a batch size larger than 1 is not supported for this class
self.batch_size = 1
def transform(self, dataset, alias, dataset_len, batch_size, *args, **kwargs):
assert 'dataset_identifier' in kwargs, 'Please pass in a unique identifier for the dataset.'
# Specific paths for the TFRecords
dataset_identifier = kwargs['dataset_identifier'].replace("/", ".")
gen_f_store_path = os.path.join(self.base_store_path, dataset_identifier,
self.filename_prefix + f'.model[gen_f]')
gen_g_store_path = os.path.join(self.base_store_path, dataset_identifier,
self.filename_prefix + f'.model[gen_g]')
# Specific paths from which we're loading the pre-cached outputs of the external GAN that was already run
gen_f_dump_path = os.path.join(self.base_store_path, 'external', self.gan_name,
dataset_identifier, f'gen_f_v{self.version}.npy')
gen_g_dump_path = os.path.join(self.base_store_path, 'external', self.gan_name,
dataset_identifier, f'gen_g_v{self.version}.npy')
print(os.path.exists(gen_f_dump_path), os.path.exists(gen_g_dump_path))
print(os.path.exists(gen_f_store_path), os.path.exists(gen_g_store_path))
if not os.path.exists(gen_f_store_path) and not os.path.exists(gen_g_store_path):
# Write the TF Records to disk
os.makedirs(gen_f_store_path, exist_ok=True)
os.makedirs(gen_g_store_path, exist_ok=True)
lockfile = os.path.join(gen_f_store_path, 'writer.lock')
# Try to procure a lock to dump TF Records: if it already exists, wait until it is released to continue
someone_has_lock = False
while True:
if not os.path.exists(lockfile) and not someone_has_lock:
# Nobody has the lock: create the lock
open(lockfile, 'w')
# Write the TFRecords
self.dump_tf_records(dataset.batch(self.batch_size).prefetch(tf.data.experimental.AUTOTUNE),
gen_f_store_path, gen_g_store_path,
gen_f_dump_path, gen_g_dump_path)
# Release the lock
os.remove(lockfile)
# Break out
break
elif not os.path.exists(lockfile) and someone_has_lock:
# The lock was released and the TFRecords are available to read
break
elif os.path.exists(lockfile):
# Another run is writing the TFRecords, so wait around until they're done
someone_has_lock = True
# Load up the TF Records datasets
dataset_f, dataset_g = self.build_tf_datasets(gen_f_store_path, gen_g_store_path)
alias_f = alias + '(A-F)'
alias_g = alias + '(A-G)'
if self.keep_original:
return [dataset, dataset_f, dataset_g], [alias, alias_f, alias_g], [dataset_len] * 3, [batch_size] * 3
else:
return [dataset_f, dataset_g], [alias_f, alias_g], [dataset_len] * 2, [batch_size] * 2
def dump_tf_records(self, dataset, gen_f_store_path, gen_g_store_path, gen_f_dump_path, gen_g_dump_path):
# Take a dataset and write TFRecords after loading the pre-dumped outputs from an external CycleGAN-like model
print(f"Writing TFRecords with shard size {self.shard_size} at \n{gen_f_store_path}\nand\n{gen_g_store_path}.")
curr_shard = -1
shard_progress = 10 ** 10
assert self.shard_size < 10 ** 10
# Load up the dataset from disk
gen_f_images = np.load(gen_f_dump_path)
gen_g_images = np.load(gen_g_dump_path)
# Initialize the TFRecords file writers
f_out_file, g_out_file = None, None
# Assume dataset contains (image, label) pairs and iterate over it
for i, (image, label) in enumerate(dataset):
# Check if the current shard needs to be incremented
if shard_progress >= self.shard_size:
# Update the current shard
curr_shard += 1
shard_progress = 0
# Get the new filenames
f_filename = os.path.join(gen_f_store_path, "{:02d}-{}.tfrec".format(curr_shard, self.shard_size))
g_filename = os.path.join(gen_g_store_path, "{:02d}-{}.tfrec".format(curr_shard, self.shard_size))
# Open up the new files
f_out_file = tf.io.TFRecordWriter(f_filename)
g_out_file = tf.io.TFRecordWriter(g_filename)
print(f"Opened files {f_filename} and {g_filename}.")
# Grab the batch size for the current batch: this will be 1
batch_size = image.numpy().shape[0]
# Run the image batch through the generators
f_image = gen_f_images[i:i + 1]
g_image = gen_g_images[i:i + 1]
# Encode the images to JPEG for storage
f_image = tf.convert_to_tensor(
[tf.image.encode_jpeg(im, optimize_size=True, chroma_downsampling=False) for im in f_image])
g_image = tf.convert_to_tensor(
[tf.image.encode_jpeg(im, optimize_size=True, chroma_downsampling=False) for im in g_image])
# Iterate over the batch of data
            # Use a separate index so we don't shadow the dataset index `i` from the outer loop
            for j in range(batch_size):
                if isinstance(label, tuple):
                    ith_labels = nested_map(lambda e: int(e.numpy()), list(zip(*label))[j])
                else:
                    ith_labels = int(label.numpy()[j])
                try:
                    f_example = image_label_to_tfrecord(f_image.numpy()[j], ith_labels)
                    g_example = image_label_to_tfrecord(g_image.numpy()[j], ith_labels)
except IndexError:
continue
f_out_file.write(f_example.SerializeToString())
g_out_file.write(g_example.SerializeToString())
print(f"\tShard progress: {shard_progress}/{self.shard_size}")
shard_progress += batch_size
def build_tf_datasets(self, gen_f_store_path, gen_g_store_path):
# Load up the files for the CycleGAN-ed dataset
        gen_f_store_path = gen_f_store_path.replace("[", "\\[").replace("]", "\\]").replace("*", "\\*")
        gen_g_store_path = gen_g_store_path.replace("[", "\\[").replace("]", "\\]").replace("*", "\\*")
gen_f_dataset = tf.data.Dataset.list_files(os.path.join(gen_f_store_path, '*.tfrec'), shuffle=False)
gen_g_dataset = tf.data.Dataset.list_files(os.path.join(gen_g_store_path, '*.tfrec'), shuffle=False)
# Load up the TFRecords datasets
gen_f_dataset = augmentation.datasets.utils. \
get_dataset_from_list_files_dataset(gen_f_dataset, proc_batch=128,
tfrecord_example_reader=read_image_label_tfrecord,
sequential=True).unbatch()
gen_g_dataset = augmentation.datasets.utils. \
get_dataset_from_list_files_dataset(gen_g_dataset, proc_batch=128,
tfrecord_example_reader=read_image_label_tfrecord,
sequential=True).unbatch()
gen_f_dataset, gen_g_dataset = self.remap_tfdataset(gen_f_dataset, gen_g_dataset)
return gen_f_dataset, gen_g_dataset
def remap_tfdataset(self, gen_f_dataset, gen_g_dataset):
for img, label in gen_f_dataset:
if not isinstance(label, tuple):
if len(label.shape) == 0:
return gen_f_dataset, gen_g_dataset
else:
# TODO: generalize to more than 2 labels
return gen_f_dataset.map(lambda im, lab: (im, (lab[0], lab[1]))), \
gen_g_dataset.map(lambda im, lab: (im, (lab[0], lab[1])))
else:
return gen_f_dataset, gen_g_dataset
class PretrainedCycleGANStaticAugmentationTFRecordPipeline(StaticAugmentation):
def __init__(self,
store_path,
wandb_entity,
wandb_project,
wandb_run_id,
run_in_eval_mode=False,
input_shape=(256, 256, 3),
norm_type='batchnorm',
checkpoint_step=-1,
relabel=False,
wandb_ckpt_path='checkpoints/',
batch_size=1,
keep_original=False,
shard_size=10240,
overwrite=False,
*args, **kwargs):
super(PretrainedCycleGANStaticAugmentationTFRecordPipeline, self).__init__(*args, **kwargs)
assert relabel is False, 'Relabeling not supported.'
        # Since checkpoint_step can be -1, figure out the actual load steps up front: this is done
        # separately so the CycleGAN models need not be instantiated unless they're needed for dumping the data.
f_load_step, g_load_step = self.get_load_epochs(wandb_run_id,
wandb_project,
wandb_entity,
wandb_ckpt_path,
checkpoint_step)
# Instantiate the CycleGAN pipeline but don't load the models yet
self.cyclegan = PretrainedDefaultCycleGANStaticAugmentationPipeline(
wandb_entity=wandb_entity,
wandb_project=wandb_project,
wandb_run_id=wandb_run_id,
run_in_eval_mode=run_in_eval_mode,
input_shape=input_shape,
norm_type=norm_type,
checkpoint_step=checkpoint_step,
relabel=relabel,
wandb_ckpt_path=wandb_ckpt_path,
batch_size=batch_size,
keep_original=keep_original,
load_immediately=False, # don't load the models
*args, **kwargs)
# Base path for location of the TFRecords
self.base_store_path = store_path
# Prefix for the folder where the TFRecords will be stored
self.filename_prefix = f'wandb[{wandb_entity}:{wandb_project}:{wandb_run_id}].' \
f'f_epoch[{f_load_step}].g_epoch[{g_load_step}].' \
f'mode[{run_in_eval_mode}].batch[{batch_size}]'
# Size of the TFRecord shard
self.shard_size = shard_size
# Batch size for the CycleGAN augmentation (important for speed, batchnorm's behavior)
self.batch_size = batch_size
# Keep the original dataset
self.keep_original = keep_original
# If the TFRecords were previously dumped, overwrite them
assert not overwrite, 'Overwriting is not yet implemented.'
self.overwrite = overwrite
def get_load_epochs(self, wandb_run_id, wandb_project, wandb_entity, wandb_ckpt_path, checkpoint_step):
# Create a function for doing step extraction for CycleGAN generator models
step_extractor = particular_checkpoint_step_extractor(checkpoint_step)
f_model_file = get_most_recent_model_file(wandb_run=load_wandb_run(wandb_run_id, wandb_project, wandb_entity),
wandb_ckpt_path=wandb_ckpt_path,
model_name='generator_f',
step_extractor=step_extractor)
g_model_file = get_most_recent_model_file(wandb_run=load_wandb_run(wandb_run_id, wandb_project, wandb_entity),
wandb_ckpt_path=wandb_ckpt_path,
model_name='generator_g',
step_extractor=step_extractor)
return step_extractor(f_model_file.name.split("/")[-1]), \
step_extractor(g_model_file.name.split("/")[-1])
def transform(self, dataset, alias, dataset_len, batch_size, *args, **kwargs):
assert 'dataset_identifier' in kwargs, 'Please pass in a unique identifier for the dataset.'
# Specific paths for the TFRecords
dataset_identifier = kwargs['dataset_identifier'].replace("/", ".")
gen_f_store_path = os.path.join(self.base_store_path, dataset_identifier,
self.filename_prefix + f'.model[gen_f]')
gen_g_store_path = os.path.join(self.base_store_path, dataset_identifier,
self.filename_prefix + f'.model[gen_g]')
print(os.path.exists(gen_f_store_path), os.path.exists(gen_g_store_path))
if not os.path.exists(gen_f_store_path) and not os.path.exists(gen_g_store_path):
# Write the TF Records to disk
os.makedirs(gen_f_store_path, exist_ok=True)
os.makedirs(gen_g_store_path, exist_ok=True)
lockfile = os.path.join(gen_f_store_path, 'writer.lock')
# Try to procure a lock to dump TF Records: if it already exists, wait until it is released to continue
someone_has_lock = False
while True:
if not os.path.exists(lockfile) and not someone_has_lock:
# Nobody has the lock: create the lock
open(lockfile, 'w')
# Write the TFRecords
self.dump_tf_records(dataset.batch(self.batch_size).prefetch(tf.data.experimental.AUTOTUNE),
gen_f_store_path, gen_g_store_path)
# Release the lock
os.remove(lockfile)
# Break out
break
elif not os.path.exists(lockfile) and someone_has_lock:
# The lock was released and the TFRecords are available to read
break
elif os.path.exists(lockfile):
# Another run is writing the TFRecords, so wait around until they're done
someone_has_lock = True
# Don't delete the CycleGAN model if you're doing this on the val set, because the test set will reuse it
        if 'val' not in dataset_identifier:
del self.cyclegan
# Load up the TF Records datasets
dataset_f, dataset_g = self.build_tf_datasets(gen_f_store_path, gen_g_store_path)
alias_f = alias + '(A-F)'
alias_g = alias + '(A-G)'
if self.keep_original:
return [dataset, dataset_f, dataset_g], [alias, alias_f, alias_g], [dataset_len] * 3, [batch_size] * 3
else:
return [dataset_f, dataset_g], [alias_f, alias_g], [dataset_len] * 2, [batch_size] * 2
def dump_tf_records(self, dataset, gen_f_store_path, gen_g_store_path):
# Load up the CycleGAN models
self.cyclegan.load_models()
# Take a dataset and write TFRecords after passing through both CycleGAN generators
print(f"Writing TFRecords with shard size {self.shard_size} at \n{gen_f_store_path}\nand\n{gen_g_store_path}.")
curr_shard = -1
shard_progress = 10 ** 10
assert self.shard_size < 10 ** 10
# Initialize the TFRecords file writers
f_out_file, g_out_file = None, None
# Assume dataset contains (image, label) pairs and iterate over it
for image, label in dataset:
# Check if the current shard needs to be incremented
if shard_progress >= self.shard_size:
# Update the current shard
curr_shard += 1
shard_progress = 0
# Get the new filenames
f_filename = os.path.join(gen_f_store_path, "{:02d}-{}.tfrec".format(curr_shard, self.shard_size))
g_filename = os.path.join(gen_g_store_path, "{:02d}-{}.tfrec".format(curr_shard, self.shard_size))
# Open up the new files
f_out_file = tf.io.TFRecordWriter(f_filename)
g_out_file = tf.io.TFRecordWriter(g_filename)
print(f"Opened files {f_filename} and {g_filename}.")
# Grab the batch size for the current batch
batch_size = image.numpy().shape[0]
# Run the image batch through the generators
f_image, _ = self.cyclegan.map_fn_f(image=image, label=None)
g_image, _ = self.cyclegan.map_fn_g(image=image, label=None)
# Encode the images to JPEG for storage
f_image = tf.convert_to_tensor(
[tf.image.encode_jpeg(im, optimize_size=True, chroma_downsampling=False) for im in f_image])
g_image = tf.convert_to_tensor(
[tf.image.encode_jpeg(im, optimize_size=True, chroma_downsampling=False) for im in g_image])
# Iterate over the batch of data
for i in range(batch_size):
try:
f_example = image_label_to_tfrecord(f_image.numpy()[i], label.numpy()[i])
g_example = image_label_to_tfrecord(g_image.numpy()[i], label.numpy()[i])
except IndexError:
continue
f_out_file.write(f_example.SerializeToString())
g_out_file.write(g_example.SerializeToString())
print(f"\tShard progress: {shard_progress}/{self.shard_size}")
shard_progress += batch_size
def build_tf_datasets(self, gen_f_store_path, gen_g_store_path):
# Load up the files for the CycleGAN-ed dataset
        gen_f_store_path = gen_f_store_path.replace("[", "\\[").replace("]", "\\]").replace("*", "\\*")
        gen_g_store_path = gen_g_store_path.replace("[", "\\[").replace("]", "\\]").replace("*", "\\*")
gen_f_dataset = tf.data.Dataset.list_files(os.path.join(gen_f_store_path, '*.tfrec'), shuffle=False)
gen_g_dataset = tf.data.Dataset.list_files(os.path.join(gen_g_store_path, '*.tfrec'), shuffle=False)
# Load up the TFRecords datasets
gen_f_dataset = augmentation.datasets.utils. \
get_dataset_from_list_files_dataset(gen_f_dataset, proc_batch=128,
tfrecord_example_reader=read_image_label_tfrecord,
sequential=True).unbatch()
gen_g_dataset = augmentation.datasets.utils. \
get_dataset_from_list_files_dataset(gen_g_dataset, proc_batch=128,
tfrecord_example_reader=read_image_label_tfrecord,
sequential=True).unbatch()
return gen_f_dataset, gen_g_dataset
class PretrainedCycleGANStaticAugmentationPipeline(StaticAugmentation):
def __init__(self,
wandb_entity,
wandb_project,
wandb_run_id,
keras_model_creation_fn,
keras_model_creation_fn_args,
step_extractor=None,
run_in_eval_mode=True,
relabel=False,
wandb_ckpt_path='checkpoints/',
batch_size=1,
keep_original=False,
load_immediately=True,
*args, **kwargs):
super(PretrainedCycleGANStaticAugmentationPipeline, self).__init__(*args, **kwargs)
# Store the parameters passed in
self.wandb_entity, self.wandb_project, self.wandb_run_id = wandb_entity, wandb_project, wandb_run_id
self.wandb_ckpt_path = wandb_ckpt_path
self.keras_model_creation_fn, self.keras_model_creation_fn_args = \
keras_model_creation_fn, keras_model_creation_fn_args
self.step_extractor = step_extractor
self.run_in_eval_mode = run_in_eval_mode
self.training = not run_in_eval_mode
self.relabel = relabel
self.batch_size = batch_size
self.keep_original = keep_original
self.models_loaded = False
if load_immediately:
self.load_models()
def load_models(self):
self.generator_f, (_, self.f_load_step) = load_pretrained_keras_model_from_wandb(
wandb_run_id=self.wandb_run_id,
wandb_project=self.wandb_project,
wandb_entity=self.wandb_entity,
keras_model_creation_fn=self.keras_model_creation_fn,
keras_model_creation_fn_args=self.keras_model_creation_fn_args,
model_name='generator_f',
step_extractor=self.step_extractor,
wandb_ckpt_path=self.wandb_ckpt_path)
self.generator_g, (_, self.g_load_step) = load_pretrained_keras_model_from_wandb(
wandb_run_id=self.wandb_run_id,
wandb_project=self.wandb_project,
wandb_entity=self.wandb_entity,
keras_model_creation_fn=self.keras_model_creation_fn,
keras_model_creation_fn_args=self.keras_model_creation_fn_args,
model_name='generator_g',
step_extractor=self.step_extractor,
wandb_ckpt_path=self.wandb_ckpt_path)
self.models_loaded = True
print("Done building CycleGAN models.")
def map_fn_f(self, image, label):
# Rescale the data
image = (tf.cast(image, tf.float32) / 127.5) - 1.
# Pass it through the generator
image = self.generator_f(image, training=self.training)
# Rescale output to [0, 255]
image = tf.cast(255 * (image * 0.5 + 0.5), tf.uint8)
if self.relabel:
return image, tf.zeros_like(label)
else:
return image, label
def map_fn_g(self, image, label):
# Rescale the data
image = (tf.cast(image, tf.float32) / 127.5) - 1.
# Pass it through the generator
image = self.generator_g(image, training=self.training)
# Rescale output to [0, 255]
image = tf.cast(255 * (image * 0.5 + 0.5), tf.uint8)
if self.relabel:
return image, tf.ones_like(label)
else:
return image, label
def transform(self, dataset, alias, dataset_len, batch_size, *args, **kwargs):
dataset_f = dataset.batch(self.batch_size).prefetch(tf.data.experimental.AUTOTUNE).map(self.map_fn_f,
num_parallel_calls=16).unbatch()
dataset_g = dataset.batch(self.batch_size).prefetch(tf.data.experimental.AUTOTUNE).map(self.map_fn_g,
num_parallel_calls=16).unbatch()
alias_f = alias + '(A-F)'
alias_g = alias + '(A-G)'
if self.keep_original:
return [dataset, dataset_f, dataset_g], [alias, alias_f, alias_g], [dataset_len] * 3, [batch_size] * 3
else:
return [dataset_f, dataset_g], [alias_f, alias_g], [dataset_len] * 2, [batch_size] * 2
class PretrainedDefaultCycleGANStaticAugmentationPipeline(PretrainedCycleGANStaticAugmentationPipeline):
def __init__(self,
wandb_entity,
wandb_project,
wandb_run_id,
run_in_eval_mode=False,
input_shape=(256, 256, 3),
norm_type='batchnorm',
checkpoint_step=-1,
relabel=False,
wandb_ckpt_path='checkpoints/',
batch_size=1,
keep_original=False,
load_immediately=True,
*args, **kwargs):
super(PretrainedDefaultCycleGANStaticAugmentationPipeline,
self).__init__(wandb_entity=wandb_entity,
wandb_project=wandb_project,
wandb_run_id=wandb_run_id,
keras_model_creation_fn='unet_generator',
keras_model_creation_fn_args={'output_channels': 3,
'input_shape': input_shape,
'norm_type': norm_type},
step_extractor=particular_checkpoint_step_extractor(checkpoint_step),
run_in_eval_mode=run_in_eval_mode,
relabel=relabel,
wandb_ckpt_path=wandb_ckpt_path,
batch_size=batch_size,
keep_original=keep_original,
load_immediately=load_immediately,
*args, **kwargs)
class PretrainedMNISTCycleGANStaticAugmentationPipeline(PretrainedCycleGANStaticAugmentationPipeline):
def __init__(self,
wandb_entity,
wandb_project,
wandb_run_id,
run_in_eval_mode=False,
norm_type='batchnorm',
checkpoint_step=-1,
relabel=False,
batch_size=128,
keep_original=False,
*args, **kwargs):
super(PretrainedMNISTCycleGANStaticAugmentationPipeline,
self).__init__(wandb_entity=wandb_entity,
wandb_project=wandb_project,
wandb_run_id=wandb_run_id,
keras_model_creation_fn='mnist_unet_generator',
keras_model_creation_fn_args={'norm_type': norm_type},
step_extractor=particular_checkpoint_step_extractor(checkpoint_step),
run_in_eval_mode=run_in_eval_mode,
relabel=relabel,
batch_size=batch_size,
keep_original=keep_original,
*args, **kwargs)
class BinaryMNISTWandbModelPseudolabelPartition(StaticAugmentation):
def __init__(self,
wandb_entity,
wandb_project,
wandb_run_id,
partition=True,
relabel=False,
batch_size=128,
*args, **kwargs):
super(BinaryMNISTWandbModelPseudolabelPartition, self).__init__(*args, **kwargs)
self.pseudolabeler = BinaryMNISTWandbModelPseudoLabelingPipeline(wandb_entity, wandb_project, wandb_run_id)
self.batch_size = batch_size
self.partition = partition
self.relabel = relabel
assert partition or relabel, "BinaryMNISTWandbModelPseudolabelPartition is a no-op if both partition and relabel are False."
def transform(self, tf_dataset, alias, dataset_len, batch_size, *args, **kwargs):
"""
        Splits a dataset into multiple datasets based on the pseudolabels generated by a helper model.
        Shuffling cannot be handled before pseudolabeling: once a .shuffle is applied, the dataset is
        (by default) reshuffled every time it is iterated, and there is no easy way to undo that shuffling.
"""
# Batch up the dataset
try:
tf_dataset = tf_dataset.unbatch().batch(self.batch_size)
except:
tf_dataset = tf_dataset.batch(self.batch_size)
pseudolabels = []
# Generate pseudolabels for the entire dataset: tf_dataset must serve up (image_batch, label_batch) pairs
for batch in tf_dataset:
# Pseudolabel the data batch
data, labels = batch
plab_batch = self.pseudolabeler((data.numpy(), labels.numpy()))
pseudolabels.append(plab_batch[1])
# Concatenate data from all the batches
pseudolabels = np.concatenate(pseudolabels, axis=0)
# Unbatch the dataset again so we can line up the labels properly
tf_dataset = tf_dataset.unbatch()
# Zip the pseudolabels with the tf_dataset
pseudolabels_tf_dataset = tf.data.Dataset.from_tensor_slices(pseudolabels)
pseudolabels_tf_dataset = tf.data.Dataset.zip((tf_dataset, pseudolabels_tf_dataset))
if self.relabel:
pseudolabels_tf_dataset = pseudolabels_tf_dataset.map(
lambda xy, z: ((xy[0], tf.reshape(z, xy[1].shape)), z))
if self.partition:
# Partition the dataset according to pseudolabels and remove pseudolabels
z_classes = list(set(pseudolabels))
aliases = [alias + f'P{z}' for z in z_classes]
k = len(z_classes)
batch_sizes = [batch_size // k] * (k - 1) + [batch_size // k + batch_size % k]
datasets = [pseudolabels_tf_dataset.filter(lambda _, z_: z_ == z) for z in z_classes]
# Compute the lengths of the split datasets
dataset_lens = [np.sum(pseudolabels == z) for z in z_classes]
else:
aliases = [alias]
batch_sizes = [batch_size]
datasets = [pseudolabels_tf_dataset]
dataset_lens = [dataset_len]
# Remove auxiliary z label
datasets = [dataset.map(lambda x, z: x) for dataset in datasets]
return datasets, aliases, dataset_lens, batch_sizes
class ConcatenateStaticAugmentation(StaticAugmentation):
""" Takes a list of datasets and concatenates it into one dataset.
Useful for training pipelines that expect only one dataset (e.g. multihead methods (MTL, DAT)) instead of multiple datasets (e.g. GDRO),
yet wants to apply other 'partitioning' static augmentations
"""
def __call__(self, datasets, aliases, dataset_lens, batch_sizes, original_idx, *args, **kwargs):
"""
        Concatenates all datasets generated from the same original dataset (as given by original_idx)
        and merges their aliases, lengths and batch sizes accordingly.
"""
concat_datasets, concat_aliases, concat_batch_sizes, concat_dataset_lens = {}, {}, {}, {}
for dataset, alias, dataset_len, batch_size, idx in zip(datasets, aliases, dataset_lens, batch_sizes,
original_idx):
if idx in concat_datasets:
concat_datasets[idx] = concat_datasets[idx].concatenate(dataset)
concat_aliases[idx] += '+' + alias
concat_batch_sizes[idx] += batch_size
concat_dataset_lens[idx] += dataset_len
else:
concat_datasets[idx] = dataset
concat_aliases[idx] = alias
concat_batch_sizes[idx] = batch_size
concat_dataset_lens[idx] = dataset_len
updated_datasets = list(concat_datasets.values())
updated_dataset_lens = list(concat_dataset_lens.values())
updated_aliases = list(concat_aliases.values())
updated_batch_sizes = list(concat_batch_sizes.values())
updated_idx = list(concat_datasets.keys())
return updated_datasets, updated_aliases, updated_dataset_lens, updated_batch_sizes, updated_idx
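# Illustrative sketch (not part of the original file; the dataset contents and aliases are hypothetical):
# two partitions that came from the same original dataset (original_idx == 0 for both) are merged back into one.
def _example_concatenate_static_augmentation():
    d1 = tf.data.Dataset.from_tensor_slices(tf.zeros((4, 2)))
    d2 = tf.data.Dataset.from_tensor_slices(tf.ones((6, 2)))
    datasets, aliases, lens, batch_sizes, idx = \
        ConcatenateStaticAugmentation()([d1, d2], ['mnist(P0)', 'mnist(P1)'], [4, 6], [16, 16], [0, 0])
    assert len(datasets) == 1 and aliases == ['mnist(P0)+mnist(P1)']
    assert lens == [10] and batch_sizes == [32] and idx == [0]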
|
model-patching-master
|
augmentation/augment/static.py
|
import tensorflow.keras as keras
from classification_models.tfkeras import Classifiers
def simple_model(input_shape, n_classes):
inputs = keras.layers.Input(shape=input_shape, name='digits')
x = keras.layers.Flatten()(inputs)
x = keras.layers.Dense(64, activation='relu', name='dense_1')(x)
x = keras.layers.Dense(64, activation='relu', name='dense_2')(x)
outputs = keras.layers.Dense(n_classes, activation='softmax', name='predictions')(x)
model = keras.Model(inputs=inputs, outputs=outputs)
return model
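# Illustrative sketch (not part of the original file): the MLP above flattens its input, so any image
# shape works; here we just check the output shape on MNIST-sized dummy data.
def _example_simple_model():
    import numpy as np  # numpy is only needed for this example
    model = simple_model(input_shape=(28, 28, 1), n_classes=10)
    preds = model.predict(np.zeros((2, 28, 28, 1), dtype='float32'))
    assert preds.shape == (2, 10)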
def simple_cnn_model(input_shape, n_classes):
model = keras.Sequential()
model.add(keras.layers.Conv2D(32, (3, 3), padding='same', input_shape=input_shape))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Conv2D(32, (3, 3)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.Conv2D(64, (3, 3), padding='same'))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Conv2D(64, (3, 3)))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(keras.layers.Dropout(0.25))
model.add(keras.layers.Flatten())
model.add(keras.layers.Dense(64))
model.add(keras.layers.Activation('relu'))
model.add(keras.layers.Dropout(0.5))
model.add(keras.layers.Dense(n_classes))
model.add(keras.layers.Activation('softmax'))
return model
def create_keras_classification_model(source, architecture, input_shape, n_classes, pretrained=False):
assert input_shape[-1] in [1, 3], 'The input shape is incompatible with the model.'
if source.startswith('cm'):
# Create the model using the classification_models repository
Architecture, preprocessing = Classifiers.get(architecture)
weights = 'imagenet' if pretrained else None
model = Architecture(input_shape=input_shape, classes=n_classes, weights=weights, include_top=not pretrained)
if pretrained:
# Perform model surgery and add an output softmax layer
new_output = keras.layers.GlobalAveragePooling2D()(model.layers[-1].output)
new_output = keras.layers.Dense(n_classes)(new_output)
if source == 'cm_cxr':
# Models that do multi-label classification use sigmoid outputs
new_output = keras.activations.sigmoid(new_output)
else:
# Standard softmax output is best for most cases
new_output = keras.activations.softmax(new_output)
model = keras.Model(inputs=model.inputs, outputs=new_output)
elif source == 'simple_cnn':
model = simple_cnn_model(input_shape, n_classes)
else:
raise NotImplementedError
# Print the model summary
print(model.summary())
return model
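# Illustrative sketch (not part of the original file): the 'simple_cnn' source is the cheapest way to
# exercise the factory above, since the 'cm*' sources pull architectures from classification_models.
def _example_create_keras_classification_model():
    model = create_keras_classification_model(source='simple_cnn', architecture=None,
                                               input_shape=(32, 32, 3), n_classes=10)
    assert model.output_shape == (None, 10)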
def freeze_all_layers_except_last_linear_layer(model):
"""
Freezes all the layers in a model except the last Dense layer.
According to https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization,
setting trainable = False for BatchNorm:
- freezes the weights
- runs the layer in inference mode
"""
# Set all layers to be not trainable
for layer in model.layers:
layer.trainable = False
# Find the last linear layer
for layer in reversed(model.layers):
if isinstance(layer, keras.layers.Dense):
layer.trainable = True
break
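# Illustrative sketch (not part of the original file): after freezing, only the final Dense layer of
# the simple MLP defined above ('predictions') should remain trainable.
def _example_freeze_all_but_last_dense():
    model = simple_model(input_shape=(28, 28, 1), n_classes=10)
    freeze_all_layers_except_last_linear_layer(model)
    assert [layer.name for layer in model.layers if layer.trainable] == ['predictions']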
def reinitialize_last_linear_layer(model):
# Loop over the layers in reverse
for layer in reversed(model.layers):
if isinstance(layer, keras.layers.Dense):
# Compute the shapes for this layer
kernel_shape = [dim.value for dim in layer.kernel.shape.dims]
bias_shape = [dim.value for dim in layer.bias.shape.dims]
# Initialize using Glorot Uniform for the weights, and zeros for the biases
init_kernel_weights = keras.initializers.glorot_uniform()(kernel_shape)
init_bias_weights = keras.initializers.zeros()(bias_shape)
layer.set_weights([init_kernel_weights, init_bias_weights])
break
|
model-patching-master
|
augmentation/models/models.py
|
# The code in this file is adapted from
# https://keras.io/examples/cifar10_resnet/
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import AveragePooling2D, Input, Flatten
from tensorflow.keras.layers import Dense, Conv2D, BatchNormalization, Activation
from tensorflow.keras.models import Model
from tensorflow.keras.regularizers import l2
def resnet_layer(inputs,
num_filters=16,
kernel_size=3,
strides=1,
activation='relu',
batch_normalization=True,
conv_first=True):
"""2D Convolution-Batch Normalization-Activation stack builder
# Arguments
inputs (tensor): input tensor from input image or previous layer
num_filters (int): Conv2D number of filters
kernel_size (int): Conv2D square kernel dimensions
strides (int): Conv2D square stride dimensions
activation (string): activation name
batch_normalization (bool): whether to include batch normalization
conv_first (bool): conv-bn-activation (True) or
bn-activation-conv (False)
# Returns
x (tensor): tensor as input to the next layer
"""
conv = Conv2D(num_filters,
kernel_size=kernel_size,
strides=strides,
padding='same',
kernel_initializer='he_normal',
kernel_regularizer=l2(1e-4))
x = inputs
if conv_first:
x = conv(x)
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
else:
if batch_normalization:
x = BatchNormalization()(x)
if activation is not None:
x = Activation(activation)(x)
x = conv(x)
return x
def resnet_v1(input_shape, depth, num_classes=10):
"""ResNet Version 1 Model builder [a]
Stacks of 2 x (3 x 3) Conv2D-BN-ReLU
Last ReLU is after the shortcut connection.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filters is
    doubled. Within each stage, the layers have the same number of filters and the
    same feature map sizes.
    Feature map sizes:
stage 0: 32x32, 16
stage 1: 16x16, 32
stage 2: 8x8, 64
    The number of parameters is approximately the same as in Table 6 of [a]:
ResNet20 0.27M
ResNet32 0.46M
ResNet44 0.66M
ResNet56 0.85M
ResNet110 1.7M
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 6 != 0:
raise ValueError('depth should be 6n+2 (eg 20, 32, 44 in [a])')
# Start model definition.
num_filters = 16
num_res_blocks = int((depth - 2) / 6)
inputs = Input(shape=input_shape)
x = resnet_layer(inputs=inputs)
# Instantiate the stack of residual units
for stack in range(3):
for res_block in range(num_res_blocks):
strides = 1
if stack > 0 and res_block == 0: # first layer but not first stack
strides = 2 # down-sample
y = resnet_layer(inputs=x,
num_filters=num_filters,
strides=strides)
y = resnet_layer(inputs=y,
num_filters=num_filters,
activation=None)
if stack > 0 and res_block == 0: # first layer but not first stack
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
x = Activation('relu')(x)
num_filters *= 2
# Add classifier on top.
# v1 does not use BN after last shortcut connection-ReLU
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
def resnet_v2(input_shape, depth, num_classes=10):
"""ResNet Version 2 Model builder [b]
Stacks of (1 x 1)-(3 x 3)-(1 x 1) BN-ReLU-Conv2D or also known as
bottleneck layer
First shortcut connection per layer is 1 x 1 Conv2D.
Second and onwards shortcut connection is identity.
At the beginning of each stage, the feature map size is halved (downsampled)
by a convolutional layer with strides=2, while the number of filter maps is
    doubled. Within each stage, the layers have the same number of filters and the
    same filter map sizes.
    Feature map sizes:
conv1 : 32x32, 16
stage 0: 32x32, 64
stage 1: 16x16, 128
stage 2: 8x8, 256
# Arguments
input_shape (tensor): shape of input image tensor
depth (int): number of core convolutional layers
num_classes (int): number of classes (CIFAR10 has 10)
# Returns
model (Model): Keras model instance
"""
if (depth - 2) % 9 != 0:
raise ValueError('depth should be 9n+2 (eg 56 or 110 in [b])')
# Start model definition.
num_filters_in = 16
num_res_blocks = int((depth - 2) / 9)
inputs = Input(shape=input_shape)
# v2 performs Conv2D with BN-ReLU on input before splitting into 2 paths
x = resnet_layer(inputs=inputs,
num_filters=num_filters_in,
conv_first=True)
# Instantiate the stack of residual units
for stage in range(3):
for res_block in range(num_res_blocks):
activation = 'relu'
batch_normalization = True
strides = 1
if stage == 0:
num_filters_out = num_filters_in * 4
if res_block == 0: # first layer and first stage
activation = None
batch_normalization = False
else:
num_filters_out = num_filters_in * 2
if res_block == 0: # first layer but not first stage
strides = 2 # downsample
# bottleneck residual unit
y = resnet_layer(inputs=x,
num_filters=num_filters_in,
kernel_size=1,
strides=strides,
activation=activation,
batch_normalization=batch_normalization,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_in,
conv_first=False)
y = resnet_layer(inputs=y,
num_filters=num_filters_out,
kernel_size=1,
conv_first=False)
if res_block == 0:
# linear projection residual shortcut connection to match
# changed dims
x = resnet_layer(inputs=x,
num_filters=num_filters_out,
kernel_size=1,
strides=strides,
activation=None,
batch_normalization=False)
x = keras.layers.add([x, y])
num_filters_in = num_filters_out
# Add classifier on top.
# v2 has BN-ReLU before Pooling
x = BatchNormalization()(x)
x = Activation('relu')(x)
x = AveragePooling2D(pool_size=8)(x)
y = Flatten()(x)
outputs = Dense(num_classes,
activation='softmax',
kernel_initializer='he_normal')(y)
# Instantiate model.
model = Model(inputs=inputs, outputs=outputs)
return model
def get_resnet_model(version, input_shape, depth, num_classes, **kwargs):
if version == 2:
model = resnet_v2(input_shape=input_shape, depth=depth, num_classes=num_classes, **kwargs)
elif version == 1:
model = resnet_v1(input_shape=input_shape, depth=depth, num_classes=num_classes, **kwargs)
else:
raise NotImplementedError('The only versions of ResNet available are ResNet-v1 and ResNet-v2.')
return model
if __name__ == '__main__':
# Test that the model gets created
model = get_resnet_model(version=1, input_shape=(32, 32, 3), depth=20, num_classes=15)
# Print a summary of the model
model.summary()
# Pass in some input to the model
x = tf.random.normal((8, 32, 32, 3))
y = model(x)
# Make sure the output looks good
assert(y.shape == (8, 15))
|
model-patching-master
|
augmentation/models/resnet.py
|
import tensorflow as tf
import tensorflow.keras as keras
import wandb
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from tensorflow.python.keras import backend as K
from augmentation.methods.robust.utils import irm_penalty_explicit
class ConfusionMatrix(keras.metrics.Metric):
def __init__(self, n_classes, name='confusion_matrix', **kwargs):
super(ConfusionMatrix, self).__init__(name=name, **kwargs)
self.confusion_matrix = self.add_weight(name='cm', initializer='zeros',
shape=(n_classes, n_classes), dtype=tf.int32)
self.n_classes = n_classes
def reset_states(self):
K.batch_set_value([(self.variables[0], tf.zeros((self.n_classes, self.n_classes)))])
def update_state(self, y_true, y_pred, sample_weight=None):
self.confusion_matrix.assign_add(tf.math.confusion_matrix(labels=y_true,
predictions=y_pred,
weights=sample_weight,
num_classes=self.n_classes))
def result(self):
return self.confusion_matrix
def log_wandb(self, step, prefix='metrics/'):
cm = self.result().numpy()
wandb.run.summary[f'{prefix}confusion_matrix'] = cm
wandb.log({f'{prefix}confusion_matrix': [wandb.Image(sns.heatmap(cm))]}, step=step, commit=False)
plt.clf()
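# Illustrative sketch (not part of the original file): rows index the true label and columns the
# prediction, so the single mistake below (a true 1 predicted as 0) lands in entry [1, 0].
def _example_confusion_matrix():
    cm = ConfusionMatrix(n_classes=2)
    cm.update_state(tf.constant([0, 1, 1]), tf.constant([0, 0, 1]))
    assert cm.result().numpy().tolist() == [[1, 0], [1, 1]]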
class MultiLabelAUC(keras.metrics.Metric):
def __init__(self, n_outputs, output_labels, num_thresholds=200, curve='ROC', summation_method='interpolation'):
super(MultiLabelAUC, self).__init__(name='multi_label_auc')
self.AUCs = [keras.metrics.AUC(num_thresholds, curve, summation_method) for _ in range(n_outputs)]
self.n_outputs = n_outputs
self.output_labels = output_labels
def reset_states(self):
[auc.reset_states() for auc in self.AUCs]
def update_state(self, y_true, y_pred):
assert y_true.shape[-1] == y_pred.shape[-1] == self.n_outputs, 'Number of outputs must match shapes.'
assert len(y_true.shape) == 2, 'Shape of y_true and y_pred must be 2.'
for i, auc in enumerate(self.AUCs):
auc.update_state(y_true[:, i], y_pred[:, i])
def result(self):
return tf.convert_to_tensor([auc.result() for auc in self.AUCs])
def log_wandb(self, step, prefix='metrics/'):
aucs = self.result().numpy()
wandb.log({f'{prefix}multi_label_auc_{self.output_labels[i].lower()}': aucs[i] for i in range(self.n_outputs)},
step=step, commit=False)
class AUC(keras.metrics.Metric):
def __init__(self, num_thresholds=200, curve='ROC', summation_method='interpolation'):
super(AUC, self).__init__(name='auc')
self.auc = keras.metrics.AUC(num_thresholds=num_thresholds, curve=curve, summation_method=summation_method)
def update_state(self, y_true, y_pred):
self.auc.update_state(y_true, y_pred)
def reset_states(self):
self.auc.reset_states()
def result(self):
return self.auc.result()
def log_wandb(self, step, prefix='metrics/'):
auc = self.result().numpy()
wandb.run.summary[f'{prefix}{self.name}'] = auc
wandb.log({f'{prefix}{self.name}': auc}, step=step, commit=False)
class MultiLabelRecall(keras.metrics.Metric):
def __init__(self, n_outputs, output_labels, thresholds=np.linspace(0, 1, 11, dtype=np.float32).tolist()):
super(MultiLabelRecall, self).__init__(name='multi_label_recall')
self.recalls = [keras.metrics.Recall(thresholds) for _ in range(n_outputs)]
self.thresholds = thresholds
self.n_outputs = n_outputs
self.output_labels = output_labels
def reset_states(self):
[recall.reset_states() for recall in self.recalls]
def update_state(self, y_true, y_pred):
assert y_true.shape[-1] == y_pred.shape[-1] == self.n_outputs, 'Number of outputs must match shapes.'
assert len(y_true.shape) == 2, 'Shape of y_true and y_pred must be 2.'
for i, recall in enumerate(self.recalls):
recall.update_state(y_true[:, i], y_pred[:, i])
def result(self):
return tf.convert_to_tensor([recall.result() for recall in self.recalls])
def log_wandb(self, step, prefix='metrics/'):
recalls = self.result().numpy()
for i in range(self.n_outputs):
for j, rec in enumerate(recalls[i]):
wandb.log({f'{prefix}multi_label_recall_{self.output_labels[i].lower()}@{self.thresholds[j]}': rec},
step=step, commit=False)
class Recall(keras.metrics.Metric):
def __init__(self, thresholds=np.linspace(0, 1, 11, dtype=np.float32).tolist()):
super(Recall, self).__init__(name='recall')
self.recall = keras.metrics.Recall(thresholds=thresholds)
self.thresholds = thresholds
def update_state(self, y_true, y_pred):
self.recall.update_state(y_true, y_pred)
def reset_states(self):
self.recall.reset_states()
def result(self):
return self.recall.result()
def log_wandb(self, step, prefix='metrics/'):
recall = self.result().numpy()
for i, rec in enumerate(recall):
wandb.log({f'{prefix}recall@{self.thresholds[i]:.2f}': rec}, step=step, commit=False)
class IRMPenalty(keras.metrics.Metric):
def __init__(self):
super(IRMPenalty, self).__init__(name='irm_penalty')
self.loss = self.add_weight(name='irm', initializer='zeros', shape=(1,), dtype=tf.float32)
self.count = self.add_weight(name='count', initializer='zeros', shape=1, dtype=tf.int32)
def reset_states(self):
K.set_value(self.loss, tf.zeros(1))
K.set_value(self.count, [0])
def update_state(self, y_true, y_pred):
# Compute the IRM penalty
y_pred_logits = tf.math.log(y_pred + 1e-6)
self.loss.assign_add(tf.convert_to_tensor([irm_penalty_explicit(y_true, y_pred_logits, penalty_weight=1.0)]))
# Update the total count
self.count.assign_add([y_true.shape[0]])
def result(self):
if self.count > 0:
return self.loss / tf.cast(self.count, tf.float32)
else:
return self.loss
def log_wandb(self, step, prefix='metrics/'):
loss = self.result().numpy()
wandb.log({f'{prefix}irm_penalty': loss}, step=step, commit=False)
class Accuracy(keras.metrics.Accuracy):
def __init__(self):
super(Accuracy, self).__init__(name='accuracy', dtype=None)
def log_wandb(self, step, prefix='metrics/'):
acc = self.result().numpy()
wandb.run.summary[f'{prefix}{self.name}'] = acc
wandb.log({f'{prefix}{self.name}': acc}, step=step, commit=False)
class BinaryCrossentropy(keras.metrics.BinaryCrossentropy):
def __init__(self, from_logits=False, label_smoothing=0.):
super(BinaryCrossentropy, self).__init__(from_logits=from_logits, label_smoothing=label_smoothing)
def log_wandb(self, step, prefix='metrics/'):
bce = self.result().numpy()
wandb.run.summary[f'{prefix}{self.name}'] = bce
wandb.log({f'{prefix}{self.name}': bce}, step=step, commit=False)
class SparseCategoricalCrossentropy(keras.metrics.SparseCategoricalCrossentropy):
def __init__(self, from_logits=False):
super(SparseCategoricalCrossentropy, self).__init__(from_logits=from_logits)
def log_wandb(self, step, prefix='metrics/'):
cce = self.result().numpy()
wandb.run.summary[f'{prefix}{self.name}'] = cce
wandb.log({f'{prefix}{self.name}': cce}, step=step, commit=False)
class MultiLabelBinaryAccuracy(keras.metrics.Metric):
def __init__(self, n_outputs, output_labels, threshold=0.5, name='multi_label_binary_accuracy', **kwargs):
super(MultiLabelBinaryAccuracy, self).__init__(name=name, **kwargs)
self.accuracies = self.add_weight(name='mob_acc', initializer='zeros', shape=n_outputs, dtype=tf.int32)
self.count = self.add_weight(name='count', initializer='zeros', shape=1, dtype=tf.int32)
self.n_outputs = n_outputs
self.output_labels = output_labels
self.threshold = threshold
def reset_states(self):
K.batch_set_value([(self.variables[0], tf.zeros(self.n_outputs))])
K.set_value(self.count, [0])
def update_state(self, y_true, y_pred):
assert y_true.shape[-1] == y_pred.shape[-1] == self.n_outputs, 'Number of outputs must match shapes.'
        assert len(y_true.shape) == 2, 'y_true and y_pred must be 2-dimensional.'
y_true = tf.cast(y_true, tf.bool)
y_pred = tf.cast(y_pred > self.threshold, tf.bool)
# Update the total count
self.count.assign_add([y_true.shape[0]])
# Add in the number of correctly predicted targets for each output
correct_or_not = tf.math.reduce_sum(tf.cast(y_true == y_pred, tf.int32), 0)
self.accuracies.assign_add(correct_or_not)
def result(self):
if self.count > 0:
return self.accuracies / self.count
else:
return self.accuracies
def log_wandb(self, step, prefix='metrics/'):
accuracies = self.result().numpy()
wandb.log({f'{prefix}binary_accuracy_{self.output_labels[i].lower()}': accuracies[i]
for i in range(self.n_outputs)}, step=step, commit=False)
def create_metrics(metric_names, n_classes, output_labels):
metrics = []
for metric_name in metric_names:
if metric_name == 'accuracy':
metrics.append(keras.metrics.Accuracy())
elif metric_name == 'recall':
metrics.append(Recall())
elif metric_name == 'auc':
metrics.append(AUC())
elif metric_name == 'confusion_matrix':
metrics.append(ConfusionMatrix(n_classes=n_classes))
elif metric_name == 'multi_label_binary_accuracy':
metrics.append(MultiLabelBinaryAccuracy(n_outputs=n_classes, output_labels=output_labels))
elif metric_name == 'binary_crossentropy':
metrics.append(BinaryCrossentropy())
elif metric_name == 'sparse_categorical_crossentropy':
metrics.append(SparseCategoricalCrossentropy())
elif metric_name == 'multi_label_auc':
metrics.append(MultiLabelAUC(n_outputs=n_classes, output_labels=output_labels))
elif metric_name == 'multi_label_recall':
metrics.append(MultiLabelRecall(n_outputs=n_classes, output_labels=output_labels))
elif metric_name == 'irm_penalty':
metrics.append(IRMPenalty())
else:
raise NotImplementedError
return metrics
def reset_metrics(list_of_metrics):
"""
Reset each metric in a list of Keras metrics
"""
    for metric in list_of_metrics:
        metric.reset_states()
def log_metric_to_wandb(metric, step, prefix='metrics/'):
"""
Manually log a Keras metric to wandb.
"""
wandb.log({f'{prefix}{metric.name}': metric.result().numpy()}, step=step, commit=False)
def log_metrics_to_wandb(list_of_metrics, step, prefix='metrics/'):
"""
Log a list of Keras Metrics to wandb.
"""
for metric in list_of_metrics:
try:
metric.log_wandb(step, prefix)
except AttributeError:
log_metric_to_wandb(metric, step, prefix)
def update_metrics(list_of_metrics, targets, predictions):
for metric in list_of_metrics:
if metric.name in ['accuracy', 'confusion_matrix']:
# Compatible with Softmax at the output
metric.update_state(targets, tf.argmax(predictions, axis=-1))
elif metric.name in ['auc', 'recall']:
# Compatible with Softmax at the output
metric.update_state(targets, predictions[..., 1])
elif metric.name in [
'multi_label_binary_accuracy',
'binary_crossentropy',
'sparse_categorical_crossentropy',
'multi_label_auc',
'multi_label_recall',
'irm_penalty',
]:
# Compatible with multiple Sigmoids at the output
metric.update_state(targets, predictions)
else:
raise NotImplementedError
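# Usage sketch (hypothetical example, assuming a TF 2.x Keras environment and the module-level
# imports above): shows how create_metrics, reset_metrics and update_metrics compose in an
# evaluation loop, using dummy tensors in place of real model predictions.
def example_metric_loop():
    metrics = create_metrics(['accuracy', 'recall'], n_classes=2, output_labels=['y'])
    reset_metrics(metrics)
    targets = tf.convert_to_tensor([0, 1, 1, 0])
    predictions = tf.convert_to_tensor([[0.7, 0.3], [0.2, 0.8], [0.4, 0.6], [0.9, 0.1]])
    update_metrics(metrics, targets, predictions)
    for metric in metrics:
        print(metric.name, metric.result().numpy())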
def test_auc():
auc = AUC()
auc.reset_states()
y_true = tf.convert_to_tensor([0, 1, 1, 0])
y_pred = tf.convert_to_tensor([[0.3, 0.7], [0.2, 0.8], [0.3, 0.7], [0.3, 0.7]])
print(tf.argmax(y_pred, axis=-1))
auc.update_state(y_true, y_pred[:, 1])
print(auc.result())
def test_recall():
recall = Recall()
recall.reset_states()
y_true = tf.convert_to_tensor([0, 1, 1, 0])
y_pred = tf.convert_to_tensor([[0.3, 0.7], [0.2, 0.8], [0.3, 0.7], [0.3, 0.7]])
print(tf.argmax(y_pred, axis=-1))
recall.update_state(y_true, y_pred[..., 1])
print(recall.result())
def test_mlba():
mlba = MultiLabelBinaryAccuracy(3, range(3))
mlba.reset_states()
y_true = tf.convert_to_tensor([[0, 1, 0.], [1, 0, 0]])
y_pred = tf.convert_to_tensor([[1, 0.49, .99], [1, 0, 0]])
mlba.update_state(y_true, y_pred)
print(mlba.result())
print(mlba.count)
assert np.allclose(mlba.result().numpy(), [0.5, 0.5, 0.5])
y_true = tf.convert_to_tensor([[0, 1, 0.], [1, 0, 0]])
y_pred = tf.convert_to_tensor([[0, 1, 0.], [0, 0, 0]])
mlba.update_state(y_true, y_pred)
print(mlba.result())
print(mlba.count)
assert np.allclose(mlba.result().numpy(), [0.5, 0.75, 0.75])
mlba.reset_states()
y_true = tf.convert_to_tensor([[0, 1, 0.], [1, 0, 0]])
y_pred = tf.convert_to_tensor([[1, 0, 1.], [1, 0, 0]])
mlba.update_state(y_true, y_pred)
print(mlba.result())
print(mlba.count)
assert np.allclose(mlba.result().numpy(), [0.5, 0.5, 0.5])
y_true = tf.convert_to_tensor([[0, 1, 0.], [1, 0, 0]])
y_pred = tf.convert_to_tensor([[0, 1, 0.], [0, 0, 0]])
mlba.update_state(y_true, y_pred)
print(mlba.result())
print(mlba.count)
assert np.allclose(mlba.result().numpy(), [0.5, 0.75, 0.75])
def test_bce():
y_true = tf.convert_to_tensor([[0, 1, 0.], [1, 0, 0]])
y_pred = tf.convert_to_tensor([[0, 1, 0.], [0, 0, 0]])
bce = BinaryCrossentropy()
bce.update_state(y_true, y_pred)
print(bce.result())
def test_irm():
y_true = tf.convert_to_tensor([1, 2, 0, 1])
y_pred = tf.convert_to_tensor([[0, 1, 0.], [0.5, 0.4, 0.1], [0.3, 0.3, 0.4], [0.9, 0, 0.1]])
irm = IRMPenalty()
irm.update_state(y_true, y_pred)
print(irm.name in ['irm_penalty'])
print(irm.result()) # 215.75023
def test_mauc():
mauc = MultiLabelAUC(3, range(3), num_thresholds=3)
y_true = tf.convert_to_tensor([[0, 1, 0.], [0, 0, 0], [1, 0, 0], [1, 0, 1]])
y_pred = tf.convert_to_tensor([[0, 1, 0.], [0.5, 0, 0], [0.3, 0, 0], [0.9, 0, 0.6]])
mauc.update_state(y_true, y_pred)
print(mauc.result())
y_true = tf.convert_to_tensor([[0, 1, 0.]])
y_pred = tf.convert_to_tensor([[0.3, 0.1, 0.1]])
mauc.update_state(y_true, y_pred)
print(mauc.result())
mauc.reset_states()
if __name__ == '__main__':
import numpy as np
test_mlba()
test_mauc()
test_irm()
test_auc()
test_recall()
|
model-patching-master
|
augmentation/utilities/metrics.py
|
import os
import yaml
from types import SimpleNamespace
def load_yaml_config(path: str, prefix_keys=False) -> SimpleNamespace:
"""
Load a yaml configuration file from the specified path, apply preprocessing operations to it and return
the configuration in a SimpleNamespace.
    :param path: Path to the configuration file.
    :param prefix_keys: If True, prepend the config's prefix to all top-level keys.
:return: SimpleNamespace containing the config.
"""
with open(path) as f:
config = SimpleNamespace(**yaml.load(f, Loader=yaml.FullLoader))
config = preprocess_yaml_config(config, prefix_keys=prefix_keys)
return config
def preprocess_yaml_config(config: SimpleNamespace, prefix_keys=False) -> SimpleNamespace:
"""
Preprocess a simple namespace. Currently,
- prepend the prefix key to all the configuration parameters
- change 'None' strings to None values
:param config: The SimpleNamespace containing the configuration.
:return: Preprocessed configuration as a SimpleNamespace
"""
# Make sure there's a prefix in the configuration
assert 'prefix' in config.__dict__, 'Please include a prefix in the yaml.'
if prefix_keys:
# Grab the prefix from the yaml file
prefix = config.prefix
# Prepend the prefix to all the keys, and get rid of the prefix
        config = SimpleNamespace(**{f'{prefix}_{k}': v for k, v in config.__dict__.items() if k != 'prefix'})
# Change 'None' to None in the top level: recommended behavior is to use null instead of None in the yaml
for key, value in config.__dict__.items():
config.__dict__[key] = value if value != 'None' else None
return config
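# Usage sketch (hypothetical example): exercises preprocess_yaml_config on an in-memory
# SimpleNamespace instead of a file, showing the prefix handling and the 'None' -> None
# substitution. The keys and values below are made up for illustration.
def example_preprocess_yaml_config():
    raw = SimpleNamespace(prefix='train', batch_size=32, augmentation='None')
    processed = preprocess_yaml_config(raw)
    print(processed.augmentation)  # None, because the string 'None' is replaced
    raw = SimpleNamespace(prefix='train', batch_size=32, augmentation='None')
    prefixed = preprocess_yaml_config(raw, prefix_keys=True)
    print(vars(prefixed))  # keys now include 'train_batch_size' and 'train_augmentation'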
def subtract_simple_namespaces(sns_main: SimpleNamespace, sns_diff: SimpleNamespace) -> SimpleNamespace:
"""
Subtract a SimpleNamespace from another. Subtraction corresponds to removing keys in sns_main that are present
in sns_diff.
:param sns_main: The SimpleNamespace to subtract *from*.
:param sns_diff: The SimpleNamespace to subtract off.
:return: A SimpleNamespace containing the difference.
"""
# Find the keys that are in sns_main but not in sns_diff
diff_keys = sns_main.__dict__.keys() - sns_diff.__dict__.keys()
# Return a SimpleNamespace that contains the diff_keys
return SimpleNamespace(**{k: sns_main.__dict__[k] for k in diff_keys})
def update_simple_namespace(sns_main: SimpleNamespace, sns_added: SimpleNamespace) -> SimpleNamespace:
"""
Update a SimpleNamespace with another and return the updated SimpleNamespace. For keys that overlap,
sns_added's values will replace the original values in sns_main.
:param sns_main: The SimpleNamespace that is updated.
:param sns_added: The SimpleNamespace that is added in.
:return: An updated SimpleNamespace.
"""
# Create a new SimpleNamespace which contains the data in sns_main
updated_sns = SimpleNamespace(**sns_main.__dict__)
# Update this SimpleNamespace with data from sns_added
updated_sns.__dict__.update(sns_added.__dict__)
return updated_sns
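# Usage sketch (hypothetical example): shows the set-difference and update semantics of the
# two helpers above on toy namespaces.
def example_namespace_arithmetic():
    template = SimpleNamespace(prefix='train', lr=0.1, batch_size=32)
    overrides = SimpleNamespace(prefix='train', lr=0.01)
    # Keys present in `template` but absent from `overrides`
    print(subtract_simple_namespaces(template, overrides))  # namespace(batch_size=32)
    # Where the two overlap, the values from `overrides` win
    print(update_simple_namespace(template, overrides))  # lr becomes 0.01, batch_size kept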
def recursively_create_config_simple_namespace(config_path, base_template_config_path, match_on='prefix'):
"""
A helper function to create a config SimpleNamespace that can be passed to train methods. Requires a
config and its template, and replaces the template with the settings you've specified.
"""
def _update_config(_config, _template_config):
# Make sure that config and parent are matched
if match_on is not None:
assert _config.__dict__[match_on] == _template_config.__dict__[match_on], \
f'Configs are mismatched {_config.__dict__[match_on]} =/= {_template_config.__dict__[match_on]}.'
# Make sure we didn't include any configuration options that aren't in the parent
extra_keys = subtract_simple_namespaces(_config, _template_config).__dict__
assert len(extra_keys) == 0, f'Extra configuration option specified in {config_path}: {extra_keys}.'
# Update the template with the configuration options from the config
_config = update_simple_namespace(_template_config, _config)
return _config
def _recurse(_config, _base_template_config):
assert 'parent_template' in _config.__dict__, 'The parent_template argument must be implemented.'
# Find the location of the parent configuration file
if _config.parent_template is None:
# There's no parent template: this config points to the base_template_config
parent_config_path = base_template_config_path
else:
# There's a parent template: we assume that it must be in the same folder as the config
parent_config_path = os.path.join(os.path.dirname(config_path), _config.parent_template)
# Load up the parent config
parent_config = load_yaml_config(parent_config_path)
parent_config = _update_config(parent_config, _base_template_config)
# The template the parent points to
parent_template = parent_config.parent_template
# Update the config using the parent's
_config = _update_config(_config, parent_config)
# Add the parent's path to the list of applied templates
_config._template_config_path.append(parent_config_path)
if _config.parent_template is None:
# Return if the parent template is None
return _config
# Replace the template parameter with the parent's: now if we recurse we'll be using the parent's template
# to do another update
_config.parent_template = parent_template
# Recurse and return the config
return _recurse(_config, _base_template_config)
# Load up the config files
config = load_yaml_config(config_path) # top level config
base_template_config = load_yaml_config(base_template_config_path) # base template
# Keep track of what templates are applied to the config, and where this config is
config._template_config_path = []
config._config_path = config_path
# Remember who this config's parent is
if 'parent_template' not in config.__dict__:
config.parent_template = None
parent = config.parent_template
# Recurse to apply all the parent configurations
config = _recurse(config, base_template_config)
# Ensure the parent is set correctly
config.parent_template = parent
# Assert to make sure we hit the base template config
    assert config._template_config_path[-1] == base_template_config_path, f'{base_template_config_path} is never used.'
return config
def create_config_simple_namespace(config_path, template_config_path, match_on='prefix'):
"""
A helper function to create a config SimpleNamespace that can be passed to train methods. Requires a
config and its template, and replaces the template with the settings you've specified.
"""
# Load up the config files
config = load_yaml_config(config_path)
template_config = load_yaml_config(template_config_path)
# Make sure that config and template are matched
if match_on is not None:
assert config.__dict__[match_on] == template_config.__dict__[match_on], \
f'Configs are mismatched {config.__dict__[match_on]} =/= {template_config.__dict__[match_on]}.'
# Make sure we didn't include any configuration options that aren't in the template
extra_keys = subtract_simple_namespaces(config, template_config).__dict__
assert len(extra_keys) == 0, \
f'Extra configuration option specified: {extra_keys}'
# Update the template with the configuration options that we picked
config = update_simple_namespace(template_config, config)
# Add the config and template path to the config
config._config_path = config_path
config._template_config_path = template_config_path
return config
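# Usage sketch (hypothetical example): writes a toy template and config to a temporary
# directory and merges them with create_config_simple_namespace. The yaml contents and
# file names are invented for illustration.
def example_create_config():
    import tempfile
    tmp_dir = tempfile.mkdtemp()
    template_path = os.path.join(tmp_dir, 'template.yaml')
    config_path = os.path.join(tmp_dir, 'config.yaml')
    with open(template_path, 'w') as f:
        f.write('prefix: train\nlr: 0.1\nbatch_size: 32\n')
    with open(config_path, 'w') as f:
        f.write('prefix: train\nlr: 0.01\n')
    config = create_config_simple_namespace(config_path, template_path)
    print(config.lr, config.batch_size)  # 0.01 32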
def pretty_print_simple_namespace(sns):
"""
Pretty print a SimpleNamespace. Currently just loops over and prints each (key, value) pair.
"""
# Loop and print the outer level
for k, v in sns.__dict__.items():
print(f'{k}: {v}')
|
model-patching-master
|
augmentation/utilities/config.py
|
import pickle
import gzip
def compile_keras_models(models, optimizers):
# Compile the models: this is necessary in order to save model architecture, weights and optimizer to disk
# It doesn't matter what loss we use here since we're not going to be calling model.fit: TODO check!
for model, optimizer in zip(models, optimizers):
model.compile(optimizer=optimizer, loss='mse')
# Calling _make_train_function populates the optimizer with per-variable weights
model._make_train_function()
def save_tf_optimizer_state(optimizer, store_path, zip=True):
    # Use gzip when compression is requested, otherwise fall back to the built-in open
    open_fn = gzip.open if zip else open
    with open_fn(store_path, 'wb') as f:
        pickle.dump(optimizer.get_weights(), f)
def load_tf_optimizer_state(optimizer, load_path, zip=True):
    open_fn = gzip.open if zip else open
    with open_fn(load_path, 'rb') as f:
        optimizer.set_weights(pickle.load(f))
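# Usage sketch (hypothetical example, assuming a TF 2.x Keras optimizer that still exposes
# get_weights()/set_weights()): round-trips Adam's slot variables through the two helpers
# above. One training step is taken first so the optimizer actually has state to serialize;
# the store path is made up for illustration.
def example_optimizer_state_roundtrip(store_path='/tmp/adam_state.pkl.gz'):
    import numpy as np
    import tensorflow as tf
    model = tf.keras.Sequential([tf.keras.layers.Dense(2, input_shape=(4,))])
    model.compile(optimizer=tf.keras.optimizers.Adam(), loss='mse')
    model.train_on_batch(np.random.rand(8, 4), np.random.rand(8, 2))
    save_tf_optimizer_state(model.optimizer, store_path)
    load_tf_optimizer_state(model.optimizer, store_path)
    print(len(model.optimizer.get_weights()), 'optimizer weight tensors restored')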
|
model-patching-master
|
augmentation/utilities/checkpoint.py
|
import wandb
import json
import time
import numpy as np
from collections import namedtuple
from augmentation.methods.cyclegan.models import mnist_unet_generator, unet_generator
from augmentation.models.models import create_keras_classification_model
WandbRun = namedtuple('WandbRun', 'path id name history files cfg url')
def particular_checkpoint_step_extractor(checkpoint, step_extractor=lambda fname: fname.split("_")[1].split(".")[0]):
def particular_checkpoint_step_extractor_(filename):
step = int(step_extractor(filename))
if step == checkpoint:
return step
else:
return 0
if checkpoint > 0:
return particular_checkpoint_step_extractor_
return step_extractor
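# Usage sketch (hypothetical example): shows how the default step extractor parses a
# checkpoint file name such as 'ckpt_1500.h5', and how the wrapper above zeroes out every
# step except the one that was explicitly requested. The file names are invented.
def example_step_extractor():
    default_extractor = lambda fname: fname.split("_")[1].split(".")[0]
    print(default_extractor('ckpt_1500.h5'))  # '1500'
    only_1500 = particular_checkpoint_step_extractor(1500)
    print(only_1500('ckpt_1500.h5'))  # 1500
    print(only_1500('ckpt_2000.h5'))  # 0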
def fetch_all_wandb_run_ids(wandb_project, wandb_entity='hazy-research', wandb_api=None):
if wandb_api is None:
wandb_api = wandb.Api()
wandb_path = f'{wandb_entity}/{wandb_project}/*'
runs = wandb_api.runs(wandb_path)
return [run.id for run in runs]
def load_wandb_run(wandb_run_id, wandb_project, wandb_entity='hazy-research', wandb_api=None):
if wandb_api is None:
wandb_api = wandb.Api()
wandb_path = f'{wandb_entity}/{wandb_project}/{wandb_run_id}'
run = wandb_api.run(wandb_path)
return WandbRun(path=wandb_path, id=run.id, name=run.name, history=run.scan_history,
files=run.files(per_page=10000), cfg=json.loads(run.json_config), url=run.url)
def get_most_recent_model_file(wandb_run: WandbRun, wandb_ckpt_path='checkpoints/',
model_name='', exclude=None,
step_extractor=lambda fname: fname.split("_")[1].split(".")[0]):
# Find checkpoints
checkpoints = [file for file in wandb_run.files if file.name.startswith(wandb_ckpt_path.lstrip("/"))]
relevant_checkpoints = [e for e in checkpoints if model_name in e.name]
if exclude:
relevant_checkpoints = [e for e in relevant_checkpoints if exclude not in e.name]
# Grab the latest checkpoint
latest_checkpoint = relevant_checkpoints[np.argmax([int(step_extractor(e.name)) for e in relevant_checkpoints])]
print(f"Retrieved checkpoint {latest_checkpoint.name}.")
# Restore the model
model_file = wandb.restore(latest_checkpoint.name, run_path=wandb_run.path, replace=True)
return model_file
def load_most_recent_keras_model_weights(keras_model,
wandb_run,
wandb_ckpt_path='checkpoints/',
model_name='',
exclude=None,
step_extractor=None):
# Make sure the step extractor is set to a reasonable default
if step_extractor is None:
step_extractor = lambda fname: fname.split(".")[-2].split("_")[-1]
# Get the most recent model file and load weights from it
try:
model_file = get_most_recent_model_file(wandb_run, wandb_ckpt_path, model_name, exclude, step_extractor)
time.sleep(3)
keras_model.load_weights(model_file.name)
print('load_most_recent_keras_model_weights: file ', model_file.name)
try:
return model_file.name, int(step_extractor(model_file.name))
except ValueError:
return model_file.name, int(step_extractor(model_file.name.split("/")[-1]))
except ValueError:
print("No model file found. Continuing without loading..")
return None, None
def load_pretrained_keras_model_from_wandb(wandb_run_id, wandb_project, wandb_entity,
keras_model_creation_fn, keras_model_creation_fn_args,
model_name, step_extractor,
wandb_ckpt_path='checkpoints/'):
# Load the run
wandb_run = load_wandb_run(wandb_run_id, wandb_project, wandb_entity)
# Create the model architecture
keras_model = globals()[keras_model_creation_fn](**keras_model_creation_fn_args)
# Load up the model weights
if step_extractor is None:
load_file, load_step = load_most_recent_keras_model_weights(keras_model, wandb_run,
model_name=model_name,
wandb_ckpt_path=wandb_ckpt_path)
else:
load_file, load_step = load_most_recent_keras_model_weights(keras_model, wandb_run,
model_name=model_name,
step_extractor=step_extractor,
wandb_ckpt_path=wandb_ckpt_path)
return keras_model, (load_file, load_step)
def load_pretrained_keras_classification_model(source, architecture, input_shape, n_classes, imagenet_pretrained,
pretraining_source, pretraining_info, checkpoint_path):
# Create the model
model = create_keras_classification_model(source, architecture, input_shape, n_classes, imagenet_pretrained)
if pretraining_source == 'wandb':
# Extract the Weights and Biases run information
run_id, project, entity = pretraining_info.split(":")
# Load up the relevant run
wandb_run = load_wandb_run(run_id, project, entity)
# Load up the most recent checkpoint from that run
load_most_recent_keras_model_weights(model, wandb_run, checkpoint_path)
return model
|
model-patching-master
|
augmentation/utilities/wandb.py
|
import numpy as np
def gallery(array, ncols=None):
# https://stackoverflow.com/questions/42040747/more-idiomatic-way-to-display-images-in-a-grid-with-numpy
nindex, height, width, intensity = array.shape
if ncols is None:
ncols = int(np.floor(np.sqrt(nindex)))
while nindex % ncols != 0: ncols += 1
nrows = nindex//ncols
assert nindex == nrows*ncols
# want result.shape = (height*nrows, width*ncols, intensity)
result = (array.reshape(nrows, ncols, height, width, intensity)
.swapaxes(1, 2)
.reshape(height*nrows, width*ncols, intensity))
return result
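# Usage sketch (hypothetical example): tiles 12 random RGB "images" into a grid and checks
# the resulting canvas shape.
def example_gallery():
    images = np.random.rand(12, 8, 8, 3)
    grid = gallery(images)  # ncols defaults to floor(sqrt(12)) = 3
    print(grid.shape)  # (32, 24, 3): 4 rows x 3 columns of 8x8 tiles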
|
model-patching-master
|
augmentation/utilities/visualize.py
|
import tensorflow as tf
import numpy as np
import wandb
from types import SimpleNamespace
def set_global_seeds(seed):
"""
Set all the random seeds.
"""
tf.random.set_seed(seed)
np.random.seed(seed)
def basic_setup(seed, logical_gpu_memory_limits=(4096, 10240)):
"""
Function for setting up basic options.
"""
# Set seeds
set_global_seeds(seed)
# Set print options
np.set_printoptions(precision=2, suppress=True)
# Set GPU growth in Tensorflow: disable for the moment
# set_gpu_growth()
# Create logical GPUs
logical_gpus = create_logical_gpus(logical_gpu_memory_limits)
# Figure out the devices we can put things on
device_0 = tf.device(logical_gpus[0].name)
device_1 = tf.device(logical_gpus[0].name) if len(logical_gpus) == 1 else tf.device(logical_gpus[1].name)
devices = [device_0, device_1]
return SimpleNamespace(logical_gpus=logical_gpus, devices=devices)
def create_logical_gpus(memory_limits=(4096, 10240)):
"""
Create logical GPUs that split the physical GPU into separate devices.
One use case is when we want to put models on separate logical GPUs to manage memory allocation on the GPU.
"""
# Get a list of GPUs
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
        # Create virtual GPUs with the requested memory limits (in MB)
try:
virtual_devices = [tf.config.experimental.VirtualDeviceConfiguration(memory_limit=lim)
for lim in memory_limits]
tf.config.experimental.set_virtual_device_configuration(gpus[0], virtual_devices)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPU,", len(logical_gpus), "Logical GPUs")
return logical_gpus
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
def set_gpu_growth():
"""
Set the GPU growth in Tensorflow so that GPU memory is not a bottleneck.
"""
# Get a list of GPUs
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
def checkpoint(model, path):
model.save(filepath=f'{wandb.run.dir}/{path}_model.h5', include_optimizer=True)
|
model-patching-master
|
augmentation/utilities/utils.py
|
import augmentation.datasets.utils
from augmentation.augment.utils import WandbModelPseudoLabelingPipeline, BinaryMNISTWandbModelPseudoLabelingPipeline
def configure_pseudolabeler(pseudolabel: bool, pseudolabeler_builder, pseudolabeler_builder_args):
"""Pass in a class that can build a pseudolabeler (implementing __call__) or a builder function
that returns a pseudolabeling function.
"""
if pseudolabel:
return globals()[pseudolabeler_builder](*pseudolabeler_builder_args)
return None
def apply_pseudolabeler(pseudolabel: bool,
pseudolabeler_builder,
pseudolabeler_builder_args,
tf_datasets,
dataset_aliases,
dataset_lens,
labeler_batch_size,
keep_datasets=False):
assert len(tf_datasets) == len(dataset_aliases), 'Must specify one alias per dataset.'
if pseudolabel:
# If pseudolabeling, create the pseudolabeler and apply it
print("Pseudolabeling the dataset.")
pseudolabeler = configure_pseudolabeler(pseudolabel, pseudolabeler_builder, pseudolabeler_builder_args)
updated_datasets, updated_aliases, \
updated_dataset_lens, variants_per_dataset = \
augmentation.datasets.utils.split_datasets_by_pseudolabels(tf_datasets=tf_datasets,
dataset_aliases=dataset_aliases,
pseudolabeler=pseudolabeler,
batch_size=labeler_batch_size)
if keep_datasets:
# Append the datasets that pseudolabeling generated
updated_datasets = list(tf_datasets) + list(updated_datasets)
updated_aliases = dataset_aliases + updated_aliases
updated_dataset_lens = dataset_lens + updated_dataset_lens if dataset_lens is not None else None
#TODO: stable for the moment, but figure out how to handle variants_per_dataset more elegantly
# (e.g. it's used to replicate the augmentations in robust/train.py)
return updated_datasets, updated_aliases, updated_dataset_lens, variants_per_dataset
# Just return everything as is, if not pseudolabeling
return tf_datasets, dataset_aliases, dataset_lens, [1] * len(dataset_aliases)
|
model-patching-master
|
augmentation/utilities/labelers.py
|
import tensorflow as tf
import tensorflow.keras as keras
def decay_weights(model, weight_decay_rate):
"""Calculates the loss for l2 weight decay and returns it."""
# @tf.function
def _decay_weights(weights, weight_decay_rate):
reg_loss = 0.
for var in weights:
reg_loss = reg_loss + tf.nn.l2_loss(var)
reg_loss = weight_decay_rate * reg_loss
return reg_loss
return _decay_weights(model.trainable_weights, weight_decay_rate)
def create_loss_fn(loss_name):
if loss_name == 'sparse_categorical_crossentropy':
return keras.losses.SparseCategoricalCrossentropy(reduction=tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE)
elif loss_name == 'binary_crossentropy':
return keras.losses.BinaryCrossentropy()
else:
raise NotImplementedError
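# Usage sketch (hypothetical example): applies decay_weights to a tiny Dense model and
# evaluates the loss returned by create_loss_fn on dummy targets.
def example_losses():
    model = keras.Sequential([keras.layers.Dense(4, input_shape=(3,))])
    reg_loss = decay_weights(model, weight_decay_rate=1e-4)
    print('l2 penalty:', float(reg_loss))
    loss_fn = create_loss_fn('sparse_categorical_crossentropy')
    y_true = tf.constant([0, 2])
    y_pred = tf.constant([[0.8, 0.1, 0.1], [0.2, 0.2, 0.6]])
    print('cross-entropy:', float(loss_fn(y_true, y_pred)))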
|
model-patching-master
|
augmentation/utilities/losses.py
|
from typing import List
import tensorflow as tf
import tensorflow.keras as keras
from augmentation.utilities.metrics import reset_metrics, update_metrics
def evaluate_model(model: keras.Model,
generator,
metrics: List[keras.metrics.Metric],
aggregate=None,
dtype=tf.float32) -> List[keras.metrics.Metric]:
"""
Evaluate a model on a dataset by measuring performance on some Keras metrics.
:param model: A model of type keras.Model.
:param generator: A data generator that can be iterated through.
:param metrics: A list of keras.metrics.Metric objects.
:param aggregate: A list of keras.metrics.Metric objects representing aggregate metrics
if this method is called multiple times.
:return: Performance on metrics.
"""
# Reset the metrics
reset_metrics(metrics)
# Loop over the data
for batch, targets in generator:
# Convert to tensors
batch, targets = tf.convert_to_tensor(batch, dtype=dtype), tf.convert_to_tensor(targets, dtype=dtype)
# Make predictions
predictions = model(batch, training=False)
# Update the metrics
update_metrics(metrics, targets, predictions)
# Update the aggregate metrics if any
if aggregate is not None:
update_metrics(aggregate, targets, predictions)
return metrics
|
model-patching-master
|
augmentation/utilities/eval.py
|
import tensorflow as tf
import tensorflow.keras as keras
import numpy as np
class LinearDecay(keras.optimizers.schedules.LearningRateSchedule):
# https://github.com/LynnHo/CycleGAN-Tensorflow-2/blob/master/module.py
# if `step` < `step_decay`: use fixed learning rate
# else: linearly decay the learning rate to zero
def __init__(self, initial_learning_rate, total_steps, step_decay):
super(LinearDecay, self).__init__()
self._initial_learning_rate = initial_learning_rate
self._steps = total_steps
self._step_decay = step_decay
self.current_learning_rate = tf.Variable(initial_value=initial_learning_rate, trainable=False, dtype=tf.float32)
def __call__(self, step):
self.current_learning_rate.assign(tf.cond(
step >= self._step_decay,
true_fn=lambda: self._initial_learning_rate *
(1 - 1 / (self._steps - self._step_decay) * (step - self._step_decay)),
false_fn=lambda: self._initial_learning_rate
))
return self.current_learning_rate
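# Usage sketch (hypothetical example): evaluates the schedule at a step before and after the
# decay kicks in. Steps are passed as float tensors so the arithmetic inside tf.cond stays
# in float32.
def example_linear_decay():
    schedule = LinearDecay(initial_learning_rate=1e-3, total_steps=100, step_decay=50)
    print(float(schedule(tf.constant(10.0))))  # 0.001, still in the flat phase
    print(float(schedule(tf.constant(75.0))))  # 0.0005, halfway through the linear decay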
def build_optimizer(optimizer, learning_rate_fn, momentum=0.9):
if optimizer == 'Adam':
return keras.optimizers.Adam(learning_rate=learning_rate_fn)
elif optimizer == 'SGD':
return keras.optimizers.SGD(learning_rate=learning_rate_fn,
momentum=momentum, nesterov=True)
else:
raise NotImplementedError
def build_lr_scheduler(scheduler,
steps_per_epoch,
n_epochs,
lr_start,
lr_decay_steps=None,
lr_end=None):
decay_steps = lr_decay_steps if lr_decay_steps is not None else n_epochs * steps_per_epoch
if scheduler == 'linear':
lr_end = lr_end if lr_end is not None else 0.
learning_rate_fn = keras.optimizers.schedules.PolynomialDecay(lr_start,
decay_steps,
lr_end,
power=1.0)
elif scheduler == 'linear_decay_0.5':
# Used in CycleGAN
learning_rate_fn = LinearDecay(lr_start, decay_steps, decay_steps//2)
elif scheduler == 'constant':
# assert lr_decay_steps is None and lr_end is None, 'No decay for constant learning rate.'
learning_rate_fn = lr_start
elif scheduler == 'cosine':
learning_rate_fn = keras.experimental.CosineDecay(initial_learning_rate=lr_start,
decay_steps=decay_steps,
alpha=0.0)
elif scheduler == 'piecewise_quarter':
# Splits decay_steps into 4 equal phases, and reduces learning rate by 10. in each phase
learning_rate_fn = keras.optimizers.schedules.\
PiecewiseConstantDecay(boundaries=list(np.linspace(0, decay_steps, 5)[1:-1]),
values=[lr_start / div for div in [1., 10., 100., 1000.]])
elif scheduler == 'piecewise_custom_1':
# Splits decay_steps into 3 phases (50%, 25%, 25%) and reduces learning rate by 10. in each phase
learning_rate_fn = keras.optimizers.schedules.\
PiecewiseConstantDecay(boundaries=[decay_steps//2, (decay_steps * 3)//4],
values=[lr_start / div for div in [1., 10., 100.]])
elif scheduler == 'piecewise_cifar10':
# These are the numbers in the ResNet paper
learning_rate_fn = keras.optimizers.schedules.PiecewiseConstantDecay(boundaries=[30000, 60000, 90000, 120000],
values=[0.1, 0.01, 0.001, 0.0001, 0.00005])
else:
raise NotImplementedError
return learning_rate_fn
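# Usage sketch (hypothetical example): builds a linearly decaying schedule over 10 epochs of
# 100 steps each and attaches it to an SGD optimizer.
def example_optimizer_setup():
    schedule = build_lr_scheduler('linear', steps_per_epoch=100, n_epochs=10, lr_start=0.1)
    optimizer = build_optimizer('SGD', schedule, momentum=0.9)
    print(float(schedule(0)), float(schedule(1000)))  # 0.1 at the start, 0.0 at the end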
|
model-patching-master
|
augmentation/utilities/optim.py
|
import tensorflow as tf
import dataflow as D
import time
import numpy as np
import datetime
from multiprocessing import cpu_count
from augmentation.augment.utils import compose_augmentations
def benchmark(dataflow, num_epochs=2, sleep=0.):
start_time = time.perf_counter()
for epoch_num in range(num_epochs):
s = time.time()
counter = 0
for _ in dataflow:
# Performing a training step
time.sleep(sleep)
counter += 1
pass
tf.print(f"Samples counted: {counter}")
e = time.time()
tf.print(f'{e - s}s elapsed.')
tf.print("Total execution time:", time.perf_counter() - start_time)
def dataflow_len(dataflow):
"""
Compute the length of a dataflow.
"""
tot = 0
for data in dataflow:
tot += data[0].shape[0]
print(tot)
return tot
def create_direct_dataflow(tf_dataset,
batch_size,
augmentations=(),
gpu_augmentations=(),
label_augmentations=(),
num_proc=cpu_count(),
test_flow=True,
):
# Create a dataflow
dataflow = D.DataFromGenerator(tf_dataset)
# Map the tensors to numpy arrays
dataflow = D.MapData(dataflow, func=lambda x: (x[0].numpy(), x[1].numpy()))
# Batch the data
dataflow = D.BatchData(dataflow, batch_size=batch_size)
# Repeat the data only once, we use a custom loop over epochs
dataflow = D.RepeatedData(dataflow, 1)
# Create a function for data augmentations
daug = lambda x: compose_augmentations((compose_augmentations(x[0], augmentations), x[1]), label_augmentations)
# Map the function onto the data
dataflow = D.MapData(dataflow, func=daug)
# Create a function for gpu data augmentations
gpu_daug = lambda x: (compose_augmentations(x, gpu_augmentations))
# Map the function onto the data
dataflow = D.MapDataComponent(dataflow, func=gpu_daug, index=0)
if test_flow:
# A quick runthrough of all the data
D.TestDataSpeed(dataflow, size=128).start()
else:
# Reset state manually
dataflow.reset_state()
return dataflow
def create_paired_direct_dataflow(tf_dataset_1,
tf_dataset_2,
batch_size,
augmentations,
x_only=False,
num_proc=cpu_count(),
test_flow=True,
cache_dir1='',
cache_dir2='',
shuffle=True,
shuffle_buffer=1000):
# Cache the dataset first
tf_dataset_1 = tf_dataset_1.cache(cache_dir1).prefetch(tf.data.experimental.AUTOTUNE)
tf_dataset_2 = tf_dataset_2.cache(cache_dir2).prefetch(tf.data.experimental.AUTOTUNE)
try:
# Unbatch them
tf_dataset_1 = tf_dataset_1.unbatch()
tf_dataset_2 = tf_dataset_2.unbatch()
except ValueError:
pass
if shuffle:
# Shuffle the data
tf_dataset_1 = tf_dataset_1.shuffle(shuffle_buffer, seed=1)
tf_dataset_2 = tf_dataset_2.shuffle(shuffle_buffer, seed=2)
    # Iterate through each dataset once so the cache files are fully materialized before wrapping the generators below
for _ in tf_dataset_1.batch(batch_size):
print('.', end='')
pass
for _ in tf_dataset_2.batch(batch_size):
print('.', end='')
pass
# Create a dataflow
dataflow_1 = D.DataFromGenerator(tf_dataset_1)
dataflow_2 = D.DataFromGenerator(tf_dataset_2)
# Map the tensors to numpy arrays
dataflow_1 = D.MapData(dataflow_1, func=lambda x: (x[0].numpy(), x[1].numpy()))
dataflow_2 = D.MapData(dataflow_2, func=lambda x: (x[0].numpy(), x[1].numpy()))
# Select some indices in the data
if x_only:
dataflow_1 = D.SelectComponent(dataflow_1, [0])
dataflow_2 = D.SelectComponent(dataflow_2, [0])
# Zip them
dataflow = D.JoinData([dataflow_1, dataflow_2])
# Batch data
dataflow = D.BatchData(dataflow, batch_size=batch_size, remainder=True)
# Repeat data only once, we use a custom loop over epochs
dataflow = D.RepeatedData(dataflow, 1)
# Create a function for data augmentations
if not x_only:
daug = lambda x: (compose_augmentations(x[0], augmentations), x[1],
compose_augmentations(x[2], augmentations), x[3])
else:
daug = lambda x: (compose_augmentations(x[0], augmentations),
compose_augmentations(x[1], augmentations))
# Map the function onto the data
dataflow = D.MapData(dataflow, func=daug)
if test_flow:
# A quick runthrough of all the data
D.TestDataSpeed(dataflow).start()
else:
# Reset state manually
dataflow.reset_state()
return dataflow
def create_parallel_dataflow_via_numpy(tf_dataset,
batch_size,
augmentations=(),
gpu_augmentations=(),
x_only=False,
num_proc=cpu_count(),
test_flow=True):
X, y = [], []
# Materialize the dataset as a numpy array: this is memory intensive for large datasets!
for data in tf_dataset:
X.append(data[0].numpy())
y.append(data[1].numpy())
numpy_dataset = list(zip(np.array(X), np.array(y)))
# Create a dataflow
dataflow = D.DataFromList(numpy_dataset)
# Select some indices in the data
if x_only:
dataflow = D.SelectComponent(dataflow, [0])
# Batch data
dataflow = D.BatchData(dataflow, batch_size=batch_size)
# Repeat data only once, we use a custom loop over epochs
dataflow = D.RepeatedData(dataflow, 1)
# Create a function for data augmentations
if not x_only:
daug = lambda x: (compose_augmentations(x[0], augmentations), x[1])
else:
daug = lambda x: (compose_augmentations(x[0], augmentations))
# Map the function onto the data with parallelism
dataflow = D.MultiProcessMapData(dataflow, num_proc=num_proc, map_func=daug, strict=True)
# Create a function for gpu data augmentations
gpu_daug = lambda x: (compose_augmentations(x, gpu_augmentations))
# Map the function onto the data
dataflow = D.MapDataComponent(dataflow, func=gpu_daug, index=0)
if test_flow:
# A quick runthrough of all the data
D.TestDataSpeed(dataflow).start()
return dataflow
def create_paired_parallel_dataflow_via_numpy(tf_dataset_1,
tf_dataset_2,
batch_size,
augmentations,
x_only=False,
num_proc=cpu_count(),
test_flow=True):
X_1, y_1 = [], []
X_2, y_2 = [], []
# Materialize the dataset as a numpy array: this is memory intensive for large datasets!
for data in tf_dataset_1:
X_1.append(data[0].numpy())
y_1.append(data[1].numpy())
for data in tf_dataset_2:
X_2.append(data[0].numpy())
y_2.append(data[1].numpy())
numpy_dataset_1 = list(zip(np.array(X_1), np.array(y_1)))
numpy_dataset_2 = list(zip(np.array(X_2), np.array(y_2)))
# Create a dataflow
dataflow_1 = D.DataFromList(numpy_dataset_1)
dataflow_2 = D.DataFromList(numpy_dataset_2)
# Select some indices in the data
if x_only:
dataflow_1 = D.SelectComponent(dataflow_1, [0])
dataflow_2 = D.SelectComponent(dataflow_2, [0])
# Zip them
dataflow = D.JoinData([dataflow_1, dataflow_2])
# Batch data
dataflow = D.BatchData(dataflow, batch_size=batch_size)
# Repeat data only once, we use a custom loop over epochs
dataflow = D.RepeatedData(dataflow, 1)
# Create a function for data augmentations
if not x_only:
daug = lambda x: (compose_augmentations(x[0], augmentations), x[1],
compose_augmentations(x[2], augmentations), x[3])
else:
daug = lambda x: (compose_augmentations(x[0], augmentations),
compose_augmentations(x[1], augmentations))
# Map the function onto the data with parallelism
dataflow = D.MultiProcessMapData(dataflow, num_proc=num_proc, map_func=daug, strict=True)
if test_flow:
# A quick runthrough of all the data
D.TestDataSpeed(dataflow).start()
return dataflow
def build_basic_data_pipeline(datasets, n_examples, batch_size, map_fn, map_fn_args):
"""
Builds a basic data pipeline for multiple datasets by,
- Restricting the dataset to a few examples with take (use n_examples = -1 as default to fetch whole dataset)
- Batching data with batch
- Prefetching batches of data, keeping CPU busy for speedup
- Applying multiple augmentations to the data in sequence using a map_fn and map_fn_args. map_fn_args
is a list of list of arguments. Each list of arguments corresponds to an augmentation we would like to map on
the dataset.
Example:
map_fn = data_map
map_fn_args = [[BasicImagePreprocessingPipeline], [NoAugmentationPipeline]]
applies the two augmentations to the dataset in sequence.
:param datasets: list of datasets
:param n_examples: number of examples to take
:param batch_size: batch size
:param map_fn: function to map over the data for augmentation
:param map_fn_args: list of lists. Each inner list contains arguments to pass to map_fn.
:return: the list of augmented datasets
"""
augmented_datasets = []
for dataset in datasets:
# Take some examples, batch the dataset and enable prefetching
dataset = dataset.take(n_examples).batch(batch_size).prefetch(tf.data.experimental.AUTOTUNE)
# Map some transformation over the dataset
for args in map_fn_args:
dataset = dataset.map(lambda image, label: map_fn(image, label, *args))
# Append the augmented dataset
augmented_datasets.append(dataset)
return augmented_datasets
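# Usage sketch (hypothetical example): rescale_map_fn is a stand-in for the project's
# augmentation map functions and simply rescales the images; it illustrates the
# (image, label, *args) calling convention that build_basic_data_pipeline expects.
def example_basic_pipeline():
    def rescale_map_fn(image, label, scale):
        return tf.cast(image, tf.float32) / scale, label
    images = np.random.randint(0, 256, size=(16, 8, 8, 3))
    labels = np.random.randint(0, 2, size=(16,))
    dataset = tf.data.Dataset.from_tensor_slices((images, labels))
    augmented = build_basic_data_pipeline([dataset], n_examples=-1, batch_size=4,
                                          map_fn=rescale_map_fn, map_fn_args=[[255.0]])[0]
    for batch_images, batch_labels in augmented.take(1):
        print(batch_images.shape, batch_labels.shape)  # (4, 8, 8, 3) (4,)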
|
model-patching-master
|
augmentation/dataflows/utils.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
This is the main script used for training Classy Vision jobs.
This can be used for training on your local machine, using CPU or GPU, and
for distributed training. This script also supports Tensorboard, Visdom and
checkpointing.
Example:
For training locally, simply specify a configuration file and whether
to use CPU or GPU:
$ ./classy_train.py --device gpu --config configs/my_config.json
For distributed training, this can be invoked via
:func:`torch.distributed.launch`. For instance
$ python -m torch.distributed.launch \
--nnodes=1 \
--nproc_per_node=1 \
--master_addr=localhost \
--master_port=29500 \
--use_env \
classy_train.py \
--config=configs/resnet50_synthetic_image_classy_config.json \
--log_freq=100
For other use cases, try
$ ./classy_train.py --help
"""
import logging
import os
from datetime import datetime
from pathlib import Path
import torch
from classy_vision.generic.distributed_util import get_rank, get_world_size
from classy_vision.generic.opts import check_generic_args, parse_train_arguments
from classy_vision.generic.registry_utils import import_all_packages_from_directory
from classy_vision.generic.util import load_json
from classy_vision.hooks import (
CheckpointHook,
LossLrMeterLoggingHook,
ModelComplexityHook,
ProfilerHook,
ProgressBarHook,
TensorboardPlotHook,
VisdomHook,
)
from tasks.biasamp_classification_task import FineTuningTask, build_task
from classy_vision.trainer import DistributedTrainer, LocalTrainer
from torchvision import set_image_backend, set_video_backend
from torch.nn.modules.loss import CrossEntropyLoss
from torch.distributed.elastic.multiprocessing.errors import record
try:
import hydra
import omegaconf
hydra_available = True
except ImportError:
hydra_available = False
@record
def main(args, config):
# Global flags
torch.manual_seed(0)
set_image_backend(args.image_backend)
set_video_backend(args.video_backend)
task = build_task(config)
# Load checkpoint, if available.
if args.checkpoint_load_path:
task.set_checkpoint(args.checkpoint_load_path)
    # Load a checkpoint containing a pre-trained model. This is how we
# implement fine-tuning of existing models.
if args.pretrained_checkpoint_path:
assert isinstance(
task, FineTuningTask
), "Can only use a pretrained checkpoint for fine tuning tasks"
task.set_pretrained_checkpoint(args.pretrained_checkpoint_path)
# Configure hooks to do tensorboard logging, checkpoints and so on.
# `configure_hooks` adds default hooks, while extra hooks can be specified
# in config file and stored in `task.hooks`. Here, we merge them when we
# set the final hooks of the task.
task.set_hooks(configure_hooks(args, config) + task.hooks)
# LocalTrainer is used for a single replica. DistributedTrainer will setup
# training to use PyTorch's DistributedDataParallel.
trainer_class = {"none": LocalTrainer, "ddp": DistributedTrainer}[
args.distributed_backend
]
trainer = trainer_class()
logging.info(
f"Starting training on rank {get_rank()} worker. "
f"World size is {get_world_size()}"
)
# That's it! When this call returns, training is done.
trainer.train(task)
output_folder = Path(args.checkpoint_folder).resolve()
logging.info("Training successful!")
logging.info(f'Results of this training run are available at: "{output_folder}"')
def configure_hooks(args, config):
hooks = [LossLrMeterLoggingHook(args.log_freq),
ModelComplexityHook()]
# Make a folder to store checkpoints and tensorboard logging outputs
suffix = datetime.now().isoformat()
base_folder = Path(__file__).parent / f"hold_output/output_{suffix}"
print('checkpoint folder: ' + args.checkpoint_folder)
if args.checkpoint_folder != "":
base_folder = Path(args.checkpoint_folder)
args.checkpoint_folder = base_folder / "checkpoints"
os.makedirs(args.checkpoint_folder, exist_ok=True)
args.checkpoint_folder = str(args.checkpoint_folder)
logging.info(f"Logging outputs to {base_folder}")
logging.info(f"Logging checkpoints to {args.checkpoint_folder}")
if not args.skip_tensorboard:
try:
from torch.utils.tensorboard import SummaryWriter
os.makedirs(Path(base_folder) / "tensorboard", exist_ok=True)
tb_writer = SummaryWriter(log_dir=Path(base_folder) / "tensorboard")
hooks.append(TensorboardPlotHook(tb_writer))
except ImportError:
logging.warning("tensorboard not installed, skipping tensorboard hooks")
args_dict = vars(args)
args_dict["config"] = config
print('checkpoint folder: ' + args.checkpoint_folder)
hooks.append(
CheckpointHook(
args.checkpoint_folder, args_dict, checkpoint_period=args.checkpoint_period
)
)
if args.profiler:
hooks.append(ProfilerHook())
if args.show_progress:
hooks.append(ProgressBarHook())
if args.visdom_server != "":
hooks.append(VisdomHook(args.visdom_server, args.visdom_port))
return hooks
if hydra_available:
@hydra.main(config_path="hydra_configs", config_name="args")
def hydra_main(cfg):
args = cfg
check_generic_args(cfg)
config = omegaconf.OmegaConf.to_container(cfg.config)
main(args, config)
# run all the things:
if __name__ == "__main__":
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logging.info("Classy Vision's default training script.")
# This imports all modules in the same directory as classy_train.py
# Because of the way Classy Vision's registration decorators work,
# importing a module has a side effect of registering it with Classy
# Vision. This means you can give classy_train.py a config referencing your
# custom module (e.g. my_dataset) and it'll actually know how to
# instantiate it.
file_root = Path(__file__).parent
import_all_packages_from_directory(file_root)
if hydra_available:
hydra_main()
else:
args = parse_train_arguments()
config = load_json(args.config_file)
main(args, config)
|
cv_bias_amplification-main
|
my-project-release/my-project/classy_train.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the losses/ directory
import_all_modules(FILE_ROOT, "losses")
|
cv_bias_amplification-main
|
my-project-release/my-project/losses/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch.nn.functional as F
from classy_vision.losses import ClassyLoss, register_loss
@register_loss("one_hot_binary_ce_loss")
class OneHotBinaryCELoss(ClassyLoss):
def forward(self, input, target):
labels = F.one_hot(target, num_classes=10).float()
return F.binary_cross_entropy(input, labels)
@classmethod
def from_config(cls, config):
# We don't need anything from the config
return cls()
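# Usage sketch (hypothetical example): evaluates the loss on dummy per-class probabilities.
# binary_cross_entropy expects inputs in [0, 1], so random logits are squashed with a sigmoid
# first; the 10-class assumption matches the hard-coded num_classes above.
def example_one_hot_binary_ce_loss():
    import torch
    loss_fn = OneHotBinaryCELoss.from_config({})
    probabilities = torch.sigmoid(torch.randn(4, 10))
    targets = torch.randint(0, 10, (4,))
    print(loss_fn(probabilities, targets))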
|
cv_bias_amplification-main
|
my-project-release/my-project/losses/one_hot_binary_ce_loss.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import contextlib
import copy
import enum
import json
import logging
import math
import multiprocessing as mp
import time
from typing import Any, Dict, List, NamedTuple, Optional, Union
import os
import torch
import torch.nn as nn
from classy_vision.dataset import ClassyDataset, build_dataset
from classy_vision.dataset.transforms.mixup import MixupTransform
from classy_vision.generic.distributed_util import (
all_reduce_mean,
barrier,
init_distributed_data_parallel_model,
is_distributed_training_run,
)
from classy_vision.generic.util import (
Timer,
copy_model_to_gpu,
get_torch_version,
load_and_broadcast_checkpoint,
master_params,
recursive_copy_to_gpu,
split_batchnorm_params,
update_classy_state,
)
from classy_vision.hooks import CheckpointHook, ClassyHook, build_hooks
from classy_vision.losses import ClassyLoss, build_loss
from classy_vision.meters import ClassyMeter, build_meters
from classy_vision.models import ClassyModel, build_model
from classy_vision.optim import (
ClassyOptimizer,
build_optimizer,
build_optimizer_schedulers,
)
from classy_vision.optim.zero import ZeRO
from torch.distributed import broadcast
from . import register_task, build_task
from classy_vision.tasks.classy_task import ClassyTask
from classy_vision.tasks.fine_tuning_task import FineTuningTask
try:
import apex
apex_available = True
except ImportError:
apex_available = False
try:
from torch.cuda.amp import GradScaler as TorchGradScaler
except ImportError:
pass
try:
from fairscale.optim.grad_scaler import ShardedGradScaler
fairscale_available = True
except ImportError:
fairscale_available = False
class AmpType(enum.Enum):
# Automatic Mixed Precision supported types
APEX = enum.auto()
PYTORCH = enum.auto()
class BroadcastBuffersMode(enum.Enum):
DISABLED = enum.auto()
# Enable DistributedDataParallel's broadcast_buffers option, synchronizing
# model buffers every forward pass.
FORWARD_PASS = enum.auto()
# Similar to FORWARD_PASS, but only synchronizes model buffers once
# per epoch, between train and test phases. If your motivation for
# synchronizing buffers is for buffers to be consistent during eval, use
# this instead of FORWARD_PASS to reduce training overhead.
BEFORE_EVAL = enum.auto()
class BatchNormSyncMode(enum.Enum):
DISABLED = enum.auto() # No Synchronized Batch Normalization
PYTORCH = enum.auto() # Use torch.nn.SyncBatchNorm
APEX = enum.auto() # Use apex.parallel.SyncBatchNorm, needs apex to be installed
class LastBatchInfo(NamedTuple):
loss: torch.Tensor
output: torch.Tensor
target: torch.Tensor
sample: Dict[str, Any]
step_data: Dict[str, Any]
@register_task("biasamp_classification_task")
class BiasAmpClassificationTask(ClassyTask):
"""Basic classification training task.
    This task encapsulates all of the components and steps needed to
train a classifier using a :class:`classy_vision.trainer.ClassyTrainer`.
    Assumes a train / test phase for each epoch and that the datasets
have the same API as the map-style Dataset class in
`torch.utils.data.dataset <https://pytorch.org/docs/stable/data.html
#torch.utils.data.Dataset>`_ (in particular, this task makes use of
the len). If you are using an `IterableDataset <https://pytorch.org/docs/
stable/data.html#torch.utils.data.IterableDataset>`_ then a custom task
may be appropriate.
:var loss: Loss (see :class:`classy_vision.losses.ClassyLoss`) function used
for computing the loss in each forward pass
    :var datasets: Mapping from a ``phase_type`` in ["train", "test"]
to dataset used for training (or testing)
:var meters: List of meters (see :class:`classy_vision.meters.ClassyMeter`)
to calculate during training
:var num_epochs: Number of epochs (passes over dataset) to train
:var test_only: Used to only run the test phase
:var base_model: Model to be trained, unwrapped in DDP or DP wrappers
:var optimizer: Optimizer used in train step
:var optimizer_schedulers: Dictionary. Key is the name of the optimizer
option (e.g. lr), value is a ClassyParamScheduler
:var checkpoint: Serializable dict which represents state in training
:var phases: List of phase specific information, e.g. if phase is
train / test.
:var hooks: List of hooks to apply during training
:var train: Phase type, if true it means we are training,
false means testing
:var distributed_model: Base model, but wrapped in DDP (DistributedDataParallel)
:var phase_idx: Current phase id, first phase is 0, if task has not started
training then returns -1
:var train_phase_idx: Only counts train phases
:var num_updates: Number of total parameter updates applied to model
by the optimizer
:var data_iterator: Iterator which can be used to obtain batches
:var losses: Loss curve
:var perf_log: list of training speed measurements, to be logged
:var clip_grad_norm: maximum gradient norm (default None)
:var simulated_global_batchsize: batch size simulated via gradient accumulation
:var optimizer_period: apply optimizer after this many steps; derived from
simulated_global_batchsize, default 1.
"""
def __init__(self):
"""Constructs a ClassificationTask"""
super().__init__()
self.base_loss = None
self.datasets = {}
self.meters = []
self.num_epochs = 1
self.test_phase_period = 1
self.train_phases_per_epoch = 0
self.test_only = False
self.base_model = None
self.optimizer = None
self.optimizer_schedulers = {}
self.checkpoint_dict = None
self.checkpoint_path = None
self.checkpoint_load_strict = True
self.phases = []
self.hooks = []
self.train = True
self.distributed_model = None
self.distributed_loss = None
self.phase_idx = -1
self.train_phase_idx = -1
self.num_updates = 0
self.dataloader = None
self.data_iterator = None
self.losses = []
self.broadcast_buffers_mode: BroadcastBuffersMode = (
BroadcastBuffersMode.BEFORE_EVAL
)
self.amp_args = None
self.amp_type = None
self.amp_grad_scaler = None
self.mixup_transform = None
self.perf_log = []
self.last_batch = None
self.batch_norm_sync_mode = BatchNormSyncMode.DISABLED
self.find_unused_parameters = False
self.use_gpu = torch.cuda.is_available()
self.dataloader_mp_context = "fork"
self.bn_weight_decay = False
self._train_only = True
self.clip_grad_norm = None
self.simulated_global_batchsize = None
self.optimizer_period = 1
self.ddp_bucket_cap_mb = 25
self.use_sharded_ddp = False
self.fp16_grad_compress = False
def set_use_sharded_ddp(self, use_sharded_ddp: bool):
self.use_sharded_ddp = use_sharded_ddp
if self.use_sharded_ddp:
logging.info("Using Sharded DDP")
return self
def set_use_gpu(self, use_gpu: bool):
self.use_gpu = use_gpu
assert (
not self.use_gpu or torch.cuda.is_available()
), "CUDA required to train on GPUs"
return self
def set_clip_grad_norm(self, clip_grad_norm: Optional[float]):
"""Sets maximum gradient norm.
None means gradient clipping is disabled. Defaults to None."""
self.clip_grad_norm = clip_grad_norm
if clip_grad_norm is None:
logging.info("Disabled gradient norm clipping.")
else:
logging.info(
f"Enabled gradient norm clipping with threshold: {clip_grad_norm}"
)
return self
def set_simulated_global_batchsize(self, simulated_global_batchsize: Optional[int]):
"""Sets a simulated batch size by gradient accumulation.
Gradient accumulation adds up gradients from multiple minibatches and
steps the optimizer every N train_steps, where N is optimizer_period.
When enabled, the very last train_steps might end up not updating the
model, depending on the number of total steps. None means gradient
accumulation is disabled. Defaults to None."""
self.simulated_global_batchsize = simulated_global_batchsize
return self
def set_checkpoint(self, checkpoint_path: str):
"""Sets checkpoint on task.
Args:
checkpoint_path: The path to load the checkpoint from. Can be a file or a
directory. See :func:`load_checkpoint` for more information.
"""
self.checkpoint_path = checkpoint_path
return self
def set_checkpoint_load_strict(self, checkpoint_load_strict: bool):
"""Sets checkpoint on task.
Args:
checkpoint_load_strict: Whether to use load_strict when copying model weights
"""
self.checkpoint_load_strict = checkpoint_load_strict
return self
def _set_checkpoint_dict(self, checkpoint_dict: Dict[str, Any]):
"""Sets the checkpoint dict in the task. Only used for testing.
Args:
checkpoint_dict: A serializable dict representing current task state
"""
self.checkpoint_dict = checkpoint_dict
return self
def set_num_epochs(self, num_epochs: Union[int, float]):
"""Set number of epochs to be run.
Args:
num_epochs: Number of epochs to run task
"""
self.num_epochs = num_epochs
return self
def set_test_phase_period(self, test_phase_period: int):
"""Set the period of test phase.
Args:
test_phase_period: The period of test phase
"""
self.test_phase_period = test_phase_period
return self
def set_dataset(self, dataset: ClassyDataset, phase_type: str):
"""Set dataset for phase type on task
Args:
dataset: ClassyDataset for returning samples.
phase_type: str must be one of "train" or "test"
"""
assert phase_type in [
"train",
"test",
], "phase_type must be in ['train', 'test']"
self.datasets[phase_type] = dataset
if phase_type == "train":
self.train_phases_per_epoch = getattr(dataset, "phases_per_epoch", 1)
else:
self._train_only = False
return self
def set_dataloader_mp_context(self, dataloader_mp_context: Optional[str]):
"""Set the multiprocessing context used by the dataloader.
The context can be either 'spawn', 'fork', 'forkserver' or None (uses the
default context). See
https://docs.python.org/3/library/multiprocessing.html#multiprocessing.get_context
for more details."""
self.dataloader_mp_context = dataloader_mp_context
return self
def set_optimizer(self, optimizer: ClassyOptimizer):
"""Set optimizer for task
Args:
optimizer: optimizer for task
"""
self.optimizer = optimizer
return self
def set_loss(self, loss: ClassyLoss):
"""Set loss function for task
Args:
loss: loss for task
"""
self.base_loss = loss
return self
def set_meters(self, meters: List["ClassyMeter"]):
"""Set meters for task
Args:
meters: list of meters to compute during training
"""
self.meters = meters
return self
def set_distributed_options(
self,
broadcast_buffers_mode: BroadcastBuffersMode = BroadcastBuffersMode.BEFORE_EVAL,
batch_norm_sync_mode: BatchNormSyncMode = BatchNormSyncMode.DISABLED,
batch_norm_sync_group_size: int = 0,
find_unused_parameters: bool = False,
bucket_cap_mb: int = 25,
fp16_grad_compress: bool = False,
):
"""Set distributed options.
Args:
broadcast_buffers_mode: Broadcast buffers mode. See
:class:`BroadcastBuffersMode` for options.
batch_norm_sync_mode: Batch normalization synchronization mode. See
:class:`BatchNormSyncMode` for options.
batch_norm_sync_group_size: Group size to use for synchronized batch norm.
0 means that the stats are synchronized across all replicas. For
efficient synchronization, set it to the number of GPUs in a node (
usually 8).
find_unused_parameters: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
bucket_cap_mb: See
:class:`torch.nn.parallel.DistributedDataParallel` for information.
Raises:
RuntimeError: If batch_norm_sync_mode is `BatchNormSyncMode.APEX` and apex
is not installed.
"""
self.broadcast_buffers_mode = broadcast_buffers_mode
if batch_norm_sync_group_size > 0:
if not batch_norm_sync_mode == BatchNormSyncMode.APEX:
# this should ideally work with PyTorch Sync BN as well, but it
# fails while initializing DDP for some reason.
raise ValueError(
"batch_norm_sync_group_size can be > 0 only when "
"Apex Synchronized Batch Normalization is being used."
)
self.batch_norm_sync_group_size = batch_norm_sync_group_size
if batch_norm_sync_mode == BatchNormSyncMode.DISABLED:
logging.info("Synchronized Batch Normalization is disabled")
else:
if batch_norm_sync_mode == BatchNormSyncMode.APEX and not apex_available:
raise RuntimeError("apex is not installed")
msg = f"Using Synchronized Batch Normalization using {batch_norm_sync_mode}"
if self.batch_norm_sync_group_size > 0:
msg += f" and group size {batch_norm_sync_group_size}"
logging.info(msg)
self.batch_norm_sync_mode = batch_norm_sync_mode
if find_unused_parameters:
logging.info("Enabling find_unused_parameters in DDP")
self.find_unused_parameters = find_unused_parameters
self.ddp_bucket_cap_mb = bucket_cap_mb
if fp16_grad_compress:
if get_torch_version() < [1, 8]:
raise RuntimeError(
"FP16 grad compression is only supported since PyTorch 1.8"
)
logging.info("Enabling FP16 grad compression")
self.fp16_grad_compress = fp16_grad_compress
return self
def set_hooks(self, hooks: List["ClassyHook"]):
"""Set hooks for task
Args:
hooks: List of hooks to apply during training
"""
from classy_vision.hooks import ClassyHook
assert isinstance(hooks, list)
assert all(isinstance(hook, ClassyHook) for hook in hooks)
assert len({hook.name() for hook in hooks}) == len(
hooks
), "Cannot have repeated hooks of the same class"
        # TODO (zyan3): we move the checkpoint hook to the end of the list because some
        # hooks may change the state of the model, and we want to save the changed state
        # in the checkpoint. This is a temporary fix.
non_checkpoint_hooks = [
hook for hook in hooks if not isinstance(hook, CheckpointHook)
]
checkpoint_hooks = [hook for hook in hooks if isinstance(hook, CheckpointHook)]
hooks = non_checkpoint_hooks + checkpoint_hooks
self.hooks = hooks
return self
def set_model(self, model: ClassyModel):
"""Set model for task
Args:
model: Model to be trained
"""
self.base_model = model
return self
def set_test_only(self, test_only: bool):
"""Set test only flag
Args:
test_only: If true, only test phases will be run
"""
self.test_only = test_only
return self
    def set_bn_weight_decay(self, bn_weight_decay: bool):
        """Set whether weight decay is applied to batch norm parameters
        Args:
            bn_weight_decay: If true, apply weight decay to batch norm parameters
        """
        assert isinstance(bn_weight_decay, bool)
        self.bn_weight_decay = bn_weight_decay
return self
def set_amp_args(self, amp_args: Optional[Dict[str, Any]]):
"""Disable / enable apex.amp and set the automatic mixed precision parameters.
apex.amp can be utilized for mixed / half precision training.
Args:
amp_args: Dictionary containing arguments to be passed to
amp.initialize. Set to None to disable amp. To enable mixed
precision training, pass amp_args={"opt_level": "O1"} here.
See https://nvidia.github.io/apex/amp.html for more info.
Raises:
RuntimeError: If opt_level is not None and apex is not installed.
Warning: apex needs to be installed to utilize this feature.
"""
self.amp_args = amp_args
if amp_args is None:
logging.info("AMP disabled")
else:
# Check that the requested AMP type is known
try:
self.amp_type = AmpType[self.amp_args["amp_type"].upper()]
except KeyError:
logging.info("AMP type not specified, defaulting to Apex")
self.amp_type = AmpType.APEX
# Check for CUDA availability, required for both Apex and Pytorch AMP
if not torch.cuda.is_available():
raise RuntimeError(
"AMP is required but CUDA is not supported, cannot enable AMP"
)
# Check for Apex availability
if self.amp_type == AmpType.APEX and not apex_available:
raise RuntimeError(
"Apex AMP is required but Apex is not installed, cannot enable AMP"
)
if self.use_sharded_ddp:
if self.amp_type == AmpType.APEX:
raise RuntimeError(
"ShardedDDP has been requested, which is incompatible with Apex AMP"
)
if not fairscale_available:
raise RuntimeError(
"ShardedDDP has been requested, but fairscale is not installed in the current environment"
)
# Set Torch AMP grad scaler, used to prevent gradient underflow
elif self.amp_type == AmpType.PYTORCH:
if self.use_sharded_ddp:
logging.info("Using ShardedGradScaler to manage Pytorch AMP")
self.amp_grad_scaler = ShardedGradScaler()
else:
self.amp_grad_scaler = TorchGradScaler()
logging.info(f"AMP enabled with args {amp_args}")
return self
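    # Hedged usage sketch (not part of the original task code; `task` below is a
    # hypothetical ClassificationTask instance). Based on the parsing above, the
    # optional "amp_type" key selects between Apex and native PyTorch AMP and
    # defaults to Apex; any remaining keys are forwarded to apex.amp.initialize
    # when Apex is selected.
    #
    #   task.set_amp_args({"amp_type": "pytorch"})                   # torch.cuda.amp
    #   task.set_amp_args({"amp_type": "apex", "opt_level": "O1"})   # Apex AMP
    #   task.set_amp_args(None)                                      # disable AMP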
def set_mixup_transform(self, mixup_transform: Optional["MixupTransform"]):
"""Disable / enable mixup transform for data augmentation
        Args:
mixup_transform: a callable object which performs mixup data augmentation
"""
self.mixup_transform = mixup_transform
if mixup_transform is None:
logging.info("mixup disabled")
else:
logging.info("mixup enabled")
return self
def set_optimizer_schedulers(self, schedulers):
self.optimizer_schedulers = schedulers
return self
@classmethod
def from_config(cls, config: Dict[str, Any]) -> "ClassificationTask":
"""Instantiates a ClassificationTask from a configuration.
Args:
config: A configuration for a ClassificationTask.
See :func:`__init__` for parameters expected in the config.
Returns:
A ClassificationTask instance.
"""
test_only = config.get("test_only", False)
if not test_only:
# TODO Make distinction between epochs and phases in optimizer clear
train_phases_per_epoch = config["dataset"]["train"].get(
"phases_per_epoch", 1
)
optimizer_config = config["optimizer"]
optimizer_config["num_epochs"] = (
config["num_epochs"] * train_phases_per_epoch
)
optimizer = build_optimizer(optimizer_config)
param_schedulers = build_optimizer_schedulers(optimizer_config)
datasets = {}
phase_types = ["train", "test"]
for phase_type in phase_types:
if phase_type in config["dataset"]:
datasets[phase_type] = build_dataset(config["dataset"][phase_type])
loss = build_loss(config["loss"])
amp_args = config.get("amp_args")
meters = build_meters(config.get("meters", {}))
model = build_model(config["model"])
mixup_transform = None
if config.get("mixup") is not None:
assert "alpha" in config["mixup"], "key alpha is missing in mixup dict"
mixup_transform = MixupTransform(
config["mixup"]["alpha"],
num_classes=config["mixup"].get("num_classes"),
cutmix_alpha=config["mixup"].get("cutmix_alpha", 0),
cutmix_minmax=config["mixup"].get("cutmix_minmax"),
mix_prob=config["mixup"].get("mix_prob", 1.0),
switch_prob=config["mixup"].get("switch_prob", 0.5),
mode=config["mixup"].get("mode", "batch"),
label_smoothing=config["mixup"].get("label_smoothing", 0.0),
)
# hooks config is optional
hooks_config = config.get("hooks")
hooks = []
if hooks_config is not None:
hooks = build_hooks(hooks_config)
distributed_config = config.get("distributed", {})
distributed_options = {
"broadcast_buffers_mode": BroadcastBuffersMode[
distributed_config.get("broadcast_buffers", "before_eval").upper()
],
"batch_norm_sync_mode": BatchNormSyncMode[
distributed_config.get("batch_norm_sync_mode", "disabled").upper()
],
"batch_norm_sync_group_size": distributed_config.get(
"batch_norm_sync_group_size", 0
),
"find_unused_parameters": distributed_config.get(
"find_unused_parameters", False
),
"bucket_cap_mb": distributed_config.get("bucket_cap_mb", 25),
"fp16_grad_compress": distributed_config.get("fp16_grad_compress", False),
}
task = (
cls()
.set_num_epochs(config["num_epochs"])
.set_test_phase_period(config.get("test_phase_period", 1))
.set_loss(loss)
.set_test_only(test_only)
.set_model(model)
.set_meters(meters)
.set_amp_args(amp_args)
.set_mixup_transform(mixup_transform)
.set_distributed_options(**distributed_options)
.set_hooks(hooks)
.set_bn_weight_decay(config.get("bn_weight_decay", False))
.set_clip_grad_norm(config.get("clip_grad_norm"))
.set_simulated_global_batchsize(config.get("simulated_global_batchsize"))
.set_use_sharded_ddp(config.get("use_sharded_ddp", False))
)
if not test_only:
task.set_optimizer(optimizer)
task.set_optimizer_schedulers(param_schedulers)
use_gpu = config.get("use_gpu")
if use_gpu is not None:
task.set_use_gpu(use_gpu)
for phase_type in datasets:
task.set_dataset(datasets[phase_type], phase_type)
# NOTE: this is a private member and only meant to be used for
# logging/debugging purposes. See __repr__ implementation
task._config = config
return task
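    # Illustrative config sketch (an assumption for documentation purposes, not an
    # official schema; the "name" key consumed by build_task is omitted here).
    # These are the keys from_config reads above; the component sub-configs ("...")
    # depend on which dataset/model/loss/optimizer implementations are registered.
    #
    #   config = {
    #       "num_epochs": 90,
    #       "test_phase_period": 1,
    #       "dataset": {"train": {...}, "test": {...}},
    #       "model": {...},
    #       "loss": {...},
    #       "optimizer": {...},
    #       "meters": {},
    #       "amp_args": None,
    #       "distributed": {"broadcast_buffers": "before_eval"},
    #   }
    #   task = ClassificationTask.from_config(config)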
@property
def num_batches_per_phase(self):
"""Returns number of batches in current phase iterator"""
return len(self.data_iterator)
@property
def model(self):
"""Returns model used in training (can be wrapped with DDP)"""
return (
self.distributed_model if is_distributed_training_run() else self.base_model
)
@property
def loss(self):
"""Returns loss used in training (can be wrapped with DDP)"""
return self.distributed_loss if self.distributed_loss else self.base_loss
@property
def phase_type(self):
"""Returns current phase type. String with value "train" or "test" """
return "train" if self.train else "test"
@property
def eval_phase_idx(self):
"""Returns current evaluation phase"""
return self.phase_idx - self.train_phase_idx - 1
def get_total_training_phases(self):
"""
Returns the total number of "train" phases in the task
"""
num_training_phases = 0
for phase in self.phases:
if phase["train"] is True:
num_training_phases += 1
return num_training_phases
def get_total_test_phases(self):
"""
Returns the total number of "test" phases in the task
"""
num_test_phases = 0
for phase in self.phases:
if phase["train"] is False:
num_test_phases += 1
return num_test_phases
def _build_phases(self):
"""Returns list of phases from config.
These phases will look like:
{
train: is this a train or test phase?
optimizer: optimizer settings
}
- If this is a test only run, then only test phases will be
generated
- If this is a training run with both train and test datasets, then x phases =
x train phases + x test phases, interleaved. If test_phase_period > 1, test
phases are only added after test_phase_period train phases. The last phase is
always a test phase.
- If this is a training run with only a train dataset, then x phases = x train
phases.
"""
if not self.test_only:
phases = [
{"train": True}
for _ in range(math.ceil(self.train_phases_per_epoch * self.num_epochs))
]
if self._train_only:
return phases
final_phases = []
for i, phase in enumerate(phases):
final_phases.append(phase)
if (i + 1) % self.test_phase_period == 0:
final_phases.append({"train": False})
if final_phases[-1]["train"]:
final_phases.append({"train": False})
return final_phases
return [{"train": False} for _ in range(self.num_epochs)]
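    # Worked example of the interleaving above (a sketch, not original code): with
    # train_phases_per_epoch=1, num_epochs=4 and test_phase_period=2, _build_phases
    # produces
    #   [{"train": True}, {"train": True}, {"train": False},
    #    {"train": True}, {"train": True}, {"train": False}]
    # i.e. a test phase after every 2 train phases, and the run always ends on a
    # test phase.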
def build_dataloader_from_dataset(self, dataset, **kwargs):
"""Builds a dataloader from the provided dataset
Args:
dataset: A ClassyDataset
kwargs: Additional kwargs to pass during dataloader construction for
derived classes
"""
return dataset.iterator(
phase_type=self.phase_type,
current_phase_id=self.train_phase_idx if self.train else 0,
pin_memory=self.use_gpu and torch.cuda.device_count() > 1,
multiprocessing_context=mp.get_context(self.dataloader_mp_context),
**kwargs,
)
def build_dataloaders_for_current_phase(self):
"""Builds dataloader(s) for the current phase.
Deriving classes can override this method to support custom behavior, like
supporting multiple dataloaders in parallel.
"""
self.dataloader = self.build_dataloader_from_dataset(
self.datasets[self.phase_type]
)
def prepare_optimizer(self, optimizer, model, loss=None):
bn_params, other_params = split_batchnorm_params(model)
if loss is not None:
bn_params_loss, params_loss = split_batchnorm_params(loss)
bn_params = bn_params + bn_params_loss
other_params = other_params + params_loss
bn_schedulers = self.optimizer_schedulers.copy()
if not self.bn_weight_decay:
bn_schedulers["weight_decay"] = 0
param_groups = [{"params": other_params, **self.optimizer_schedulers}]
if len(bn_params) > 0:
param_groups.append({"params": bn_params, **bn_schedulers})
self.optimizer.set_param_groups(param_groups)
def prepare(self):
"""Prepares task for training, populates all derived attributes"""
self.phases = self._build_phases()
self.train = False if self.test_only else self.train
if self.batch_norm_sync_mode == BatchNormSyncMode.PYTORCH:
self.base_model = nn.SyncBatchNorm.convert_sync_batchnorm(self.base_model)
elif self.batch_norm_sync_mode == BatchNormSyncMode.APEX:
sync_bn_process_group = apex.parallel.create_syncbn_process_group(
self.batch_norm_sync_group_size
)
self.base_model = apex.parallel.convert_syncbn_model(
self.base_model, process_group=sync_bn_process_group
)
# move the model and loss to the right device
if self.use_gpu:
self.base_model, self.base_loss = copy_model_to_gpu(
self.base_model, self.base_loss
)
else:
self.base_loss.cpu()
self.base_model.cpu()
if self.optimizer is not None:
self.prepare_optimizer(
optimizer=self.optimizer, model=self.base_model, loss=self.base_loss
)
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
                # Initialize apex.amp. This updates the model and the PyTorch optimizer
                # (if training, which is wrapped by the ClassyOptimizer in self.optimizer).
                # Note that this must happen before loading the checkpoint, because there
                # is amp state that needs to be restored.
if self.optimizer is None:
self.base_model = apex.amp.initialize(
self.base_model, optimizers=None, **self.amp_args
)
else:
self.base_model, self.optimizer.optimizer = apex.amp.initialize(
self.base_model, self.optimizer.optimizer, **self.amp_args
)
if self.simulated_global_batchsize is not None:
if self.simulated_global_batchsize % self.get_global_batchsize() != 0:
raise ValueError(
f"Global batch size ({self.get_global_batchsize()}) must divide "
f"simulated_global_batchsize ({self.simulated_global_batchsize})"
)
else:
self.simulated_global_batchsize = self.get_global_batchsize()
self.optimizer_period = (
self.simulated_global_batchsize // self.get_global_batchsize()
)
if self.optimizer_period > 1:
logging.info(
f"Using gradient accumulation with a period of {self.optimizer_period}"
)
if self.checkpoint_path:
self.checkpoint_dict = load_and_broadcast_checkpoint(self.checkpoint_path)
classy_state_dict = (
None
if self.checkpoint_dict is None
else self.checkpoint_dict["classy_state_dict"]
)
if classy_state_dict is not None:
state_load_success = update_classy_state(self, classy_state_dict)
assert (
state_load_success
), "Update classy state from checkpoint was unsuccessful."
self.init_distributed_data_parallel_model()
def init_distributed_data_parallel_model(self):
"""
Initialize
`torch.nn.parallel.distributed.DistributedDataParallel <https://pytorch.org/
docs/stable/nn.html#distributeddataparallel>`_.
Needed for distributed training. This is where a model should be wrapped by DDP.
"""
if not is_distributed_training_run():
return
assert (
self.distributed_model is None
), "init_ddp_non_elastic must only be called once"
broadcast_buffers = (
self.broadcast_buffers_mode == BroadcastBuffersMode.FORWARD_PASS
)
if self.use_sharded_ddp:
if not isinstance(self.optimizer, ZeRO):
raise ValueError(
"ShardedDataParallel engine should only be used in conjunction with ZeRO optimizer"
)
from fairscale.nn.data_parallel import ShardedDataParallel
# Replace the original DDP wrap by the shard-aware ShardedDDP
self.distributed_model = ShardedDataParallel(
module=self.base_model,
sharded_optimizer=self.optimizer.optimizer,
broadcast_buffers=broadcast_buffers,
)
else:
self.distributed_model = init_distributed_data_parallel_model(
self.base_model,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
if self.fp16_grad_compress:
from torch.distributed.algorithms import ddp_comm_hooks
# FP16 hook is stateless and only takes a process group as the state.
# We use the default process group so we set the state to None.
process_group = None
self.distributed_model.register_comm_hook(
process_group, ddp_comm_hooks.default_hooks.fp16_compress_hook
)
if (
isinstance(self.base_loss, ClassyLoss)
and self.base_loss.has_learned_parameters()
):
logging.info("Initializing distributed loss")
self.distributed_loss = init_distributed_data_parallel_model(
self.base_loss,
broadcast_buffers=broadcast_buffers,
find_unused_parameters=self.find_unused_parameters,
bucket_cap_mb=self.ddp_bucket_cap_mb,
)
@property
def where(self):
"""Returns the proportion of training that has completed. If in test
only mode, returns proportion of testing completed
Returned value is a float in the range [0, 1)
"""
current_step = self.num_updates / self.get_global_batchsize()
num_phases = (
self.get_total_test_phases()
if self.test_only
else self.get_total_training_phases()
)
if self.num_batches_per_phase <= 0:
raise RuntimeError("No batches to read. Is the dataset empty?")
num_steps = num_phases * self.num_batches_per_phase
where = current_step / num_steps
return where
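    # Arithmetic sketch for `where` (illustration only): with a global batch size of
    # 256, num_updates = 256 * 300 gives current_step = 300; with 10 train phases of
    # 100 batches each, num_steps = 1000, so where = 300 / 1000 = 0.3.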
def get_classy_state(self, deep_copy: bool = False):
"""Returns serialiable state of task
Args:
deep_copy: If true, does a deep copy of state before returning.
"""
optimizer_state = {}
if self.optimizer is not None:
optimizer_state = self.optimizer.get_classy_state()
classy_state_dict = {
"train": self.train,
"base_model": self.base_model.get_classy_state(),
"meters": [meter.get_classy_state() for meter in self.meters],
"optimizer": optimizer_state,
"phase_idx": self.phase_idx,
"train_phase_idx": self.train_phase_idx,
"num_updates": self.num_updates,
"losses": self.losses,
"hooks": {hook.name(): hook.get_classy_state() for hook in self.hooks},
"loss": {},
}
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
classy_state_dict["train_dataset_iterator"] = self.datasets[
"train"
].get_classy_state()
if isinstance(self.base_loss, ClassyLoss):
classy_state_dict["loss"] = self.base_loss.get_classy_state()
if self.amp_args is not None:
if self.amp_type == AmpType.APEX:
classy_state_dict["amp"] = apex.amp.state_dict()
elif self.amp_grad_scaler is not None:
classy_state_dict["amp"] = self.amp_grad_scaler.state_dict()
if deep_copy:
classy_state_dict = copy.deepcopy(classy_state_dict)
return classy_state_dict
def set_classy_state(self, state):
"""Set task state
Args:
state: Dict containing state of a task
"""
self.train = False if self.test_only else state["train"]
self.base_model.set_classy_state(state["base_model"])
if self.test_only:
# if we're only testing, just need the state of the model to be updated
return
self.phase_idx = state["phase_idx"]
self.num_updates = state["num_updates"]
self.train_phase_idx = state["train_phase_idx"]
self.losses = state["losses"]
for meter, meter_state in zip(self.meters, state["meters"]):
meter.set_classy_state(meter_state)
if self.optimizer is not None:
self.optimizer.set_classy_state(state["optimizer"])
if state.get("loss") and isinstance(self.base_loss, ClassyLoss):
self.base_loss.set_classy_state(state["loss"])
if "amp" in state:
if self.amp_type == AmpType.APEX:
apex.amp.load_state_dict(state["amp"])
else:
self.amp_grad_scaler.load_state_dict(state["amp"])
for hook in self.hooks:
# we still want to be able to run when new hooks are added or old
# hooks are removed
if hook.name() in state["hooks"]:
hook.set_classy_state(state["hooks"][hook.name()])
else:
logging.warning(f"No state found for hook: {hook.name()}")
if "train" in self.datasets and self._is_checkpointable_dataset(
self.datasets["train"]
):
self.datasets["train"].set_classy_state(state.get("train_dataset_iterator"))
@staticmethod
def _is_checkpointable_dataset(dataset):
return hasattr(dataset, "get_classy_state") and hasattr(
dataset, "set_classy_state"
)
def eval_step(self):
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
f"Returned sample [{sample}] is not a map with 'input' and"
+ "'target' keys"
)
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
with torch.no_grad(), torch_amp_context:
output = self.model(sample["input"])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append(loss.data.cpu().item())
self.update_meters(output, sample)
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def check_inf_nan(self, loss):
if loss == float("inf") or loss == float("-inf") or loss != loss:
raise FloatingPointError(f"Loss is infinity or NaN: {loss}")
def _should_do_step(self):
"""Tells if we will be performing an optimizer step.
Returns True always if there is no gradient accumulation. With gradient
accumulation returns True only when the gradients will be synchronized and we
will be performing an optimizer step.
"""
update_idx = self.num_updates // self.get_global_batchsize()
return (update_idx % self.optimizer_period) == self.optimizer_period - 1
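    # Gradient-accumulation sketch (illustration only): with a global batch size of
    # 64 and simulated_global_batchsize=256, prepare() sets optimizer_period=4.
    # update_idx then counts completed train steps, so _should_do_step() returns
    # True on update_idx 3, 7, 11, ... and the optimizer steps once every 4 batches.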
def train_step(self):
"""Train step to be executed in train loop."""
self.last_batch = None
# Process next sample
with Timer() as timer:
sample = next(self.data_iterator)
assert isinstance(sample, dict) and "input" in sample and "target" in sample, (
f"Returned sample [{sample}] is not a map with 'input' and"
+ "'target' keys"
)
# Copy sample to GPU
target = sample["target"]
if self.use_gpu:
sample = recursive_copy_to_gpu(sample, non_blocking=True)
if self.mixup_transform is not None:
sample = self.mixup_transform(sample)
# Optional Pytorch AMP context
torch_amp_context = (
torch.cuda.amp.autocast()
if self.amp_type == AmpType.PYTORCH
else contextlib.suppress()
)
# only sync with DDP when we need to perform an optimizer step
# an optimizer step can be skipped if gradient accumulation is enabled
do_step = self._should_do_step()
ctx_mgr_model = (
self.distributed_model.no_sync()
if self.distributed_model is not None and not do_step
else contextlib.suppress()
)
ctx_mgr_loss = (
self.distributed_loss.no_sync()
if self.distributed_loss is not None and not do_step
else contextlib.suppress()
)
with ctx_mgr_model, ctx_mgr_loss:
# Forward pass
with torch.enable_grad(), torch_amp_context:
output = self.model(sample["input"])
local_loss = self.compute_loss(output, sample)
loss = local_loss.detach().clone()
self.losses.append(loss.data.cpu().item())
self.update_meters(output, sample)
# Backwards pass + optimizer step
self.run_optimizer(local_loss)
self.num_updates += self.get_global_batchsize()
# Move some data to the task so hooks get a chance to access it
self.last_batch = LastBatchInfo(
loss=loss,
output=output,
target=target,
sample=sample,
step_data={"sample_fetch_time": timer.elapsed_time},
)
def compute_loss(self, model_output, sample):
return self.loss(model_output, sample["target"])
def run_optimizer(self, loss):
"""Runs backwards pass and update the optimizer"""
self.check_inf_nan(loss)
# Gradient accumulation logic. We always set optimizer_period, even
# if gradient accumulation is disabled. Assumes all batches have the
# same size
update_idx = self.num_updates // self.get_global_batchsize()
do_zero_grad = (update_idx % self.optimizer_period) == 0
do_step = self._should_do_step()
if do_zero_grad:
self.optimizer.zero_grad()
if self.amp_type == AmpType.APEX:
with apex.amp.scale_loss(loss, self.optimizer.optimizer) as scaled_loss:
scaled_loss.backward()
elif self.amp_type == AmpType.PYTORCH:
self.amp_grad_scaler.scale(loss).backward()
else:
loss.backward()
if do_step:
# Handle gradient accumulation related gradient rescaling
if self.optimizer_period != 1:
self._rescale_gradients(1 / self.optimizer_period)
# Clipping must happen after grad accumulation
if self.clip_grad_norm is not None:
self._clip_gradients(self.clip_grad_norm)
if self.amp_type == AmpType.PYTORCH:
# If using mixed precision, handle underflow-related scaling
# See https://pytorch.org/docs/stable/amp.html#gradient-scaling
# for context
self.amp_grad_scaler.step(self.optimizer, where=self.where)
self.amp_grad_scaler.update()
else:
self.optimizer.step(where=self.where)
def _rescale_gradients(self, scale):
for param in master_params(self.optimizer):
if param.grad is not None:
param.grad.data.mul_(scale)
def _clip_gradients(self, max_norm):
nn.utils.clip_grad_norm_(master_params(self.optimizer), max_norm)
def update_meters(self, model_output, sample):
target = sample["target"].detach().cpu()
model_output = model_output.detach().cpu()
# Update meters
for meter in self.meters:
meter.update(model_output, target, is_train=self.train)
def synchronize_losses(self):
"""Average the losses across the different replicas"""
# Average losses across nodes
losses_tensor = torch.tensor(self.losses)
synchronized_losses_tensor = all_reduce_mean(losses_tensor)
self.losses = synchronized_losses_tensor.tolist()
def advance_phase(self):
"""Performs bookkeeping / task updates between phases
Increments phase idx, resets meters, resets loss history,
resets counters, shuffles dataset, rebuilds iterators, and
sets the train / test state for phase.
"""
logging.debug("Advancing phase")
# Reset meters for next phase / epoch
for meter in self.meters:
meter.reset()
# Reset loss history for next epoch
self.losses = []
# Setup new phase
self.phase_idx += 1
phase = self.phases[self.phase_idx]
self.train = True if phase["train"] else False
if self.train:
self.train_phase_idx += 1
# Re-build dataloader & re-create iterator anytime membership changes.
self.build_dataloaders_for_current_phase()
self.create_data_iterators()
# Set up pytorch module in train vs eval mode, update optimizer.
self._set_model_train_mode()
def done_training(self):
"""Stop condition for training"""
return self.phase_idx + 1 >= len(self.phases)
def create_data_iterators(self):
"""Creates data iterator(s) for the current phase."""
# Delete iterator explicitly so that all dataloader processes
# are cleaned up.
del self.data_iterator
self.data_iterator = iter(self.dataloader)
def _set_model_train_mode(self):
"""Set train mode for model"""
phase = self.phases[self.phase_idx]
self.base_model.train(phase["train"])
self.base_loss.train(phase["train"])
if (
self.broadcast_buffers_mode == BroadcastBuffersMode.BEFORE_EVAL
and not self.train
):
self._broadcast_buffers()
def _broadcast_buffers(self):
"""Explicitly synchronize buffers across all devices."""
if self.distributed_model is None:
return
buffers = list(self.base_model.buffers())
if len(buffers) > 0:
logging.info("Synchronizing buffers before evaluation.")
for buffer in buffers:
broadcast(buffer, 0, group=self.distributed_model.process_group)
# TODO: Functions below should be better abstracted into the dataloader
# abstraction
def get_batchsize_per_replica(self):
"""Return local replica's batchsize for dataset (e.g. batchsize per GPU)"""
return self.datasets[self.phase_type].get_batchsize_per_replica()
def get_global_batchsize(self):
"""Return global batchsize across all trainers"""
return self.datasets[self.phase_type].get_global_batchsize()
def on_start(self):
for hook in self.hooks:
hook.on_start(self)
def on_phase_start(self):
self.phase_start_time_total = time.perf_counter()
self.advance_phase()
for hook in self.hooks:
hook.on_phase_start(self)
self.phase_start_time_train = time.perf_counter()
def on_phase_end(self):
self.log_phase_end(self.phase_type)
if self.train:
self.optimizer.on_epoch(where=self.where)
logging.debug("Syncing losses on phase end...")
self.synchronize_losses()
logging.debug("...losses synced")
logging.debug("Syncing meters on phase end...")
for meter in self.meters:
meter.sync_state()
logging.debug("...meters synced")
barrier()
for hook in self.hooks:
hook.on_phase_end(self)
self.perf_log = []
self.log_phase_end(f"{self.phase_type}_total")
if hasattr(self.datasets[self.phase_type], "on_phase_end"):
self.datasets[self.phase_type].on_phase_end()
def on_end(self):
for hook in self.hooks:
hook.on_end(self)
def log_phase_end(self, tag):
start_time = (
self.phase_start_time_train
if tag == self.phase_type
else self.phase_start_time_total
)
phase_duration = time.perf_counter() - start_time
im_per_sec = (
self.get_global_batchsize() * self.num_batches_per_phase
) / phase_duration
self.perf_log.append(
{"tag": tag, "phase_idx": self.train_phase_idx, "im_per_sec": im_per_sec}
)
def __repr__(self):
if hasattr(self, "_config"):
config = json.dumps(self._config, indent=4)
return f"{super().__repr__()} initialized with config:\n{config}"
return super().__repr__()
|
cv_bias_amplification-main
|
my-project-release/my-project/tasks/biasamp_classification_task.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import traceback
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
from classy_vision.tasks.classy_task import ClassyTask
from classy_vision.tasks.fine_tuning_task import FineTuningTask
FILE_ROOT = Path(__file__).parent
TASK_REGISTRY = {}
TASK_CLASS_NAMES = set()
TASK_REGISTRY_TB = {}
TASK_CLASS_NAMES_TB = {}
def build_task(config):
"""Builds a ClassyTask from a config.
This assumes a 'name' key in the config which is used to determine what
task class to instantiate. For instance, a config `{"name": "my_task",
"foo": "bar"}` will find a class that was registered as "my_task"
(see :func:`register_task`) and call .from_config on it."""
task = TASK_REGISTRY[config["name"]].from_config(config)
return task
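# Hedged usage sketch (not part of the original module): "my_task" is a
# hypothetical registered name; build_task simply looks the name up in
# TASK_REGISTRY and delegates to that class's from_config.
#
#   config = {"name": "my_task", "foo": "bar"}
#   task = build_task(config)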
def register_task(name):
"""Registers a ClassyTask subclass.
This decorator allows Classy Vision to instantiate a subclass of ClassyTask
from a configuration file, even if the class itself is not part of the
Classy Vision framework. To use it, apply this decorator to a ClassyTask
subclass, like this:
.. code-block:: python
@register_task('my_task')
class MyTask(ClassyTask):
...
To instantiate a task from a configuration file, see :func:`build_task`."""
def register_task_cls(cls):
if name in TASK_REGISTRY:
msg = "Cannot register duplicate task ({}). Already registered at \n{}\n"
raise ValueError(msg.format(name, TASK_REGISTRY_TB[name]))
if not issubclass(cls, ClassyTask):
raise ValueError(
"Task ({}: {}) must extend ClassyTask".format(name, cls.__name__)
)
if cls.__name__ in TASK_CLASS_NAMES:
msg = (
"Cannot register task with duplicate class name({})."
+ "Previously registered at \n{}\n"
)
raise ValueError(
msg.format(cls.__name__, TASK_CLASS_NAMES_TB[cls.__name__])
)
tb = "".join(traceback.format_stack())
TASK_REGISTRY[name] = cls
TASK_CLASS_NAMES.add(cls.__name__)
TASK_REGISTRY_TB[name] = tb
TASK_CLASS_NAMES_TB[cls.__name__] = tb
return cls
return register_task_cls
from .biasamp_classification_task import BiasAmpClassificationTask # isort:skip
# from .fine_tuning_task import FineTuningTask # isort:skip
__all__ = [
"ClassyTask",
# "FineTuningTask",
"build_task",
"register_task",
"BiasAmpClassificationTask",
]
# automatically import any Python files in the tasks/ directory
import_all_modules(FILE_ROOT, "tasks")
|
cv_bias_amplification-main
|
my-project-release/my-project/tasks/__init__.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from torchvision.datasets import FashionMNIST
import torch.utils.data
import torch
from torchvision import datasets, transforms
import classy_vision.generic.util as util
import torchvision
import math
import numpy as np
import json
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.dataset.transforms import build_transforms, ClassyTransform, register_transform
from PIL import Image
# Handle dataset so that we only get a subset of images (`task_classes`).
# Perform overlay transform (`attr_classes`) for a specific proportion of images (`epsilon`) with a specific strength (`eta`).
@register_dataset("cifar100_random_sample_train")
class CIFAR100RandomSampleTrain(ClassyDataset):
def __init__(self,
batchsize_per_replica,
shuffle, transform,
num_samples,
dataset_size,
p,
seed,
class_mapping):
# Grab original dataset
        dataset = torchvision.datasets.CIFAR100(root='./', download=True, train=True)
# Instantiate storage for task images
self.valid_cifar_idx_tasks = [] # will become a list of original idxs for the task-class subset.
self.mapped_classes = [] # will become a list of mapped classes for the task-class subset.
valid_cifar_idx_tasks_breakdown = {i:[] for i in range(0,len(class_mapping))} # key=task-class, value(to be)=original idx
# Store indices for task images
for i in range(len(dataset)):
valid_cifar_idx_tasks_breakdown[dataset[i][1]].append(i)
# Shuffle task images for selecting subset
with util.torch_seed(seed):
with util.numpy_seed(seed):
for key, _ in valid_cifar_idx_tasks_breakdown.items():
np.random.shuffle(valid_cifar_idx_tasks_breakdown[key])
class_size = int(np.rint(500*dataset_size))
# Collect task images and class mappings for CIFAR100 subset
for key, _ in valid_cifar_idx_tasks_breakdown.items():
self.valid_cifar_idx_tasks.extend(valid_cifar_idx_tasks_breakdown[key][0:class_size])
self.mapped_classes.extend([class_mapping[key]]*class_size)
# Assign attribute based on task-class probability
attr_breakdown = {} # key(to be)=task-class, value(to be)=attr-class
with util.torch_seed(seed+1):
with util.numpy_seed(seed+1):
for key, _ in valid_cifar_idx_tasks_breakdown.items():
hold = [1] * (int)(np.round(class_size * p[class_mapping[key]], 0)) + [0] * (int)(np.round(class_size * (1.0-p[class_mapping[key]]), 0))
np.random.shuffle(hold)
attr_breakdown[key] = hold
# Assign overlay image based on attribute-class assignment
self.valid_attrs = [None]*class_size*100 # will become a list of attr-classes, aligned with corresponding idxs in task_idx_list
for key, _ in valid_cifar_idx_tasks_breakdown.items():
for cifar_task_idx, attr in zip(valid_cifar_idx_tasks_breakdown[key], attr_breakdown[key]):
# this assumes that the dataset ordering does not change between iterations.
self.valid_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = 'b' if attr else 'a'
# Confirm there are the right number of samples
assert num_samples == len(self.valid_cifar_idx_tasks)
assert num_samples == len(self.mapped_classes)
assert num_samples == len(self.valid_attrs)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.valid_cifar_idx_tasks
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[self.valid_cifar_idx_tasks[idx]])
img = sample[0]
mapped_label = self.mapped_classes[idx]
attribute = self.valid_attrs[idx]
# perform overlay transform
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
img = np.asarray(img)
img_dtype = img.dtype
img = 255 - img if attribute == 'b' else img
sample[0] = Image.fromarray(img.astype(img_dtype))
sample.append(idx)
sample.append(mapped_label)
sample.append(attribute)
sample = tuple(sample) # TODO: Update future transforms with this new ordering.
if self.transform is None:
return sample
return self.transform(sample)
def __len__(self):
return len(self.valid_cifar_idx_tasks)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=config["num_samples"],
dataset_size=config["dataset_size"],
p=config["p"],
seed=config["seed"],
class_mapping=config["class_mapping"]
)
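# Illustrative config sketch for this dataset (an assumption, not an official
# schema; values are hypothetical). Note that num_samples must equal
# 100 * round(500 * dataset_size), and `p` is indexed by the mapped class.
#
#   config = {
#       "batchsize_per_replica": 32,
#       "shuffle": True,
#       "transforms": [],
#       "num_samples": 50000,          # dataset_size=1.0 -> 500 images * 100 classes
#       "dataset_size": 1.0,
#       "p": [0.8, 0.2, 0.8, 0.2, 0.8, 0.2, 0.8, 0.2, 0.8, 0.2],
#       "seed": 0,
#       "class_mapping": {i: i % 10 for i in range(100)},
#   }
#   dataset = CIFAR100RandomSampleTrain.from_config(config)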
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/cifar100_random_sample.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from PIL import Image
import numpy as np
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import json
from classy_vision.dataset.transforms import build_transforms, ClassyTransform, register_transform
from classy_vision.dataset import build_dataset
import classy_vision.generic.util as util
from collections.abc import Iterable
import cv2
import random
import time
import logging
@register_transform("invert")
class Invert(ClassyTransform):
"""With probablity p_class, invert the image.
Args:
p (dict <int: float>): Probabilities for each class.
seed (int): Seed used for replication.
"""
def __init__(self, p, seed):
self.p = p
self.seed = seed
def __call__(self, sample):
"""
Args:
sample (tuple): Image to be altered and its class
Returns:
tuple: (Altered image, class)
"""
img = sample[0]
original_label = sample[1]
sample_id = sample[2]
mapped_label = sample[3]
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
img = np.asarray(img)
img_dtype = img.dtype
if self.seed >= 0:
with util.torch_seed(self.seed + sample_id):
with util.numpy_seed(self.seed + sample_id):
attribute = 'b' if np.random.rand() < self.p[mapped_label] else 'a'
else:
attribute = 'b' if np.random.rand() < self.p[mapped_label] else 'a'
img = 255 - img if attribute == 'b' else img
img = img.astype(img_dtype)
return (Image.fromarray(img), original_label, sample_id, mapped_label, attribute)
def __repr__(self):
return self.__class__.__name__
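# Hedged illustration (not in the original file; `pil_img` is a hypothetical PIL
# image): Invert expects a sample tuple of (image, original_label, sample_id,
# mapped_label) and returns the same fields plus an 'a'/'b' attribute, inverting
# the image with probability p[mapped_label].
#
#   invert = Invert(p={0: 0.9, 1: 0.1}, seed=0)
#   img, original_label, sample_id, mapped_label, attribute = invert((pil_img, 3, 17, 0))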
@register_transform("invert_exact")
class InvertExact(ClassyTransform):
"""Invert the image according to the provided inversion list.
Args:
invert (list <int>): Whether or not the image at index i should be inverted
"""
def __init__(self, invert):
self.invert = invert
def __call__(self, sample):
"""
Args:
sample (tuple): Image to be altered and its class
Returns:
tuple: (Altered image, class)
"""
img = sample[0]
original_label = sample[1]
sample_id = sample[2]
mapped_label = sample[3]
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
img = np.asarray(img)
img_dtype = img.dtype
attribute = 'b' if self.invert[sample_id] else 'a'
img = 255 - img if attribute == 'b' else img
img = img.astype(img_dtype)
return (Image.fromarray(img), original_label, sample_id, mapped_label, attribute)
def __repr__(self):
return self.__class__.__name__
@register_transform("assign_class")
class AssignClass(ClassyTransform):
"""Re-assign each image class to a given class.
Args:
classes (dict <int: int>): New class assignments, with current class:new class
"""
def __init__(self, classes):
self.classes = classes
def __call__(self, sample):
"""
Args:
sample (tuple): Class to be altered and its image.
Returns:
tuple: (Altered image, class)
"""
img = sample[0]
label = sample[1]
sample_id = sample[2]
return (img, label, sample_id, self.classes[label])
def __repr__(self):
return self.__class__.__name__
@register_transform("swap_task_attr")
class SwapTaskAttr(ClassyTransform):
"""Switch the task and attribute.
Converts the original attribute to a numeric form.
"""
def __call__(self, sample):
img = sample[0]
original_label = sample[1]
sample_id = sample[2]
mapped_label = sample[3]
attribute = sample[4]
return (img, original_label, sample_id, ord(attribute)-97, mapped_label)
def __repr__(self):
return self.__class__.__name__
@register_transform("assign_class_str")
class AssignClassStr(ClassyTransform):
"""Re-assign the image to a given class.
Args:
classes (dict <int: int>): New class assignments, with current class:new class
"""
def __init__(self, classes):
self.classes = classes
def __call__(self, sample):
"""
Args:
sample (tuple): Class to be altered and its image.
Returns:
tuple: (Altered image, class)
"""
img = sample[0]
label = sample[1]
sample_id = sample[2]
attribute = sample[3]
return (img, label, sample_id, attribute, self.classes[str(label)])
def __repr__(self):
return self.__class__.__name__
@register_transform("rand_assign_class_rand_invert")
class RandAssignClassRandInvert(ClassyTransform):
"""Helper function to make configs easier to write. Warning: Requires
dataset to be registered before transform is called. Requires dataset
to be cheap to do one pass over to create maps when transform is
created
Randomly assigns the original class targets to a new, smaller, set
of class targets. The original class set will be evenly divided
among the new classes Then inverts images with probability p based
on the probability map provided.
Args:
        num_new_classes (int): Number of new class targets
        invert_probs (array[float]): Inversion probability for each new class
        dataset_config (dict): Config of an already-registered dataset, used to retrieve class info
        exact (bool): If True, invert an exact number of images per class (e.g. class_1: 0.5 => exactly half of class_1 images are inverted) instead of flipping a coin for each image
        assignment_seed (optional int): Seed used for the random class assignment; must be the same value if you want the class mapping to match for the test set
        inversion_seed (optional int): Seed used for deciding which images to invert. If None, the current time is used.
"""
def __init__(self, num_new_classes, invert_probs, dataset_config, exact=True, assignment_seed=0, inversion_seed=0):
# assertions
assert len(invert_probs) == num_new_classes, "inversion probabilities must match the number of final classes"
assert assignment_seed is not None, "Assignment seed cannot be None otherwise it will be impossible to track the mapping"
for i in range(0, num_new_classes):
assert invert_probs[i] >= 0.0 and invert_probs[i] <= 1.0, "Valid probabilities must be provided"
if inversion_seed is None:
inversion_seed = int(time.time())
# For most datasets, only the name is required, we set batchsize, shuffle, transforms, num_workers
dataset_config["batchsize_per_replica"] = 1
dataset_config["shuffle"] = False
dataset_config["transforms"] = []
dataset_config["num_workers"] = 0
# Get target mapping directly from dataset
dataset = build_dataset(dataset_config)
index_to_class_mapping = {}
target_counts = {}
for i in range(0, len(dataset)):
sample = dataset[i]
index_to_class_mapping[i] = {"original_target": sample[1]}
if sample[1] not in target_counts:
target_counts[sample[1]] = 0
target_counts[sample[1]] += 1
target_list = list(target_counts.keys())
target_list.sort()
new_target_list = []
quotient = len(target_list) // num_new_classes
remainder = len(target_list) % num_new_classes
# Create correct number of new class instances
for i in range(0, num_new_classes):
num = quotient
if i < remainder:
num += 1
new_target_list += [i for j in range(0, num)]
with util.numpy_seed(assignment_seed):
np.random.shuffle(new_target_list)
class_mapping = dict(zip(target_list, new_target_list))
logging.info("Classy mapping: {}".format(str(class_mapping)))
self.random_assign = AssignClass(class_mapping)
# Now that we have our random assignment, need our exact list
inversion_counts = {}
for i in range(0, len(target_counts)):
if class_mapping[i] not in inversion_counts:
inversion_counts[class_mapping[i]] = 0
inversion_counts[class_mapping[i]] += target_counts[i]
target_to_inversion_lists = {}
target_to_inversion_iterators = []
for i in range(0, len(invert_probs)):
prob = invert_probs[i]
count = inversion_counts[i]
target_to_inversion_lists[i] = [0] * round(count * (1 - prob)) + [1] * round(count * prob)
with util.numpy_seed(inversion_seed):
np.random.shuffle(target_to_inversion_lists[i])
target_to_inversion_iterators.append(iter(target_to_inversion_lists[i]))
inversions = [None] * len(dataset)
for i in range(0, len(dataset)):
it = target_to_inversion_iterators[class_mapping[index_to_class_mapping[i]["original_target"]]]
inversions[i] = next(it)
logging.info("Inversions: {}".format(str(inversions)))
self.exact_invert = InvertExact(inversions)
def __call__(self, sample):
new_sample = self.random_assign(sample)
new_sample = self.exact_invert(new_sample)
return new_sample
def __repr__(self):
return self.__class__.__name__
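# Illustrative transform config (an assumption; the dataset name and probability
# values are hypothetical) showing how this helper could appear in a transforms
# list of a Classy Vision config:
#
#   {
#       "name": "rand_assign_class_rand_invert",
#       "num_new_classes": 2,
#       "invert_probs": [0.9, 0.1],
#       "dataset_config": {"name": "cifar100_train"},
#       "assignment_seed": 0,
#       "inversion_seed": 0,
#   }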
@register_transform("PadToSize")
class PadToSize(ClassyTransform):
"""
Pad the input PIL Image so that it has the specified size. The image is returned
unchanged if at least one dimension of the original image is larger than the
corresponding dimension in the requested size.
Args:
size (sequence): Output size (height, width)
        border_type (string): The cv2 border type to use.
pad_both_sides (bool): True: add padding to both sides to keep the image
in the centre; False: add padding to the right and/or bottom.
"""
def __init__(
self,
size,
border_type="BORDER_CONSTANT",
pad_both_sides=True,
):
self.size = size
self.pad_both_sides = pad_both_sides
self.border_type = self._getBorderType(border_type)
assert (
isinstance(size, Iterable) and len(size) == 2
), "Got inappropriate size arg: {}. Expected a sequence (h, w)".format(
type(size)
)
def _pad(self, img: Image.Image) -> Image.Image:
padding = self._get_padding(img)
assert len(padding) == 2
padding_tlbr = self._get_padding_tlbr(padding)
if (
padding_tlbr[0] > 0
or padding_tlbr[1] > 0
or padding_tlbr[2] > 0
or padding_tlbr[3] > 0
):
padded = cv2.copyMakeBorder(
cv2.cvtColor(np.asarray(img), cv2.COLOR_RGB2BGR),
padding_tlbr[0],
padding_tlbr[2],
padding_tlbr[1],
padding_tlbr[3],
self.border_type,
value=[0, 0, 0], # black
)
            result_img = Image.fromarray(cv2.cvtColor(padded, cv2.COLOR_BGR2RGB))
            return result_img
        # No padding needed: return the image unchanged.
        return img
def _getBorderType(self, border_type: str) -> int:
if border_type == "BORDER_CONSTANT":
return cv2.BORDER_CONSTANT
elif border_type == "BORDER_REFLECT":
return cv2.BORDER_REFLECT
elif border_type == "BORDER_REFLECT_101":
return cv2.BORDER_REFLECT_101
elif border_type == "BORDER_REPLICATE":
return cv2.BORDER_REPLICATE
elif border_type == "BORDER_WRAP":
return cv2.BORDER_WRAP
        else:
            raise ValueError(f'unsupported border type "{border_type}"')
def _get_padding(self, img: Image.Image) -> Iterable:
img_width, img_height = img.size
return (self.size[0] - img_height, self.size[1] - img_width)
def _get_padding_tlbr(self, padding: Iterable) -> Iterable:
top_padding = padding[0] // 2 if self.pad_both_sides else 0
left_padding = padding[1] // 2 if self.pad_both_sides else 0
bottom_padding = padding[0] - top_padding
right_padding = padding[1] - left_padding
return [top_padding, left_padding, bottom_padding, right_padding]
def __call__(self, img: Image.Image) -> Image.Image:
"""
Args:
img (PIL Image): Image to be padded.
Returns:
PIL Image: Padded image with dimensions (h, w).
"""
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
img_width, img_height = img.size
if img_height > self.size[0] or img_width > self.size[1]:
return img
else:
return self._pad(img)
def __repr__(self):
return (
self.__class__.__name__
+ "(size={0}, border_type={1}, pad_both_sides={2})".format(
self.size,
self.border_type,
repr(self.pad_both_sides),
)
)
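# Minimal usage sketch for PadToSize (illustration only, not part of the original
# module): pads a 48x32 RGB image up to 64x64 with a black constant border.
if __name__ == "__main__":
    example_img = Image.new("RGB", (48, 32), color=(255, 0, 0))
    padded_img = PadToSize(size=(64, 64), border_type="BORDER_CONSTANT")(example_img)
    print(padded_img.size)  # expected: (64, 64)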
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/inversion_transforms.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
from classy_vision.dataset import ClassyDataset, register_dataset
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
SampleType,
)
from classy_vision.dataset.transforms import ClassyTransform, build_transforms
from torchvision.datasets import CIFAR100
import torch.utils.data
import torch
import torchvision
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.dataset.transforms import build_transforms
@register_dataset("cifar100_train")
class CIFAR100Train(ClassyDataset):
def __init__(self, batchsize_per_replica, shuffle, transform, num_samples):
        dataset = torchvision.datasets.CIFAR100(root='./', download=True, train=True)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.dataset
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[idx])
sample.append(idx)
sample = tuple(sample)
if self.transform is None:
return sample
return self.transform(sample)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=None,
)
@register_dataset("cifar100_test")
class MyClassyDatasetCIFAR100Test(ClassyDataset):
def __init__(self, batchsize_per_replica, shuffle, transform, num_samples):
dataset = torchvision.datasets.CIFAR100(root='./', download=True, train=False)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.dataset
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[idx])
sample.append(idx)
sample = tuple(sample)
if self.transform is None:
return sample
return self.transform(sample)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=None,
)
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/cifar100.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the datasets/ directory
import_all_modules(FILE_ROOT, "datasets")
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
from classy_vision.dataset import ClassyDataset, register_dataset
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
SampleType,
)
from classy_vision.dataset.transforms import ClassyTransform, build_transforms
from torchvision.datasets import FashionMNIST
import torch.utils.data
import torch
import torchvision
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.dataset.transforms import build_transforms
@register_dataset("fashionmnist_train")
class FashionMNISTTrain(ClassyDataset):
def __init__(self, batchsize_per_replica, shuffle, transform, num_samples):
        dataset = torchvision.datasets.FashionMNIST(root='./', download=True, train=True)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.dataset
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[idx])
sample.append(idx)
sample = tuple(sample)
if self.transform is None:
return sample
return self.transform(sample)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=None,
)
@register_dataset("fashionmnist_test")
class FashionMNISTTest(ClassyDataset):
def __init__(self, batchsize_per_replica, shuffle, transform, num_samples):
dataset = torchvision.datasets.FashionMNIST(root='./', download=True, train=False)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.dataset
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[idx])
sample.append(idx)
sample = tuple(sample)
if self.transform is None:
return sample
return self.transform(sample)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=None,
)
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/fashionmnist.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from typing import Any, Callable, Dict, Optional, Union
from classy_vision.dataset import ClassyDataset, register_dataset
from classy_vision.dataset.core.random_image_datasets import (
RandomImageBinaryClassDataset,
SampleType,
)
from classy_vision.dataset.transforms import ClassyTransform, build_transforms
from torchvision.datasets import FashionMNIST
import torch.utils.data
import torch
import torchvision
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.dataset.transforms import build_transforms
import numpy as np
import classy_vision.generic.util as util
from PIL import Image
# Handle dataset so that we only get a subset of images (`task_classes`).
# Perform overlay transform (`attr_classes`) for a specific proportion of images (`epsilon`) with a specific strength (`eta`).
@register_dataset("cifar10_train_overlay")
class CIFAR10TrainOverlay(ClassyDataset):
def __init__(self,
batchsize_per_replica,
shuffle, transform,
num_samples,
task_classes,
attr_classes,
eta,
epsilon,
seed):
# Set up necessary variables
assert len(task_classes) == 2 # assume class size = 2 for now
assert len(attr_classes) == 2 # assume class size = 2 for now
p = [np.round(0.5 + (epsilon * 0.01), 2), np.round(0.5 - (epsilon * 0.01), 2)]
self.eta = eta
# Grab original dataset
        dataset = torchvision.datasets.CIFAR10(root='./', download=True, train=True)
# Instantiate storage for task and attribute images
self.valid_cifar_idx_tasks = [] # will become a list of original idxs for the task-class subset.
valid_cifar_idx_tasks_breakdown = {i:[] for i in task_classes} # key=task-class, value(to be)=original idx
valid_cifar_idx_attrs_breakdown = {i:[] for i in attr_classes} # key=attr-class, value(to be)=original idx
# Store indices for task and attribute images
for i in range(len(dataset)):
if dataset[i][1] in task_classes:
self.valid_cifar_idx_tasks.append(i)
valid_cifar_idx_tasks_breakdown[dataset[i][1]].append(i)
if dataset[i][1] in attr_classes:
valid_cifar_idx_attrs_breakdown[dataset[i][1]].append(i)
# Shuffle attribute images for random pairing
with util.torch_seed(seed):
with util.numpy_seed(seed):
for key, _ in valid_cifar_idx_attrs_breakdown.items():
np.random.shuffle(valid_cifar_idx_attrs_breakdown[key])
# Assign attribute-class based on task-class probability
attr_breakdown = {} # key(to be)=task-class, value(to be)=attr-class
for t, t_i in zip(task_classes, range(len(task_classes))):
hold = [attr_classes[0]] * (int)(np.round(len(valid_cifar_idx_tasks_breakdown[t]) * p[t_i], 0)) + [attr_classes[1]] * (int)(np.round(len(valid_cifar_idx_tasks_breakdown[t]) * (1.0-p[t_i]), 0))
with util.torch_seed(seed+1):
with util.numpy_seed(seed+1):
np.random.shuffle(hold)
attr_breakdown[t] = hold
# Assign overlay image based on attribute-class assignment
        self.valid_cifar_idx_attrs = [None]*num_samples # will become a list of original idxs for the attr-class subset, aligned with corresponding idxs in task_idx_list
self.valid_attrs = [None]*num_samples # will become a list of attr-classes, aligned with corresponding idxs in task_idx_list
attr_pointers = {attr:0 for attr in attr_classes} # used to parse self.attr_idx_subset for exact assignment
for key, _ in valid_cifar_idx_tasks_breakdown.items():
for cifar_task_idx, attr in zip(valid_cifar_idx_tasks_breakdown[key], attr_breakdown[key]):
# images at a given `idx` for both attr_idx_list and task_idx_list should be overlayed on each other.
# we use the pointers to ensure that a unique attr-class image is used for each task-class image.
# this assumes that the dataset ordering does not change between iterations.
self.valid_cifar_idx_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = valid_cifar_idx_attrs_breakdown[attr][attr_pointers[attr]]
self.valid_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = attr
attr_pointers[attr] += 1
# Confirm there are the right number of samples
assert num_samples == len(self.valid_cifar_idx_tasks)
assert num_samples == len(self.valid_cifar_idx_attrs)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.valid_cifar_idx_tasks
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[self.valid_cifar_idx_tasks[idx]])
overlay_sample = list(self.dataset[self.valid_cifar_idx_attrs[idx]])
attribute = self.valid_attrs[idx]
sample.append(idx)
# perform overlay transform
img = sample[0]
overlay_img = overlay_sample[0]
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
sample[0] = Image.blend(img, overlay_img, self.eta*0.01)
sample.append(attribute)
sample = tuple(sample) # TODO: Update future transforms with this new ordering.
if self.transform is None:
return sample
return self.transform(sample)
def __len__(self):
return len(self.valid_cifar_idx_tasks)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=config["num_samples"],
task_classes=config["task_classes"],
attr_classes = config["attr_classes"],
eta=config["eta"],
epsilon=config["epsilon"],
seed=config["seed"]
)
# Handle dataset so that we only get a subset of images (`task_classes`).
# Perform overlay transform (`attr_classes`) for a specific proportion of images (`epsilon`) with a specific strength (`eta`).
@register_dataset("cifar10_test_overlay")
class CIFAR10TestOverlay(ClassyDataset):
def __init__(self,
batchsize_per_replica,
shuffle, transform,
num_samples,
task_classes,
attr_classes,
eta,
epsilon,
seed):
# Set up necessary variables
        assert len(task_classes) == 2 # assume exactly two task classes for now
        assert len(attr_classes) == 2 # assume exactly two attribute classes for now
p = [np.round(0.5 + (epsilon * 0.01), 2), np.round(0.5 - (epsilon * 0.01), 2)]
self.eta = eta
# Grab original dataset
dataset = torchvision.datasets.CIFAR10(root='./', download=True, train=False)
# Instantiate storage for task and attribute images
self.valid_cifar_idx_tasks = [] # will become a list of original idxs for the task-class subset.
valid_cifar_idx_tasks_breakdown = {i:[] for i in task_classes} # key=task-class, value(to be)=original idx
valid_cifar_idx_attrs_breakdown = {i:[] for i in attr_classes} # key=attr-class, value(to be)=original idx
# Store indices for task and attribute images
for i in range(len(dataset)):
if dataset[i][1] in task_classes:
self.valid_cifar_idx_tasks.append(i)
valid_cifar_idx_tasks_breakdown[dataset[i][1]].append(i)
if dataset[i][1] in attr_classes:
valid_cifar_idx_attrs_breakdown[dataset[i][1]].append(i)
# Shuffle attribute images for random pairing
with util.torch_seed(seed):
with util.numpy_seed(seed):
for key, _ in valid_cifar_idx_attrs_breakdown.items():
np.random.shuffle(valid_cifar_idx_attrs_breakdown[key])
# Assign attribute-class based on task-class probability
attr_breakdown = {} # key(to be)=task-class, value(to be)=attr-class
for t, t_i in zip(task_classes, range(len(task_classes))):
hold = [attr_classes[0]] * (int)(np.round(len(valid_cifar_idx_tasks_breakdown[t]) * p[t_i], 0)) + [attr_classes[1]] * (int)(np.round(len(valid_cifar_idx_tasks_breakdown[t]) * (1.0-p[t_i]), 0))
with util.torch_seed(seed+1):
with util.numpy_seed(seed+1):
np.random.shuffle(hold)
attr_breakdown[t] = hold
# Assign overlay image based on attribute-class assignment
        self.valid_cifar_idx_attrs = [None]*num_samples # will become a list of original idxs for the attr-class subset, aligned with the corresponding idxs in self.valid_cifar_idx_tasks
        self.valid_attrs = [None]*num_samples # will become a list of attr-classes, aligned with the corresponding idxs in self.valid_cifar_idx_tasks
        attr_pointers = {attr:0 for attr in attr_classes} # used to walk valid_cifar_idx_attrs_breakdown for exact assignment
for key, _ in valid_cifar_idx_tasks_breakdown.items():
for cifar_task_idx, attr in zip(valid_cifar_idx_tasks_breakdown[key], attr_breakdown[key]):
                # images at a given `idx` in self.valid_cifar_idx_attrs and self.valid_cifar_idx_tasks should be overlaid on each other.
                # we use the pointers to ensure that a unique attr-class image is used for each task-class image.
                # this assumes that the dataset ordering does not change between iterations.
self.valid_cifar_idx_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = valid_cifar_idx_attrs_breakdown[attr][attr_pointers[attr]]
self.valid_attrs[self.valid_cifar_idx_tasks.index(cifar_task_idx)] = attr
attr_pointers[attr] += 1
# Confirm there are the right number of samples
assert num_samples == len(self.valid_cifar_idx_tasks)
assert num_samples == len(self.valid_cifar_idx_attrs)
super().__init__(dataset, batchsize_per_replica, shuffle, transform, num_samples)
def __getitem__(self, idx: int):
assert idx >= 0 and idx < len(
self.valid_cifar_idx_tasks
), "Provided idx ({}) is outside of dataset range".format(idx)
sample = list(self.dataset[self.valid_cifar_idx_tasks[idx]])
overlay_sample = list(self.dataset[self.valid_cifar_idx_attrs[idx]])
attribute = self.valid_attrs[idx]
sample.append(idx)
# perform overlay transform
img = sample[0]
overlay_img = overlay_sample[0]
assert isinstance(img, Image.Image), "img should be PIL Image. Got {}".format(
type(img)
)
sample[0] = Image.blend(img, overlay_img, self.eta*0.01)
sample.append(attribute)
sample = tuple(sample) # TODO: Update future transforms with this new ordering.
if self.transform is None:
return sample
return self.transform(sample)
def __len__(self):
return len(self.valid_cifar_idx_tasks)
@classmethod
def from_config(cls, config):
transform = build_transforms(config["transforms"])
return cls(
batchsize_per_replica=config["batchsize_per_replica"],
shuffle=config["shuffle"],
transform=transform,
num_samples=config["num_samples"],
task_classes=config["task_classes"],
            attr_classes=config["attr_classes"],
eta=config["eta"],
epsilon=config["epsilon"],
seed=config["seed"]
)
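# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal, hedged example of how this dataset might be built via from_config.
# The task/attribute class indices, batch size, and eta/epsilon values below are
# illustrative assumptions only; num_samples must equal the size of the task-class
# subset (1,000 test images per CIFAR-10 class, so 2,000 for two task classes).
if __name__ == "__main__":
    example_config = {
        "batchsize_per_replica": 32,
        "shuffle": False,
        "transforms": [],      # assumed empty here; real configs pass a transform list
        "num_samples": 2000,
        "task_classes": [3, 5],
        "attr_classes": [0, 1],
        "eta": 20,             # overlay blend strength, in percent
        "epsilon": 10,         # attribute/task correlation offset, in percent
        "seed": 0,
    }
    # Uncomment to build the dataset (downloads CIFAR-10 to ./ on first use):
    # dataset = CIFAR10TestOverlay.from_config(example_config)
    # blended_img, original_target, sample_idx, attribute = dataset[0]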
|
cv_bias_amplification-main
|
my-project-release/my-project/datasets/cifar10_overlay.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from pathlib import Path
from classy_vision.generic.registry_utils import import_all_modules
FILE_ROOT = Path(__file__).parent
# Automatically import any Python files in the models/ directory
import_all_modules(FILE_ROOT, "models")
|
cv_bias_amplification-main
|
my-project-release/my-project/models/__init__.py
|
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torch import Tensor
import torch.nn as nn
from typing import Type, Any, Callable, Union, List, Optional
from torchvision.models.resnet import Bottleneck, BasicBlock, conv1x1
import torch.nn as nn
import torchvision.models as models
from classy_vision.models import ClassyModel, register_model
import math
@register_model("custom_resnet")
class CustomResNet(ClassyModel):
def __init__(
self,
channels: int,
num_classes: int,
layers: List[int],
block: Optional[Type[Union[BasicBlock, Bottleneck]]] = None,
zero_init_residual: bool = False,
groups: int = 1,
width_per_group: int = 64,
replace_stride_with_dilation: Optional[List[bool]] = None,
norm_layer: Optional[Callable[..., nn.Module]] = None
) -> None:
        if block is None:
            block = BasicBlock
self.inplanes = 64
super(CustomResNet, self).__init__()
self.conv1 = nn.Conv2d(channels, 64, kernel_size=3, stride=1, padding=1,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.avgpool = nn.AvgPool2d(7)
self.fc = nn.Linear(256 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
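                # He/Kaiming-style init: zero-mean normal with std sqrt(2 / fan_out), where fan_out = k*k*out_channels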
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
@classmethod
def from_config(cls, config):
# This method takes a configuration dictionary
# and returns an instance of the class. In this case,
# we'll let the number of classes be configurable.
return cls(
channels=config["channels"],
num_classes=config["num_classes"],
layers=config["layers"]
)
def _make_layer(self, block: Type[Union[BasicBlock, Bottleneck]], planes: int, blocks: int,
stride: int = 1, dilate: bool = False) -> nn.Sequential:
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def _forward_impl(self, x: Tensor) -> Tensor:
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
def forward(self, x: Tensor) -> Tensor:
return self._forward_impl(x)
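# --- Illustrative usage sketch (added for clarity; not part of the original file) ---
# A minimal example of building the model from a config dict and running a forward
# pass on CIFAR-sized input. The layer counts [3, 3, 3] and the batch size are
# illustrative assumptions, not values taken from the experiment configs.
if __name__ == "__main__":
    example_config = {"channels": 3, "num_classes": 2, "layers": [3, 3, 3]}
    model = CustomResNet.from_config(example_config)
    dummy = torch.randn(4, 3, 32, 32)  # batch of four 32x32 RGB images
    logits = model(dummy)
    print(logits.shape)  # torch.Size([4, 2])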
|
cv_bias_amplification-main
|
my-project-release/my-project/models/custom_resnet.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
from models.custom_resnet import CustomResNet
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
root = './'
EXPERIMENT_NAME = "fashionmnist" # used to store experiment configs and results
IDS_TEST = "./test_ids.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_CHECKPOINTS = None
NUM_SAMPLES_TEST = 10_000
TRANSFORMS = { # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.2860],
"std": [0.3530]
}
}
TEST_DATASET = "fashionmnist_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[1, 1, 0, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 1, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 1, 0, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 1, 1, 0]
]
sm = torch.nn.Softmax(dim=1)
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (List x List): nested list of size attributes x tasks with number of instances with that
input attribute and predicted task
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
        p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t-hat = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
        res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
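# --- Illustrative worked example (added for clarity; not part of the original file) ---
# A minimal sketch of calc_bias_amp_at on hand-made 2x2 count tables, assuming two
# attributes {"a", "b"} and two tasks {0, 1}. The ground-truth correlation (60/40)
# is amplified to 70/30 in the predictions, so every per-(a, t) term is ~0.1 and
# BiasAmp_at is ~0.1.
if __name__ == "__main__":
    toy_attributes = {"a": 0, "b": 1}
    toy_tasks = [0, 1]
    toy_res_in = [[60, 40], [40, 60]]    # attribute x task counts in the input
    toy_res_pred = [[70, 30], [30, 70]]  # attribute x task counts in the predictions
    _, toy_biasamp = calc_bias_amp_at(toy_res_in, toy_res_pred, toy_attributes, toy_tasks)
    print(toy_biasamp)  # ~0.1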
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
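# --- Illustrative worked example (added for clarity; not part of the original file) ---
# A minimal sketch of the calibration helpers on four hand-made samples with nbins=4.
# Confidences 0.9/0.8 fall in the (0.75, 1.0] bin (accuracy 0.5, mean confidence 0.85)
# and 0.6/0.55 fall in (0.5, 0.75] (accuracy 1.0, mean confidence 0.575), giving
# ECE ~0.3875 and MCE ~0.425.
if __name__ == "__main__":
    toy_percents = np.array([0.9, 0.8, 0.6, 0.55])
    toy_predictions = np.array([1, 0, 1, 0])
    toy_targets = np.array([1, 1, 1, 0])
    toy_acc, toy_conf, toy_count = get_binned_metrics(toy_percents, toy_predictions, toy_targets, 4)
    print(get_ece(toy_acc, toy_conf, toy_count, 4))  # ~0.3875
    print(get_mce(toy_acc, toy_conf, 4))             # ~0.425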
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
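# --- Illustrative worked example (added for clarity; not part of the original file) ---
# A minimal sketch of get_p. With epsilon=40 and a "fixed"/"custom" config that marks
# only class 0 for inversion, class 0 gets inversion probability 0.9 and class 1 gets
# 0.1; the "evenly_spaced" variant instead spreads probabilities linearly across classes.
if __name__ == "__main__":
    toy_fixed = {"type": "fixed", "assign": "custom", "custom_selection": [0],
                 "num_inversion_classes": None, "seed": None}
    print([float(x) for x in get_p(40, 2, toy_fixed)])                  # [0.9, 0.1]
    print([float(x) for x in get_p(40, 5, {"type": "evenly_spaced"})])  # [0.1, 0.3, 0.5, 0.7, 0.9]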
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
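        # Remap the per-(class, within-class index) invert flags into dataset sample-id order using the ids file.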
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index_col=False)
except FileNotFoundError:
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/fashionmnist/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
# Run from within the /scripts folder.
import json
import numpy as np
import pandas as pd
import classy_vision.generic.util as util
import random
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[1, 1, 0, 1, 0, 0, 0, 0, 1, 1],
[1, 1, 1, 1, 1, 0, 0, 0, 0, 0],
[1, 1, 0, 0, 1, 1, 0, 1, 0, 0],
[0, 1, 1, 0, 1, 0, 1, 0, 1, 0],
[1, 0, 0, 1, 1, 1, 1, 0, 0, 0],
[0, 0, 0, 0, 1, 1, 1, 1, 0, 1],
[1, 1, 0, 0, 0, 0, 1, 1, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 0, 1],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 0, 0, 1, 0, 0, 1, 1],
[1, 0, 1, 0, 1, 0, 0, 1, 0, 1],
[0, 1, 0, 0, 1, 0, 0, 1, 1, 1],
[1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 0, 1, 0, 1, 0],
[1, 0, 1, 0, 1, 1, 0, 0, 1, 0],
[1, 0, 0, 1, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 0, 1, 0, 0, 1, 1],
[0, 0, 1, 0, 1, 1, 1, 1, 0, 0],
[1, 1, 1, 0, 0, 1, 0, 0, 1, 0],
[1, 0, 0, 0, 0, 1, 1, 1, 1, 0]
]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 55, 5)
EXPERIMENT_NAME = "fashionmnist" # used to store experiment configs and results
IDS_TEST = "./test_ids.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template.json"
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 60_000
SEED_BASE = 0
SEED_TEST = 100_000 # seed, or "None" to indicate get_test_seed() should be used
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
return p
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
config_paths = []
output_directories = []
names = []
epsilons = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
depths = []
counter_ids = []
for template in JSON_TEMPLATES:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, template, COUNTER)) as f:
data = json.load(f)
invert_exact_index_train = -1
invert_exact_index_test = -1
for i in range(len(data['dataset']['train']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TRAIN % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TRAIN / len(class_assignment)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
with util.torch_seed(train_seed):
with util.numpy_seed(train_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TRAIN, 'r') as f:
train_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TRAIN
for k in range(0, NUM_SAMPLES_TRAIN):
invert_mapped[k] = invert_original[int((train_ids[str(k)]['class'] * class_size) + train_ids[str(k)]['index'])]
data['dataset']['train']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_train = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class':
data['dataset']['train']['transforms'][i]['classes'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
data['model']['num_classes'] = num_classes
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(template)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/fashionmnist/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
root = './'
EXPERIMENT_NAME = "cifar100_width" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_SAMPLES_TEST = 10_000
TRANSFORMS = { # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
NUM_CHECKPOINTS = range(18, 999, 20)
sm = torch.nn.Softmax(dim=1)
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (List x List): nested list of size attributes x tasks with number of instances with that
input attribute and predicted task
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
        p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t-hat = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
        res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert" in TRANSFORMS:
transforms.append({"name": "invert", "p": list(p), "seed": SEED_TEST})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index_col=False)
except FileNotFoundError:
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_width/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 60, 10)
EXPERIMENT_NAME = "cifar100_width" # used to store experiment configs and results
WIDTHS = [4, 8, 16, 32, 64]
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar100.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar100_resnet110_gpu1_lrmultistep.json",
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 50_000
SEED_BASE = 0
SEED_TEST = 100_000 # fixed test seed, or None so that get_test_seed() falls back to the per-model index
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
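# Worked example (illustrative values, not from the repo): with epsilon=10, num_classes=2 and the
# "fixed"/"custom" config above selecting class 0, get_p returns [0.6, 0.4], i.e. the selected class is
# inverted with probability 0.5 + epsilon/100 and every other class with 0.5 - epsilon/100.
# The "evenly_spaced" branch instead spaces the probabilities linearly between those two extremes.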
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
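# Note: in the sweep below, get_train_seed is called with CLASS_ASSIGNMENTS.index(class_assignment)
# rather than a model index, so the training seed is tied to the class assignment being generated.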
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
counter_ids = []
for width in WIDTHS:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, JSON_TEMPLATES[0], COUNTER)) as f:
data = json.load(f)
invert_exact_index_train = -1
invert_exact_index_test = -1
for i in range(len(data['dataset']['train']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TRAIN % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TRAIN / len(class_assignment)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
with util.torch_seed(train_seed):
with util.numpy_seed(train_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TRAIN, 'r') as f:
train_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TRAIN
for k in range(0, NUM_SAMPLES_TRAIN):
invert_mapped[k] = invert_original[int((train_ids[str(k)]['class'] * class_size) + train_ids[str(k)]['index'])]
data['dataset']['train']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_train = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class':
data['dataset']['train']['transforms'][i]['classes'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
data['model']['init_planes'] = width
data['model']['heads'][0]['in_plane'] = 4 * width
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(width)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_width/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
root = './'
EXPERIMENT_NAME = "cifar100" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_SAMPLES_TEST = 10_000
TRANSFORMS ={ # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
NUM_CHECKPOINTS = range(18, 999, 20)
sm = torch.nn.Softmax(dim=1) # softmax over the class dimension of (batch, num_classes) logits
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
model (ClassyModel): trained model to evaluate
dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (List x List): nested list of size attributes x tasks with number of instances with that
input attribute and predicted task
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
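# Shape of the return values (illustrative reading of the code above): for attributes = {"a": 0, "b": 1}
# and tasks = [0, 1], results_in and results_pred are 2x2 count matrices of (attribute, task) inputs and
# (attribute, predicted task) outputs, the third value is overall top-1 accuracy, and the last value is
# the per-(attribute, task) accuracy matrix results_correct / results_in.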
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
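# Toy example (not from the source data): with attributes = {"a": 0, "b": 1}, tasks = [0, 1] and a
# count matrix [[30., 20.], [10., 40.]], format_results returns
# {("a", 0): 30.0, ("a", 1): 20.0, ("b", 0): 10.0, ("b", 1): 40.0}.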
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
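# Worked example with toy counts (illustrative only): for attributes = {"a": 0, "b": 1}, tasks = [0, 1],
# res = [[60, 40], [40, 60]] (test inputs) and res_h = [[70, 30], [30, 70]] (test predictions), every
# (attribute, task) pair contributes 0.1 to the summation, giving bias_amp_at = 0.25 * 0.4 = 0.1:
# the predictions exaggerate the 60/40 attribute-task correlation in the inputs to 70/30.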
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
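# Toy calibration example (illustrative numbers): with nbins = 4, percents = [0.6, 0.7, 0.9, 0.95],
# predictions = [1, 0, 1, 1] and targets = [1, 1, 1, 1], bins 3 and 4 get two samples each with
# (acc, conf) = (0.5, 0.65) and (1.0, 0.925); empty bins contribute 0 to ECE and are skipped for MCE,
# so get_ece returns 0.5*0.15 + 0.5*0.075 = 0.1125 and get_mce returns 0.15.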
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert" in TRANSFORMS:
transforms.append({"name": "invert", "p": list(p), "seed": SEED_TEST})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
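# run_measurements appends one row of results to the module-level lists initialised below; the manifest
# loop then periodically flushes those lists to results_overconf.csv.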
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index_col=False)
except Exception:
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
if counter == 1001:
continue
if NUM_CHECKPOINTS is None:
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
for checkpoint in NUM_CHECKPOINTS:
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/model_phase-' + str(checkpoint) + '_end.torch'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = checkpoint
)
if (checkpoint + 2) % 100 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100/scripts/training_measurements_checkpoints.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 60, 10)
EXPERIMENT_NAME = "cifar100" # used to store experiment configs and results
DEPTHS = {110: [[18, 18, 18], "block2-17"],
92: [[15, 15, 15], "block2-14"],
74: [[12, 12, 12], "block2-11"],
56: [[9, 9, 9], "block2-8"],
50: [[8, 8, 8], "block2-7"],
44: [[7, 7, 7], "block2-6"],
38: [[6, 6, 6], "block2-5"],
32: [[5, 5, 5], "block2-4"],
26: [[4, 4, 4], "block2-3"],
20: [[3, 3, 3], "block2-2"],
14: [[2, 2, 2], "block2-1"],
8: [[1, 1, 1], "block2-0"],}
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar100.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar100_resnetx_gpu1_lrmultistep.json",
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 50_000
SEED_BASE = 0
SEED_TEST = 100_000 # fixed test seed, or None so that get_test_seed() falls back to the per-model index
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
depths = []
counter_ids = []
for depth in DEPTHS:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, JSON_TEMPLATES[0], COUNTER)) as f:
data = json.load(f)
invert_exact_index_train = -1
invert_exact_index_test = -1
for i in range(len(data['dataset']['train']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TRAIN % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TRAIN / len(class_assignment)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
with util.torch_seed(train_seed):
with util.numpy_seed(train_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TRAIN, 'r') as f:
train_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TRAIN
for k in range(0, NUM_SAMPLES_TRAIN):
invert_mapped[k] = invert_original[int((train_ids[str(k)]['class'] * class_size) + train_ids[str(k)]['index'])]
data['dataset']['train']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_train = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class':
data['dataset']['train']['transforms'][i]['classes'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
data['model']['num_blocks'] = DEPTHS[depth][0]
data['model']['heads'][0]['fork_block'] = DEPTHS[depth][1]
data['model']['num_classes'] = num_classes
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid CONFIG_PATH")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(depth)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
root = './'
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
EXPERIMENT_NAME = "cifar10_overlay" # used to store experiment configs and results
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_CHECKPOINTS = None
NUM_SAMPLES_TEST = 2_000
TRANSFORMS ={ # used to designate which transforms to apply or change
"assign_class_str": {},
"normalize": {
"mean": [
0.4914,
0.4822,
0.4465],
"std": [
0.247,
0.243,
0.261]
}
}
TEST_DATASET = "cifar10_test_overlay"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [{'task': [5, 9], 'attr': [2, 4]},
{'task': [8, 7], 'attr': [3, 2]},
{'task': [7, 1], 'attr': [5, 2]},
{'task': [4, 0], 'attr': [9, 6]},
{'task': [2, 4], 'attr': [6, 3]},
{'task': [6, 8], 'attr': [7, 4]},
{'task': [8, 5], 'attr': [4, 1]},
{'task': [3, 4], 'attr': [5, 6]},
{'task': [1, 8], 'attr': [0, 2]},
{'task': [3, 5], 'attr': [2, 6]},
{'task': [5, 9], 'attr': [3, 4]},
{'task': [3, 7], 'attr': [8, 1]},
{'task': [0, 6], 'attr': [8, 1]},
{'task': [3, 1], 'attr': [0, 4]},
{'task': [6, 7], 'attr': [2, 5]},
{'task': [6, 9], 'attr': [2, 0]},
{'task': [5, 3], 'attr': [6, 7]},
{'task': [9, 2], 'attr': [1, 8]},
{'task': [3, 8], 'attr': [9, 2]},
{'task': [8, 0], 'attr': [4, 5]}]
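# Each entry appears to pair two CIFAR-10 "task" classes (relabeled to 0/1 by assign_class_str) with two
# "attr" classes used as the overlaid attribute; the dataset config below consumes them together with
# eta, epsilon and a seed. This reading is inferred from how the fields are used in run_measurements,
# not from separate documentation.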
sm = torch.nn.Softmax(dim=1) # softmax over the class dimension of (batch, num_classes) logits
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
model (ClassyModel): trained model to evaluate
dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (List x List): nested list of size attributes x tasks with number of instances with that
input attribute and predicted task
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
a = int(a)
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class_str" in TRANSFORMS:
transforms.append({"name": "assign_class_str", "classes": {str(class_assignment["task"][0]): 0, str(class_assignment["task"][1]): 1}})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": [
"input",
"original_target",
"sample_id",
"attribute",
"target"
]
},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8,
"num_samples": 2_000,
"task_classes": class_assignment["task"],
"attr_classes": class_assignment["attr"],
"eta": eta,
"epsilon": epsilon,
"seed": test_seed
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {class_assignment["attr"][0]: 0, class_assignment["attr"][1]: 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
etas.append(eta)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
etas = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index_col=False)
except Exception:
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
eta = row['eta']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
if NUM_CHECKPOINTS is None:
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"eta": etas,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"eta": etas,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar10_overlay/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [{'task': [5, 9], 'attr': [2, 4]},
{'task': [8, 7], 'attr': [3, 2]},
{'task': [7, 1], 'attr': [5, 2]},
{'task': [4, 0], 'attr': [9, 6]},
{'task': [2, 4], 'attr': [6, 3]},
{'task': [6, 8], 'attr': [7, 4]},
{'task': [8, 5], 'attr': [4, 1]},
{'task': [3, 4], 'attr': [5, 6]},
{'task': [1, 8], 'attr': [0, 2]},
{'task': [3, 5], 'attr': [2, 6]},
{'task': [5, 9], 'attr': [3, 4]},
{'task': [3, 7], 'attr': [8, 1]},
{'task': [0, 6], 'attr': [8, 1]},
{'task': [3, 1], 'attr': [0, 4]},
{'task': [6, 7], 'attr': [2, 5]},
{'task': [6, 9], 'attr': [2, 0]},
{'task': [5, 3], 'attr': [6, 7]},
{'task': [9, 2], 'attr': [1, 8]},
{'task': [3, 8], 'attr': [9, 2]},
{'task': [8, 0], 'attr': [4, 5]}]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(20, 50, 10)
ETAS = range(0, 101, 10)
EXPERIMENT_NAME = "cifar10_overlay" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar10.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar10.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar10_resnet110_gpu1_lrmultistep.json"
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 2_000
NUM_SAMPLES_TRAIN = 10_000
SEED_BASE = 0
SEED_TEST = 100_000 # seed, or "None" to indicate get_test_seed() should be used
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class_str": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
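# Illustrative sketch (not part of the original script): with this file's
# "fixed"/"custom" inversion config (custom_selection = [0]) and two classes,
# get_p gives the selected class an inversion probability above 0.5 and the
# other class the complement, e.g.
#   get_p(20, 2, INVERSION_CONFIGS[0])  ->  [0.7, 0.3]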
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
etas = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
depths = []
counter_ids = []
for template in JSON_TEMPLATES:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for eta in ETAS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, template, COUNTER)) as f:
data = json.load(f)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
test_seed = get_test_seed(model_i)
for i in range(len(data['dataset']['train']['transforms'])):
if "assign_class_str" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class_str':
data['dataset']['train']['transforms'][i]['classes'] = {class_assignment["task"][0]: 0, class_assignment["task"][1]: 1}
for i in range(len(data['dataset']['test']['transforms'])):
if "assign_class_str" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class_str':
data['dataset']['test']['transforms'][i]['classes'] = {class_assignment["task"][0]: 0, class_assignment["task"][1]: 1}
data['model']['num_classes'] = num_classes
data['dataset']['train']['num_samples'] = 10_000
data['dataset']['train']['task_classes'] = class_assignment["task"]
data['dataset']['train']['attr_classes'] = class_assignment["attr"]
data['dataset']['train']['eta'] = eta
data['dataset']['train']['epsilon'] = epsilon
data['dataset']['train']['seed'] = train_seed
data['dataset']['test']['num_samples'] = 2_000
data['dataset']['test']['task_classes'] = class_assignment["task"]
data['dataset']['test']['attr_classes'] = class_assignment["attr"]
data['dataset']['test']['eta'] = eta
data['dataset']['test']['epsilon'] = epsilon
data['dataset']['test']['seed'] = test_seed
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
etas.append(eta)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(template)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"eta": etas,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar10_overlay/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
root = './'
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
EXPERIMENT_NAME = "cifar100_trainingsize" # used to store experiment configs and results
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_CHECKPOINTS = None
NUM_SAMPLES_TEST = 10_000
TRANSFORMS = { # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
sm = torch.nn.Softmax(dim=1)  # softmax over the class dimension of the (batch, classes) logits
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
model (ClassyModel): trained model to evaluate
dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (dict): mapping from attribute name to row index
tasks (List): list of distinct tasks
Returns:
results_in (List x List): attributes x tasks counts of samples with that input attribute and ground-truth task
results_pred (List x List): attributes x tasks counts of samples with that input attribute and predicted task
accuracy (float): overall top-1 accuracy
flat_predictions (np.ndarray): per-sample predicted task
flat_targets (np.ndarray): per-sample ground-truth task
flat_percents (np.ndarray): per-sample maximum softmax confidence
accuracy_breakdown (List x List): attributes x tasks per-cell accuracy (correct / input count)
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output (predictions)
attributes (dict): mapping from attribute name to row index
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
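# Worked example (illustrative only, not from the original script): with
# attributes = {"a": 0, "b": 1}, tasks = [0, 1], ground-truth counts
# res = [[40, 10], [10, 40]] and predictions res_h = [[45, 5], [5, 45]],
# the correlated pairs ("a", 0) and ("b", 1) have y_at = True and delta_at = +0.1,
# while ("a", 1) and ("b", 0) have y_at = False and delta_at = -0.1, so every
# summand is 0.1 and BiasAmp_at = (1 / (2 * 2)) * 0.4 = 0.1: the model amplifies
# the attribute-task correlation already present in the ground truth.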
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
bin_idx = np.where((percents > i / nbins) & (percents <= (i + 1) / nbins))  # avoid shadowing the built-in filter
perc = percents[bin_idx]
pred = predictions[bin_idx]
targ = targets[bin_idx]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
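# Worked example (illustrative only): with nbins = 2, acc_bins = [0.6, 0.9],
# conf_bins = [0.7, 0.95] and count_bins = [20, 80], the expected calibration
# error is ECE = 0.2 * |0.6 - 0.7| + 0.8 * |0.9 - 0.95| = 0.06 and the maximum
# calibration error is MCE = max(0.1, 0.05) = 0.1.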
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, d_s, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert" in TRANSFORMS:
transforms.append({"name": "invert", "p": list(p), "seed": SEED_TEST})
if "invert_exact" in TRANSFORMS:
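# invert_original is built per original CIFAR class: each class contributes class_size
# flags, of which a p[c] fraction (c = the binary task that class maps to) is set to 1
# under the test seed. invert_mapped then reorders those flags into dataset order using
# each sample's (class, index) entry from IDS_TEST, so the transform receives one
# inversion flag per test image.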
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
dataset_sizes.append(d_s)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
dataset_sizes = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index_col=False)
except (FileNotFoundError, pd.errors.EmptyDataError):  # no usable prior results file; start fresh
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
d_s = row['dataset_size']
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
d_s = d_s,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates,
"dataset_size": dataset_sizes
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates,
"dataset_size": dataset_sizes
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_trainingsize/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
DATASET_SIZES = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 60, 10)
EXPERIMENT_NAME = "cifar100_trainingsize" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar100.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # use a "custom" class assignment via "custom_selection", or "shuffle" randomly over a fixed "num_inversion_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar100subset_resnet110_gpu1_lrmultistep.json"
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 50_000
SEED_BASE = 0
SEED_TEST = 100_000 # seed, or "None" to indicate get_test_seed() should be used
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
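# Illustrative sketch (not part of the original script): with the "fixed"/"custom"
# inversion config above (custom_selection = [0]) and the two binary task classes,
#   get_p(30, 2, INVERSION_CONFIGS[0])  ->  [0.8, 0.2]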
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
dataset_sizes = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
depths = []
counter_ids = []
for template in JSON_TEMPLATES:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for d_s in DATASET_SIZES:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, template, COUNTER)) as f:
data = json.load(f)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
test_seed = get_test_seed(model_i)
for i in range(len(data['dataset']['train']['transforms'])):
if "assign_class_str" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class_str':
data['dataset']['train']['transforms'][i]['classes'] = {class_assignment["task"][0]: 0, class_assignment["task"][1]: 1}
for i in range(len(data['dataset']['test']['transforms'])):
if "assign_class_str" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class_str':
data['dataset']['test']['transforms'][i]['classes'] = {class_assignment["task"][0]: 0, class_assignment["task"][1]: 1}
data['model']['num_classes'] = 2
data['num_epochs'] = (int)(np.rint(500 * (1/d_s)))
data['optimizer']['param_schedulers']['lr']['milestones'] = [(int)(np.rint(1*(1/d_s))), (int)(np.rint(250*(1/d_s))), (int)(np.rint(375*(1/d_s)))]
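# Scaling sketch (added for clarity): num_epochs and the LR milestones are scaled by
# 1/d_s so the total number of training samples seen stays roughly constant across
# dataset sizes, e.g. d_s = 0.5 gives 1000 epochs with milestones [2, 500, 750].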
data['dataset']['train']['num_samples'] = (int)(np.rint(NUM_SAMPLES_TRAIN * d_s))
data['dataset']['train']['dataset_size'] = d_s
data['dataset']['train']['p'] = p
data['dataset']['train']['seed'] = train_seed
data['dataset']['train']['class_mapping'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
dataset_sizes.append(d_s)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(template)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"dataset_size": dataset_sizes,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_trainingsize/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from copy import error
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
root = './'
EXPERIMENT_NAME = "cifar100_regularization" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_SAMPLES_TEST = 10_000
TRANSFORMS = { # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
NUM_CHECKPOINTS = range(18, 999, 20)
sm = torch.nn.Softmax(dim=1)  # softmax over the class dimension of the (batch, classes) logits
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
model (ClassyModel): trained model to evaluate
dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (dict): mapping from attribute name to row index
tasks (List): list of distinct tasks
Returns:
results_in (List x List): attributes x tasks counts of samples with that input attribute and ground-truth task
results_pred (List x List): attributes x tasks counts of samples with that input attribute and predicted task
accuracy (float): overall top-1 accuracy
flat_predictions (np.ndarray): per-sample predicted task
flat_targets (np.ndarray): per-sample ground-truth task
flat_percents (np.ndarray): per-sample maximum softmax confidence
accuracy_breakdown (List x List): attributes x tasks per-cell accuracy (correct / input count)
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output (predictions)
attributes (dict): mapping from attribute name to row index
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
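# Worked example (illustrative only): for ground-truth counts res = [[40, 10], [10, 40]]
# and predictions res_h = [[45, 5], [5, 45]] with attributes = {"a": 0, "b": 1} and
# tasks = [0, 1], each summand evaluates to 0.1, giving BiasAmp_at = (1 / 4) * 0.4 = 0.1.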
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
bin_idx = np.where((percents > i / nbins) & (percents <= (i + 1) / nbins))  # avoid shadowing the built-in filter
perc = percents[bin_idx]
pred = predictions[bin_idx]
targ = targets[bin_idx]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
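# Worked example (illustrative only): nbins = 2, acc_bins = [0.6, 0.9],
# conf_bins = [0.7, 0.95], count_bins = [20, 80] gives ECE = 0.06 and MCE = 0.1.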
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert" in TRANSFORMS:
transforms.append({"name": "invert", "p": list(p), "seed": SEED_TEST})
if "invert_exact" in TRANSFORMS:
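# invert_original holds class_size inversion flags per original CIFAR class, drawn with
# probability p[c] (c = that class's binary task) under the test seed; invert_mapped then
# reorders them into dataset order using the (class, index) entries in IDS_TEST.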
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {"a": 0, "b": 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index_col=False)
except (FileNotFoundError, pd.errors.EmptyDataError):  # no usable prior results file; start fresh
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = None
)
if counter % 10 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_regularization/scripts/training_measurements.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import json
import numpy as np
import classy_vision.generic.util as util
import random
import pandas as pd
import os
CONFIG_PATH = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
CLASS_ASSIGNMENTS = [
[0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0],
[1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0],
[1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1],
[1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1],
[1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1],
[0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1],
[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0],
[0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0],
[0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0],
[1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0],
[1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1],
[0, 0, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0],
[0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
]
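# Each row above maps the 100 CIFAR-100 classes onto a binary task label (0 or 1), so
# len(set(class_assignment)) evaluates to 2 below, and the row index is what gets
# recorded as class_assignment_mapping in the manifest.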
COUNTER = 1000 # used to name the experiments and associated config
EPSILONS = range(0, 60, 10)
EXPERIMENT_NAME = "cifar100_regularization" # used to store experiment configs and results
WEIGHT_DECAYS = np.logspace(-5, -2, 8)
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
IDS_TRAIN = "./train_ids_cifar100.txt" # used to designate the file of sample ids for each train image
INVERSION_CONFIGS = [{ # used to designate which classes should be inverted
"type": "fixed", # used to designate how inversion probabilities should be calculated
"assign": "custom", # should we use a "custom" class assignment with "custom_selection", or "shuffle" randomly with a fixed "num_classes"
"custom_selection": [0],
"num_inversion_classes": None,
"seed": None
}
]
JSON_BASE = None # used to calculate base json when JSON type is custom
JSON_TYPE = "template" # used to determine which base json to build the config from
JSON_TEMPLATES = [ # used to designate template config
"config_template_cifar100_resnet110_gpu1_lrmultistep.json",
]
NUM_MODELS = 1 # number of models per epsilon-class assignment combination
NUM_SAMPLES_TEST = 10_000
NUM_SAMPLES_TRAIN = 50_000
SEED_BASE = 0
SEED_TEST = 100_000 # seed, or "None" to indicate get_test_seed() should be used
TRANSFORMS = { # used to designate which transforms to apply or change
"assign_class": None,
"invert_exact": None
}
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
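# Illustrative example (not part of the original pipeline): with the "fixed"/"custom"
# inversion config above, the custom classes are biased toward inversion and the rest
# away from it, e.g.
#   get_p(epsilon=10, num_classes=2, inversion_config=INVERSION_CONFIGS[0])
#   -> [0.6, 0.4]   # class 0 gets p = 0.5 + 0.10, class 1 gets p = 0.5 - 0.10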
def get_train_seed(model_i):
return ((model_i + SEED_BASE + 2) * 100_000)
def get_test_seed(model_i):
return SEED_TEST if SEED_TEST else model_i
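# Because SEED_TEST is a non-zero constant above, get_test_seed() always returns that
# fixed seed; it only falls back to model_i when SEED_TEST is set to None (or 0).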
def get_base_json(json_type, template, counter=None):
if json_type == "template":
return template
if json_type == "custom":
return '../configs/' + str(EXPERIMENT_NAME) + '/config_' + str(EXPERIMENT_NAME) + "_bias_test_" + str(counter + JSON_BASE) + '.json'
config_paths = []
output_directories = []
names = []
epsilons = []
train_seeds = []
test_seeds = []
json_templates = []
class_assignment_mappings = []
probabilities = []
n_classes = []
counter_ids = []
for weight_decay in WEIGHT_DECAYS:
for class_assignment in CLASS_ASSIGNMENTS:
for epsilon in EPSILONS:
for inversion_config in INVERSION_CONFIGS:
num_classes = len(set(class_assignment))
p = get_p(epsilon, num_classes, inversion_config)
for model_i in range(0, NUM_MODELS):
with open(get_base_json(JSON_TYPE, JSON_TEMPLATES[0], COUNTER)) as f:
data = json.load(f)
invert_exact_index_train = -1
invert_exact_index_test = -1
for i in range(len(data['dataset']['train']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TRAIN % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TRAIN / len(class_assignment)
train_seed = get_train_seed(CLASS_ASSIGNMENTS.index(class_assignment))
with util.torch_seed(train_seed):
with util.numpy_seed(train_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TRAIN, 'r') as f:
train_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TRAIN
for k in range(0, NUM_SAMPLES_TRAIN):
invert_mapped[k] = invert_original[int((train_ids[str(k)]['class'] * class_size) + train_ids[str(k)]['index'])]
data['dataset']['train']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['train']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_train = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['train']['transforms'][i]['name'] == 'assign_class':
data['dataset']['train']['transforms'][i]['classes'] = class_assignment
for i in range(len(data['dataset']['test']['transforms'])):
if "invert_exact" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
test_seed = get_test_seed(model_i)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
data['dataset']['test']['transforms'][i]['invert'] = invert_mapped
if data['dataset']['test']['transforms'][i]['name'] == 'invert_exact':
invert_exact_index_test = i
if "assign_class" in TRANSFORMS.keys() and data['dataset']['test']['transforms'][i]['name'] == 'assign_class':
data['dataset']['test']['transforms'][i]['classes'] = class_assignment
data['optimizer']['weight_decay'] = weight_decay
filename = 'config_' + EXPERIMENT_NAME + '_bias_test_' + str(COUNTER) +'.json'
model_folder_path = CONFIG_PATH + '/models/'
if (not os.path.exists(CONFIG_PATH)):
raise Exception(CONFIG_PATH + " not a valid config path")
if (not os.path.exists(model_folder_path)):
os.mkdir(model_folder_path)
config_path = model_folder_path + filename
with open(config_path, 'w') as out:
json.dump(data, out, indent=4)
COUNTER += 1
config_paths.append(config_path)
output_directories.append(EXPERIMENT_NAME)
names.append(filename)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
json_templates.append(weight_decay)
class_assignment_mappings.append(CLASS_ASSIGNMENTS.index(class_assignment))
probabilities.append(str(p).replace(',', ''))
n_classes.append(num_classes)
counter_ids.append(COUNTER)
data = {
"config_path": config_paths,
"output_dir": output_directories,
"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"json_template": json_templates,
"class_assignment_mapping": class_assignment_mappings,
"probabilities": probabilities,
"n_classes": n_classes,
"counter_id": counter_ids
}
df = pd.DataFrame.from_dict(data)
df.to_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_regularization/scripts/generate_experiment_configs.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.utils.data
import ast
import itertools
import json
import numpy as np
import pandas as pd
import random
import sys
from classy_vision.dataset import ClassyDataset, build_dataset, register_dataset
from classy_vision.generic.util import load_checkpoint
import classy_vision.generic.util as util
from classy_vision.models import ClassyModel
from PIL import Image
sys.path.append('../../..')
from datasets.inversion_transforms import AssignClass, Invert
root = './'
#### TODO: Change according to your directory structure.
CHECKPOINT_PATH = ""
####
EXPERIMENT_NAME = "cifar100_swapped" # used to store experiment configs and results
IDS_TEST = "./test_ids_cifar100.txt" # used to designate file of sample ids for each test image
NBINS = 15 # used to designate the number of bins for overconfidence measures
NUM_SAMPLES_TEST = 10_000
TRANSFORMS = { # used to designate which transforms to apply or change
"invert_exact": {},
"assign_class": {},
"swap_task_attr": {},
"normalize": {
"mean": [0.5071, 0.4867, 0.4408],
"std": [0.2675, 0.2565, 0.2761]
}
}
TEST_DATASET = "cifar100_test"
VISUALIZE = False # designate if plots should be shown
CLASS_ASSIGNMENTS = [
[1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1],
[0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0],
[0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0],
[1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1],
[1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1],
[1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0],
[1, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 1],
[1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1],
[1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1],
[1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1],
[1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0],
[1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 1],
[1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1],
[1, 1, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1],
[0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1],
[1, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1],
[1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1],
[1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 1],
[0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 1, 0, 0],
[1, 0, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 1, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1],
[1, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0],
[1, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 0, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 1, 0, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 0],
[0, 1, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 1, 1, 1, 0, 1, 0, 0, 1, 1, 1, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 0, 0],
[0, 1, 1, 0, 1, 1, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 0, 1, 0, 1, 1, 0, 1, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1, 0, 1, 0, 1, 1, 1, 0, 0, 1],
[0, 1, 0, 1, 0, 0, 0, 0, 0, 1, 0, 1, 0, 0, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 0, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1, 1, 1, 0, 0, 0, 1, 1, 1, 1, 1, 0, 1, 0, 1, 0, 1, 1, 0, 1, 1, 1, 0, 0, 1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 1, 1, 1, 1, 0, 0, 0, 1, 0, 1, 1, 0, 0, 1, 0, 0]]
NUM_CHECKPOINTS = 50
sm = torch.nn.Softmax(dim=1)  # softmax over the class dimension of the 2-D logits
def get_model_results(model, dataset, attributes, tasks):
"""Get model results needed for directional bias amplification measurements.
Args:
model (ClassyModel): trained model to evaluate
dataset (ClassyDataset): ClassyDataset, generally of test data
attributes (dict): mapping from attribute value to its row index in the result matrices
tasks (List): list of distinct tasks
Returns:
results_in (List x List): nested list of size attributes x tasks with number of instances with that
input attribute and ground-truth task
results_pred (List x List): nested list of size attributes x tasks with number of instances with that
input attribute and predicted task
acc (float): overall top-1 accuracy
predictions, targets, percents (np.ndarray): per-sample predicted task, ground-truth task, and max softmax score
accuracy_breakdown (List x List): per attribute-task accuracy (results_correct / results_in)
"""
predictions = []
attributes_in = []
targets = []
percents = []
model.eval()
for k in dataset.iterator():
attributes_in.append(k['attribute'])
targets.append(k['target'])
result = model(k['input']).data
predictions.append(result.numpy().argmax(axis=1))
percents.append(sm(result))
flat_attributes = [item for sublist in attributes_in for item in sublist]
flat_targets = [item for sublist in targets for item in sublist]
flat_predictions = [item for sublist in predictions for item in sublist]
flat_percents = [float(max(item)) for sublist in percents for item in sublist]
results_in = np.zeros((len(attributes), len(tasks)))
results_pred = np.zeros((len(attributes), len(tasks)))
results_correct = np.zeros((len(attributes), len(tasks)))
total = 0
correct = 0
for a, t, p in zip(flat_attributes, flat_targets, flat_predictions):
a = int(a)
t = int(t)
results_in[attributes[a]][t] = results_in[attributes[a]][t] + 1
results_pred[attributes[a]][p] = results_pred[attributes[a]][p] + 1
results_correct[attributes[a]][t] += 1 if t == p else 0
total += 1
correct = correct + 1 if t == p else correct
# TESTING return flat_attributes, flat_predictions, flat_targets
return results_in, results_pred, correct * 1.0 / total, np.array(flat_predictions), np.array(flat_targets), np.array(flat_percents), results_correct / results_in
def format_results(results, attributes, tasks):
"""Format results for readability.
Args:
results (List): nested list of size attributes x tasks with number of instances with that
attribute and task
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
dict<(attribute, task), value> : dict with number of instances with that
attribute and task
"""
return {key: results[attributes[key[0]]][key[1]] for key in list(itertools.product(attributes, tasks))}
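# Illustrative usage with assumed inputs (not from the original run):
#   format_results(np.array([[3., 1.], [0., 4.]]), attributes={0: 0, 1: 1}, tasks=[0, 1])
#   -> {(0, 0): 3.0, (0, 1): 1.0, (1, 0): 0.0, (1, 1): 4.0}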
def calc_bias_amp_at(res, res_h, attributes, tasks):
"""Perform directional bias amplification a->t as defined in https://arxiv.org/pdf/2102.12594.pdf.
1. Instantiate `results`, which will store the values defined in the paper's
summation expression.
Looping over all attributes and tasks...
2. Generate probabilities needed for y_at, delta_at calculations defined in the paper.
p_attr = P(Attribute_a = 1)
p_attr_h = P(Attribute_a-hat = 1)
p_task = P(Task_t = 1)
p_task_h = P(Task_t-hat = 1)
p_attr_task = P(Attribute_a = 1, Task_t = 1)
p_attr_h_task_h = P(Attribute_a-hat = 1, Task_t = 1)
p_task_h_cond_attr_h = P(Task_t-hat = 1 | Attribute_a-hat = 1)
p_task_cond_attr = P(Task_t = 1 | Attribute_a = 1)
3. Calculate y_at, delta_at, and expression inside of summation, and save to `results`.
4. Perform summation and BiasAmp_at calculation.
Args:
res (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the training input or test input
res_h (List x List): nested list of size attributes x tasks with number of instances that have
attribute and task, generally the test output
attributes (List): list of distinct attributes
tasks (List): list of distinct tasks
Returns:
results (dict<(attribute, task), value>): dict with value inside equation summation with the
attribute and task
bias_amp_at (float): directional bias amplification a->t metric
"""
results = {key: 0 for key in list(itertools.product(attributes, tasks))}
for key, value in results.items():
attr = key[0]
task = key[1]
p_attr = np.sum(res[attributes[attr]]) / np.sum(np.matrix(res))
p_attr_h = np.sum(res_h[attributes[attr]]) / np.sum(np.matrix(res_h))
p_task = np.sum(res, axis=0)[task] / np.sum(np.matrix(res))
p_task_h = np.sum(res_h, axis=0)[task] / np.sum(np.matrix(res_h))
p_attr_task = res[attributes[attr]][task] / np.sum(np.matrix(res))
p_attr_h_task_h = res_h[attributes[attr]][task] / np.sum(np.matrix(res_h))
p_task_h_cond_attr_h = p_attr_h_task_h / p_attr_h
p_task_cond_attr = p_attr_task / p_attr
y_at = p_attr_task > (p_attr * p_task)
delta_at = p_task_h_cond_attr_h - p_task_cond_attr
print(str(key)+".... y_at: " + str(y_at) + ", delta_at: " + str(delta_at))
results[key] = (y_at * delta_at) + ((1 - y_at) * (-1 * delta_at))
bias_amp_at = (1 / (len(attributes) * len(tasks))) * sum(results.values())
return results, bias_amp_at
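# Worked example with hypothetical counts (illustration only, not from the original run):
#   res   = [[30, 10], [10, 30]]   # ground-truth attribute x task counts
#   res_h = [[35,  5], [ 5, 35]]   # predicted counts, with the correlation amplified
#   attributes = {0: 0, 1: 1}; tasks = [0, 1]
# Each (attribute, task) term contributes 0.125 (e.g. delta_at = 0.875 - 0.750 for the
# correlated pairs), so calc_bias_amp_at(res, res_h, attributes, tasks) returns a
# breakdown whose entries are all 0.125 and biasamp_at = 0.125.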
def get_binned_metrics(percents, predictions, targets, nbins):
acc_bins = []
conf_bins = []
count_bins = []
assert 0 not in percents
for i in range(0, nbins):
filter = np.where((percents > (i)/nbins) & (percents <= (i+1)/nbins))
perc = percents[filter]
pred = predictions[filter]
targ = targets[filter]
acc = sum(pred==targ)/len(pred) if len(pred) != 0 else np.nan
conf = sum(perc)/len(perc) if len(perc) != 0 else np.nan
acc_bins.append(acc)
conf_bins.append(conf)
count_bins.append(len(pred))
return acc_bins, conf_bins, count_bins
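# Illustrative example with assumed arrays (not from the original run), using nbins=2:
#   percents    = np.array([0.6, 0.9, 0.95])
#   predictions = np.array([1, 0, 1])
#   targets     = np.array([1, 1, 1])
# Bin (0.0, 0.5] is empty -> (nan, nan, 0); bin (0.5, 1.0] holds all three samples ->
# accuracy 2/3, mean confidence (0.6 + 0.9 + 0.95) / 3 ~= 0.817, count 3.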
def get_ece(acc_bins, conf_bins, count_bins, nbins):
ece = 0
for i in range(0, nbins):
ece += (count_bins[i] / sum(count_bins)) * abs(acc_bins[i] - conf_bins[i]) if acc_bins[i] is not np.nan else 0
return ece
def get_mce(acc_bins, conf_bins, nbins):
mce = 0.0
for i in range(0, nbins):
mce = np.maximum(mce, abs(acc_bins[i] - conf_bins[i])) if acc_bins[i] is not np.nan else mce
return mce
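# In standard calibration notation the two loops above compute
#   ECE = sum_i (n_i / N) * |acc_i - conf_i|   and   MCE = max_i |acc_i - conf_i|,
# skipping empty bins (acc_i is NaN), which matches the NaN guards in both functions.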
def get_p(epsilon, num_classes, inversion_config):
p = []
if inversion_config['type'] == "fixed":
class_list = []
if inversion_config['assign'] == "custom":
class_list = inversion_config['custom_selection']
elif inversion_config['assign'] == "shuffle":
random.seed(inversion_config['seed'])
class_list = random.sample(range(num_classes), inversion_config['num_inversion_classes'])
for i in range(num_classes):
if i in class_list:
p.append(np.round(0.5 + (epsilon * 0.01), 2))
else:
p.append(np.round(0.5 - (epsilon * 0.01), 2))
elif inversion_config['type'] == "evenly_spaced":
for i in range(num_classes):
min = np.round(0.5 - (epsilon * 0.01), 2)
p.append(np.round(min + (i * (2 * epsilon * 0.01) / (num_classes - 1)), 2))
return p
def run_measurements(name, json_template, class_assignment, epsilon, num_classes, p, train_seed, test_seed, checkpoint_dir, checkpoint):
print("\nRunning \"" + checkpoint_dir + "\" .")
transforms = []
if "assign_class" in TRANSFORMS:
transforms.append({"name": "assign_class", "classes": class_assignment})
if "invert" in TRANSFORMS:
transforms.append({"name": "invert", "p": list(p), "seed": SEED_TEST})
if "invert_exact" in TRANSFORMS:
invert_original = []
assert NUM_SAMPLES_TEST % len(p) == 0, "Dataset not evenly divisible by number of classes."
class_size = NUM_SAMPLES_TEST / len(class_assignment)
with util.torch_seed(test_seed):
with util.numpy_seed(test_seed):
for c in class_assignment:
hold = [1] * (int)(np.round(class_size * p[c], 0)) + [0] * (int)(np.round(class_size * (1-p[c]), 0))
np.random.shuffle(hold)
invert_original.extend(hold)
with open(IDS_TEST, 'r') as f:
test_ids = json.load(f)
invert_mapped = [0] * NUM_SAMPLES_TEST
for k in range(0, NUM_SAMPLES_TEST):
invert_mapped[k] = invert_original[int((test_ids[str(k)]['class'] * class_size) + test_ids[str(k)]['index'])]
transforms.append({"name": "invert_exact", "invert": invert_mapped})
if "swap_binary_task" in TRANSFORMS:
transforms.append({"name": "swap_binary_task"})
if "swap_task_attr" in TRANSFORMS:
transforms.append({"name": "swap_task_attr"})
key_transforms = [{"name": "ToTensor"}]
if "normalize" in TRANSFORMS:
key_transforms.append({
"name": "Normalize",
"mean": TRANSFORMS['normalize']['mean'],
"std": TRANSFORMS['normalize']['std'],
})
transforms.extend([
{"name": "tuple_to_map", "list_of_map_keys": ["input", "original_target", "sample_id", "target", "attribute"]},
{
"name": "apply_transform_to_key",
"transforms": key_transforms,
"key": "input"
}
])
test_dataset_config = {
"name": TEST_DATASET,
"batchsize_per_replica": 128,
"shuffle": True,
"transforms": transforms,
"num_workers": 8
}
my_dataset_test = build_dataset(test_dataset_config)
checkpoint_data = load_checkpoint(checkpoint_dir)
model = ClassyModel.from_checkpoint(checkpoint_data)
attributes = {0: 0, 1: 1}
tasks = [i for i in range(num_classes)]
test_results_in, test_results_pred, acc, predictions, targets, percents, accuracy_breakdown = get_model_results(model, my_dataset_test, attributes, tasks)
accuracy_breakdown = format_results(accuracy_breakdown, attributes, tasks)
accuracy_breakdown = {str(k): v for k, v in accuracy_breakdown.items()}
biasamp_breakdown, biasamp = calc_bias_amp_at(test_results_in, test_results_pred, attributes, tasks)
biasamp_breakdown = {str(k): v for k, v in biasamp_breakdown.items()}
acc_bins, conf_bins, count_bins = get_binned_metrics(percents, predictions, targets, NBINS)
print(acc)
print(format_results(test_results_in, attributes, tasks))
print(format_results(test_results_pred, attributes, tasks))
print(biasamp_breakdown)
names.append(name)
epsilons.append(epsilon)
train_seeds.append(train_seed)
test_seeds.append(test_seed)
checkpoints.append(checkpoint)
accuracies.append(acc)
biasamps.append(biasamp)
test_predictions.append(format_results(test_results_pred, attributes, tasks))
acc_bins_list.append(acc_bins)
conf_bins_list.append(conf_bins)
count_bins_list.append(count_bins)
eces.append(get_ece(acc_bins, conf_bins, count_bins, NBINS))
mces.append(get_mce(acc_bins, conf_bins, NBINS))
accuracy_breakdowns.append(accuracy_breakdown)
biasamp_breakdowns.append(biasamp_breakdown)
json_templates.append(json_template)
# Set up and run measurements
names = []
epsilons = []
train_seeds = []
test_seeds = []
checkpoints = []
accuracies = []
biasamps = []
test_predictions = []
acc_bins_list = []
count_bins_list = []
conf_bins_list = []
eces = []
mces = []
accuracy_breakdowns = []
biasamp_breakdowns = []
json_templates = []
try:
original_results = pd.read_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index_col=False)
except Exception:  # fall back to a fresh results file if no prior results exist
original_results = None
manifest_df = pd.read_csv('../../' + EXPERIMENT_NAME + '/scripts/manifest.txt')
for _, row in manifest_df.iterrows():
json_template = row['json_template']
class_assignment = CLASS_ASSIGNMENTS[row['class_assignment_mapping']]
epsilon = row['epsilon']
num_classes = row['n_classes']
p = [float(x) for x in row['probabilities'][1:-1].split(' ')]
train_seed = row['train_seed']
test_seed = row['test_seed']
counter = row['counter_id']
name = row['name']
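# Evaluate every saved phase-end checkpoint (model_phase-18, -38, ..., -998) and flush
# the accumulated results to CSV every fifth checkpoint, i.e. whenever (checkpoint + 2)
# is a multiple of 100.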
for checkpoint in range(18, 999, 20):
checkpoint_dir = CHECKPOINT_PATH + name[:-5] + '/checkpoints/model_phase-' + str(checkpoint) + '_end.torch'
run_measurements(
name = name,
json_template = json_template,
class_assignment = class_assignment,
epsilon = epsilon,
num_classes = num_classes,
p = p,
train_seed = train_seed,
test_seed = test_seed,
checkpoint_dir = checkpoint_dir,
checkpoint = checkpoint
)
if (checkpoint + 2) % 100 == 0:
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index=False)
data = {"name": names,
"epsilon": epsilons,
"train_seed": train_seeds,
"test_seed": test_seeds,
"checkpoint_number": checkpoints,
"acc@1": accuracies,
"biasamp": biasamps,
"bins": NBINS,
"acc_bins": acc_bins_list,
"conf_bins": conf_bins_list,
"count_bins": count_bins_list,
"ece": eces,
"mce": mces,
"accuracy_breakdown": accuracy_breakdowns,
"biasamp_breakdown": biasamp_breakdowns,
"json_templates": json_templates
}
df = pd.DataFrame.from_dict(data)
if original_results is not None:
combined_df = pd.concat([original_results, df])
combined_df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index=False)
else:
df.to_csv('../../' + EXPERIMENT_NAME + '/results_overconf_checkpoints.csv', index=False)
|
cv_bias_amplification-main
|
my-project-release/my-project/configs/cifar100_swapped/scripts/training_measurements_checkpoints.py
|