from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class RandomResizedCrop(UnagiTransform):
def __init__(
self,
size,
name=None,
prob=1.0,
level=0,
scale=(0.08, 1.0),
ratio=(0.75, 1.333_333_333_333_333_3),
interpolation=transforms.InterpolationMode.BILINEAR,
):
self.size = size
self.scale = scale
self.ratio = ratio
self.interpolation = interpolation
self.transform_func = transforms.RandomResizedCrop(
self.size, self.scale, self.ratio, self.interpolation
)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
def __repr__(self):
return (
f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
f"size={self.size}, scale={self.scale}, ratio={self.ratio}, "
f"interpolation={self.interpolation}>"
)
# Source: thanos-code-main/unagi/data/transforms/image/random_resize_crop.py
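A hedged usage sketch for the wrapper above (hypothetical values; assumes Pillow and torchvision are installed and that the module path matches the Source line):

from PIL import Image

from unagi.data.transforms.image.random_resize_crop import RandomResizedCrop

# prob=1.0 means the transform always fires; level is unused by this wrapper.
crop = RandomResizedCrop(size=32)
img = Image.new("RGB", (64, 64))
out, label = crop(img, None)  # __call__ is inherited from UnagiTransform
print(out.size)  # (32, 32)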
import random
from PIL import Image
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class ShearX(UnagiTransform):
value_range = (0.0, 0.3)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
if random.random() > 0.5:
degree = -degree
return (
pil_img.transform(pil_img.size, Image.AFFINE, (1, degree, 0, 0, 1, 0)),
label,
)
# Source: thanos-code-main/unagi/data/transforms/image/shear_x.py
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class RandomCrop(UnagiTransform):
def __init__(
self,
size,
padding=None,
pad_if_needed=False,
fill=0,
padding_mode="constant",
name=None,
prob=1.0,
level=0,
):
self.size = size
self.padding = padding
self.pad_if_needed = pad_if_needed
self.fill = fill
self.padding_mode = padding_mode
self.transform_func = transforms.RandomCrop(
self.size, self.padding, self.pad_if_needed, self.fill, self.padding_mode
)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
def __repr__(self):
return (
f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
f"size={self.size}, padding={self.padding}, "
f"pad_if_needed={self.pad_if_needed}, fill={self.fill}, "
f"mode={self.padding_mode}>"
)
# Source: thanos-code-main/unagi/data/transforms/image/random_crop.py
from PIL import ImageEnhance
from unagi.data.transforms.image.transform import UnagiTransform
from unagi.data.transforms.image.utils import categorize_value
class Contrast(UnagiTransform):
value_range = (0.1, 1.9)
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
degree = categorize_value(self.level, self.value_range, "float")
return ImageEnhance.Contrast(pil_img).enhance(degree), label
# Source: thanos-code-main/unagi/data/transforms/image/contrast.py
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class ToTensor(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return transforms.ToTensor()(pil_img), label
# Source: thanos-code-main/unagi/data/transforms/image/to_tensor.py
from torchvision import transforms as transforms
from unagi.data.transforms.image.transform import UnagiTransform
class RandomHorizontalFlip(UnagiTransform):
def __init__(self, p=0.5, name=None, prob=1.0, level=0):
self.p = p
self.transform_func = transforms.RandomHorizontalFlip(p)
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return self.transform_func(pil_img), label
def __repr__(self):
return (
f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
f"p={self.p}"
)
# Source: thanos-code-main/unagi/data/transforms/image/random_horizontal_flip.py
from PIL import Image
from unagi.data.transforms.image.transform import UnagiTransform
class VerticalFlip(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return pil_img.transpose(Image.FLIP_TOP_BOTTOM), label
# Source: thanos-code-main/unagi/data/transforms/image/vertical_flip.py
from unagi.data.transforms.task.transform import (
GroupTransform,
IdentityTransform,
MaskGen,
TupleTransform,
)
ALL_TRANSFORMS = {
"Contrastive": GroupTransform,
"MaskGenerator": MaskGen,
"Mask": TupleTransform,
"Identity": IdentityTransform,
}
# Source: thanos-code-main/unagi/data/transforms/task/__init__.py
import torch
from unagi.data.transforms.image.compose import Compose
class IdentityTransform:
def __init__(self):
pass
def __call__(self, x, label):
return x, label
class GroupTransform:
def __init__(self, transform, views=2):
self.t = transform
self.views = views
self.squeeze = False
def __call__(self, x, label, **kwargs):
grouped_contrastive_transforms_lst, label_lst = zip(
*[self.t(x, label, **kwargs) for i in range(self.views)]
)
grouped_contrastive_transforms = torch.stack(grouped_contrastive_transforms_lst)
if label is not None:
label = torch.stack(label_lst, dim=1)
return grouped_contrastive_transforms, label
class MaskGen:
def __init__(self, views, mask_length, mask_prob=0.05):
self.mask_length = mask_length
self.views = views
self.mask_prob = mask_prob
def __call__(self, x, label, **kwargs):
return torch.rand(self.views, self.mask_length) < self.mask_prob
class TupleTransform:
def __init__(self, *args):
self.fs = args
def __call__(self, x, label, **kwargs):
        inputs = [
            f(x, label, **kwargs)[0]
            if isinstance(f, Compose)
            else f(x, label, **kwargs)
            for f in self.fs
        ]
        # Re-run any Compose to pick up the label it may have transformed.
        for f in self.fs:
            if isinstance(f, Compose):
                label = f(x, label, **kwargs)[1]
        return inputs, label
# return tuple([f(x, label)[0] for f in self.fs])
# Source: thanos-code-main/unagi/data/transforms/task/transform.py
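A hedged sketch of GroupTransform from the file above, with a throwaway noise function standing in for a real augmentation pipeline:

import torch

from unagi.data.transforms.task.transform import GroupTransform

def noisy(x, label, **kwargs):
    # Stand-in augmentation: returns a perturbed copy of the input tensor.
    return x + 0.1 * torch.randn_like(x), label

group = GroupTransform(noisy, views=2)
views, label = group(torch.zeros(3, 8, 8), None)
print(views.shape)  # torch.Size([2, 3, 8, 8]): one row per augmented view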
class Compose(object):
"""Composes several transforms together.
Originally from:
https://pytorch.org/docs/stable/_modules/torchvision/transforms/transforms.html#Compose
Args:
transforms (list of ``Transform`` objects): list of transforms to compose.
"""
def __init__(self, transforms):
self.transforms = transforms
def __call__(self, text, label, **kwargs):
for idx, t in enumerate(self.transforms):
kwargs["idx"] = idx
text, label = t(text, label, **kwargs)
return text, label
def __repr__(self):
format_string = self.__class__.__name__ + "("
for t in self.transforms:
format_string += "\n"
format_string += " {0}".format(t)
format_string += "\n)"
return format_string
# Source: thanos-code-main/unagi/data/transforms/text/compose.py
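A hedged sketch of Compose above with two toy text transforms; note that Compose injects the step index into kwargs as "idx", so each step must accept **kwargs:

from unagi.data.transforms.text.compose import Compose

def lower(text, label, **kwargs):
    return text.lower(), label

def strip(text, label, **kwargs):
    return text.strip(), label

pipeline = Compose([lower, strip])
text, label = pipeline("  Hello World  ", 0)
print(text)  # "hello world"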
from unagi.data.transforms.text.back_translate import BackTranslate
from unagi.data.transforms.text.identity import Identity
from unagi.data.transforms.text.pretrained_lm_tokenize import PretrainedLMTokenize
ALL_TRANSFORMS = {
"PretrainedLMTokenize": PretrainedLMTokenize,
"BackTranslate": BackTranslate,
"Identity": Identity,
}
# Source: thanos-code-main/unagi/data/transforms/text/__init__.py
import torch
from transformers import AutoTokenizer
from unagi.data.transforms.text.transform import UnagiTransform
class PretrainedLMTokenize(UnagiTransform):
def __init__(
self,
name=None,
prob=1.0,
level=0,
model="bert-base-uncased",
padding="max_length",
truncation=True,
max_length=128,
):
super().__init__(name, prob, level)
self.tokenizer = AutoTokenizer.from_pretrained(model)
self.padding = padding
self.truncation = truncation
self.max_length = max_length
def transform(self, text, label, **kwargs):
if isinstance(text, str):
tokens = torch.LongTensor(
self.tokenizer(
text,
padding=self.padding,
truncation=self.truncation,
max_length=self.max_length,
)["input_ids"]
)
else:
tokens = text
return tokens, label
# Source: thanos-code-main/unagi/data/transforms/text/pretrained_lm_tokenize.py
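A hedged sketch of the tokenizer wrapper above; it downloads the bert-base-uncased tokenizer on first use, and non-string inputs pass through unchanged:

from unagi.data.transforms.text.pretrained_lm_tokenize import PretrainedLMTokenize

tok = PretrainedLMTokenize(max_length=8)
tokens, label = tok("unagi is a pipeline", None)
print(tokens.shape)  # torch.Size([8]): ids padded/truncated to max_length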
import random
PRECISION = 3
class UnagiTransform(object):
"""Base UnagiTransform transfrom class.
Args:
name(str): Transformation name.
prob(float): Transformation probability.
level(int): Transformation level.
"""
def __init__(self, name=None, prob=1.0, level=0):
self.name = name if name is not None else type(self).__name__
self.prob = prob
assert 0 <= level <= 1.0, "Invalid level, level must be in [0, 1.0]."
self.level = level
def transform(self, text, label, **kwargs):
return text, label
def __call__(self, text, label, **kwargs):
if random.random() <= self.get_prob():
return self.transform(text, label, **kwargs)
else:
return text, label
def __repr__(self):
return f"<Transform ({self.name}), prob={self.prob}, level={self.level}>"
def get_prob(self):
if self.prob == 1:
return self.prob
return random.random()
def get_level(self):
return random.randint(0, 10 ** PRECISION) / float(10 ** PRECISION)
# Source: thanos-code-main/unagi/data/transforms/text/transform.py
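A hedged sketch of subclassing the base class above: a custom transform only overrides transform(), while __call__, __repr__, and the prob gating come for free:

from unagi.data.transforms.text.transform import UnagiTransform

class Shout(UnagiTransform):
    def transform(self, text, label, **kwargs):
        return text.upper(), label

t = Shout()  # name defaults to the class name; prob=1.0, level=0
print(t("quiet", None))  # ('QUIET', None)
print(t)                 # <Transform (Shout), prob=1.0, level=0>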
import math
import os
import pickle
from zipfile import ZipFile
import numpy as np
from unagi.data.transforms.text.transform import UnagiTransform
class BackTranslate(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0, select_prob=0.5):
super().__init__(name, prob, level)
self.select_prob = select_prob
        # Extract the data file if it is not already present.
        if not os.path.exists("data/seq2seq.pkl"):
            with ZipFile("data/seq2seq.pkl.zip", "r") as zip_file:
                zip_file.extractall("data/")
        # Use a context manager so the pickle file handle is closed.
        with open("data/seq2seq.pkl", "rb") as data:
            self.seq2seq = pickle.load(data)
def transform(self, text, label, **kwargs):
ori_text = " ".join(text).strip()
num_sents = len(self.seq2seq[ori_text][0])
select_prob = np.random.random(size=(num_sents,))
new_sents = [
self.seq2seq[ori_text][0][i]
if select_prob[i] > self.select_prob
else self.seq2seq[ori_text][1][i]
for i in range(num_sents)
]
new_text = " ".join(new_sents).strip()
return (self.replace_with_length_check(ori_text, new_text).split(" "), label)
def replace_with_length_check(
self, ori_text, new_text, use_min_length=10, use_max_length_diff_ratio=0.5
):
"""Use new_text if the text length satisfies several constraints."""
if len(ori_text) < use_min_length or len(new_text) < use_min_length:
return ori_text
length_diff_ratio = 1.0 * (len(new_text) - len(ori_text)) / len(ori_text)
if math.fabs(length_diff_ratio) > use_max_length_diff_ratio:
return ori_text
return new_text
# Source: thanos-code-main/unagi/data/transforms/text/back_translate.py
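A hedged illustration of the replace_with_length_check guard above (using __new__ purely to dodge the data-file loading in __init__; the method itself keeps no state):

from unagi.data.transforms.text.back_translate import BackTranslate

bt = BackTranslate.__new__(BackTranslate)  # skip __init__ for illustration
# Too short: the paraphrase is rejected (use_min_length=10 characters).
print(bt.replace_with_length_check("a short sentence", "tiny"))
# Similar length: the paraphrase is kept (length ratio within 0.5).
print(bt.replace_with_length_check("a short sentence", "a short paraphrase"))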
from unagi.data.transforms.image.transform import UnagiTransform
class Identity(UnagiTransform):
def __init__(self, name=None, prob=1.0, level=0):
super().__init__(name, prob, level)
def transform(self, pil_img, label, **kwargs):
return pil_img, label
# Source: thanos-code-main/unagi/data/transforms/text/identity.py
import random
from typing import Tuple
import numpy as np
import torch
from unagi.data.transforms.image.transform import UnagiTransform
class Mixup(UnagiTransform):
def __init__(
self,
name=None,
prob=1.0,
level=0,
alpha=1.0,
same_class_ratio=-1.0,
prob_label=False,
):
self.alpha = alpha
self.same_class_ratio = same_class_ratio
self.prob_label = prob_label
super().__init__(name, prob, level)
def transform(self, pil_img, label, dp_x, dp_y):
"""
        Note: for Mixup, apply all transforms (including converting to Tensor)
before applying mixup
pil_img (Tensor)
dp_x: input column
dp_y: label column
"""
if random.random() < self.prob:
if self.alpha > 0.0:
mix_ratio = np.random.beta(self.alpha, self.alpha)
else:
mix_ratio = 1.0
idx = np.random.randint(len(dp_x))
tot_cnt = len(dp_x)
if self.same_class_ratio >= 0: # get idx
                same_class = np.random.rand() <= self.same_class_ratio
for i in np.random.permutation(tot_cnt):
if same_class == torch.equal(dp_y["labels"][i], label):
idx = i
break
cand_img = dp_x[idx]
cand_label = dp_y[idx]
# Calc all transforms before mixup
if isinstance(cand_img, Tuple):
cand_img = cand_img[0]
            if isinstance(pil_img, Tuple):
                pil_img = pil_img[0]
mixup_img = mix_ratio * pil_img + (1 - mix_ratio) * cand_img
if label is not None:
if self.prob_label:
mixup_label = mix_ratio * label + (1 - mix_ratio) * cand_label
else:
mixup_label = (
label if np.random.random() < mix_ratio else cand_label
)
else:
mixup_label = label
return mixup_img, mixup_label
else:
return pil_img, label
def __repr__(self):
return (
f"<Transform ({self.name}), prob={self.prob}, level={self.level}, "
f"alpha={self.alpha}, same_class_ratio={self.same_class_ratio}, "
f"prob_label={self.prob_label}>"
)
# Source: thanos-code-main/unagi/data/augmentations/mixup.py
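The core of the Mixup transform above, stripped of the dataset plumbing, is a single convex combination; a hedged standalone sketch:

import numpy as np
import torch

alpha = 1.0
mix_ratio = np.random.beta(alpha, alpha)  # lambda ~ Beta(alpha, alpha)
img_a, img_b = torch.rand(3, 8, 8), torch.rand(3, 8, 8)
mixed = mix_ratio * img_a + (1 - mix_ratio) * img_b
# With prob_label=True, labels mix with the same ratio:
# mixed_label = mix_ratio * label_a + (1 - mix_ratio) * label_b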
import random
from unagi.data.transforms.image.cutout import Cutout as CutoutTransform
from unagi.data.transforms.image.transform import UnagiTransform
class Cutout(UnagiTransform):
def __init__(
self,
name=None,
prob=1.0,
level=0,
alpha=1.0,
same_class_ratio=-1.0,
prob_label=False,
):
self.alpha = alpha
self.same_class_ratio = same_class_ratio
self.prob_label = prob_label
self.cutout = CutoutTransform(prob=prob, level=level)
super().__init__(name, prob, level)
def transform(self, pil_img, label, dp_x, dp_y):
if random.random() < self.prob:
cutout_img, cutout_label = self.cutout(pil_img, label)
return cutout_img, cutout_label
else:
return pil_img, label
# Source: thanos-code-main/unagi/data/augmentations/cutout.py
from unagi.data.augmentations.brightness import Brightness
from unagi.data.augmentations.cutout import Cutout
from unagi.data.augmentations.mixup import Mixup
AUGMENTATIONS = {
"mixup": Mixup,
# "invert": Invert,
"cutout": Cutout,
# "solarize": Solarize,
"brightness": Brightness,
# "rotate": Rotate,
}
# Source: thanos-code-main/unagi/data/augmentations/__init__.py
import random
from unagi.data.transforms.image.brightness import Brightness as BrightnessTransform
from unagi.data.transforms.image.transform import UnagiTransform
class Brightness(UnagiTransform):
def __init__(
self,
name=None,
prob=1.0,
level=0,
alpha=1.0,
same_class_ratio=-1.0,
prob_label=False,
):
self.alpha = alpha
self.same_class_ratio = same_class_ratio
self.prob_label = prob_label
self.brightness = BrightnessTransform(prob=prob, level=level)
super().__init__(name, prob, level)
def transform(self, pil_img, label, dp_x, dp_y):
if random.random() < self.prob:
            bright_img, bright_label = self.brightness(pil_img, label)
            return bright_img, bright_label
else:
return pil_img, label
# Source: thanos-code-main/unagi/data/augmentations/brightness.py
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
def binary_cross_entropy(logits, y):
# BCE loss requires squeezing last dimension of logits so it has the same
# shape as y
# requires y to be float, since it's overloaded to represent a probability
return F.binary_cross_entropy_with_logits(logits.squeeze(-1), y.float())
def binary_accuracy(logits, y):
return torch.eq(logits.squeeze(-1) >= 0, y).float().mean()
def cross_entropy(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return F.cross_entropy(logits, y)
def accuracy(logits, y):
logits = logits.view(-1, logits.shape[-1])
y = y.view(-1)
return torch.eq(torch.argmax(logits, dim=-1), y).float().mean()
def mse(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.mse_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.mse_loss(outs_masked, y_masked)
def mae(outs, y, len_batch=None):
# assert outs.shape[:-1] == y.shape and outs.shape[-1] == 1
# outs = outs.squeeze(-1)
if len(y.shape) < len(outs.shape):
assert outs.shape[-1] == 1
outs = outs.squeeze(-1)
if len_batch is None:
return F.l1_loss(outs, y)
else:
# Computes the loss of the first `lens` items in the batches
mask = torch.zeros_like(outs, dtype=torch.bool)
for i, l in enumerate(len_batch):
mask[i, :l, :] = 1
outs_masked = torch.masked_select(outs, mask)
y_masked = torch.masked_select(y, mask)
return F.l1_loss(outs_masked, y_masked)
# Metrics that can depend on the loss
def loss(x, y, loss_fn):
"""This metric may be useful because the training loss may add extra
regularization (e.g. weight decay implemented as L2 penalty),
while adding this as a metric skips the additional losses"""
return loss_fn(x, y)
def rmse(x, y, loss_fn):
return loss_fn(x, y) ** 0.5 # NOTE this isn't exactly correct
def bpb(x, y, loss_fn):
"""bits per byte (image density estimation, speech generation, char LM)"""
return loss_fn(x, y) / math.log(2)
def ppl(x, y, loss_fn):
return torch.exp(loss_fn(x, y))
# Implementation from
# https://github.com/pclucas14/pixel-cnn-pp/blob/master/utils.py
def log_prob_from_logits(x):
"""numerically stable log_softmax implementation that prevents overflow"""
# TF ordering
# axis = len(x.size()) - 1
# m, _ = torch.max(x, dim=axis, keepdim=True)
# return x - m - torch.log(torch.sum(torch.exp(x - m), dim=axis,
# keepdim=True))
m, _ = torch.max(x, dim=-1, keepdim=True)
x = x - m
return x - torch.logsumexp(x, dim=-1, keepdim=True)
def discretized_mix_logistic_loss_3d(x, l):
"""
log-likelihood for mixture of discretized logistics, specially for
the 3 channel case
assumes the data has been rescaled to [-1,1] interval
"""
# Pytorch ordering
# x = x.permute(0, 2, 3, 1)
# l = l.permute(0, 2, 3, 1)
xs = x.shape # [int(y) for y in x.size()]
ls = l.shape # [int(y) for y in l.size()]
# here and below: unpacking the params of the mixture of logistics
nr_mix = int(ls[-1] / 10)
logit_probs = l[..., :nr_mix]
# 3 for mean, scale, coef
l = l[..., nr_mix:].contiguous().view(xs + (3 * nr_mix,))
means = l[..., :, :nr_mix]
# log_scales = torch.max(l[..., :, nr_mix:2 * nr_mix], -7.)
log_scales = torch.clamp(l[..., :, nr_mix : 2 * nr_mix], min=-7.0)
coeffs = torch.tanh(l[..., :, 2 * nr_mix : 3 * nr_mix])
# here and below: getting the means and adjusting them based on preceding
# sub-pixels
x = x.contiguous()
# x = x.unsqueeze(-1) + Variable(torch.zeros(xs + [nr_mix]).cuda(),
# requires_grad=False)
x = x.unsqueeze(-1)
m2 = (means[..., 1, :] + coeffs[..., 0, :] * x[..., 0, :]).view(
xs[:-1] + (1, nr_mix)
)
m3 = (
means[..., 2, :]
+ coeffs[..., 1, :] * x[..., 0, :]
+ coeffs[..., 2, :] * x[..., 1, :]
).view(xs[:-1] + (1, nr_mix))
means = torch.cat((means[..., 0, :].unsqueeze(-2), m2, m3), dim=-2)
centered_x = x - means
inv_stdv = torch.exp(-log_scales)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0)
cdf_plus = torch.sigmoid(plus_in)
min_in = inv_stdv * (centered_x - 1.0 / 255.0)
cdf_min = torch.sigmoid(min_in)
# log probability for edge case of 0 (before scaling)
log_cdf_plus = plus_in - F.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
log_one_minus_cdf_min = -F.softplus(min_in)
cdf_delta = cdf_plus - cdf_min # probability for all other cases
mid_in = inv_stdv * centered_x
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2.0 * F.softplus(mid_in)
# now select the right output: left edge case, right edge case, normal
# case, extremely low prob case (doesn't actually happen for us)
# this is what we are really doing, but using the robust version below for
# extreme cases in other applications and to avoid NaN issue with
# tf.select()
# log_probs = tf.select(x < -0.999, l
# og_cdf_plus, tf.select(x > 0.999,
# log_one_minus_cdf_min, tf.log(cdf_delta)))
# robust version, that still works if probabilities are below 1e-5
# (which never happens in our code)
# tensorflow backpropagates through tf.select() by multiplying with
# zero instead of selecting: this requires use to use some ugly tricks to
# avoid potential NaNs the 1e-12 in tf.maximum(cdf_delta, 1e-12) is never
# actually used as output, it's purely there to get around the tf.select()
# gradient issue
# if the probability on a sub-pixel is below 1e-5, we use an approximation
# based on the assumption that the log-density is constant in the bin of
# the observed sub-pixel value
inner_inner_cond = (cdf_delta > 1e-5).float()
inner_inner_out = inner_inner_cond * torch.log(
torch.clamp(cdf_delta, min=1e-12)
) + (1.0 - inner_inner_cond) * (log_pdf_mid - np.log(127.5))
inner_cond = (x > 0.999).float()
inner_out = (
inner_cond * log_one_minus_cdf_min + (1.0 - inner_cond) * inner_inner_out
)
cond = (x < -0.999).float()
log_probs = cond * log_cdf_plus + (1.0 - cond) * inner_out
log_probs = torch.sum(log_probs, dim=-2) + log_prob_from_logits(logit_probs)
# return -torch.sum(log_sum_exp(log_probs))
return -torch.mean(torch.logsumexp(log_probs, dim=-1)) / 3.0
def discretized_mix_logistic_loss_1d(x, l):
"""log-likelihood for mixture of discretized logistics, assumes the data
has been rescaled to [-1,1] interval"""
# Pytorch ordering
# x = x.permute(0, 2, 3, 1)
# l = l.permute(0, 2, 3, 1)
xs = [int(y) for y in x.size()]
ls = [int(y) for y in l.size()]
# here and below: unpacking the params of the mixture of logistics
nr_mix = ls[-1] // 3 # k
logit_probs = l[..., :nr_mix] # (b, h, w, k)
l = l[..., nr_mix:].contiguous().view(xs + [nr_mix * 2]) # 2 for mean, scale
means = l[..., :, :nr_mix] # (b, h, w, 1, k)
log_scales = torch.clamp(
l[..., :, nr_mix : 2 * nr_mix], min=-7.0
) # (b, h, w, 1, k)
# here and below: getting the means and adjusting them based on preceding
# sub-pixels
x = x.contiguous() # (b, h, w, 1)
# x = x.unsqueeze(-1) + nn.Variable(torch.zeros(xs + [nr_mix]).cuda(),
# requires_grad=False)
x = x.unsqueeze(-1)
# means = torch.cat((means[:, :, :, 0, :].unsqueeze(3), m2, m3), dim=3)
centered_x = x - means # (b, h, w, 1, k)
inv_stdv = torch.exp(-log_scales) # (b, h, w, 1, k)
plus_in = inv_stdv * (centered_x + 1.0 / 255.0) # (b, h, w, 1, k)
cdf_plus = torch.sigmoid(plus_in) # (b, h, w, 1, k)
min_in = inv_stdv * (centered_x - 1.0 / 255.0) # (b, h, w, 1, k)
cdf_min = torch.sigmoid(min_in) # (b, h, w, 1, k)
# log probability for edge case of 0 (before scaling)
log_cdf_plus = plus_in - F.softplus(plus_in)
# log probability for edge case of 255 (before scaling)
log_one_minus_cdf_min = -F.softplus(min_in)
cdf_delta = cdf_plus - cdf_min # probability for all other cases
mid_in = inv_stdv * centered_x
# log probability in the center of the bin, to be used in extreme cases
# (not actually used in our code)
log_pdf_mid = mid_in - log_scales - 2.0 * F.softplus(mid_in)
inner_inner_cond = (cdf_delta > 1e-5).float()
inner_inner_out = inner_inner_cond * torch.log(
torch.clamp(cdf_delta, min=1e-12)
) + (1.0 - inner_inner_cond) * (log_pdf_mid - np.log(127.5))
inner_cond = (x > 0.999).float() # (b, h, w, 1, 1)
inner_out = (
inner_cond * log_one_minus_cdf_min + (1.0 - inner_cond) * inner_inner_out
)
cond = (x < -0.999).float() # (b, h, w, 1, 1)
log_probs = cond * log_cdf_plus + (1.0 - cond) * inner_out
# log_probs= torch.sum(log_probs, dim=3) + log_prob_from_logits(logit_probs)
assert log_probs.size(-2) == 1
log_probs = torch.squeeze(log_probs, dim=-2) + log_prob_from_logits(
logit_probs
) # (b, h, w, k)
# return -torch.sum(log_sum_exp(log_probs))
return -torch.mean(torch.logsumexp(log_probs, dim=-1))
""" My implementations of the mixture loss functions. Simpler and faster. """
def mixture_loss(outs, y, c=256, cdf_fn=torch.sigmoid, reduce="mean", scale=2.0):
"""
outs: (..., 3*k)
y: (...) int between 0 to c-1 (inclusive)
c: number of classes
scale: hyperparameter that increases the size of the buckets, i.e.
increases entropy of distribution (less confident)
"""
assert outs.shape[-1] % 3 == 0
k = outs.shape[-1] // 3
# Transform targets
y = y.unsqueeze(-1) # (..., 1)
# y_normalized = (2*y - (c-1)) / (c-1)
# y_normalized = (y - (c-1)/2) / ((c-1)/2)
# Buckets are slightly offset from normal implementation,
# to match Mixture() class below
y_normalized = (y - (c - 1) / 2) / ((c - 2) / 2)
# bin_max = y_normalized + 1./(c-1) # (..., 1)
# bin_min = y_normalized - 1./(c-1) # (..., 1)
bin_max = y_normalized + 1.0 / (c - 2) # (..., 1)
bin_min = y_normalized - 1.0 / (c - 2) # (..., 1)
bin_max = bin_max * scale
bin_min = bin_min * scale
# Unpack outputs
mixture_logits = outs[..., :k] # (..., k)
means = outs[..., k : 2 * k] # (..., k)
scales = outs[..., 2 * k : 3 * k].clamp(min=-7.0) # (..., k)
# Transform bins by mean and scale
# equivalent to dividing by exp(scales) or negating scales; marginally
# easier for me to reason about multiply
bin_min = (bin_min - means) * torch.exp(scales) # (..., k)
bin_max = (bin_max - means) * torch.exp(scales) # (..., k)
# Calculate probabilities
cdf_max = cdf_fn(bin_max)
cdf_min = cdf_fn(bin_min)
# Edge cases for endpoints
z = torch.zeros_like(
y, dtype=torch.float
) # torch.where doesn't support float32 scalar...
tail_min = torch.where(y == 0, cdf_min, z)
tail_max = torch.where(y == c - 1, 1.0 - cdf_max, z)
probs = (
cdf_max - cdf_min + tail_min + tail_max + 1e-8
) # pad for numerical stability
    # Finish calculation in logit space; I doubt it's more stable but previous
# implementations do this
# Equivalent to working in probability space:
# probs = torch.sum(torch.softmax(mixture_logits, dim=-1) * probs, dim=-1)
# log_probs = torch.log(probs)
log_probs = torch.log(probs)
log_probs = torch.logsumexp(
log_probs + log_prob_from_logits(mixture_logits), dim=-1
)
if reduce == "mean":
return -log_probs.mean()
elif reduce == "none":
return -log_probs
else:
raise NotImplementedError
def mixture_loss_kd(outs, y, c=256, cdf_fn=torch.sigmoid, reduce="mean"):
"""Mixture loss for outputting multiple distributions at once, where later
predictions can depend linearly on previous ones.
outs: (..., 3*k)
y: (..., d) int between 0 to c-1 (inclusive)
c: number of classes
"""
d = y.shape[-1]
factor = 1 + 2 * d + d * (d - 1) // 2
assert outs.shape[-1] % factor == 0
k = outs.shape[-1] // factor
# Transform targets
y = y.unsqueeze(-1)
y_normalized = (y - (c - 1) / 2) / ((c - 1) / 2)
bin_max = y_normalized + 1.0 / (c - 1) # (..., d)
bin_min = y_normalized - 1.0 / (c - 1) # (..., d)
# y_normalized = (y - (c-1)/2) / ((c-2)/2)
# bin_max = y_normalized + 1./(c-2) # (..., d)
# bin_min = y_normalized - 1./(c-2) # (..., d)
bin_max = bin_max * 1.0
bin_min = bin_min * 1.0
# Unpack outputs
outs = rearrange(outs, "... (d k) -> ... d k", k=k)
mixture_logits = outs[..., 0, :] # (..., k)
means = outs[..., 1 : 1 + d, :] # (..., d*k)
scales = outs[..., 1 + d : 1 + 2 * d, :] # (..., d*k)
coeffs = torch.tanh(outs[..., 1 + 2 * d :, :])
# Transform means with linear combinations
# means = rearrange(means, '... (d k) -> ... d k', k=k)
# scales = rearrange(scales, '... (d k) -> ... d k', k=k)
idx = 0
for i in range(1, d):
means[..., i, :] += torch.sum(
coeffs[..., idx : idx + i, :] * y_normalized[..., :i, :], dim=-2
) # (..., k)
idx += i
# Transform bins by mean and scale
# equivalent to dividing by exp(scales) or negating scales; marginally
# easier for me to reason about multiply
bin_min = (bin_min - means) * torch.exp(scales) # (..., d, k)
bin_max = (bin_max - means) * torch.exp(scales) # (..., d, k)
# Calculate probabilities
cdf_max = cdf_fn(bin_max)
cdf_min = cdf_fn(bin_min)
# Edge cases for endpoints
z = torch.zeros_like(
y, dtype=torch.float
) # torch.where doesn't support float32 scalar...
tail_min = torch.where(y == 0, cdf_min, z)
tail_max = torch.where(y == c - 1, 1.0 - cdf_max, z)
probs = (
cdf_max - cdf_min + tail_min + tail_max + 1e-8
) # pad for numerical stability
    # Finish calculation in logit space; I doubt it's more stable but previous
# implementations do this
# Equivalent to working in probability space:
# probs = torch.sum(torch.softmax(mixture_logits, dim=-1) * probs, dim=-1)
# log_probs = torch.log(probs)
log_probs = torch.log(probs) # (..., d, k)
log_probs = torch.sum(log_probs, dim=-2) # (..., k)
log_probs = torch.logsumexp(
log_probs + log_prob_from_logits(mixture_logits), dim=-1
) # (...)
if reduce == "mean":
return -log_probs.mean() / 3.0
elif reduce == "none":
return -log_probs
else:
raise NotImplementedError
def mixture_sample(x):
"""x: (..., 3*k) mixture params"""
# Pytorch ordering
assert x.shape[-1] % 3 == 0
k = x.shape[-1] // 3
# Unpack outputs
mixture_logits = x[..., :k] # (..., k)
means = x[..., k : 2 * k] # (..., k)
scales = x[..., 2 * k : 3 * k].clamp(min=-7.0) # (..., k)
# sample mixture indicator from softmax
eps = 1e-8
temp = torch.rand_like(means) * (1 - 2 * eps) + eps
temp = mixture_logits - torch.log(-torch.log(temp))
_, argmax = temp.max(dim=-1, keepdim=True) # (..., 1)
means = torch.gather(means, -1, argmax).squeeze(-1)
scales = torch.gather(scales, -1, argmax).squeeze(-1)
u = torch.rand_like(means) * (1 - 2 * eps) + eps
x = means + (torch.log(u) - torch.log(1.0 - u)) / torch.exp(scales) # (...)
return x
def piecewise_cdf(x):
"""Piecewise linear function with nodes at (-1, 0) and (1, 1)"""
x = F.relu(1 + x) - 1
x = 1 - F.relu(1 - x)
x = (x + 1) / 2
return x
def pdf(m, s, buckets, cdf_fn):
"""
m: (...) mean
s: (...) scale
buckets: (..., n-1)
returns: (..., n)
"""
samples = s.unsqueeze(-1) * (buckets - m.unsqueeze(-1))
# samples = (buckets - m.unsqueeze(-1)) / s.unsqueeze(-1)
# samples = s.unsqueeze(-1) * buckets + m.unsqueeze(-1)
c = cdf_fn(samples) # (..., b) between 0, 1
p0 = c[..., :1] # (..., 1)
pn = 1.0 - c[..., -1:] # (..., 1)
p = c[..., 1:] - c[..., :-1] # (..., b-2)
probs = torch.cat([p0, p, pn], dim=-1) # (..., b)
return probs
class Mixture(nn.Module):
def __init__(self, b, a, cdf="piecewise"):
super().__init__()
self.b = b
self.a = a
self.cdf_fn = {
"piecewise": piecewise_cdf,
"sigmoid": F.sigmoid,
}[cdf]
assert b % 2 == 0
buckets = torch.linspace(-1.0, 1.0, b - 1) * a
# buckets = torch.linspace(-1.0+1/(b-1), 1.0-1/(b-1), b-1) * a
self.register_buffer("buckets", buckets)
def forward(self, x):
"""
x: (..., 3*k)
"""
l, m, s = torch.unbind(rearrange(x, "... (z a) -> ... z a", z=3), dim=-2)
p = pdf(m, torch.exp(s), self.buckets, self.cdf_fn) # (..., k, b)
weights = F.softmax(l, dim=-1) # (..., k)
probs = torch.sum(weights.unsqueeze(-1) * p, dim=-2) # (..., b)
logits = torch.log(probs + 1e-8)
return logits
def test_mixture_loss():
logits = torch.FloatTensor(5, 1024, 30).normal_()
y = torch.randint(0, 256, (5, 1024, 1))
ans = []
for target in range(256):
y = torch.ones(5, 1024, dtype=torch.long) * target
loss = mixture_loss(logits, y, reduce="none")
ans.append(torch.exp(-loss))
total_prob = sum(ans)
print(torch.max(total_prob))
print(torch.min(total_prob))
def test_mixture_function():
m = torch.tensor([0.0])
s = torch.tensor([1.0])
buckets = torch.tensor([-1.0, 0.0, 1.0])
p = pdf(m, s, buckets, piecewise_cdf)
print(p)
mixture = Mixture(4, 1.0, "piecewise")
s = torch.tensor([0.0])
l = torch.tensor([0.0])
p = mixture(torch.cat([m, s, l], dim=-1))
print(p)
def test_pixelcnn_mixture():
# x = torch.FloatTensor(5, 1024, 1).uniform_(-1., 1.)
y = torch.randint(0, 256, (5, 1024, 1))
x = (y - 255 / 2) / (255 / 2)
logits = torch.FloatTensor(5, 1024, 30).normal_()
loss = discretized_mix_logistic_loss_1d(x, logits)
print(loss)
loss = mixture_loss(logits, y.squeeze(-1))
print(loss)
mixture = Mixture(256, 2.0, "sigmoid")
loss = F.cross_entropy(mixture(logits).reshape(-1, 256), y.view(-1))
print(loss)
y = torch.randint(0, 256, (5, 32, 32, 3))
x = (y - 255 / 2) / (255 / 2)
# x = torch.FloatTensor(5, 32, 32, 3).uniform_(-1., 1.)
logits = torch.FloatTensor(5, 32, 32, 30).normal_()
loss = discretized_mix_logistic_loss_3d(x, logits)
print(loss)
loss = mixture_loss_kd(logits, y)
print(loss)
def test_mixture_sample():
B = 8
k = 5
# x = torch.rand(B, 3*k)
means = torch.linspace(-1.0, 1.0, k)
scales = torch.full((B, k), 5.0) # Higher scale means more confident
logits = torch.zeros(B, k)
x = torch.cat([logits, means.repeat(B, 1), scales], dim=-1)
samples = mixture_sample(x)
print(samples.shape, samples) # Should see values close to -1, -.5, 0, .5, 1
# should have a better way to do this
output_metric_fns = {
"binary_cross_entropy": binary_cross_entropy,
"cross_entropy": cross_entropy,
"binary_accuracy": binary_accuracy,
"accuracy": accuracy,
"eval_loss": loss,
"mixture": mixture_loss,
"mixture_kd": mixture_loss_kd,
"mse": mse,
"mae": mae,
}
loss_metric_fns = {
"loss": loss,
"bpb": bpb,
"ppl": ppl,
}
metric_fns = {**output_metric_fns, **loss_metric_fns} # TODO py3.9
if __name__ == "__main__":
# test_mixture_function()
# test_mixture_loss()
# test_pixelcnn_mixture()
test_mixture_sample()
# Source: thanos-code-main/unagi/trainer/metrics.py
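A hedged shape sanity check for mixture_loss above: the last dimension packs k mixture logits, k means, and k log-scales (3*k total):

import torch

from unagi.trainer.metrics import mixture_loss

k = 10
outs = torch.randn(4, 16, 3 * k)    # (batch, positions, 3k) mixture params
y = torch.randint(0, 256, (4, 16))  # integer targets in [0, c-1]
nll = mixture_loss(outs, y)         # scalar mean negative log-likelihood
print(nll.item())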
from dataclasses import dataclass
from typing import Dict, Union
import torchmetrics as tm
import unagi.trainer.callbacks as C
import unagi.trainer.metrics as M
@dataclass
class UnagiTask:
name: str
task_weight: Union[int, float]
task_flow: Dict
losses: Dict
metrics: Dict
torchmetrics: Dict
callbacks: Dict
# weight: float
def __post_init__(self):
# use once hydra-fied
"""self.metric_names = [m.module for m in self.metrics]
self.torchmetric_names = [m.module for m in self.torchmetrics]"""
self._tracked_torchmetrics = {}
self.all_callbacks = {}
for callback_name, callback in self.callbacks.items():
if callback["module"] in C.output_callback_fns:
callback["task_name"] = self.name
callback["name"] = callback_name
self.all_callbacks[callback_name] = C.output_callback_fns[
callback["module"]
](**callback)
def _init_torchmetrics(self, prefix):
"""
Instantiate torchmetrics.
"""
self._tracked_torchmetrics[prefix] = {}
for name, torchmetric in self.torchmetrics.items():
# TODO: .to('cuda') is a hack to make it work on GPU, generalize
self._tracked_torchmetrics[prefix][torchmetric["module"]] = getattr(
tm, name
)(
**{
**{
k: v
                        for k, v in torchmetric.items()
if k not in ["node", "module", "inputs"]
},
"compute_on_step": False,
}
).to(
"cuda"
)
# TODO: deal with num_classes
# for name in self.torchmetric_names:
# if name in ['AUROC', 'StatScores', 'Precision', 'Recall', 'F1']:
# self._tracked_torchmetrics[prefix][name] = getattr(tm, name)(
# average='macro', num_classes=self.dataset.d_output,
# compute_on_step=False).to('cuda')
# elif '@' in name:
# k = int(name.split('@')[1])
# mname = name.split('@')[0]
# self._tracked_torchmetrics[prefix][name] = getattr(tm, mname)(
# average='macro', num_classes=self.dataset.d_output,
# compute_on_step=False, top_k=k).to('cuda')
# else:
# self._tracked_torchmetrics[prefix][name] = getattr(tm, name)(
# compute_on_step=False).to('cuda')
def _reset_torchmetrics(self, prefix=None):
"""
Reset torchmetrics for a prefix
associated with a particular dataloader (e.g. train, val, test).
Generally do this at the start of an epoch.
"""
all_prefixes = [prefix] if prefix is not None else self._tracked_torchmetrics
for prefix in all_prefixes:
            for torchmetric in self.torchmetrics.values():
try:
self._tracked_torchmetrics[prefix][torchmetric["module"]].reset()
except KeyError: # metrics don't exist yet
pass
def get_torchmetrics(self, prefix):
"""
Compute torchmetrics for a prefix associated with
a particular dataloader (e.g. train, val, test).
Generally do this at the end of an epoch.
"""
        return {
            torchmetric["module"]: self._tracked_torchmetrics[prefix][
                torchmetric["module"]
            ].compute()
            for torchmetric in self.torchmetrics.values()
        }
def update_torchmetrics(self, x, y, prefix):
"""
Update torchmetrics with new x, y.
Prefix corresponds to a particular dataloader (e.g. train, val, test).
Generally call this every batch.
"""
if prefix not in self._tracked_torchmetrics:
self._init_torchmetrics(prefix)
        for torchmetric in self.torchmetrics.values():
self._tracked_torchmetrics[prefix][torchmetric["module"]].update(x, y)
def get_metric(self, metric, *args):
"""
        Metrics are just functions:
        output metrics are a function of output and target;
        loss metrics are a function of the loss (e.g. perplexity)
"""
# TODO: handle the loss metrics (perplexity, bpb)
"""if metric.module in M.output_metric_fns:
return M.output_metric_fns[metric.module](*args)"""
if metric["module"] in M.output_metric_fns:
return M.output_metric_fns[metric["module"]](*args)
return None
def get_callback(self, callback, **kwargs):
"""
Callbacks are just arbitrary functions
Can be used for, e.g., custom logging
"""
if callback["module"] in C.output_callback_fns:
return C.output_callback_fns[callback["module"]](**kwargs)
return None
# Source: thanos-code-main/unagi/trainer/task.py
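A hedged sketch of the dict shapes UnagiTask expects; all names and values here are hypothetical, and the torchmetrics keys must be class names that exist in the torchmetrics package:

from unagi.trainer.task import UnagiTask

task = UnagiTask(
    name="classify",
    task_weight=1.0,
    task_flow={},  # node format sketched under unagi/trainer/model.py below
    losses={},
    metrics={"acc": {"module": "accuracy"}},
    torchmetrics={"Accuracy": {"module": "accuracy_tm"}},
    callbacks={},
)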
from unagi.models import MODULE_DICTS
from unagi.tasks import LOSS_MODULE_REGISTRY, TASK_PREPROCESSING_LAYER
MODULE_REGISTRY = {
"preprocessors": TASK_PREPROCESSING_LAYER,
"losses": LOSS_MODULE_REGISTRY,
**MODULE_DICTS,
}
# Source: thanos-code-main/unagi/trainer/__init__.py
import logging
from typing import Any, Dict, List, Optional, Union
from torch import nn
from torch.nn import ModuleDict
from unagi.task import UnagiTask
logger = logging.getLogger(__name__)
class UnagiModel(nn.Module):
"""A class to build multi-task model.
Args:
name: Name of the model, defaults to None.
tasks: A task or a list of tasks.
"""
def __init__(
self,
tasks: Optional[Union[UnagiTask, List[UnagiTask]]] = None,
module_dict: ModuleDict = None,
) -> None:
super().__init__()
# Initiate the model attributes
self.module_dict = module_dict
self.task_flows = {name: task.task_flow for name, task in tasks.items()}
self.task_names = list(self.task_flows.keys())
def _get_data_from_output_dict(
self,
output_dict: Dict[str, Any],
index: Any,
task_flow: Dict[str, Any],
) -> Any:
"""
        Fetch an item from output_dict based on the given index.
        See the definition of Action for the valid index formats.
"""
if index[0] != "_input_":
            index_aug = list(index)
index_aug[0] = (index[0], task_flow[index[0]]["module"])
index = index_aug
# Handle any output_dict's item and index is str or int
if isinstance(index, (str, int)):
if index in output_dict:
return output_dict[index]
else:
raise ValueError(f"Action {index}'s output is not in the output_dict.")
# Handle output_dict's item is a list, tuple or dict, and index is (X, Y)
elif isinstance(output_dict[index[0]], (list, tuple)):
if isinstance(index[1], int):
return output_dict[index[0]][index[1]]
else:
raise ValueError(
f"Action {index[0]} output has {type(output_dict[index[0]])} type, "
f"while index has {type(index[1])} not int."
)
elif isinstance(output_dict[index[0]], dict):
if index[1] in output_dict[index[0]]:
return output_dict[index[0]][index[1]]
else:
raise ValueError(
f"Action {index[0]}'s output doesn't have attribute {index[1]}."
)
# Handle output_dict's item is neither a list or dict, and index is (X, Y)
elif int(index[1]) == 0:
return output_dict[index[0]]
raise ValueError(f"Cannot parse action index {index}.")
def forward(
self,
X_dict: Dict[str, Any],
# task_names: List[str],
) -> Dict[str, Any]:
"""Forward based on input and task flow.
Note:
We assume that all shared modules from all tasks are based on the
same input.
Args:
X_dict: The input data
# task_names: The task names that needs to forward.
Returns:
The output of all forwarded modules
"""
output_dict = dict(_input_=X_dict)
# Call forward for each task
for task_name in self.task_names:
for node, action in self.task_flows[task_name].items():
if (node, action["module"]) not in output_dict:
if action["inputs"]:
input = [
self._get_data_from_output_dict(
output_dict,
_input,
self.task_flows[task_name],
)
for _input in action["inputs"]
]
# TODO: this might be important for the multi-gpu case
# try:
# action_module_device = (
# self.module_device[action.module]
# if action.module in self.module_device
# else default_device
# )
# input = move_to_device(
# [
# self._get_data_from_output_dict(output_dict, _input)
# for _input in action.inputs
# ],
# action_module_device,
# )
# except Exception:
# raise ValueError(f"Unrecognized action {action}.")
output = self.module_dict[action["module"]].forward(*input)
else:
                    # TODO: handle the multi-device case when the action has no inputs
output = self.module_dict[action["module"]].forward(output_dict)
output_dict[(node, action["module"])] = output
return output_dict
# def __repr__(self) -> str:
# """Represent the model as a string."""
# cls_name = type(self).__name__
# return f"{cls_name}"#(name={self.name})"
# def add_tasks(self, tasks: Union[UnagiTask, List[UnagiTask]]) -> None:
# """
# Build the MTL network using all tasks.
# Args:
# tasks: A task or a list of tasks.
# """
# if not isinstance(tasks, Iterable):
# tasks = [tasks]
# for task in tasks:
# self.add_task(task)
# def add_task(self, task: UnagiTask) -> None:
# """Add a single task into MTL network.
# Args:
# task: A task to add.
# """
# if not isinstance(task, UnagiTask):
# raise ValueError(f"Unrecognized task type {task}.")
    # # TODO: move this check that there are no duplicate tasks somewhere else
# # if task.name in self.task_names:
# # raise ValueError(
# # f"Found duplicate task {task.name}, different task should use "
# # f"different task name."
# # )
# # # Combine module_dict from all tasks
# # for key in task.module_dict.keys():
# # if key in self.module_dict.keys():
# # task.module_dict[key] = self.module_dict[key]
# # else:
# # self.module_dict[key] = task.module_dict[key]
# # Collect task name
# self.task_names.add(task.name)
# # Collect task flow
# self.task_flows[task.name] = task.task_flow
# Source: thanos-code-main/unagi/trainer/model.py
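A hedged sketch of the task_flow structure that UnagiModel.forward consumes; node and module names are hypothetical:

# Each node names a module in module_dict and lists (producer, key) inputs;
# "_input_" pulls the value straight out of X_dict.
task_flow = {
    "embed": {"module": "img_embed", "inputs": [("_input_", "image")]},
    "encode": {"module": "encoder", "inputs": [("embed", 0)]},
}
# forward() caches each result under a (node, module) key, e.g.
# output_dict[("embed", "img_embed")].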
import math
import matplotlib.pyplot as plt
import numpy as np
import torch
import wandb
from einops import rearrange
from sklearn.manifold import TSNE
class UnagiCallback:
def __init__(self):
self.train_batches = {}
self.val_batches = {}
self.test_batches = {}
def on_train_batch_end(self, outputs, name):
if name not in self.train_batches.keys():
self.train_batches[name] = [outputs]
else:
self.train_batches[name].append(outputs)
def on_validation_batch_end(self, outputs, name):
if name not in self.val_batches.keys():
self.val_batches[name] = [outputs]
else:
self.val_batches[name].append(outputs)
def on_test_batch_end(self, outputs, name):
if name not in self.test_batches.keys():
self.test_batches[name] = [outputs]
else:
self.test_batches[name].append(outputs)
def on_train_epoch_end(self):
for split_name, batch_list in self.train_batches.items():
self.train_batches[split_name] = []
def on_validation_epoch_end(self):
for split_name, batch_list in self.val_batches.items():
self.val_batches[split_name] = []
def on_test_epoch_end(self):
for split_name, batch_list in self.test_batches.items():
self.test_batches[split_name] = []
class LogImage(UnagiCallback):
def __init__(
self,
name,
logging_batch_idx,
max_images,
input_names,
task_name,
**kwargs,
):
super().__init__()
self.name = name
self.logging_batch_idx = logging_batch_idx
self.max_images = max_images
self.task_name = task_name
self.input_names = input_names
def _on_batch_end(self, output_batch):
inputs = output_batch["inputs"]
prefix = output_batch["prefix"]
trainer = output_batch["trainer"]
batch_idx = output_batch["batch_idx"]
if batch_idx == self.logging_batch_idx:
for input_name, inp in zip(self.input_names, inputs):
imgs = [wandb.Image(img) for img in inp][: self.max_images]
trainer.logger.experiment.log(
{
f"{prefix}/{self.task_name}_{self.name}_{input_name}": imgs,
"trainer/global_step": trainer.global_step,
}
)
def on_train_batch_end(self, output_batch):
super().on_train_batch_end(output_batch, output_batch["prefix"])
self._on_batch_end(output_batch)
def on_validation_batch_end(self, output_batch):
super().on_validation_batch_end(output_batch, output_batch["prefix"])
self._on_batch_end(output_batch)
def on_test_batch_end(self, output_batch):
super().on_test_batch_end(output_batch, output_batch["prefix"])
self._on_batch_end(output_batch)
class LogEmbedding(UnagiCallback):
def __init__(
self,
name,
logging_batch_idx,
input_names,
task_name,
batch_size,
eval_batch_size,
class_names,
plot_embeddings,
plot_embeddings_stride,
**kwargs,
):
super().__init__()
self.name = name
self.class_names = class_names
self.logging_batch_idx = logging_batch_idx
self.task_name = task_name
self.input_names = input_names
self.batch_size = batch_size
self.eval_batch_size = eval_batch_size
# self.val_input_embeddings = [[] for _ in range(len(self.input_names))]
# self.train_input_embeddings = [
# [] for _ in range(len(self.input_names))
# ]
# self.test_input_embeddings = [[] for _ in range(len(self.input_names))]
self.val_input_embeddings = {}
self.test_input_embeddings = {}
self.train_input_embeddings = {}
self.plot_embeddings = plot_embeddings
self.plot_embeddings_stride = plot_embeddings_stride
def _extract_views(self, batch_size, inp, view=0):
n_views = math.ceil(inp.shape[0] / batch_size)
embs = rearrange(inp, "(b v) ... -> b v ...", v=n_views)
embs = embs[:, view, ...]
return embs
def _on_batch_end(self, output_batch, split):
inputs = output_batch["inputs"]
prefix = output_batch["prefix"]
for i, inp in enumerate(inputs):
if split == "train":
if prefix not in self.train_input_embeddings:
self.train_input_embeddings[prefix] = [
[] for _ in range(len(self.input_names))
]
self.train_input_embeddings[prefix][i].append(
self._extract_views(self.batch_size, inp)
)
elif split == "test":
if prefix not in self.test_input_embeddings:
self.test_input_embeddings[prefix] = [
[] for _ in range(len(self.input_names))
]
self.test_input_embeddings[prefix][i].append(
self._extract_views(self.eval_batch_size, inp)
)
else:
if prefix not in self.val_input_embeddings:
self.val_input_embeddings[prefix] = [
[] for _ in range(len(self.input_names))
]
self.val_input_embeddings[prefix][i].append(
self._extract_views(self.eval_batch_size, inp)
)
def _plot_tsne(self, tsne_results, categories, s=1, figsize=(12, 8)):
plt.figure(figsize=figsize)
for indices, label in categories:
plt.scatter(
tsne_results[:, 0][indices],
tsne_results[:, 1][indices],
label=label,
s=s,
)
plt.legend()
return plt
def on_train_batch_end(self, output_batch):
super().on_train_batch_end(output_batch, output_batch["prefix"])
self._on_batch_end(output_batch, "train")
def on_validation_batch_end(self, output_batch):
super().on_validation_batch_end(output_batch, output_batch["prefix"])
self._on_batch_end(output_batch, "val")
def on_test_batch_end(self, output_batch):
super().on_test_batch_end(output_batch, output_batch["prefix"])
self._on_batch_end(output_batch, "test")
def _on_epoch_end(self, split):
# inputs = []
if split == "train":
input_embeddings = self.train_input_embeddings
batches = self.train_batches
self.train_input_embeddings = {}
elif split == "test":
input_embeddings = self.test_input_embeddings
batches = self.test_batches
self.test_input_embeddings = {}
else:
input_embeddings = self.val_input_embeddings
batches = self.val_batches
self.val_input_embeddings = {}
for prefix, inps in input_embeddings.items():
inputs = []
for i in inps: # iterates through all inputs
inputs.append(torch.cat(i, dim=0))
input_embeddings[prefix] = inputs
for prefix, inputs in input_embeddings.items():
print(f"logging {prefix} embeddings to wandb...may be slow")
trainer = batches[prefix][-1]["trainer"]
for input_name, inp in zip(self.input_names, inputs):
if len(inp.shape) == 1:
inp = inp.unsqueeze(-1)
inp = inp.numpy()
columns = [str(x) for x in range(inp.shape[1])]
inp = [inp[x] for x in range(inp.shape[0])]
inp = wandb.Table(columns=columns, data=inp)
trainer.logger.experiment.log(
{
f"{prefix}/{self.task_name}_{self.name}_{input_name}_"
f"epoch{trainer.current_epoch}": inp
}
)
if self.plot_embeddings:
print(f"generating {prefix} TSNE plot...may be slow")
labels, embs = None, None
for input_name, inp in zip(self.input_names, inputs):
if input_name == "labels":
labels = inp.numpy()
if input_name not in [
"labels",
"sample_uid",
]: # this is hacky
embs = inp
tsne = TSNE(n_iter=300)
if len(embs) > self.plot_embeddings_stride * 100:
embs = embs[:: self.plot_embeddings_stride]
labels = labels[:: self.plot_embeddings_stride]
tsne_results = tsne.fit_transform(embs)
categories = [
(np.argwhere(labels == i).flatten(), self.class_names[i])
for i in range(len(self.class_names))
]
plt_mpl = self._plot_tsne(tsne_results, categories, figsize=(6, 6))
plt_wb = wandb.Image(plt_mpl)
wandb.log(
{
f"{prefix}/{self.task_name}_{self.name}_{input_name}"
f"_tsne": plt_wb,
"trainer/global_step": trainer.global_step,
}
)
def on_train_epoch_end(self):
self._on_epoch_end("train")
super().on_train_epoch_end()
def on_validation_epoch_end(self):
self._on_epoch_end("val")
super().on_validation_epoch_end()
def on_test_epoch_end(self):
self._on_epoch_end("test")
super().on_test_epoch_end()
"""
def log_image(
# prefix,
task_name,
trainer,
name,
input_names,
# trainer,
logging_batch_idx,
# inputs,
# batch_idx,
max_images,
# batch_args,
**kwargs,
):
batch_args = kwargs[f"batch_{logging_batch_idx}"]
inputs = batch_args["inputs"]
prefix = batch_args["prefix"]
# name = batch_args["name"]
# trainer = batch_args["trainer"]
# task_name = batch_args["task_name"]
for input_name, inp in zip(input_names, inputs):
imgs = [wandb.Image(img) for img in inp][:max_images]
trainer.logger.experiment.log(
{
f"{prefix}/{task_name}_{name}_{input_name}": imgs,
"trainer/global_step": trainer.global_step,
}
)
def log_embedding(
# prefix,
task_name,
name,
input_names,
trainer,
# logging_batch_idx,
# inputs,
# batch_idx,
# max_images,
# batch_args,
**kwargs,
):
# batch_args = kwargs[f"batch_{logging_batch_idx}"]
batch_inputs = [[] for _ in range(len(input_names))]
for key, item in kwargs.items():
if "batch_" in key and key != "logging_batch_idx":
for i, inp in enumerate(item["inputs"]):
batch_inputs[i].append(inp)
prefix = item["prefix"]
# name = item["name"]
# trainer = item["trainer"]
# task_name = item["task_name"]
inputs = []
for inps in batch_inputs:
inputs.append(torch.cat(inps, dim=0))
for input_name, inp in zip(input_names, inputs):
# imgs = [wandb.Image(img) for img in inp][:max_images]
columns = [str(x) for x in range(inp.shape[1])]
inp = [inp[x] for x in range(inp.shape[0])]
inp = wandb.Table(columns=columns, data=inp)
trainer.logger.experiment.log(
{
f"{prefix}/{task_name}_{name}_{input_name}_"
f"epoch{trainer.current_epoch}": inp
}
)
"""
output_callback_fns = {"log_image": LogImage, "log_embedding": LogEmbedding}
# Source: thanos-code-main/unagi/trainer/callbacks.py
""" Scheduler Class -- Credit: Emmental"""
import random
from abc import ABC, abstractmethod
from typing import Iterator, List, Tuple, Union
import torch
from unagi.trainer.model import UnagiModel
class Scheduler(ABC):
"""Generate batch generator from dataloaders in designed order."""
def __init__(self) -> None:
"""Initialize Scheduler."""
pass
def get_num_batches(self, dataloaders: List[torch.utils.data.DataLoader]) -> int:
"""Get total number of batches per epoch.
Args:
dataloaders: List of dataloaders.
Returns:
Total number of batches per epoch.
"""
raise NotImplementedError()
@abstractmethod
def get_batches(
self,
dataloaders: List[torch.utils.data.DataLoader],
model: UnagiModel = None,
) -> Iterator[Union[Tuple, List[Tuple]]]:
"""Generate batch generator from all dataloaders for one epoch.
Args:
dataloaders: List of dataloaders.
model: The training model, defaults to None.
Returns:
A generator of all batches.
"""
raise NotImplementedError()
class RoundRobinScheduler(Scheduler):
"""Generate batch generator from all dataloaders in round robin order.
Args:
        fillup: Whether to fill up shorter dataloaders so all yield the same number of batches.
"""
def __init__(self, fillup: bool = False) -> None:
"""Initialize RoundRobinScheduler."""
super().__init__()
self.fillup = fillup
def get_num_batches(self, dataloaders: List[torch.utils.data.DataLoader]) -> int:
"""Get total number of batches per epoch.
Args:
dataloaders: List of dataloaders.
Returns:
Total number of batches per epoch.
"""
batch_counts = [len(dataloader) for dataloader in dataloaders]
if self.fillup:
batch_counts = [max(batch_counts)] * len(dataloaders)
"""for idx in range(len(dataloaders)):
if dataloaders[idx].n_batches:
batch_counts[idx] = dataloaders[idx].n_batches"""
return sum(batch_counts)
def get_batches(
self,
dataloaders: List[torch.utils.data.DataLoader],
model: UnagiModel = None,
) -> Iterator[Union[Tuple, List[Tuple]]]:
"""Generate batch generator from all dataloaders for one epoch.
Args:
dataloaders: List of dataloaders.
model: The training model, defaults to None.
Returns:
A generator of all batches.
"""
# task_to_label_dicts = [
# dataloader.task_to_label_dict for dataloader in dataloaders
# ]
# uid_names = [dataloader.uid for dataloader in dataloaders]
# data_names = [dataloader.data_name for dataloader in dataloaders]
# splits = [dataloader.split for dataloader in dataloaders]
# Calc the batch size for each dataloader
batch_counts = [len(dataloader) for dataloader in dataloaders]
data_loaders = [iter(dataloader) for dataloader in dataloaders]
if self.fillup:
batch_counts = [max(batch_counts)] * len(dataloaders)
"""for idx in range(len(dataloaders)):
if dataloaders[idx].n_batches:
batch_counts[idx] = dataloaders[idx].n_batches"""
dataloader_indexer = []
for idx, count in enumerate(batch_counts):
dataloader_indexer.extend([idx] * count)
random.shuffle(dataloader_indexer)
for data_loader_idx in dataloader_indexer:
# uid_name = uid_names[data_loader_idx]
try:
batch = next(data_loaders[data_loader_idx])
except StopIteration:
data_loaders[data_loader_idx] = iter(dataloaders[data_loader_idx])
batch = next(data_loaders[data_loader_idx])
if not isinstance(batch, dict):
X_dict, Y_dict = batch
else:
X_dict = batch
Y_dict = None
yield (
# X_dict[uid_name],
X_dict,
Y_dict,
# task_to_label_dicts[data_loader_idx],
# data_names[data_loader_idx],
# splits[data_loader_idx],
)
class SequentialScheduler(Scheduler):
"""Generate batch generator from all dataloaders in sequential order.
Args:
        fillup: Whether to fill up shorter dataloaders so all yield the same number of batches.
"""
def __init__(self, fillup: bool = False) -> None:
"""Initialize SequentialScheduler."""
super().__init__()
self.fillup = fillup
def get_num_batches(self, dataloaders: List[torch.utils.data.DataLoader]) -> int:
"""Get total number of batches per epoch.
Args:
dataloaders: List of dataloaders.
Returns:
Total number of batches per epoch.
"""
batch_counts = [len(dataloader) for dataloader in dataloaders]
if self.fillup:
batch_counts = [max(batch_counts)] * len(dataloaders)
"""for idx in range(len(dataloaders)):
if dataloaders[idx].n_batches:
batch_counts[idx] = dataloaders[idx].n_batches"""
return sum(batch_counts)
def get_batches(
self,
dataloaders: List[torch.utils.data.DataLoader],
model: UnagiModel = None,
) -> Iterator[Union[Tuple, List[Tuple]]]:
"""Generate batch generator from all dataloaders for one epoch.
Args:
dataloaders: List of dataloaders.
model: The training model, defaults to None.
Returns:
A generator of all batches.
"""
# task_to_label_dicts = [
# dataloader.task_to_label_dict for dataloader in dataloaders
# ]
# data_names = [dataloader.data_name for dataloader in dataloaders]
# splits = [dataloader.split for dataloader in dataloaders]
# uid_names = [dataloader.uid for dataloader in dataloaders]
# Calc the batch size for each dataloader
data_loaders = [iter(dataloader) for dataloader in dataloaders]
batch_counts = [len(dataloader) for dataloader in dataloaders]
if self.fillup:
batch_counts = [max(batch_counts)] * len(dataloaders)
"""for idx in range(len(dataloaders)):
if dataloaders[idx].n_batches:
batch_counts[idx] = dataloaders[idx].n_batches"""
for (
data_loader_idx,
batch_count
# (task_to_label_dict, data_name, batch_count, split, uid_name),
) in enumerate(batch_counts):
for batch_idx in range(batch_count):
try:
batch = next(data_loaders[data_loader_idx])
except StopIteration:
data_loaders[data_loader_idx] = iter(dataloaders[data_loader_idx])
batch = next(data_loaders[data_loader_idx])
if not isinstance(batch, dict):
X_dict, Y_dict = batch
else:
X_dict = batch
Y_dict = None
yield (
# X_dict[uid_name],
X_dict,
Y_dict,
# task_to_label_dict,
# data_name,
# split,
)
class MixedScheduler(Scheduler):
"""Generate batch generator from all dataloaders in mixture for MTL training.
Args:
        fillup: Whether to fill up shorter dataloaders so all yield the same number of batches.
"""
def __init__(self, fillup: bool = False) -> None:
"""Initialize MixedScheduler."""
super().__init__()
self.fillup = fillup
def get_num_batches(self, dataloaders: List[torch.utils.data.DataLoader]) -> int:
"""Get total number of batches per epoch.
Args:
dataloaders: List of dataloaders.
Returns:
Total number of batches per epoch.
"""
batch_counts = [len(dataloader) for dataloader in dataloaders]
num_batch = max(batch_counts) if self.fillup else min(batch_counts)
return num_batch
def get_batches(
self,
dataloaders: List[torch.utils.data.DataLoader],
model: UnagiModel = None,
) -> Iterator[Union[Tuple, List[Tuple]]]:
"""Generate batch generator from all dataloaders in mixture for one epoch.
Args:
dataloaders: List of dataloaders.
model: The training model, defaults to None.
Returns:
A generator of all batches.
"""
data_loaders = [iter(dataloader) for dataloader in dataloaders]
batch_counts = [len(dataloader) for dataloader in dataloaders]
num_batch = max(batch_counts) if self.fillup else min(batch_counts)
for batch_idx in range(num_batch):
mixed_batch = []
for (data_loader_idx, batch_count) in enumerate(batch_counts):
try:
batch = next(data_loaders[data_loader_idx])
except StopIteration:
data_loaders[data_loader_idx] = iter(dataloaders[data_loader_idx])
batch = next(data_loaders[data_loader_idx])
if not isinstance(batch, dict):
X_dict, Y_dict = batch
else:
X_dict = batch
Y_dict = None
mixed_batch.append(
(
# X_dict[uid_name],
X_dict,
Y_dict,
# task_to_label_dict,
# data_name,
# split,
)
)
yield mixed_batch
SCHEDULERS = {
"mixed": MixedScheduler,
"round_robin": RoundRobinScheduler,
"sequential": SequentialScheduler,
}
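

# A minimal usage sketch (hypothetical; the scheduler name and fillup value are
# illustrative, not from the repo): pick a scheduler by name and iterate over
# one epoch of combined batches.
def _example_scheduler_usage(dataloaders):
    scheduler = SCHEDULERS["sequential"](fillup=True)
    n_batches = scheduler.get_num_batches(dataloaders)  # batches in one epoch
    for X_dict, Y_dict in scheduler.get_batches(dataloaders):
        pass  # one training step per yielded (X_dict, Y_dict) pair
    # Note: the "mixed" scheduler instead yields a *list* of
    # (X_dict, Y_dict) tuples per step, one entry per dataloader.
    return n_batches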
|
thanos-code-main
|
unagi/trainer/scheduler.py
|
from typing import List
import hydra
import pytorch_lightning as pl
import torch
from torch.backends import cudnn as cudnn
from unagi.task import create_tasks, instantiate_modules
from unagi.trainer.model import UnagiModel
from unagi.trainer.scheduler import SCHEDULERS
def process_inputs(inputs_list, output_dict, Y_dict, task_flow):
    inputs = []
    for input_node, key in inputs_list:
        if input_node == "_input_":
            # Raw dataset input: look up `key` in the batch's X_dict
            inputs.append(output_dict[input_node][key])
        elif input_node != "_output_":
            # Intermediate output of an earlier task-flow step
            inputs.append(output_dict[(input_node, task_flow[input_node]["module"])])
        else:
            # "_output_": a supervision label pulled from the batch's Y_dict
            inputs.append(Y_dict[key])
    return inputs
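# Illustrative sketch of the `inputs_list` format consumed above (the names
# are hypothetical, not from a real task config):
#     [("_input_", "image"), ("encoder", None), ("_output_", "label")]
# gathers the raw "image" tensor from the batch, the output of the "encoder"
# task-flow step, and the "label" tensor from Y_dict, in that order.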
class UnagiModule(pl.LightningModule):
def _set_module_shapes(self, config):
"""
Automatically set the shapes of modules.
- Embedding modules: set the input shape to the dataset input shape
- Encoder / Decoder modules: set the input shape to the output shape
of the previous module
- Decoder modules: set the output shape to the dataset output shape
(using the loss it's linked to)
"""
        # Start with embeddings, because their input comes directly from the dataset
for _, task_config in self.config.tasks.items():
decoder_to_dataset_key = {}
for _, loss_config in task_config.losses.items():
input_modules = []
# For all input modules to the loss, we keep track of what dataset
# output those modules map to
for input_module, input_index in loss_config.inputs:
if input_module == "_output_":
# "Label" input from the dataset to the loss
dataset_key = input_index
else:
# Input from another module (typically decoder) to the loss
input_modules.append(input_module)
                for module in input_modules:
                    # Assumes every loss has exactly one "_output_" (label) input,
                    # so `dataset_key` is bound by the time we get here
                    decoder_to_dataset_key[module] = dataset_key
d_output = {}
for step, step_config in task_config.task_flow.items():
if step_config.module in config.model.embeddings:
# Set the d_input of the embedding module
"""assert (
len(step_config.inputs) == 1
), "Assume only one input to embedding."""
assert len(step_config.inputs[0]) == 2
dataset_key = step_config.inputs[0][1]
# TODO: this will need to be updated if there are multiple datasets
# used for training
# (Currently, assumes that all datasets have the same
# dataset.input_shapes)
if dataset_key in self.dataset.input_shapes:
config.model.embeddings[
step_config.module
].d_input = self.dataset.input_shapes[dataset_key][0]
# Carry over the d_model of the embedding to the encoder
d_output[step] = config.model.embeddings[step_config.module].d_model
elif step_config.module in config.model.encoders:
# Check the input of the encoder and set the d_model of the
# encoder using that
input_module = step_config.inputs[0][0]
assert (
input_module in d_output
), f"Encoder {step_config.module} has no recognizable input."
config.model.encoders[step_config.module].d_model = d_output[
input_module
]
# TODO: Carry over the d_output of the encoder instead to the next
# encoder or decoder
# (Currently assumes that encoders have
# d_input = d_output = d_model)
d_output[step] = config.model.encoders[step_config.module].d_model
elif step_config.module in config.model.decoders:
# Check the input of the decoder and set the d_input of the decoder
# using that
# TODO (ASN): THIS IS A HACK -- some decoders take inputs directly
# from task_preprocessing layer
input_module = step_config.inputs[0][0]
dataset_key = step_config.inputs[0][1]
# assert input_module in d_output, f"Decoder {step_config.module}
# has no recognizable input."
# breakpoint()
if config.model.decoders[step_config.module].d_input is None:
config.model.decoders[step_config.module].d_input = (
d_output[input_module]
if input_module in d_output
else self.dataset.input_shapes[dataset_key][-1]
)
# Set the d_output of the decoder by looking at the dataset
# output key it uses for supervision (through a loss fn)
# (Currently, assumes that all datasets have the same
# dataset.output_shapes)
if step in decoder_to_dataset_key:
dataset_key = decoder_to_dataset_key[step]
config.model.decoders[
step_config.module
].d_output = self.dataset.output_shapes[dataset_key][0]
else:
config.model.decoders[
step_config.module
].d_output = config.model.decoders[step_config.module].d_input
d_output[step] = config.model.decoders[
step_config.module
].d_input
# Make sure all embeddings and decoders are being used
for emb, emb_config in config.model.embeddings.items():
assert (
emb_config.d_input is not None
), f"Embedding {emb} has no input shape and is unused."
"""for dec, dec_config in config.model.decoders.items():
assert (
dec_config.d_output is not None
), f"Decoder {dec} has no output shape and is unused." """
return config
def _set_d_input(self):
# Grab the random state from torch
rng = torch.get_rng_state()
if isinstance(self.train_dataloaders, torch.utils.data.DataLoader):
dl = self.train_dataloaders
else:
dl = self.train_dataloaders[0]
for batch in dl:
X_dict, _ = batch
# shape is (B, V, ...) -- discard (B, V)
X_shapes = {
k: tuple(v.shape[2:])
for k, v in X_dict["inputs"].items()
if k != "is_train"
}
break
self.dataset.input_shapes = X_shapes
# Set the random state back to torch
torch.set_rng_state(rng)
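    # Shape sketch (an assumption about the batch layout, with illustrative
    # sizes): if X_dict["inputs"]["image"] has shape (B, V, 3, 32, 32), the
    # stored input shape is (3, 32, 32) -- batch and view dims are discarded above.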
def __init__(
self,
config,
dataset,
train_dataloaders: List[torch.utils.data.DataLoader],
val_dataloaders: List[torch.utils.data.DataLoader],
test_dataloaders: List[torch.utils.data.DataLoader],
):
super(UnagiModule, self).__init__()
self.config = config
self.dataset = dataset
self.train_dataloaders = train_dataloaders
self.val_dataloaders = val_dataloaders
self.test_dataloaders = test_dataloaders
self.val_loader_names = list(self.val_dataloaders.keys()) + list(
self.test_dataloaders.keys()
)
self.test_loader_names = list(self.val_dataloaders.keys()) + list(
self.test_dataloaders.keys()
)
self.test_loader_names = [f"final/{name}" for name in self.test_loader_names]
# Set the d_input
self._set_d_input()
self.task_scheduler = SCHEDULERS[self.config.learner.task_scheduler]()
# Update the internal d_input / d_model / d_output shapes of all modules
self.config = self._set_module_shapes(self.config)
# Construct all the torch modules
self.module_dict, self.loss_dict = instantiate_modules(
self.config.model.preprocessors,
self.config.model.embeddings,
self.config.model.encoders,
self.config.model.decoders,
self.config.model.losses,
)
self.tasks = create_tasks(self.config.tasks)
self.model = UnagiModel(tasks=self.tasks, module_dict=self.module_dict)
self.checkpoint_scheduler = self.config.learner.checkpoint_scheduler
# Set cudnn benchmark
cudnn.benchmark = True
def setup(self, stage=None):
pass
def forward(self, x_dict):
return self.model(x_dict)
def configure_callbacks(self):
checkpoint = pl.callbacks.ModelCheckpoint(**self.checkpoint_scheduler)
lr_monitor = pl.callbacks.LearningRateMonitor(logging_interval="step")
return [checkpoint, lr_monitor]
def _shared_step(self, batch, batch_idx, prefix="train"):
        # TODO: make batch return fewer things in the schedulers
(
X_dict,
Y_dict,
) = batch
# Forward pass through the model(s)
output_dict = self.forward(X_dict)
# Compute loss
loss_per_task = {name: 0.0 for name, task in self.tasks.items()}
for name, task in self.tasks.items():
# Run all the losses for this task
for loss in task.losses.values():
# Pull out relevant attributes of `loss`
loss_module_name = loss["module"] # UID of the loss module
# loss_node = loss["node"] # UID of the loss node
# Grab the actual loss module
loss_module = self.loss_dict[loss_module_name]
# Gather all the inputs to the loss module
loss_inputs = process_inputs(
loss["inputs"], output_dict, Y_dict, task.task_flow
)
# Calculate the loss, add it to the task loss
loss_per_task[name] += loss_module(*loss_inputs) * loss["weight"]
# Add up all the task losses weighted by task_weight
loss = sum(
[
loss_per_task[name] * task.task_weight
for name, task in self.tasks.items()
]
)
# Compute metrics
metrics_per_task = {name: {} for name, task in self.tasks.items()}
for task_name, task in self.tasks.items():
# Run all the metrics for this task
for name, metric in task.metrics.items():
# Gather all the inputs to the metric module
metric_inputs = process_inputs(
metric["inputs"], output_dict, Y_dict, task.task_flow
)
# Calculate the metric, add to the task metric dict
metrics_per_task[task_name][name] = task.get_metric(
metric, *metric_inputs
)
# Compute torchmetrics
for task_name, task in self.tasks.items():
# Run all the metrics for this task
for name, torchmetric in task.torchmetrics.items():
# Gather all the inputs to the metric module
_ = process_inputs(
torchmetric["inputs"], output_dict, Y_dict, task.task_flow
)
# TODO: figure out what to do here (call .update on torchmetrics)
# Calculate the metric, add to the task metric dict
# metrics_per_task[task.name][metric['node']] =
# task.get_metric(metric, *metric_inputs)
# Run callbacks
for task_name, task in self.tasks.items():
for name, callback in task.callbacks.items():
callback_batch = {}
if self.current_epoch % callback["log_every_n_epochs"] == 0:
callback_batch["inputs"] = [
x.clone().detach().cpu()
if isinstance(x, torch.Tensor)
else torch.Tensor(x)
for x in process_inputs(
callback["inputs"],
output_dict,
Y_dict,
task.task_flow,
)
]
callback_batch["trainer"] = self
callback_batch["batch_idx"] = batch_idx
callback_batch["prefix"] = prefix
if prefix == "train":
task.all_callbacks[name].on_train_batch_end(callback_batch)
elif prefix in self.test_loader_names:
task.all_callbacks[name].on_test_batch_end(callback_batch)
else:
task.all_callbacks[name].on_validation_batch_end(callback_batch)
# Calculate all metrics and log
metrics = {
f"{name}_loss": loss_per_task[name] * task.task_weight
for name, task in self.tasks.items()
}
metrics["loss"] = loss
metrics.update(
{
f"{task}_{metric}": metric_val
for task, task_metrics in metrics_per_task.items()
for metric, metric_val in task_metrics.items()
}
)
"""task_accuracy = {}
for task_name, label_name in task_to_label_dict.items():
if (
task_name in self.model.loss_funcs
and self.model.loss_funcs[task_name] is not None
):
preds = F.softmax(
output_dict["classifier"][0],
dim=1,
)
target = Y_dict[label_name]
accuracy = torchmetrics.functional.accuracy(preds, target)
task_accuracy[f"accuracy_{task_name}"] = accuracy"""
# metrics.update(task_accuracy)
# metrics["batch_size"] = len(batch[0])
metrics = {f"{prefix}/{k}": v.detach() for k, v in metrics.items()}
metrics["loss"] = loss
self.log_dict(
metrics,
on_epoch=True,
on_step=True,
sync_dist=True,
add_dataloader_idx=False,
)
# return {"loss": loss, "metrics": metrics, "callback_values": callback_values}
# return metrics
return metrics
def training_step(self, batch, batch_idx):
return self._shared_step(batch, batch_idx, "train")
def validation_step(self, batch, batch_idx, dataloader_idx=0):
return self._shared_step(
batch, batch_idx, self.val_loader_names[dataloader_idx]
)
def test_step(self, batch, batch_idx, dataloader_idx=0):
return self._shared_step(
batch, batch_idx, self.test_loader_names[dataloader_idx]
)
def training_epoch_end(self, outputs):
for task_name, task in self.tasks.items():
for name, callback in task.callbacks.items():
if self.current_epoch % callback["log_every_n_epochs"] == 0:
task.all_callbacks[name].on_train_epoch_end()
# return torch.stack([torch.stack(x).sum() for x in outputs]).mean()
# return outputs
def validation_epoch_end(self, outputs):
for task_name, task in self.tasks.items():
for name, callback in task.callbacks.items():
if self.current_epoch % callback["log_every_n_epochs"] == 0:
task.all_callbacks[name].on_validation_epoch_end()
task.all_callbacks[name].on_test_epoch_end()
# val_loss = torch.stack([torch.stack(x).sum() for x in outputs]).mean()
# return val_loss
# return outputs
def test_epoch_end(self, outputs):
for task_name, task in self.tasks.items():
for name, callback in task.callbacks.items():
if self.current_epoch % callback["log_every_n_epochs"] == 0:
task.all_callbacks[name].on_validation_epoch_end()
task.all_callbacks[name].on_test_epoch_end()
# return torch.stack([torch.stack(x).sum() for x in outputs]).mean()
# return outputs
def _eval_dataloaders(self):
val_loaders = (
list(self.val_dataloaders.values())
if isinstance(self.val_dataloaders, dict)
else [self.val_dataloaders]
)
test_loaders = (
list(self.test_dataloaders.values())
if isinstance(self.test_dataloaders, dict)
else [self.test_dataloaders]
)
return val_loaders + test_loaders
def train_dataloader(self):
return self.train_dataloaders
def val_dataloader(self):
return self._eval_dataloaders()
# return self.task_scheduler.get_batches(self.val_dataloaders, self.model)
def test_dataloader(self):
return self._eval_dataloaders()
# return self.task_scheduler.get_batches(self.test_dataloaders, self.model)
def configure_optimizers(self):
# Optimizer params
optimizer_config = self.config.learner.optimizer
scheduler_config = self.config.learner.scheduler
modules_to_freeze = self.config.learner.modules_to_freeze
all_parameters = []
# All parameters in the model
for param_name, param_values in self.named_parameters():
# freeze_param = False
for module_name in modules_to_freeze:
if module_name in param_name:
# freeze_param = True
param_values.requires_grad = False
# if not freeze_param:
all_parameters.append(param_values)
# General parameters don't contain the special _optim key
params = [p for p in all_parameters if not hasattr(p, "_optim")]
# Create an optimizer with the general parameters
assert optimizer_config._target_ is not None, "No optimizer target specified"
# optimizer = hydra.utils.instantiate(optimizer_config, params=params)
optimizer = hydra.utils.instantiate(
optimizer_config,
params=filter(lambda p: p.requires_grad, self.model.parameters()),
)
# Add parameters with special hyperparameters
hps = [getattr(p, "_optim") for p in all_parameters if hasattr(p, "_optim")]
hps = [
dict(s) for s in set(frozenset(hp.items()) for hp in hps)
] # Unique dicts
for hp in hps:
params = [p for p in all_parameters if getattr(p, "_optim", None) == hp]
optimizer.add_param_group({"params": params, **hp})
# Create a lr scheduler
scheduler = hydra.utils.instantiate(scheduler_config, optimizer=optimizer)
scheduler = {
"scheduler": scheduler,
"interval": self.config.learner.interval,
"monitor": self.config.learner.monitor,
"name": self.config.learner.name,
}
# Print optimizer info
keys = sorted(set([k for hp in hps for k in hp.keys()]))
for i, g in enumerate(optimizer.param_groups):
group_hps = {k: g.get(k, None) for k in keys}
print(
" | ".join(
[
f"Optimizer group {i}",
f"{len(g['params'])} tensors",
]
+ [f"{k} {v}" for k, v in group_hps.items()]
)
)
return [optimizer], [scheduler]
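
# A minimal sketch (an assumption about intended use, not code from the repo)
# of the per-parameter `_optim` mechanism consumed by `configure_optimizers`
# above: tagging a parameter with an `_optim` dict puts it into its own
# optimizer param group with those hyperparameters.
def _example_tag_param_optim(param: torch.nn.Parameter) -> torch.nn.Parameter:
    param._optim = {"lr": 1e-4, "weight_decay": 0.0}  # hypothetical values
    return param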
|
thanos-code-main
|
unagi/trainer/trainer.py
|
""" Unagi DataLoader and default collate fn
code inspiration: emmental
"""
import copy
import logging
from collections import defaultdict
from functools import partial
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
from torch import Tensor
from torch.utils.data import DataLoader
from unagi.datasets.base_dataset import UnagiDataset
from unagi.utils.misc import list_to_tensor
logger = logging.getLogger(__name__)
def default_unagi_collate_fn(
batch: Union[List[Tuple[Dict[str, Any], Dict[str, Tensor]]], List[Dict[str, Any]]],
) -> Union[Tuple[Dict[str, Any], Dict[str, Tensor]], Dict[str, Any]]:
"""Collate function.
Args:
batch: The batch to collate.
min_data_len: The minimal data sequence length, defaults to 0.
max_data_len: The maximal data sequence length (0 means no limit), defaults to 0.
Returns:
The collated batch.
"""
X_batch: defaultdict = defaultdict(list)
Y_batch: defaultdict = defaultdict(list)
for item in batch:
# Check if batch is (x_dict, y_dict) pair
if isinstance(item, dict):
x_dict = item
y_dict: Dict[str, Any] = dict()
else:
x_dict, y_dict = item
for field_name, value in x_dict.items():
if isinstance(value, list):
X_batch[field_name] += value
else:
X_batch[field_name].append(value)
for label_name, value in y_dict.items():
if isinstance(value, list):
Y_batch[label_name] += value
else:
Y_batch[label_name].append(value)
field_names = copy.deepcopy(list(X_batch.keys()))
for field_name in field_names:
values = X_batch[field_name]
# Only merge list of tensors
if isinstance(values[0], Tensor):
item_tensor, item_mask_tensor = list_to_tensor(
values,
)
X_batch[field_name] = item_tensor
if item_mask_tensor is not None:
X_batch[f"{field_name}_mask"] = item_mask_tensor
for label_name, values in Y_batch.items():
Y_batch[label_name] = list_to_tensor(
values,
)[0]
if len(Y_batch) != 0:
return dict(X_batch), dict(Y_batch)
else:
return dict(X_batch)
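

def _example_collate():
    """Minimal usage sketch of `default_unagi_collate_fn` on toy tensors
    (illustrative field names, not real data)."""
    import torch

    batch = [
        ({"image": torch.zeros(3, 8, 8)}, {"label": torch.tensor(0)}),
        ({"image": torch.ones(3, 8, 8)}, {"label": torch.tensor(1)}),
    ]
    X_dict, Y_dict = default_unagi_collate_fn(batch)
    # Assuming `list_to_tensor` stacks equal-sized items, X_dict["image"] is a
    # single (2, 3, 8, 8) tensor and Y_dict["label"] is a (2,) tensor.
    return X_dict, Y_dict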
# class UnagiDataLoader(DataLoader):
# """UnagiDataLoader
# An advanced dataloader class which contains mapping from task to label (which
# label(s) to use in dataset's Y_dict for this task), and split (which part this
# dataset belongs to) information.
# Args:
# task_to_label_dict: The task to label mapping where key is the task name
# and value is the label(s) for that task and should be the key in Y_dict.
# dataset: The dataset to construct the dataloader
# split: The split information, defaults to "train".
# collate_fn: The function that merges a list of samples to
# form a mini-batch, defaults to emmental_collate_fn.
# n_batches: Total number of batches.
# **Kwargs: Other arguments of dataloader.
# """
# def __init__(
# self,
# dataset: UnagiDataset,
# split: str = "train",
# collate_fn: Optional[Callable] = None,
# # n_batches: Optional[int] = None,
# **kwargs: Any,
# ) -> None:
# """Initialize UnagiDataLoader."""
# assert isinstance(
# dataset, UnagiDataset
    #     ), "dataset should inherit from UnagiDataset."
# if collate_fn is None:
# collate_fn = partial(default_unagi_collate_fn)
# super().__init__(dataset, collate_fn=collate_fn, **kwargs)
# # self.data_name = dataset._name_
# # self.uid = dataset.uid
# self.split = split
# # self.n_batches = n_batches
|
thanos-code-main
|
unagi/trainer/data.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
from functools import partial
from pathlib import Path
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import scipy
import seaborn as sns
from matplotlib.ticker import FuncFormatter
from sklearn.preprocessing import minmax_scale
import hydra
from dib.transformers.ib.helpers import CORR_GROUPS
from omegaconf import OmegaConf
from utils.evaluate import load_histories, load_results
from utils.helpers import (
SFFX_TOAGG,
aggregate_table,
replace_None_with_all,
rm_const_col,
update_prepending,
)
from utils.visualize.helpers import kwargs_log_xscale
logger = logging.getLogger(__name__)
class StrFormatter:
"""Defult dictionary that takes a key dependent function.
Parameters
----------
exact_match : dict, optional
Dictionary of strings that will be replaced by exact match.
subtring_replace : dict, optional
Dictionary of substring that will be replaced if no exact_match. Order matters.
Everything is title case at this point. None gets mapped to "".
to_upper : list, optional
Words to upper case.
"""
def __init__(self, exact_match={}, subtring_replace={}, to_upper=[]):
self.exact_match = exact_match
self.subtring_replace = subtring_replace
self.to_upper = to_upper
def __getitem__(self, key):
if not isinstance(key, str):
return key
if key in self.exact_match:
return self.exact_match[key]
key = key.title()
        for match, replace in self.substring_replace.items():
if replace is None:
replace = ""
key = key.replace(match, replace)
for w in self.to_upper:
key = key.replace(w, w.upper())
return key
    def update(self, new_dict):
        """Update the substring-replacement dictionary with a new one (missing keys are prepended)."""
        self.substring_replace = update_prepending(self.substring_replace, new_dict)
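# Minimal usage sketch of StrFormatter (toy mappings, not the real renamer below):
#
#     fmt = StrFormatter(
#         exact_match={"lr": "Learning Rate"},
#         substring_replace={"Acc": "Accuracy"},
#     )
#     fmt["lr"]         # -> "Learning Rate" (exact match wins)
#     fmt["train_acc"]  # -> "Train_Accuracy" (title case, then substitution)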
PRETTY_RENAMER = StrFormatter(
exact_match={
f"train_H_lin_xCz{SFFX_TOAGG}": r"$\mathrm{H}_{\mathcal{V}}[\mathrm{X}|\mathrm{Z}]$ Linear",
f"train_H_q_xCz{SFFX_TOAGG}": r"$\mathrm{H}_{\mathcal{V}}[\mathrm{X}|\mathrm{Z}]$ Z-64-Y (Clf)",
f"train_H_hid16_xCz{SFFX_TOAGG}": r"$\mathrm{H}_{\mathcal{V}}[\mathrm{X}|\mathrm{Z}]$ Z-1024-Y",
f"train_path_norm{SFFX_TOAGG}": "Path Norm",
f"train_var_grad{SFFX_TOAGG}": "Final Grad. Var.",
f"train_y_pred_ent{SFFX_TOAGG}": "Entropy",
f"train_sharp_mag{SFFX_TOAGG}": "Sharp. Mag.",
f"path_norm": "Path Norm",
f"var_grad": "Final Grad. Var.",
f"y_pred_ent": "Entropy",
f"sharp_mag": "Sharp. Mag.",
"wdecay": "Weight Decay",
"resnet": "Depth",
"b": "Batch Size",
"b8": 8,
"b16": 16,
"b32": 32,
"b64": 64,
"b128": 128,
"b256": 256,
"b512": 512,
"b1024": 1024,
"b2048": 2048,
"b4096": 4096,
},
    substring_replace={
SFFX_TOAGG.title(): "",
"_": " ",
"nodist": "",
"Zdim": "Z Dim.",
"Q Family": r"$\mathcal{V}$",
"H Acc": "Head Acc",
"Model": "Objective",
"D Diq Xz Space": r"$\mathrm{I}_{\mathcal{V}}[\mathrm{Z} \rightarrow \mathrm{X} ]$"
+ "\n"
+ r"$- \mathrm{I}_{\mathcal{V}}[\mathrm{Z} \rightarrow \mathrm{Y}]$",
"D Diq Xz": r"$\mathrm{I}_{\mathcal{V}}[\mathrm{Z} \rightarrow \mathrm{Dec(X,Y)}]$",
"Diq Xz": r"$\mathrm{I}_{\mathcal{V}}[\mathrm{Z} \rightarrow \mathrm{Dec(X,Y)}]$",
"Diq Xzcy": r"$\mathrm{I}_{\mathcal{V}}[\mathrm{Z} \rightarrow \mathrm{Dec(X,Y)} ]$",
"D H Q Xcz": r"$\frac{1}{N} \sum_{\mathrm{N}_i} \mathrm{H}_{\mathcal{V}}[\mathrm{N}_i|\mathrm{Z}] $",
"I ": r"$\mathrm{I}$",
"Diq ": r"$\mathrm{I}_{\mathcal{V}}$",
"H Qp ": r"$\mathrm{H}_{\mathcal{V}^+}$",
"H Qm ": r"$\mathrm{H}_{\mathcal{V}^-}$",
"H Q Bob": r"$\mathrm{H}_{\mathcal{V}_{Bob}}$",
"H Q Alice": r"$\mathrm{H}_{\mathcal{V}_{Alice}}$",
"H Q ": r"$\mathrm{H}_{\mathcal{V}}$",
"H ": r"$\mathrm{H}$",
"Xczy": r"$[\mathrm{X}|\mathrm{Z},\mathrm{Y}]$",
"Xcz": r"$[\mathrm{X}|\mathrm{Z}]$",
"Xzcy": r"$[\mathrm{X} \rightarrow \mathrm{Z} | \mathrm{Y}]$",
"Ycz": r"$[\mathrm{Y}|\mathrm{Z}]$",
"Yz": r"$[\mathrm{Z} \rightarrow \mathrm{Y}]$",
"Xz": r"$[\mathrm{Z} \rightarrow \mathrm{X}]$",
# when it is something like H_Q[X|Z] don't put train because we will only look at train
"Train $": "$",
"Q Zy": r"$\mathcal{V}_{Bob}$",
"Q Zx": r"$\mathcal{V}_{Bob Adv.}$",
"Clf ": r"$\mathcal{V}_{Alice}$ ",
"Beta": r"$\beta$",
"Star": r"$*$",
"Loglike": "Log Like.",
"Resnet": "ResNet",
"Dibsameidcs": "Fixed Indexing",
"Dibrand": "Rand. DIB",
"Cdibexact": "Cond. DIB",
"Cdibapprox": "Concat. CDIB",
"Cdib": r"$\Delta$ DIB",
"higher": " Unrolled",
"Accuracy": "Acc.",
"Acc": "Acc.",
"Perc.": r"$\%$",
" All": "",
"Nlay": "Depth",
"N Hidden Layers": "Depth",
"Nhid": "Width",
"Hidden Size": "Width",
"Minimax": "# Inner Optim. Steps",
"Kpru": "Non Zero Weights",
"K Prune": "Non Zero Weights",
"N. ": "# of ",
"Mchead": r"# Indexing of $\mathcal{X}$", # "# of Possible Labels", #
"..": ".",
},
to_upper=["Cifar10", "Cifar100", "Mnist",
"Svhn", "Cifar10Mnist", "Dib", "Vib", ],
)
class Aggregator:
"""Result aggregator.
Parameters
----------
save_dir : str
Where to save all results.
context_kwargs : dict, optional
Context arguments for plotting.
is_return_plots : bool, optional
Whether to return plots instead of saving them.
prfx : str, optional
Prefix for the filename to save.
pretty_renamer : dict, optional
Dictionary mapping string (keys) to human readable ones for nicer printing and plotting.
dpi : int, optional
Resolution of the figures
"""
def __init__(
self,
save_dir,
context_kwargs={"context": "talk"},
is_return_plots=False,
prfx="",
pretty_renamer=PRETTY_RENAMER,
dpi=300,
):
self.save_dir = save_dir
self.is_return_plots = is_return_plots
os.makedirs(self.save_dir, exist_ok=True)
sns.set_context(**context_kwargs)
self.tables = {}
self.table_names = {"results", "aux_trnsf", "histories"}
self.prfx = prfx
self.pretty_renamer = pretty_renamer
self.dpi = dpi
def recolt_data(
self,
pattern_results,
pattern_histories,
pattern_aux_trnsf,
metrics=["test_accuracy", "test_loglike",
"train_accuracy", "train_loglike"],
aux_trnsf=[
"test_I_xz",
"train_I_xz",
"train_diF_zy",
"test_diF_zy",
"train_acc",
"test_acc",
"train_loss",
"test_loss",
"train_loglike",
"test_loglike",
],
metric_column_name="{mode}_{metric}",
**kwargs,
):
"""Recolts all the data.
Parameters
----------
pattern_results : str
Pattern for globbing results.
pattern_histories: str
Pattern for globbing histories.
pattern_aux_trnsf: str
Pattern for globbing auxiliary losses of the transformer.
metrics : list of str, optional
Metrics to aggregate
aux_trnsf : list of str, optional
Auxiliary transformer to aggregate.
metric_column_name : str, optional
Name of the column containing the metric.
kwargs :
Additional arguments to `load_results` and `pattern_histories`.
"""
self.table_names = set()
if pattern_results is not None:
self.tables["results"] = load_results(
pattern=pattern_results,
metrics=metrics,
metric_column_name=metric_column_name,
**kwargs,
)
self.table_names.add("results")
if pattern_aux_trnsf is not None:
self.tables["aux_trnsf"] = load_results(
pattern=pattern_aux_trnsf,
metrics=aux_trnsf,
metric_column_name=metric_column_name,
**kwargs,
)
self.table_names.add("aux_trnsf")
if pattern_histories is not None:
self.tables["histories"] = load_histories(
pattern=pattern_histories, **kwargs
)
self.table_names.add("histories")
def load_data(self):
"""Load the pre-recolted data."""
for k in self.table_names.copy():
try:
self.tables[k] = pd.read_csv(
os.path.join(self.save_dir, self.prfx + f"{k}.csv")
)
except FileNotFoundError:
self.table_names.remove(k)
def prettify(self, table):
"""Make the name and values in a dataframe prettier / human readable."""
def renamer(x): return self.pretty_renamer[x]
table = table.rename(columns=renamer)
table = table.applymap(renamer)
return table
def prettify_kwargs(self, pretty_data, **kwargs):
"""Change the kwargs of plotting function sucxh that they can be used with `prettify(table)`."""
return {
# only prettify if part of the columns (not arguments to seaborn)
k: self.pretty_renamer[v]
if isinstance(v, str) and self.pretty_renamer[v] in pretty_data.columns
else v
for k, v in kwargs.items()
}
def subset(self, col_val):
"""Subset all tables by keeping only the given values in given columns.
Parameters
----------
col_val : dict
A dictionary where the keys are the columns to subset and values are a list of values to keep.
"""
for col, val in col_val.items():
logger.debug("Keeping only val={val} for col={col}.")
for k in self.table_names:
self.tables[k] = self.tables[k][(
self.tables[k][col]).isin(val)]
if self.tables[k].empty:
logger.info(f"Empty table after filtering {col}={val}")
def save_tables(self):
"""Save all tables to csv : one with no constant columns and one with all columns."""
for k, table in self.tables.items():
self._save_table(table, self.prfx + k)
self._save_table(aggregate_table(table), self.prfx + k + "_agg")
def _save_table(self, table, file):
"""Save to csv a table with no constant columns and one with all columns."""
res_no_const = rm_const_col(table)
# add runs even if constant column
if "run" in table.columns:
res_no_const["run"] = table["run"]
else:
res_no_const["run_count"] = table["run_count"]
res_no_const.to_csv(
os.path.join(self.save_dir, file + "_noconst.csv"), index=False
)
table.to_csv(os.path.join(self.save_dir, file + ".csv"), index=False)
def plot_metrics(self, x, is_plot_gaps=False, is_lines=True, **kwargs):
"""Plot a lineplot for each metric
Parameters
----------
x : str
Column name of x axis.
is_plot_gaps : bool, optional
Whether to plot gaps (i.e. train_*-test_*) in addition to all aux_trnsf.
is_lines : bool, optional
Whether to plot lines instead of heatmaps.
kwargs :
Additional arguments to `_plot_lines` or `_plot_heatmaps`.
"""
# use _mean instead of SFFX_TOAGG because you had to aggregate for heatmaps
sffx_values = SFFX_TOAGG if is_lines else "_mean"
def gen_values_name(data):
for col in data.columns:
if not col.endswith(sffx_values):
continue
yield col, col.replace(sffx_values, "")
table = self.tables["results"]
if is_plot_gaps:
table = add_gaps(table.copy())
if is_lines:
dflt_kwargs = dict(markers=True, dashes=False)
dflt_kwargs.update(kwargs)
return self._plot_lines(gen_values_name, table, x, **dflt_kwargs)
else:
# has to remove kwargs that are only for lines
kwargs = {k: v for k, v in kwargs.items() if k not in [
"logbase_x"]}
# has to aggregate the tables for heatmap because cannot vary
table = aggregate_table(table)
return self._plot_heatmaps(gen_values_name, table, x, **kwargs)
def plot_aux_trnsf(self, x, is_plot_gaps=False, **kwargs):
"""Plot a lineplot for each data.
Parameters
----------
x : str
Column name of x axis.
is_plot_gaps : bool, optional
Whether to plot gaps (i.e. train_*-test_*) in addition to all aux_trnsf.
kwargs :
Additional arguments to `_plot_lines`.
"""
def gen_values_name(data):
for col in data.columns:
if not col.endswith(SFFX_TOAGG):
continue
yield col, col.replace(SFFX_TOAGG, "_trnsf")
dflt_kwargs = dict(markers=True, dashes=False)
dflt_kwargs.update(kwargs)
table = self.tables["aux_trnsf"]
if is_plot_gaps:
table = add_gaps(table.copy())
return self._plot_lines(gen_values_name, table, x, **dflt_kwargs)
def plot_histories(self, **kwargs):
"""Plot all the values in the history, for each dataset."""
def gen_values_name(data):
for col in data.columns:
if not col.endswith(SFFX_TOAGG):
continue
yield col, "epochs_" + col.replace(SFFX_TOAGG, "")
# by default don't add marker because many epochs => becomes hard to read
kwargs["marker"] = kwargs.get("marker", ",")
return self._plot_lines(
gen_values_name, self.tables["histories"], "epochs", **kwargs
)
def plot_superpose(
self,
x,
to_superpose,
value_name,
filename="superposed_{value_name}",
is_trnsf=True,
is_legend_out=False,
is_no_legend_title=True,
**kwargs,
):
"""Plot a single line figure with multiple lineplots.
Parameters
----------
x : str
Column name of x axis.
        to_superpose : list of str or dictionary
List of column values that should be plotted on the figure. If dictionary, then the keys
correspond to the columns to plot and the values correspond to how they should be called.
value_name : str
Name of the yaxis.
filename : str, optional
Name of the figure when saving. Can use {value_name} for interpolation.
is_trnsf : bool, optional
Whether to use `"aux_trnsf"` instead of `"results"` table.
kwargs :
Additional arguments to `_plot_lines`.
"""
def gen_values_name(data):
yield value_name, filename.format(value_name=value_name)
table = self.tables["aux_trnsf" if is_trnsf else "results"].copy()
try:
renamer = {(k + SFFX_TOAGG): v for k, v in to_superpose.items()}
to_superpose = to_superpose.keys()
except AttributeError:
renamer = {}
table = table.melt(
id_vars=[c for c in table if SFFX_TOAGG not in c],
value_vars=[to_sup + SFFX_TOAGG for to_sup in to_superpose],
value_name=value_name,
var_name="mode",
)
table["mode"] = table["mode"].replace(renamer)
kwargs["hue"] = "mode"
kwargs["markers"] = kwargs.get("markers", True)
return self._plot_lines(
gen_values_name,
table,
x,
is_legend_out=is_legend_out,
is_no_legend_title=is_no_legend_title,
**kwargs,
)
def plot_generalization(self, x, is_trnsf=True, **kwargs):
"""Plot the train and test loss/accuracy to see the generalization gap."""
acc = "acc" if is_trnsf else "accuracy"
loss = "loglike"
outs = []
for metric in [acc, loss]:
outs.append(
self.plot_superpose(
x,
{f"train_{metric}": "train", f"test_{metric}": "test"},
metric,
filename="gen_{value_name}",
is_trnsf=is_trnsf,
**kwargs,
)
)
if any(out is not None for out in outs):
return outs
def correlation_experiment(
self,
cause,
correlation_groups=CORR_GROUPS,
logbase_x=1,
col_sep_plots=None, # column for which to plot different correlation plot
xticks=None,
xticklabels=None,
thresholds={"loglike": -0.02, "accuracy": 0.995},
standard_probs=["path_norm", "var_grad", "y_pred_ent", "sharp_mag"],
**kwargs,
):
"""
Results for the correlation experiments. It will make a plot showing side by side the
generalization gap and H_q[X|Z]. It will also make a file correlation_[acc|loglike].csv
containing all the correlation measures.
Note
----
        - Tables should have been previously saved. If the `results` table is not saved
        in `save_dir`, it will be searched for in the parent directory.
        - `thresholds` only selects models that reach a certain training performance on the given
        metrics.
"""
figs = []
old_cause = cause
self.load_data()
# Load result from parent dir if not previously found
if "results" not in self.tables:
logger.info(
f"`results.csv` not found in {self.save_dir} looking in parent dir."
)
save_dir = self.save_dir
self.save_dir = str(Path(save_dir).parent)
self.tables["results"] = pd.read_csv(
                os.path.join(self.save_dir, self.prfx + "results.csv")
)
self.save_dir = save_dir
# self.subset(dict(epochs=["best"])) # only take the best model (not the last)
for metric in ["loglike", "accuracy"]:
cause = old_cause
# keep only the columns to plot from results (the gap acc|loglike and probs)
results = add_gaps(self.tables["results"])
if metric in thresholds:
n_all = len(results)
to_keep = results[f"train_{metric}_toagg"] > thresholds[metric]
results = results[to_keep]
n_dropped = n_all - len(results)
perc_dropped = n_dropped / n_all
logger.info(
f"dropped {n_dropped} (perc {perc_dropped}) because smaller than threshold "
)
col_probes = [
f"train_{probe}{SFFX_TOAGG}" for probe in standard_probs]
results = results.drop(
columns=[
c
for c in results.columns
if SFFX_TOAGG in c
and c not in [f"gap_{metric}{SFFX_TOAGG}"]
                    and c not in col_probes  # probes from previous literature
and "H_Q" not in c
and "_xCz" not in c
]
)
if results[cause].dtype == "object":
                # if the column of interest contains strings, we cannot compute correlations.
                # Besides, there may be numeric values as a suffix of the string: e.g. resnet18,
                # resnet50 will be transformed to 18, 50 with cause == "resnet"
not_numbers = results[cause].apply(
lambda x: split_alpha_numeric(x)[0])
unique_not_number = not_numbers.unique()
if len(unique_not_number) > 1:
raise ValueError(
f"`cause`={cause} is a string AND contains multiple different prefixes {unique_not_number}"
)
results[cause] = results[cause].apply(
lambda x: split_alpha_numeric(x)[1]
)
results = results.rename(columns={cause: unique_not_number[0]})
cause = unique_not_number[0]
col_H_q_xCz = [
c for c in results.columns if (SFFX_TOAGG in c) and "H_Q" in c
]
# all the correlation probes
all_probes = col_H_q_xCz + col_probes
table = pd.melt(
results,
id_vars=[c for c in results.columns if c not in all_probes],
value_vars=all_probes,
var_name="Probe Type",
value_name="Probe Value",
)
table = self.prettify(table)
results = self.prettify(results)
            # KENDALL CORRELATION
arrays = dict(
corr=results.corr(method="kendall"),
corr_pval=results.corr(
method=lambda x, y: scipy.stats.kendalltau(x, y)[1]
),
)
for k in arrays.keys():
arrays[k] = arrays[k][self.pretty_renamer[f"gap_{metric}{SFFX_TOAGG}"]]
arrays[k] = arrays[k].rename(
index={self.pretty_renamer[cause]: "Cause"}
)
arrays[k] = arrays[k][
["Cause"] + [self.pretty_renamer[probe]
for probe in all_probes]
]
arrays[k]["Varying"] = self.pretty_renamer[cause]
arrays[k].to_csv(
os.path.join(self.save_dir, f"{metric}_{k}.csv"), header=False
)
# PLOTTING
sep_plots = (
table[self.pretty_renamer[col_sep_plots]].unique()
if col_sep_plots is not None
else [None]
)
old_table = table.copy()
for batch_probes in [[c] for c in standard_probs] + [col_H_q_xCz]:
table = old_table.copy()
for sep in sep_plots:
fig, axes = plt.subplots(
2,
len(table[self.pretty_renamer["data"]].unique()),
sharex=True,
figsize=(17, 9),
)
for i, data in enumerate(
table[self.pretty_renamer["data"]].unique()
):
axes[0, i].set_title(data.upper())
if col_sep_plots is not None:
table_subset = table[
table[self.pretty_renamer[col_sep_plots]] == sep
]
else:
table_subset = table
# only plot the proposed probes H_Q[X|Z]
table_subset = table_subset[
table_subset["Probe Type"].isin(
[self.pretty_renamer[c] for c in batch_probes]
)
]
table_subset = table_subset[
table_subset[self.pretty_renamer["data"]] == data
]
table_subset = table_subset.dropna(
how="any", subset=["Probe Value"]
)
if logbase_x != 1:
plt.xscale("symlog")
sns.lineplot(
data=table_subset,
x=self.pretty_renamer[cause],
y=self.pretty_renamer[f"gap_{metric}{SFFX_TOAGG}"],
ax=axes[0, i],
**kwargs,
)
sns.lineplot(
data=table_subset,
x=self.pretty_renamer[cause],
y="Probe Value",
style="Probe Type",
hue="Probe Type",
ax=axes[1, i],
**kwargs,
)
if xticks is not None:
axes[0, i].set_xticks(
list(range(len(xticks))), xticks)
axes[1, i].set_xticks(
list(range(len(xticks))), xticks)
if xticklabels is not None:
axes[1, i].set_xticklabels(xticklabels)
if self.is_return_plots:
figs.append(fig)
else:
sffx = f"_{sep}" if col_sep_plots is not None else ""
sffx += f"_{batch_probes[0]}"
fig.savefig(
os.path.join(
self.save_dir, f"{self.prfx}corr_{metric}{sffx}.png"
),
dpi=self.dpi,
)
plt.close(fig)
if self.is_return_plots:
return figs
def _plot_lines(
self,
gen_values_name,
data,
x,
folder_col=None,
row="data",
is_merge_data_size=True,
transformer_data="identity",
logbase_x=1,
xticks=None,
xticklabels=None,
is_legend_out=True,
is_no_legend_title=False,
set_kwargs={},
x_rotate=0,
cols_vary_only=["run"],
sharey=False,
**kwargs,
):
"""Lines plots.
Parameters
----------
gen_values_name : generator
Generates 2 string, the column of y axis and the filename to save the plot.
data : pd.DataFrame
Dataframe used for plotting.
x : str
Column name of x axis.
folder_col : str, optional
Name of a column tha will be used to separate the plot into multiple subfolders.
row : str, optional
Column name of rows.
is_merge_data_size : bool, optional
Whether to merge the "data" and "data_size".
transformer_data : callable or {"identity", "normalize_by_x_axis", "fillin_None_x_axis", "replace_None_zero"}, optional
Transform the data given the name of the columns seaborn will condition on.
logbase_x : int, optional
Base of the x axis. If 1 no logscale. if `None` will automatically chose.
xticks : list of int, optional
Set manually x ticks.
xticklabels : list of str or int, optional
Set manually x ticks labels.
is_legend_out : bool, optional
Whether to put the legend outside of the figure.
is_no_legend_title : bool, optional
Whether to remove the legend title. If `is_legend_out` then will actually duplicate the
legend :/, the best in that case is to remove the test of the legend column .
set_kwargs : dict, optional
Additional arguments to `FacetGrid.set`. E.g. dict(xlim=(0,None)).
x_rotate : int, optional
By how much to rotate the x labels.
cols_vary_only : list of str, optional
Name of the columns that can vary when plotting (i.e over which to compute bootstrap CI).
sharey : bool, optional
Wether to share y axis.
kwargs :
Additional arguemnts to `sns.relplot`.
"""
if is_merge_data_size:
data = data.copy()
data["data"] = data[["data", "datasize"]].apply(
lambda x: "_".join(x), axis=1
)
dflt_kwargs = dict(
legend="full",
row=row,
kind="line",
facet_kws={"sharey": sharey, "sharex": True,
"legend_out": is_legend_out},
hue=kwargs.get("style", None),
)
dflt_kwargs.update(kwargs)
dflt_kwargs["x"] = x
dflt_kwargs["marker"] = dflt_kwargs.get("marker", "X")
def _helper_plot_lines(data, save_dir):
sns_plots = []
data = get_transformer_data(transformer_data)(
data, get_col_kwargs(data, **dflt_kwargs)
)
for y, filename in gen_values_name(data):
if data[y].isna().all():
logger.info(f"Skipping {filename} because all nan.")
continue
_assert_sns_vary_only_cols(data, dflt_kwargs, cols_vary_only)
# replace `None` with "None" for string columns such that can see those
data = data.copy()
str_col = data.select_dtypes(include=object).columns
data[str_col] = data[str_col].fillna(value="None")
pretty_data = self.prettify(data)
pretty_kwargs = self.prettify_kwargs(
pretty_data, y=y, **dflt_kwargs)
sns_plot = sns.relplot(data=pretty_data, **pretty_kwargs)
if x_rotate != 0:
# calling directly `set_xticklabels` on FacetGrid removes the labels sometimes
for axes in sns_plot.axes.flat:
axes.set_xticklabels(
axes.get_xticklabels(), rotation=x_rotate)
if logbase_x != 1:
x_data = np.array(
sorted(pretty_data[pretty_kwargs["x"]].unique()))
plt.xscale(**kwargs_log_xscale(x_data, base=logbase_x))
if is_no_legend_title:
#! not going to work well if is_legend_out (double legend)
for ax in sns_plot.fig.axes:
handles, labels = ax.get_legend_handles_labels()
if len(handles) > 1:
ax.legend(handles=handles[1:], labels=labels[1:])
if xticks is not None:
sns_plot.set(xticks=xticks)
if xticklabels is not None:
sns_plot.set(xticklabels=xticklabels)
                    if xticks is not None and xticks[0] > xticks[1]:
                        # dirty check to see whether the axis should be reversed
for ax in sns_plot.axes.reshape(-1):
ax.invert_xaxis()
sns_plot.set(**set_kwargs)
if self.is_return_plots:
sns_plots.append(sns_plot)
else:
sns_plot.fig.savefig(
os.path.join(save_dir, f"{self.prfx}{filename}.png"),
dpi=self.dpi,
)
plt.close(sns_plot.fig)
if self.is_return_plots:
return sns_plots
return self._foldersplit_call(_helper_plot_lines, folder_col, data)
def _foldersplit_call(self, fn, folder_col, data):
"""Split the dataset by the values in folder_col a nd call fn on each subfolder."""
if folder_col is None:
return fn(data, self.save_dir)
else:
out = []
for curr_folder in data[folder_col].unique():
curr_data = data[data[folder_col] == curr_folder]
sub_dir = os.path.join(
self.save_dir, f"{folder_col}_{curr_folder}")
os.makedirs(sub_dir, exist_ok=True)
out.append(fn(curr_data, sub_dir))
return out
def _plot_heatmaps(
self,
gen_values_name,
data,
x,
y,
col=None,
folder_col=None,
row="data",
is_merge_data_size=True,
transformer_data="identity",
normalize=None,
is_percentage=False,
cbar_label=None,
**kwargs,
):
"""Lines plots.
Parameters
----------
gen_values_name : generator
Generates 2 string, the column of values axis and the filename to save the plot.
data : pd.DataFrame
Dataframe used for plotting.
x : str
Column name of x axis of heatmaps.
y : str
Column name of y axis of heatmaps.
col : str, optional
Column name of columns.
row : str, optional
Column name of rows.
folder_col : str, optional
            Name of a column that will be used to separate the plot into multiple subfolders.
is_merge_data_size : bool, optional
Whether to merge the "data" and "data_size".
normalize : ["row","col",None], optional
Whether to normalize the values by row (single 1 per row), by column (single 1 per col)
or not to.
is_percentage : bool, optional
Whether to use percentage for the annotation of the heatmap.
cbar_label : str, optional
Name for the colorbar.
kwargs :
Additional arguments to `sns.heatmap`.
"""
if is_merge_data_size:
data = data.copy()
data["data"] = data[["data", "datasize"]].apply(
lambda x: "_".join(x), axis=1
)
        def fmt(x, pos):
            return "{:.0%}".format(x)
dflt_kwargs = dict(annot=True, linewidths=0.5)
if is_percentage:
dflt_kwargs.update(
dict(fmt=".0%", cbar_kws={"format": FuncFormatter(fmt)}))
if cbar_label is not None:
dflt_kwargs["cbar_kws"] = {
"label": self.pretty_renamer[cbar_label]}
dflt_kwargs.update(kwargs)
def _draw_heatmap(x, y, values, **kwargs):
data = kwargs.pop("data")
d = data.pivot(index=y, columns=x, values=values)
if normalize is None:
pass
elif normalize == "row":
d = pd.DataFrame(
minmax_scale(d.values, axis=1), columns=d.columns, index=d.index
)
elif normalize == "col":
d = pd.DataFrame(
minmax_scale(d.values, axis=0), columns=d.columns, index=d.index
)
else:
raise ValueError(f"Unkown normalize={normalize}")
ax = sns.heatmap(d, **kwargs)
ax.invert_yaxis()
for label in ax.get_yticklabels():
label.set_rotation(0)
# ax.set_yticklabels(ax.get_yticklabels(), rotation=90)
def _helper_plot_heatmaps(data, save_dir):
"""Plot the results as heatmaps."""
sns_plots = []
data = get_transformer_data(transformer_data)(
data,
get_col_kwargs(data, **dict(row=row, col=col,
x=x, y=y, **dflt_kwargs)),
)
for values, filename in gen_values_name(data):
if data[values].isna().all():
logger.info(f"Skipping {filename} because all nan.")
continue
pretty_data = self.prettify(data)
pretty_kwargs = self.prettify_kwargs(
pretty_data, **dflt_kwargs)
sns_plot = sns.FacetGrid(
pretty_data,
row=self.pretty_renamer[row],
col=self.pretty_renamer[col],
dropna=False,
sharex=True,
sharey=True,
aspect=1.5,
height=6,
)
sns_plot.map_dataframe(
_draw_heatmap,
self.pretty_renamer[x],
self.pretty_renamer[y],
values=self.pretty_renamer[values],
**pretty_kwargs,
)
sns_plot.fig.tight_layout()
if self.is_return_plots:
logger.info(f"heatmap for {values}")
sns_plots.append(sns_plot)
else:
sns_plot.fig.savefig(
os.path.join(save_dir, f"{self.prfx}{filename}.png"),
dpi=self.dpi,
)
plt.close(sns_plot.fig)
if self.is_return_plots:
return sns_plots
return self._foldersplit_call(_helper_plot_heatmaps, folder_col, data)
@hydra.main(config_path="conf/aggregate.yaml", strict=False)
def main_cli(args):
return main(args)
def main(args):
logger.info(f"Aggregating {args.experiment} ...")
PRETTY_RENAMER.update(args.kwargs.pop("pretty_renamer"))
aggregator = Aggregator(pretty_renamer=PRETTY_RENAMER, **args.kwargs)
if args.is_recolt:
logger.info(f"Recolting the data ..")
# Omega conf dictionaries don't have all the properties that usual dictionaries have
aggregator.recolt_data(
**OmegaConf.to_container(args.recolt_data, resolve=True))
else:
logger.info(f"Loading previously recolted data ..")
aggregator.load_data()
aggregator.subset(OmegaConf.to_container(
args.col_val_subset, resolve=True))
for f in args.mode:
logger.info(f"Mode {f} ...")
if f is None:
continue
if f in args:
kwargs = args[f]
else:
kwargs = {}
getattr(aggregator, f)(**kwargs)
# HELPERS
def add_gaps(df):
"""Add train-test gaps to dataframe."""
for col in df.columns:
if col.startswith("train_") and col.replace("train_", "test_") in df.columns:
# gap = max(train - test, 0)
gap_col = col.replace("train_", "gap_")
df[gap_col] = df[col] - df[col.replace("train_", "test_")]
df.loc[df[gap_col] < 0, gap_col] = 0
return df
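

def _example_add_gaps():
    """Tiny usage sketch of `add_gaps` on a toy dataframe (hypothetical columns)."""
    df = pd.DataFrame({"train_accuracy": [0.9, 0.8], "test_accuracy": [0.7, 0.85]})
    df = add_gaps(df)
    # df["gap_accuracy"] == [0.2, 0.0]: negative gaps are clipped at zero
    return df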
def group_normalize_by_max(x, subgroup_col):
"""
    Add a column `normalizer_{col}` for every `_toagg` column, which contains the maximum over
    `subgroup_col` of the per-subgroup means.
"""
means_xs = x.groupby(
[subgroup_col]
).mean() # average over runs for all subgroup_col
for col in x.columns:
if SFFX_TOAGG in col:
            # the normalizer is the maximum average
normalizer = means_xs[col].max()
x[f"normalizer_{col}"] = normalizer
return x
def split_alpha_numeric(s):
"""Take a string containing letters followed by numbers and spli them."""
not_numbers = s.rstrip("0123456789")
numbers = s[len(not_numbers):]
return not_numbers, int(numbers)
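# e.g. split_alpha_numeric("resnet18") -> ("resnet", 18)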
def normalize_by_x_axis(data, col_kwargs):
"""
    Prepare the data by normalizing each style by the maximum (average over runs) at every point
    of the x axis before plotting with seaborn. Returns the normalized data in `%{col}` columns.
"""
# normalizer will be different for all seaborn plots => compute normalizer for each group separately
col_groupby = [
v
for k, v in col_kwargs.items()
if v != col_kwargs["hue"] and SFFX_TOAGG not in v
]
# compute the normalizers
df = data.groupby(col_groupby).apply(
partial(group_normalize_by_max, subgroup_col=col_kwargs["hue"])
)
# apply the normalization
for col in data.columns:
if "_toagg" in col and "normalizer" not in col:
df[f"%{col}"] = df[col] / df[f"normalizer_{col}"]
df = df[[col for col in df.columns if "normalizer" not in col]]
return df
def fillin_None_x_axis(data, col_kwargs):
"""Prepares the data by removing the NaN in values of X axis by duplicatign the entry where x
axis is all unique value. I.e enable the plloting of a lineplot which does not vary the x_axis
as a straight horizaontal line.
"""
return replace_None_with_all(data, col_kwargs["x"])
def replace_None_zero(data, col_kwargs):
"""Replace all missing values with 0."""
return data.fillna(0)
def get_transformer_data(transformer_data):
if isinstance(transformer_data, str):
if transformer_data == "normalize_by_x_axis":
return normalize_by_x_axis
elif transformer_data == "fillin_None_x_axis":
return fillin_None_x_axis
elif transformer_data == "replace_None_zero":
return replace_None_zero
elif transformer_data == "identity":
return lambda data, col_kwargs: data
else:
raise ValueError(f"Unkown transformer_data={transformer_data}")
return transformer_data
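# Minimal sketch of how `_plot_lines` / `_plot_heatmaps` resolve their
# `transformer_data` argument (toy call, not from the repo): a string selects
# a named preprocessing function, anything else must already be a callable.
#
#     fn = get_transformer_data("replace_None_zero")
#     data = fn(data, col_kwargs)  # here equivalent to data.fillna(0)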
def get_col_kwargs(data, **kwargs):
"""Return all arguments that are names of the columns of the data."""
return {
n: col
for n, col in kwargs.items()
if (isinstance(col, str) and col in data.columns)
}
def _assert_sns_vary_only_cols(data, kwargs, cols_vary_only):
"""
Make sure that the only columns that has not been conditioned over for plotting and has non
unique values are in `cols_vary_only`. `disregard_col` are columns that can also
"""
return
conditioned_df = data
for col in get_col_kwargs(data, **kwargs).values():
first_unique = conditioned_df[col].dropna().unique()[0]
conditioned_df = conditioned_df[conditioned_df[col] == first_unique]
# make sure only `col_vary_only` varies
if len(conditioned_df[cols_vary_only].drop_duplicates()) != len(conditioned_df):
conditioned_df = conditioned_df[
conditioned_df[cols_vary_only[0]]
== conditioned_df[cols_vary_only[0]].dropna().unique()[0]
]
varying_columns = []
for col in conditioned_df.columns:
if len(conditioned_df[col].unique()) > 1 and col not in cols_vary_only:
varying_columns.append(col)
raise ValueError(
f"Not only varying {cols_vary_only}. At least one of the following varies {varying_columns}."
)
if __name__ == "__main__":
main_cli()
|
decodable_information_bottleneck-main
|
aggregate.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
Script to change the directory structure in `tmp_results/*` in case you change the default
directory structure (i.e. you change `hyperparameters` in the config).
Use `python add_hyperparam.py experiment=...` to update the directory structure of a given experiment.
If you don't provide the experiment, it will update everything.
"""
import glob
import logging
import os
import shutil
import hydra
from omegaconf import OmegaConf
from utils.helpers import format_container, hyperparam_to_path
logger = logging.getLogger(__name__)
@hydra.main(config_path="conf/config.yaml")
def add_hyperparam(args):
"""Function that renames results from `experiment` in case you added a new hyperparameter to save.
It can also rename all results by setting `experiment=*`.
"""
dflt_hyperparameters = OmegaConf.to_container(
args.hyperparameters, resolve=True)
subfolders = glob.glob(
os.path.join(args.paths.base_dir,
f"tmp_results/{args.experiment}/**/run_*/"),
recursive=True,
)
for subfolder in subfolders:
relative_dir = subfolder.split("tmp_results")[-1][1:-1]
args.experiment = relative_dir.split("/")[0]
args.trnsf_experiment = args.experiment
hyperparam = {
"_".join(group.split("_")[:-1]): group.split("_")[-1]
for group in relative_dir[len(args.experiment) + 1:].split("/")
}
curr_dflt_hyperparameters = dflt_hyperparameters.copy()
curr_dflt_hyperparameters.update(hyperparam)
hyperparam_path = hyperparam_to_path(curr_dflt_hyperparameters)
paths = format_container(args.paths, dict(
hyperparam_path=hyperparam_path))
# remove the run_ as it will always be 0 in paths
new_subfolder = "run_".join(
paths["chckpnt_dirnames"][0].split("run_")[:-1])
        # remove trailing slash if it exists
new_subfolder = new_subfolder.rstrip("/")
if new_subfolder == subfolder:
continue
else:
os.makedirs(
new_subfolder.rsplit("/", 1)[0], exist_ok=True
) # make sure all folder until last exist
shutil.move(subfolder, new_subfolder)
if __name__ == "__main__":
add_hyperparam()
|
decodable_information_bottleneck-main
|
add_hyperparam.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import string
from copy import deepcopy
import hydra
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from omegaconf import OmegaConf
from sklearn.preprocessing import minmax_scale
from aggregate import PRETTY_RENAMER
from dib.training.helpers import clone_trainer
from main import main as main_training
from utils.evaluate import accuracy, loglike
from utils.helpers import (PartialFormatMap, all_logging_disabled,
flip_nested_dict)
from utils.visualize import plot_2D_decision_boundary
logger = logging.getLogger(__name__)
class ModelsAnalyser:
"""Loader of pretrained models for analsis. Alalyse one set of enoders with their respective clfs.
Parameters
----------
save_dir : str
Where to save all results.
context_kwargs : dict, optional
Context arguments for plotting.
is_interactive : bool, optional
Whether to plot interactively, useful in jupyter notebooks.
prfx : str, optional
Prefix for the filename to save.
pretty_renamer : dict, optional
Dictionary mapping string (keys) to human readable ones for nicer printing and plotting.
dpi : int, optional
Resolution of the figures
"""
def __init__(
self,
save_dir,
context_kwargs={"context": "talk"},
is_interactive=False,
prfx="",
pretty_renamer=PRETTY_RENAMER,
dpi=300,
):
self.save_dir = save_dir
self.is_interactive = is_interactive
os.makedirs(self.save_dir, exist_ok=True)
sns.set_context(**context_kwargs)
self.trainers = {}
self.datasets = {}
self.prfx = prfx
self.n_clfs = 0
self.n_encs = 0
self.pretty_renamer = pretty_renamer
self.dpi = dpi
def recolt_data(
self,
cfg,
clf_patterns,
encoders_param,
encoders_vals,
get_name_clf=lambda p: PRETTY_RENAMER[p.replace("/", "")],
get_name_encoder=lambda p, v: PRETTY_RENAMER[" ".join(
p.split(".")[-2:])]
+ f" {v}",
):
"""Recolts all the data.
Parameters
----------
cfg : omegaconf.DictConfig
Arguments to the `main` function to load the models.
        clf_patterns : list of str
            List of patterns that will be used to select the clfs, i.e. a substring to recognize a
            single clf. Each pattern should match one and only one clf.
        encoders_param : str
            Parameter to loop over for getting different encoders, i.e. the field set to each of
            `encoders_vals`.
        encoders_vals : list
            Values of `encoders_param` to iterate over, effectively giving `param=values[i]`
            to generate each encoder.
        get_name_clf : callable, optional
            Function that takes in the pattern and returns the name of the clf. Used to give human
            readable names.
        get_name_encoder : callable, optional
            Function that maps (encoders_param, encoders_vals[i]) -> name_enc. Used to give human
            readable names.
readable names.
"""
self.loaded_trainers = dict()
self.loaded_datasets = dict()
for i, enc_val in enumerate(encoders_vals):
# make sure not changed in place cfg
curr_cfg = deepcopy(cfg)
name_enc = get_name_encoder(encoders_param, enc_val)
self.loaded_trainers[name_enc] = dict()
self.loaded_datasets[name_enc] = dict()
# set the the current encoder
curr_cfg.merge_with_dotlist([f"{encoders_param}={enc_val}"])
with all_logging_disabled():
trainers, datasets = main_training(curr_cfg)
for pattern in clf_patterns:
name_clf = get_name_clf(pattern)
key = [k for k in trainers.keys() if pattern in k]
if len(key) == 0:
raise ValueError(f"No keys have {pattern} as pattern.")
elif len(key) > 1:
raise ValueError(
f"Multiple keys have {pattern} as pattern.")
key = key[0]
self.loaded_trainers[name_enc][name_clf] = trainers[key]
self.loaded_datasets[name_enc][name_clf] = datasets[key]
len_trainers = list(set(len(t) for t in self.loaded_trainers.values()))
len_datasets = list(set(len(d) for d in self.loaded_datasets.values()))
if (
(len(len_trainers) != 1)
or (len(len_datasets) != 1)
or (len_datasets[0] != len_trainers[0])
):
raise ValueError(
f"datasets and trainers wrong length : len_trainers={len_trainers}, len_datasets={len_datasets}"
)
self.n_encs = len(encoders_vals)
self.n_clfs = len_trainers[0]
def plot_reps_clfs(
self,
filename,
get_title_top=lambda clf: clf,
get_title="{normloglike:.0%} Log Like.",
is_invert_yaxis=False,
diagonal_color="tab:green",
is_plot_test=False,
**kwargs,
):
"""Return a list of rep_clf figures for the correct classifiers.
Parameters
----------
filename : str
        get_title_top : callable or str, optional
            Function that returns the title of the top row (x label if inverted).
get_title : callable or str or "loglike", optional
Function that takes in the pattern and return the title. `{acc`|`{loglike`|`{normloglike`|
`{percloglike` | `{normacc` will be replaced by actual accuracy | loglike | normalized
loglike | normalized accuracy. Normalized is by clf.
is_invert_yaxis : bool, optional
            Whether to invert the row order (encoders plotted bottom-up).
diagonal_color : str, optional
Color of the text on the diagonal.
is_plot_test : bool, optional
Whether to plot some test datapoints.
kwargs :
Additional arguments to `plot_2D_decision_boundary`.
"""
if isinstance(get_title_top, str):
get_title_top_str = get_title_top
def get_title_top(clf): return get_title_top_str
if isinstance(get_title, str):
get_title_str = (
get_title if get_title != "loglike" else "{loglike:.2f} Log Like."
)
def get_title(clf): return get_title_str
named_axes = dict()
metrics = dict()
F, axes = plt.subplots(
self.n_encs,
self.n_clfs,
figsize=(4 * self.n_clfs, 4 * self.n_encs),
squeeze=False,
)
# plotting
for i, key_enc in enumerate(self.loaded_trainers.keys()):
if is_invert_yaxis:
i = self.n_encs - i - 1
named_axes[key_enc], metrics[key_enc] = get_figs_rep_clfs(
self.loaded_trainers[key_enc],
self.loaded_datasets[key_enc],
axes=axes[i, :],
is_plot_test=is_plot_test,
**kwargs,
)
# metrics will be of depth 3 with keys (metric, encoder, clf)
metrics = flip_nested_dict(metrics)
# each metric will be dataframe
metrics = {k: pd.DataFrame(v) for k, v in metrics.items()}
# add normalized metrics
for k in [k for k in metrics.keys()]:
metrics[f"norm{k}"] = pd.DataFrame(
minmax_scale(metrics[k], axis=1),
index=metrics[k].index,
columns=metrics[k].columns,
)
# back to dict
for k in [k for k in metrics.keys()]:
metrics[k] = metrics[k].to_dict()
# set all the titles
for i, enc in enumerate(named_axes.keys()):
unmodified_i = i
if is_invert_yaxis:
is_bottom = i == 0
i = self.n_encs - i - 1
def get_prfx(key): return ""
# if reversed, put the top title at the bottom as an x label
get_xlabel = (
(lambda key: get_title_top(key)
) if is_bottom else (lambda key: "")
)
else:
def get_prfx(key): return (
get_title_top(key) + "\n") if i == 0 else ""
get_xlabel = None
for j, clf in enumerate(named_axes[enc].keys()):
if j == 0:
axes[i, j].set_ylabel(enc)
if get_xlabel is not None:
axes[i, j].set_xlabel(get_xlabel(clf))
title = get_prfx(clf) + get_title(clf)
for metric, vals in metrics.items():
if "{" + metric in title:
title = title.format_map(
PartialFormatMap(**{metric: vals[enc][clf]})
)
title_kwargs = (
dict(color=diagonal_color, fontweight="bold")
if unmodified_i == j and diagonal_color is not None
else {}
)
axes[i, j].set_title(title, **title_kwargs)
if self.is_interactive:
plt.show(axes)
else:
F.savefig(
os.path.join(self.save_dir, f"{self.prfx}{filename}.png"), dpi=self.dpi
)
plt.close(F)
@hydra.main(config_path="conf/config.yaml", strict=True)
def main_cli(args):
return main(args)
def main(args):
main_cfg = deepcopy(args)
del main_cfg["load_models"]
logger.info(f"Loading models for {args.experiment} ...")
analyser = ModelsAnalyser(**args.load_models.kwargs)
logger.info(f"Recolting the data ..")
# OmegaConf dictionaries don't have all the properties that usual dictionaries have
analyser.recolt_data(
main_cfg, **OmegaConf.to_container(args.load_models.recolt_data, resolve=True)
)
for f in args.load_models.mode:
logger.info(f"Mode {f} ...")
if f is None:
continue
if f in args.load_models:
kwargs = args.load_models[f]
else:
kwargs = {}
getattr(analyser, f)(**kwargs)
# HELPERS
def plot_rep_clf(trainer, dataset, dataset_test=None, ax=None, **kwargs):
"""
Plot the given representation and the decision boundaries of the classifiers.
"""
# encode the data
transformer = clone_trainer(trainer)
transformer.module_ = transformer.module_.transformer
transformer.module_.is_transform = True
transformer.module_.is_avg_trnsf = True
X, y = get_encoded_X_y(transformer, dataset)
if dataset_test is not None:
test = get_encoded_X_y(transformer, dataset_test)
else:
test = None
# prepare the classifier
clf = clone_trainer(trainer)
clf.module_ = clf.module_.clf
acc = accuracy(clf, X, np.array(y))
log_like = loglike(clf, X, np.array(y))
metrics = dict(acc=acc, loglike=log_like)
ax = plot_2D_decision_boundary(X, y, clf, test=test, ax=ax, **kwargs)
return ax, metrics
def get_encoded_X_y(transformer, dataset):
X = super(type(transformer), transformer).predict_proba(
dataset).astype("float32")
y = [i[1] for i in dataset]
if isinstance(y[0], tuple): # if multitarget
y = [el[0] for el in y]
return X, y
def get_figs_rep_clfs(trainers, datasets, is_plot_test=False, axes=None, **kwargs):
"""Return a list of rep_clf figures for the correct classifiers.
Parameters
----------
trainers : dict of skorch.NeuralNetClassifer with MCTrnsfClassifier module
Trainers that will be used for encoding and classification. The keys of the dictionary
will be selected using clf_patterns.
datasets : dict of datasets
Each element of the dictionary is itself a dictionary of datasets, with a key
"train" for the training dataset. The keys should be the same as for trainers.
is_plot_test : bool, optional
Whether to plot some test datapoints.
axes: list matplotlib.axes, optional
List of axis on which to plot.
kwargs :
Additional arguments to `plot_2D_decision_boundary`.
"""
out_axs = dict()
all_metrics = dict()
# collect all the metrics and data before plotting so that normalized metrics can be used
for i, clf in enumerate(trainers.keys()):
ax = None if axes is None else axes[i]
out_axs[clf], metrics = plot_rep_clf(
trainers[clf],
datasets[clf]["train"],
dataset_test=datasets[clf]["test"] if is_plot_test else None,
ax=ax,
**kwargs,
)
for k, v in metrics.items():
all_metrics[k] = all_metrics.get(k, {})
all_metrics[k][clf] = metrics[k]
return out_axs, all_metrics
if __name__ == "__main__":
main_cli()
|
decodable_information_bottleneck-main
|
load_models.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import contextlib
import copy
import logging
import math
import os
import subprocess
from functools import partial, partialmethod
from pathlib import Path
import hydra
import omegaconf
import skorch
import torch
import torchvision
from omegaconf import OmegaConf
from skorch.callbacks import Callback, EpochScoring, Freezer, LRScheduler, Unfreezer
from dib.classifiers import MCTrnsfClassifier
from dib.predefined import MLP
from dib.training import NeuralNetClassifier, NeuralNetTransformer
from dib.training.helpers import target_extractor
from dib.training.trainer import _get_params_for_optimizer
from dib.transformers import (
DIBLoss,
DIBLossAltern,
DIBLossAlternHigher,
DIBLossAlternLinear,
DIBLossAlternLinearExact,
DIBLossLinear,
ERMLoss,
IBEncoder,
VIBLoss,
get_img_encoder,
)
from dib.utils.helpers import Identity, set_seed
from utils.data import get_train_dev_test_datasets
from utils.evaluate import (
FILE_CLF_REP,
accuracy,
accuracy_filter_train,
eval_clf,
eval_corr_gen,
eval_trainer_log,
eval_trnsf,
loglike,
)
from utils.helpers import (
CrossEntropyLossGeneralize,
SetLR,
StopAtThreshold,
_scoring,
_single_epoch_skipvalid,
append_sffx,
force_generalization,
format_container,
get_exponential_decay_gamma,
hyperparam_to_path,
invert_dict,
save_pattern,
)
from utils.train import train_load
logger = logging.getLogger(__name__)
FILE_LOGS = "log.csv"
FILE_END = "end.txt"
@hydra.main(config_path="conf/config.yaml")
def main_cli(args):
"""Function only used from CLI => to keep main() usable in jupyter"""
if args.is_nvidia_smi:
subprocess.call(["nvidia-smi"])
print()
return main(args)
def main(args):
"""Main function for training and testing representations."""
trainers_return = dict()
datasets_return = dict()
# ARGS
update_config_(args)
set_seed(args.seed)
# DATASET
datasets = get_datasets(args)
datasets_trnsf = prepare_transformer_datasets(args, datasets)
update_config_datasets_(args, datasets_trnsf)
# TRANSFORMER (i.e. Encoder)
Transformer = get_Transformer(args, datasets_trnsf)
if args.is_precompute_trnsf:
name = "transformer"
trainers_return[name] = fit_evaluate_trainer(
Transformer, args, name, datasets_trnsf, True
)
datasets_return[name] = prepare_return_datasets(datasets_trnsf)
else:
# loading the pretrained transformer
transformer = fit_trainer(
Transformer,
args,
datasets_trnsf,
True,
"transformer",
is_load_criterion=False,
)
datasets = prepare_classification_datasets_(args, datasets)
for Classifier, clf_name in gen_Classifiers_name(args, transformer, datasets):
trainers_return[clf_name] = fit_evaluate_trainer(
Classifier,
args,
clf_name,
datasets,
False,
is_return_init=args.is_correlation_Bob,
)
datasets_return[clf_name] = prepare_return_datasets(datasets)
if args.is_return:
return trainers_return, datasets_return
def update_config_(args):
"""Update the configuration values based on other values."""
# increment the seed at each run
args.seed = args.seed + args.run
# multiply the number of examples by a factor. Used to make the number of examples
# depend on the number of labels. Usually factor is 1.
args.datasize.n_examples = args.datasize.factor * args.datasize.n_examples
if args.datasize.n_examples_test == "train":
# use same number of train and test examples
args.datasize.n_examples_test = args.datasize.n_examples
if args.is_precompute_trnsf and args.train.trnsf_kwargs.is_train:
# if training transformer then paths need to agree
assert args.paths["trnsf_dirnames"][0] == args.paths["chckpnt_dirnames"][0]
# monitor training when you randomize the labels because validation does not mean anything
if args.dataset.kwargs.is_random_targets:
args.train.trnsf_kwargs.monitor_best = "train_loss_best"
args.train.clf_kwargs.monitor_best = "train_loss_best"
if not args.train.is_tensorboard:
args.paths["tensorboard_curr_dir"] = None
if args.experiment == "gap":
# dib with Q++
if args.model.name == "vib":
args.model.loss.beta = args.model.loss.beta * 40
elif args.model.name == "cdibL":
args.model.loss.beta = args.model.loss.beta / 100
elif args.model.name == "cdibS":
args.model.loss.beta = args.model.loss.beta * 30
if "dibL" in args.model.name:
# dib with Q++
args.model.Q_zx.hidden_size = args.model.Q_zy.hidden_size * 64
if "dibS" in args.model.name:
# dib with Q--
args.model.Q_zx.hidden_size = args.model.Q_zy.hidden_size // 64
if "dibXS" in args.model.name:
# dib with Q------
args.model.Q_zx.hidden_size = 1
if "dibXL" in args.model.name:
# dib with Q++++++++
args.model.Q_zx.hidden_size = 8192
short_long_monitor = dict(
vloss="valid_loss_best", tloss="train_loss_best", vacc="valid_acc_best"
)
# use short version for name of file
args.train.monitor_best = invert_dict(short_long_monitor).get(
args.train.monitor_best, args.train.monitor_best
)
hyperparam_path = hyperparam_to_path(args.hyperparameters)
args.paths.merge_with(
OmegaConf.create(
format_container(args.paths, dict(hyperparam_path=hyperparam_path))
)
)
# every change that should not modify the name of the file should go below this
# ----------------------------------------------------------------------------
# use long version in code
args.train.monitor_best = short_long_monitor.get(
args.train.monitor_best, args.train.monitor_best
)
args.train.trnsf_kwargs.monitor_best = short_long_monitor.get(
args.train.trnsf_kwargs.monitor_best, args.train.trnsf_kwargs.monitor_best
)
args.train.clf_kwargs.monitor_best = short_long_monitor.get(
args.train.clf_kwargs.monitor_best, args.train.clf_kwargs.monitor_best
)
if not args.is_precompute_trnsf:
logger.info("Not precomputing the transformer so setting train=False.")
args.train.trnsf_kwargs.is_train = False
args.train.kwargs.lr = args.train.lr_clf # ! DEV
else:
if args.model.name == "wdecayBob":
args.train.weight_decay = 1e-4
if args.model.name == "dropoutBob":
args.encoder.architecture.dropout = 0.5
if not args.datasize.is_valid_all_epochs and "train" in args.train.monitor_best:
# don't validate all epochs when validation >>> training and you only look at training
rm_valid_epochs_()
if args.model.is_joint:
args.model.gamma_force_generalization = 1
if "distractor" in args.clfs.name and not args.is_precompute_trnsf:
args.dataset.is_use_distractor = True
if "random" in args.clfs.name and not args.is_precompute_trnsf:
# if you want random dataset for classifier then make sure you are not randomizing for encoder
args.dataset.kwargs.is_random_targets = True
args.train.clf_kwargs.monitor_best = "train_loss_best" # don't monitor val
if isinstance(args.train.kwargs.lr, str) and "|" in args.train.kwargs.lr:
lr, lr_factor_zx = args.train.kwargs.lr.split("|")
args.train.kwargs.lr = float(lr)
args.train.lr_factor_zx = float(lr_factor_zx)
if args.model.name == "vibL":
# keep alice the same but increase bob view of alice
# vib with better approx of I[Z,Y] Q++
args.model.Q_zy.hidden_size = args.model.Q_zy.hidden_size * 16
if args.model.name == "wdecay":
args.train.weight_decay = 1e-4
if "correlation" in args.experiment:
if args.train.optim == "rmsprop":
if args.train.weight_decay == 0.0005:
args.train.weight_decay = 0.0003
elif args.train.optim == "sgd":
args.train.kwargs.lr = args.train.kwargs.lr * 50
if "perminvcdib" in args.model.name:
args.encoder.architecture.hidden_size = [1024]
args.model.architecture.z_dim = 1024
args.model.Q_zy.hidden_size = 256
args.model.Q_zy.n_hidden_layers = 1
def add_none(a, b):
if a is None or b is None:
return None
return a + b
def rm_valid_epochs_():
"""Don't validate every epoch."""
NeuralNetTransformer._single_epoch = _single_epoch_skipvalid
NeuralNetClassifier._single_epoch = _single_epoch_skipvalid
skorch.callbacks.scoring.ScoringBase._scoring = _scoring
def get_datasets(args):
"""return a dictionary of train, test, valid, datasets."""
logger.info("Loading the dataset ...")
datasets = get_train_dev_test_datasets(
args.dataset.name,
args.dataset.type,
valid_size=args.dataset.valid_size,
**OmegaConf.to_container(args.dataset.kwargs, resolve=True),
)
# Subsetting dataset if needed
datasets["train"] = datasets["train"].get_subset(
size=args.datasize.n_examples)
datasets["test"] = datasets["test"].get_subset(
size=args.datasize.n_examples_test)
if args.dataset.is_use_distractor:
for dataset in datasets.values():
dataset._switch_distractor_target() # will only work if dataset has a distractor
if args.dataset.train == "trainvalid":
# for VIB MNIST experiment
datasets["train"].append_(datasets["valid"])
args.dataset.train = "train"
datasets["train"], datasets["valid"], datasets["test"] = (
datasets[args.dataset.train],
datasets[args.dataset.valid],
datasets[args.dataset.test],
)
return datasets
def get_Transformer(args, datasets):
"""Return the correct transformer."""
logger.info("Instantiating the transformer ...")
# Q used for sufficiency
Q_zy = partial(
MLP, **OmegaConf.to_container(args.model.Q_zy, resolve=True))
# Q used for minimality
Q_zx = partial(
MLP, **OmegaConf.to_container(args.model.Q_zx, resolve=True))
kwargs_loss = OmegaConf.to_container(args.model.loss, resolve=True)
kwargs_loss["Q"] = Q_zx
kwargs_trnsf = dict(Q=Q_zy)
Losses = dict(
VIBLoss=VIBLoss, ERMLoss=ERMLoss, DIBLossSklearn=DIBLossAlternLinearExact
)
is_linear = args.model.Q_zx.n_hidden_layers == 0
altern_minimax = args.model.loss.altern_minimax
kwargs = {}
if altern_minimax > 0:
if is_linear:
Losses["DIBLoss"] = DIBLossAlternLinear
else:
Losses["DIBLoss"] = (
DIBLossAlternHigher if args.model.loss.is_higher else DIBLossAltern
)
elif args.model.Loss == "DIBLoss":
# when doing joint training you need to give the parameters of the criterion
# to the main (and only) optimizer
NeuralNetTransformer._get_params_for_optimizer = partialmethod(
_get_params_for_optimizer, is_add_criterion=True
)
Losses["DIBLoss"] = DIBLossLinear if is_linear else DIBLoss
kwargs["optimizer__param_groups"] = [
("Q_zx*", {"lr": args.train.kwargs.lr * args.train.lr_factor_zx})
]
return partial(
NeuralNetTransformer,
module=partial(
partial(IBEncoder, **kwargs_trnsf),
Encoder=partial(
get_img_encoder(args.encoder.name),
**OmegaConf.to_container(args.encoder.architecture, resolve=True),
),
**OmegaConf.to_container(args.model.architecture, resolve=True),
),
optimizer=get_optim(args),
criterion=partial(
Losses[args.model.Loss],
ZYCriterion=partial(
CrossEntropyLossGeneralize,
gamma=args.model.gamma_force_generalization,
map_target_position=datasets["train"].map_target_position,
),
**kwargs_loss,
),
callbacks__print_log__keys_ignored=args.keys_ignored,
**kwargs,
)
def fit_evaluate_trainer(Trainer, args, name, datasets, is_trnsf, **kwargs):
"""Fit and evaluate a single trainer."""
file_after_train = get_file_after_train(args, name)
if not get_is_already_trained(args, file_after_train, is_trnsf):
trainer = fit_trainer(Trainer, args, datasets,
is_trnsf, name, **kwargs)
if args.train.is_evaluate:
evaluate_trainer(trainer, args, datasets, is_trnsf, name)
Path(file_after_train).touch(exist_ok=True)
return trainer
def get_file_after_train(args, name):
"""Return a placeholder file which is used to say whether the transformer has been precomputed."""
chckpnt_paths = get_chckpnt_paths(args, name)
return os.path.join(chckpnt_paths[0], FILE_END)
def get_is_already_trained(args, file_after_train, is_trnsf):
"""Whether the encoder is already precomputed."""
if is_trnsf:
is_skip = args.is_skip_trnsf_if_precomputed
else:
is_skip = args.is_skip_clf_if_precomputed
if not args.is_return and is_skip and os.path.isfile(file_after_train):
logger.info(f"Not training because {file_after_train} exists.")
return True
# making sure the placeholder doesn't exist if you will retrain the model
with contextlib.suppress(FileNotFoundError):
os.remove(file_after_train)
return False
def prepare_transformer_datasets(args, datasets):
"""Return a transformer dataset (not inplace)."""
# make sure don't change the dataset for eval and clf
datasets = copy.deepcopy(datasets)
# store the old training for evaluation
datasets["train_unmodified"] = datasets["train"]
gamma = args.model.gamma_force_generalization
if gamma != 0:
datasets["train"] = force_generalization(datasets)
if not args.model.is_joint:
if gamma == "zero":
# trick to add test data even without using gamma
gamma = 0
# gamma is rescaled to depend on the size of train and test (i.e. be relative), but
# not if you have access to the joint (in which case you really have access to all
# of train and test, unrescaled)
args.model.gamma_force_generalization = gamma
return datasets
def update_config_datasets_(args, datasets):
"""Update the configuration values based on the datasets."""
args.datasize.n_examples = len(datasets["train"]) # store as an integer
steps_per_epoch = len(datasets["train"]) // args.datasize.batch_size
args.model.loss.warm_Q_zx = steps_per_epoch * args.model.loss.warm_Q_zx
count_targets = datasets["train"].count_targets()
with omegaconf.open_dict(args):
args.model.loss.n_per_target = {
str(k): int(v) for k, v in count_targets.items()
}
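# Sketch with hypothetical numbers: for 50_000 training examples and
# batch_size=128, steps_per_epoch = 50_000 // 128 = 390, so a warm-up of
# warm_Q_zx=2 (epochs) is converted to 780 optimization steps.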
def fit_trainer(
Trainer, args, datasets, is_trnsf, name, is_load_criterion=True, **kwargs
):
"""Fits the given trainer on the datasets."""
logger.info(f"Fitting {name} ...")
specific_kwargs = args.train["trnsf_kwargs" if is_trnsf else "clf_kwargs"]
chckpnt_paths = get_chckpnt_paths(args, name)
trainer = train_load(
Trainer,
datasets,
# always save the transformer at the precomputed path
chckpnt_dirnames=get_chckpnt_paths(args, name),
tensorboard_dir=get_tensorboard_paths(args, name),
is_load_criterion=is_load_criterion,
callbacks=get_callbacks(args, datasets, is_trnsf=is_trnsf),
**OmegaConf.to_container(args.train.kwargs, resolve=True),
**OmegaConf.to_container(specific_kwargs, resolve=True),
**kwargs,
)
if specific_kwargs.is_train:
log_training(trainer, args.csv_score_pattern, chckpnt_paths)
return trainer
def get_tensorboard_paths(args, name):
"""Return the paths for tensorboard"""
return add_none(args.paths["tensorboard_curr_dir"], name)
def get_chckpnt_paths(args, name):
"""Return the paths for the classifiers checkpoint"""
return append_sffx(args.paths["chckpnt_dirnames"], name)
def get_callbacks(args, datasets, is_trnsf):
"""Return the correct callbacks for training."""
if is_trnsf:
callbacks = [
(
"valid_acc",
EpochScoring(
accuracy, # cannot use "accuracy" because using a transformer rather than classifier
name="valid_acc",
lower_is_better=False,
target_extractor=target_extractor,
),
),
(
"valid_loglike",
EpochScoring(
loglike, # the actual loss also contains all regularization
name="valid_loglike",
lower_is_better=False,
target_extractor=target_extractor,
),
),
]
else:
callbacks = []
callbacks += [
(
"train_acc",
EpochScoring(
partial(
accuracy_filter_train,
map_target_position=datasets["train"].map_target_position,
),
name="train_acc",
on_train=True,
lower_is_better=False,
target_extractor=partial(
target_extractor, is_multi_target=True),
),
)
]
callbacks += get_lr_schedulers(args, datasets, is_trnsf=is_trnsf)
# callbacks += [skorch.callbacks.GradientNormClipping(gradient_clip_value=0.1)]
if args.train.freezer.patterns is not None:
callbacks += [
Freezer(
args.train.freezer.patterns,
at=args.train.freezer.at
if args.train.freezer.at is not None
else return_True,
)
]
if args.train.unfreezer.patterns is not None:
callbacks += [
Unfreezer(args.train.unfreezer.patterns,
at=args.train.unfreezer.at)
]
if args.train.ce_threshold is not None:
callbacks += [StopAtThreshold(threshold=args.train.ce_threshold)]
return callbacks
def return_True(*args):
return True
def get_optim(args):
if args.train.optim == "sgd":
return partial(
torch.optim.SGD, momentum=0.9, weight_decay=args.train.weight_decay
)
elif args.train.optim == "adam":
return partial(torch.optim.Adam, weight_decay=args.train.weight_decay)
elif args.train.optim == "adam":
return partial(torch.optim.Adam, weight_decay=args.train.weight_decay)
elif args.train.optim == "rmsprop":
return partial(torch.optim.RMSprop, weight_decay=args.train.weight_decay)
elif args.train.optim == "adagrad":
return partial(torch.optim.Adagrad, weight_decay=args.train.weight_decay)
elif args.train.optim == "adamw":
return partial(
torch.optim.AdamW, weight_decay=args.train.weight_decay, amsgrad=True
)
elif args.train.optim == "LBFGS":
NeuralNetTransformer.train_step = train_step_set_optim
return partial(
torch.optim.LBFGS,
line_search_fn="strong_wolfe",
history_size=10,
max_iter=7,
)
else:
raise ValueError(f"Unkown optim={args.train.optim}")
def get_lr_schedulers(args, datasets, is_trnsf):
if args.train.scheduling_mode == "decay":
gamma = get_exponential_decay_gamma(
args.train.scheduling_factor, args.train.trnsf_kwargs.max_epochs
)
lr_scheduler = [
LRScheduler(torch.optim.lr_scheduler.ExponentialLR, gamma=gamma)
]
elif args.train.scheduling_mode == "plateau":
lr_scheduler = [
LRScheduler(
torch.optim.lr_scheduler.ReduceLROnPlateau,
monitor="valid_loss",
factor=0.2,
)
]
elif args.train.scheduling_mode == "biplateau":
lr_scheduler = [
LRScheduler(
torch.optim.lr_scheduler.ReduceLROnPlateau,
monitor="valid_loss",
factor=0.2, # 0.1
patience=3, # 0.5
verbose=True,
threshold=0.01,
min_lr=1e-5,
),
# increase lr but max at 0.5
LRScheduler(SetLR, lr_lambda=lambda _, lr, __: min(lr * 1.3, 0.5)),
# dirty way for not increasing lr in case loss didn't improve
LRScheduler(
torch.optim.lr_scheduler.ReduceLROnPlateau,
monitor="valid_loss",
factor=1 / 1.3,
patience=1,
),
]
elif args.train.scheduling_mode is None:
lr_scheduler = []
else:
raise ValueError(
f"Unkown scheduling_mode={args.train.scheduling_mode}")
return lr_scheduler
def log_training(trainer, csv_score_pattern, chckpnt_dirnames):
"""Log training history (loss and accuracy) in a more readable format ."""
save_pattern(chckpnt_dirnames, csv_score_pattern, FILE_LOGS)
for h in trainer.history:
if not math.isnan(h["valid_acc"]):
for metric in ["acc", "loss"]:
for mode in ["train", "valid"]:
try:
save_pattern(
chckpnt_dirnames,
csv_score_pattern,
FILE_LOGS,
formatting=dict(
epoch=h["epoch"],
metric=metric,
mode=mode,
score=h[f"{mode}_{metric}"],
),
)
except KeyError as e:
logger.debug(
f"Skipping a loop because couldn't find key {e}")
def evaluate_trainer(trainer, args, datasets, is_trnsf, name, **kwargs):
"""Evaluate trainers on the train and test dataset."""
logger.info(f"Evaluating {name} ...")
if is_trnsf:
evaluator = eval_trnsf
else:
evaluator = eval_corr_gen if args.is_correlation else eval_clf
chckpnt_paths = get_chckpnt_paths(args, name)
tensorboard_dir = get_tensorboard_paths(args, name)
trainers = {"best": trainer}
is_append = False
for epoch, trainer in trainers.items():
# Test Evaluation
eval_trainer_log(
trainer,
datasets["test"],
args.csv_score_pattern,
chckpnt_paths,
dict(args.hyperparameters),
evaluator=evaluator,
tensorboard_dir=tensorboard_dir,
epoch=epoch,
is_append=is_append,
**kwargs,
)
# only create the field for the first time you log
is_append = True
# evaluation should be made on the training set without any additions (e.g. anti-generalization)
train_data = datasets.get("train_unmodified", datasets["train"])
# Train Evaluation
eval_trainer_log(
trainer,
train_data,
args.csv_score_pattern,
chckpnt_paths,
dict(args.hyperparameters),
evaluator=evaluator,
tensorboard_dir=None,
is_append=is_append,
mode="train",
file_clf_rep="train_" + FILE_CLF_REP,
epoch=epoch,
**kwargs,
)
def prepare_classification_datasets_(args, datasets):
"""Modify inplace the datasets before classification."""
# store the old training for evaluation
datasets["train_unmodified"] = datasets["train"]
gamma = args.clfs.gamma_force_generalization
if args.clfs.gamma_force_generalization != 0:
datasets["train"] = force_generalization(datasets)
args.clfs.gamma_force_generalization = gamma
return datasets
def gen_Classifiers_name(args, transformer, datasets):
"""Generator of uninstantiated classifiers."""
gamma = args.clfs.gamma_force_generalization
data_weight = len(datasets["train"]) / len(datasets["test"])
for n_hid in OmegaConf.to_container(args.clfs.nhiddens, resolve=True):
for n_lay in OmegaConf.to_container(args.clfs.nlayers, resolve=True):
for k_pru in OmegaConf.to_container(args.clfs.kprune, resolve=True):
clf_name = (
f"clf_nhid_{n_hid}/clf_nlay_{n_lay}/clf_kpru_{k_pru}/gamma_{gamma}/"
)
Classifier = partial(
MLP, hidden_size=n_hid, n_hidden_layers=n_lay, k_prune=k_pru
)
kwargs = {}
if not args.clfs.is_reinitialize:
kwargs["previous_mlp"] = transformer.module_.Q_zy
Classifier = partial(
NeuralNetClassifier,
module=partial(
MCTrnsfClassifier,
transformer=transformer.module_,
Classifier=Classifier,
**OmegaConf.to_container(args.clfs.kwargs, resolve=True),
**kwargs,
),
# don't use any regularization if you only care about training (e.g. Rademacher)
optimizer=get_optim(args),
criterion=partial(
CrossEntropyLossGeneralize,
gamma=gamma * data_weight,
map_target_position=datasets["train"].map_target_position,
),
)
yield Classifier, clf_name
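# Sketch with hypothetical config values: nhiddens=[16, 1024], nlayers=[1, 2],
# kprune=[0] and gamma=0 would yield 4 classifiers, named e.g.
# "clf_nhid_16/clf_nlay_1/clf_kpru_0/gamma_0/".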
def prepare_return_datasets(datasets):
"""Prepares the datasets to return them"""
datasets = copy.deepcopy(datasets)
# remove modifications to the training set, such as adding the test set for anti-generalization
if "train_unmodified" in datasets:
datasets["train_modified"] = datasets["train"]
datasets["train"] = datasets["train_unmodified"]
return datasets
if __name__ == "__main__":
main_cli()
|
decodable_information_bottleneck-main
|
main.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
# should be in a hydra file
UNLABELLED_CLASS = -1
|
decodable_information_bottleneck-main
|
dib/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .trainer import *
|
decodable_information_bottleneck-main
|
dib/training/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import warnings
from contextlib import suppress
import numpy as np
import skorch
import torch
import torch.nn as nn
from scipy.special import softmax
from sklearn.base import ClassifierMixin, TransformerMixin
from skorch import NeuralNet
from skorch.callbacks import EpochScoring, ProgressBar
from skorch.dataset import Dataset, get_len, unpack_data, uses_placeholder_y
from skorch.helper import predefined_split
from skorch.history import History
from skorch.utils import TeeGenerator, get_map_location, to_numpy, to_tensor
from .helpers import FixRandomSeed, target_extractor
logger = logging.getLogger(__name__)
__all__ = ["NeuralNetTransformer", "NeuralNetClassifier"]
net_get_params_for_optimizer = NeuralNet._get_params_for_optimizer
def _get_params_for_optimizer(self, prefix, named_parameters, is_add_criterion=False):
"""Difference with default is that also adds the parameters of the criterion."""
if is_add_criterion:
named_parameters = list(named_parameters) + list(
self.criterion_.named_parameters()
)
return net_get_params_for_optimizer(self, prefix, named_parameters)
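# Sketch of the intended use (this mirrors main.py): when the criterion holds its
# own trainable modules (e.g. the Q_zx head of DIBLoss), they are handed to the
# main optimizer by patching the method:
#   from functools import partialmethod
#   NeuralNetTransformer._get_params_for_optimizer = partialmethod(
#       _get_params_for_optimizer, is_add_criterion=True
#   )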
def save_params(self, f_criterion=None, **kwargs):
"""Difference with default is that also saves the criterion."""
NeuralNet.save_params(self, **kwargs)
if f_criterion is not None:
msg = (
"Cannot save parameters of an un-initialized criterion. "
"Please initialize first by calling .initialize() "
"or by fitting the model with .fit(...)."
)
self.check_is_fitted(attributes=["criterion_"], msg=msg)
torch.save(self.criterion_.state_dict(), f_criterion)
def load_params(self, f_criterion=None, checkpoint=None, **kwargs):
NeuralNet.load_params(self, checkpoint=checkpoint, **kwargs)
####################
# all this copy pasted
def _get_state_dict(f):
map_location = get_map_location(self.device)
self.device = self._check_device(self.device, map_location)
return torch.load(f, map_location=map_location)
if checkpoint is not None:
if not self.initialized_:
self.initialize()
formatted_files = checkpoint.get_formatted_files(self)
####################
f_criterion = f_criterion or formatted_files["f_criterion"]
if f_criterion is not None:
msg = (
"Cannot load state of an un-initialized criterion. "
"Please initialize first by calling .initialize() "
"or by fitting the model with .fit(...)."
)
self.check_is_fitted(attributes=["criterion_"], msg=msg)
state_dict = _get_state_dict(f_criterion)
self.criterion_.load_state_dict(state_dict)
def get_loss(self, y_pred, y_true, X=None, training=False):
"""Return the loss for this batch."""
y_true = to_tensor(y_true, device=self.device)
if isinstance(self.criterion_, nn.Module):
self.criterion_.train(training)
return self.criterion_(y_pred, y_true)
def fit_loop(self, X, y=None, epochs=None, **fit_params):
self.check_data(X, y)
epochs = epochs if epochs is not None else self.max_epochs
dataset_train, dataset_valid = self.get_split_datasets(X, y, **fit_params)
on_epoch_kwargs = {"dataset_train": dataset_train,
"dataset_valid": dataset_valid}
start = 0
if self.is_train_delta_epoch:
# in case you load the model you want to only train the epoch difference
start = len(self.history)
# make sure to still run at least 1 epoch for model saving / notification
start = min(start, epochs - 1)
logger.info(f"Model was loaded, training only epochs {start}-{epochs}")
for epoch in range(start, epochs):
self.notify("on_epoch_begin", **on_epoch_kwargs)
self._single_epoch(dataset_train, training=True,
epoch=epoch, **fit_params)
if dataset_valid is not None:
self._single_epoch(dataset_valid, training=False,
epoch=epoch, **fit_params)
self.notify("on_epoch_end", **on_epoch_kwargs)
return self
def _single_epoch(self, dataset, training, epoch, **fit_params):
"""Computes a single epoch of train or validation."""
is_placeholder_y = uses_placeholder_y(dataset)
if training:
prfx = "train"
step_fn = self.train_step
else:
prfx = "valid"
step_fn = self.validation_step
batch_count = 0
for data in self.get_iterator(dataset, training=training):
Xi, yi = unpack_data(data)
yi_res = yi if not is_placeholder_y else None
self.notify("on_batch_begin", X=Xi, y=yi_res, training=training)
step = step_fn(Xi, yi, **fit_params)
self.history.record_batch(prfx + "_loss", step["loss"].item())
self.history.record_batch(prfx + "_batch_size", get_len(Xi))
self.notify("on_batch_end", X=Xi, y=yi_res, training=training, **step)
batch_count += 1
self.history.record(prfx + "_batch_count", batch_count)
if hasattr(self.criterion_, "to_store"):
for k, v in self.criterion_.to_store.items():
with suppress(NotImplementedError):
# pytorch raises NotImplementedError on wrong types
self.history.record(prfx + "_" + k, (v[0] / v[1]).item())
self.criterion_.to_store = dict()
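# Convention assumed by the block above: a criterion may expose
# `self.to_store = {"some_term": (running_sum, n_batches)}`; each entry is then
# recorded in the history as "train_some_term" / "valid_some_term" as the batch
# average (sum / count), and the store is reset at the end of the epoch.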
doc_neural_net_clf = (
"""Wrapper around skorch.NeuralNetClassifier. Differences:
Parameters
----------
Notes
-----
- uses cross-entropy loss by default instead of NLLLoss
- enables storing of additional losses.
Base documentation:
"""
+ skorch.NeuralNetClassifier.__doc__
)
class NeuralNetClassifier(skorch.NeuralNetClassifier):
__doc__ = doc_neural_net_clf
def __init__(
self,
*args,
criterion=torch.nn.CrossEntropyLoss,
is_train_delta_epoch=True,
**kwargs,
):
super().__init__(*args, criterion=criterion, **kwargs)
self.is_train_delta_epoch = is_train_delta_epoch
@property
def _default_callbacks(self):
_default_callbacks = dict(super()._default_callbacks)
_default_callbacks["valid_acc"] = EpochScoring(
"accuracy",
name="valid_acc",
lower_is_better=False,
target_extractor=target_extractor,
)
return [(k, v) for k, v in _default_callbacks.items()]
def predict_proba(self, X):
"""Return probability estimates for samples.
Notes
-----
- output of model should be logits (softmax applied in this function)
- If the module's forward method returns multiple outputs as a
tuple, it is assumed that the first output contains the
relevant information and the other values are ignored. If all
values are relevant, consider using :func:`~skorch.NeuralNet.forward`
instead.
Returns
-------
y_proba : numpy ndarray
"""
# output of model should be logits!
logits = super().predict_proba(X)
return softmax(logits, axis=1)
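# Sketch (hypothetical data): the wrapped module must output logits, so after
# the softmax each row of `predict_proba` sums to one:
#   proba = clf.predict_proba(X)               # shape (n_samples, n_classes)
#   assert np.allclose(proba.sum(axis=1), 1.0)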
fit_loop = fit_loop
_single_epoch = _single_epoch
get_loss = get_loss
_get_params_for_optimizer = _get_params_for_optimizer
save_params = save_params
load_params = load_params
doc_neural_net_trnsf = (
"""Wrapper around skorch.NeuralNet for transforming data. Differences:
Methods
-------
freeze:
Freezes the model such that it cannot be fitted again.
transform:
Returns a numpy array containing all the first outputs, similarly to `.predict`. The main
difference is that it sets `module_.is_transform` to `True`. The correct behavior should thus
be implemented in the module using the `is_transform` flag.
Notes
-----
- enables storing of additional losses.
Base documentation:
"""
+ skorch.NeuralNet.__doc__
)
class NeuralNetTransformer(skorch.NeuralNet, TransformerMixin):
__doc__ = doc_neural_net_trnsf
def __init__(self, *args, is_train_delta_epoch=True, **kwargs):
super().__init__(*args, **kwargs)
self.is_train_delta_epoch = is_train_delta_epoch
def freeze(self, is_freeze=True):
"""Freezes (or unfreeze) the model such that it cannot be fitted again."""
self._is_frozen = is_freeze
return self
def fit(self, X, y=None, **fit_params):
if hasattr(self, "_is_frozen") and self._is_frozen:
if self.verbose > 0:
warnings.warn("Skipping fitting because froze etimator.")
return self
return super().fit(X, y=y, **fit_params)
def transform(self, X):
"""Transform an input."""
self.module_.is_transform = True
self.module_.training = False
X_transf = super().predict_proba(X) # does not actually predict proba
self.module_.is_transform = False
self.module_.training = True
return X_transf
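# Usage sketch (assumed downstream use): `transform` returns the representation
# computed with `is_transform=True`, e.g. to feed a separate estimator:
#   Z = trnsf.freeze().transform(X)  # freeze() guards against accidental refits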
def predict_proba(self, X):
"""Return probability estimates for samples.
Notes
-----
- output of model should be logits (softmax applied in this function)
- If the module's forward method returns multiple outputs as a
tuple, it is assumed that the first output contains the
relevant information and the other values are ignored. If all
values are relevant, consider using :func:`~skorch.NeuralNet.forward`
instead.
Returns
-------
y_proba : numpy ndarray
"""
# output of model should be logits!
logits = super().predict_proba(X)
return softmax(logits, axis=1)
fit_loop = fit_loop
_single_epoch = _single_epoch
get_loss = get_loss
_get_params_for_optimizer = _get_params_for_optimizer
save_params = save_params
load_params = load_params
|
decodable_information_bottleneck-main
|
dib/training/trainer.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import copy
import random
import warnings
import numpy as np
import skorch
import torch
from skorch.callbacks import Callback
from dib.utils.helpers import cont_tuple_to_tuple_cont, ratio_to_int, set_seed, to_numpy
def target_extractor(targets, is_multi_target=False):
"""
Helper function that extracts the targets for scoring. There can be multiple targets
for the case where you appended indices or distractors.
"""
if isinstance(targets, (list, tuple)):
if is_multi_target:
targets = torch.stack(targets, axis=1)
else:
targets = targets[0]
return to_numpy(targets)
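# Illustrative example (1-D target batches assumed):
#   >>> target_extractor([torch.tensor([0, 1]), torch.tensor([7, 8])],
#   ...                  is_multi_target=True)
#   array([[0, 7],
#          [1, 8]])
#   >>> target_extractor([torch.tensor([0, 1]), torch.tensor([7, 8])])
#   array([0, 1])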
def clone_trainer(trainer, is_reinit_besides_param=False):
"""Clone a trainer with optional possibility of reinitializing everything besides
parameters (e.g. optimizers.)"""
with warnings.catch_warnings():
warnings.simplefilter("ignore")
trainer_new = copy.deepcopy(trainer)
if is_reinit_besides_param:
trainer_new.initialize_callbacks()
trainer_new.initialize_criterion()
trainer_new.initialize_optimizer()
trainer_new.initialize_history()
return trainer_new
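# Sketch: keep the learned module weights but restart the optimization state:
#   fresh = clone_trainer(trainer, is_reinit_besides_param=True)
#   fresh.partial_fit(X, y)  # partial_fit avoids re-initializing the module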
class FixRandomSeed(Callback):
"""
Callback to have a deterministic behavior.
Credits: https://github.com/skorch-dev/skorch/issues/280
"""
def __init__(self, seed=123, is_cudnn_deterministic=False, verbose=0):
self.seed = seed
self.is_cudnn_deterministic = is_cudnn_deterministic
self.verbose = verbose
def initialize(self):
if self.seed is not None:
if self.verbose > 0:
print("setting random seed to: ", self.seed, flush=True)
set_seed(self.seed)
torch.backends.cudnn.deterministic = self.is_cudnn_deterministic
class Checkpoint(skorch.callbacks.Checkpoint):
"""
Difference with the default is that it also saves the criterion.
"""
def __init__(self, *args, f_criterion="criterion.pt", **kwargs):
super().__init__(*args, **kwargs)
self.f_criterion = f_criterion
def save_model(self, net):
super().save_model(net)
if self.f_criterion is not None:
f = self._format_target(net, self.f_criterion, -1)
self._save_params(f, net, "f_criterion", "criterion parameters")
def get_formatted_files(self, net):
idx = -1
if self.event_name is not None and net.history:
for i, v in enumerate(net.history[:, self.event_name]):
if v:
idx = i
return {
"f_params": self._format_target(net, self.f_params, idx),
"f_optimizer": self._format_target(net, self.f_optimizer, idx),
"f_history": self.f_history_,
# ONLY DIFF
"f_pickle": self._format_target(net, self.f_pickle, idx),
"f_criterion": self._format_target(net, self.f_criterion, idx),
}
|
decodable_information_bottleneck-main
|
dib/training/helpers.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
Pruning methods modified from:
https://pytorch.org/docs/master/_modules/torch/nn/utils/prune.html
"""
import numbers
from abc import abstractmethod
import torch
# For Python 2 and 3 support
try:
from abc import ABC
from collections.abc import Iterable
except ImportError:
from abc import ABCMeta
ABC = ABCMeta("ABC", (), {})
from collections import Iterable
class BasePruningMethod(ABC):
r"""Abstract base class for creation of new pruning techniques.
Provides a skeleton for customization requiring the overriding of methods
such as :meth:`compute_mask` and :meth:`apply`.
"""
def __init__(self):
pass
def __call__(self, module, inputs):
r"""Multiplies the mask (stored in ``module[name + '_mask']``)
into the original tensor (stored in ``module[name + '_orig']``)
and stores the result into ``module[name]`` by using
:meth:`apply_mask`.
Args:
module (nn.Module): module containing the tensor to prune
inputs: not used.
"""
setattr(module, self._tensor_name, self.apply_mask(module))
@abstractmethod
def compute_mask(self, t, default_mask):
r"""Computes and returns a mask for the input tensor ``t``.
Starting from a base ``default_mask`` (which should be a mask of ones
if the tensor has not been pruned yet), generate a random mask to
apply on top of the ``default_mask`` according to the specific pruning
method recipe.
Args:
t (torch.Tensor): tensor representing the parameter to prune
default_mask (torch.Tensor): Base mask from previous pruning
iterations, that need to be respected after the new mask is
applied. Same dims as ``t``.
Returns:
mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
"""
pass
def apply_mask(self, module):
r"""Simply handles the multiplication between the parameter being
pruned and the generated mask.
Fetches the mask and the original tensor from the module
and returns the pruned version of the tensor.
Args:
module (nn.Module): module containing the tensor to prune
Returns:
pruned_tensor (torch.Tensor): pruned version of the input tensor
"""
# to carry out the multiplication, the mask needs to have been computed,
# so the pruning method must know what tensor it's operating on
assert self._tensor_name is not None, "Module {} has to be pruned".format(
module
) # this gets set in apply()
mask = getattr(module, self._tensor_name + "_mask")
orig = getattr(module, self._tensor_name + "_orig")
pruned_tensor = mask.to(dtype=orig.dtype) * orig
return pruned_tensor
@classmethod
def apply(cls, module, name, *args, **kwargs):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
args: arguments passed on to a subclass of
:class:`BasePruningMethod`
kwargs: keyword arguments passed on to a subclass of a
:class:`BasePruningMethod`
"""
def _get_composite_method(cls, module, name, *args, **kwargs):
# Check if a pruning method has already been applied to
# `module[name]`. If so, store that in `old_method`.
old_method = None
found = 0
# there should technically be only 1 hook with hook.name == name
# assert this using `found`
hooks_to_remove = []
for k, hook in module._forward_pre_hooks.items():
# if it exists, take existing thing, remove hook, then
# go thru normal thing
if isinstance(hook, BasePruningMethod) and hook._tensor_name == name:
old_method = hook
# # reset the tensor reparametrization
# module = remove_pruning(module, name)
# del module._forward_pre_hooks[k]
hooks_to_remove.append(k)
found += 1
assert (
found <= 1
), "Avoid adding multiple pruning hooks to the\
same tensor {} of module {}. Use a PruningContainer.".format(
name, module
)
for k in hooks_to_remove:
del module._forward_pre_hooks[k]
# Apply the new pruning method, either from scratch or on top of
# the previous one.
method = cls(*args, **kwargs) # new pruning
# Have the pruning method remember what tensor it's been applied to
method._tensor_name = name
# combine `methods` with `old_method`, if `old_method` exists
if old_method is not None: # meaning that there was a hook
# if the hook is already a pruning container, just add the
# new pruning method to the container
if isinstance(old_method, PruningContainer):
old_method.add_pruning_method(method)
method = old_method # rename old_method --> method
# if the hook is simply a single pruning method, create a
# container, add the old pruning method and the new one
elif isinstance(old_method, BasePruningMethod):
container = PruningContainer(old_method)
# Have the pruning method remember the name of its tensor
# setattr(container, '_tensor_name', name)
container.add_pruning_method(method)
method = container # rename container --> method
return method
method = _get_composite_method(cls, module, name, *args, **kwargs)
# at this point we have no forward_pre_hooks but we could have an
# active reparametrization of the tensor if another pruning method
# had been applied (in which case `method` would be a PruningContainer
# and not a simple pruning method).
# Pruning is to be applied to the module's tensor named `name`,
# starting from the state it is found in prior to this iteration of
# pruning
orig = getattr(module, name)
# If this is the first time pruning is applied, take care of moving
# the original tensor to a new parameter called name + '_orig' and
# and deleting the original parameter
if not isinstance(method, PruningContainer):
# copy `module[name]` to `module[name + '_orig']`
module.register_parameter(name + "_orig", orig)
# temporarily delete `module[name]`
del module._parameters[name]
default_mask = torch.ones_like(orig) # temp
# If this is not the first time pruning is applied, all of the above
# has been done before in a previous pruning iteration, so we're good
# to go
else:
default_mask = getattr(module, name + "_mask").detach().clone()
# Use try/except because if anything goes wrong with the mask
# computation etc., you'd want to roll back.
try:
# get the final mask, computed according to the specific method
mask = method.compute_mask(orig, default_mask=default_mask)
# reparametrize by saving mask to `module[name + '_mask']`...
module.register_buffer(name + "_mask", mask)
# ... and the new pruned tensor to `module[name]`
setattr(module, name, method.apply_mask(module))
# associate the pruning method to the module via a hook to
# compute the function before every forward() (compile by run)
module.register_forward_pre_hook(method)
except Exception as e:
if not isinstance(method, PruningContainer):
orig = getattr(module, name + "_orig")
module.register_parameter(name, orig)
del module._parameters[name + "_orig"]
raise e
return method
def prune(self, t, default_mask=None):
r"""Computes and returns a pruned version of input tensor ``t``
according to the pruning rule specified in :meth:`compute_mask`.
Args:
t (torch.Tensor): tensor to prune (of same dimensions as
``default_mask``).
default_mask (torch.Tensor, optional): mask from previous pruning
iteration, if any. To be considered when determining what
portion of the tensor that pruning should act on. If None,
default to a mask of ones.
Returns:
pruned version of tensor ``t``.
"""
if default_mask is None:
default_mask = torch.ones_like(t)
return t * self.compute_mask(t, default_mask=default_mask)
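# Illustrative example (exact print formatting may differ), using the
# L1Unstructured subclass defined below:
#   >>> t = torch.tensor([0.3, -0.1, 2.0, 0.05])
#   >>> L1Unstructured(amount=2).prune(t)
#   tensor([ 0.3000, -0.0000,  2.0000,  0.0000])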
def remove(self, module):
r"""Removes the pruning reparameterization from a module. The pruned
parameter named ``name`` remains permanently pruned, and the parameter
named ``name+'_orig'`` is removed from the parameter list. Similarly,
the buffer named ``name+'_mask'`` is removed from the buffers.
Note:
Pruning itself is NOT undone or reversed!
"""
# before removing pruning from a tensor, it has to have been applied
assert (
self._tensor_name is not None
), "Module {} has to be pruned\
before pruning can be removed".format(
module
) # this gets set in apply()
# to update module[name] to latest trained weights
weight = self.apply_mask(module) # masked weights
# delete and reset
delattr(module, self._tensor_name)
orig = module._parameters[self._tensor_name + "_orig"]
orig.data = weight.data
del module._parameters[self._tensor_name + "_orig"]
del module._buffers[self._tensor_name + "_mask"]
module.register_parameter(self._tensor_name, orig)
class PruningContainer(BasePruningMethod):
"""Container holding a sequence of pruning methods for iterative pruning.
Keeps track of the order in which pruning methods are applied and handles
combining successive pruning calls.
Accepts as argument an instance of a BasePruningMethod or an iterable of
them.
"""
def __init__(self, *args):
self._pruning_methods = tuple()
if not isinstance(args, Iterable): # only 1 item
self._tensor_name = args._tensor_name
self.add_pruning_method(args)
elif len(args) == 1: # only 1 item in a tuple
self._tensor_name = args[0]._tensor_name
self.add_pruning_method(args[0])
else: # manual construction from list or other iterable (or no args)
for method in args:
self.add_pruning_method(method)
def add_pruning_method(self, method):
r"""Adds a child pruning ``method`` to the container.
Args:
method (subclass of BasePruningMethod): child pruning method
to be added to the container.
"""
# check that we're adding a pruning method to the container
if not isinstance(method, BasePruningMethod) and method is not None:
raise TypeError(
"{} is not a BasePruningMethod subclass".format(type(method))
)
elif self._tensor_name != method._tensor_name:
raise ValueError(
"Can only add pruning methods acting on "
"the parameter named '{}' to PruningContainer {}.".format(
self._tensor_name, self
)
+ " Found '{}'".format(method._tensor_name)
)
# if all checks passed, add to _pruning_methods tuple
self._pruning_methods += (method,)
def __len__(self):
return len(self._pruning_methods)
def __iter__(self):
return iter(self._pruning_methods)
def __getitem__(self, idx):
return self._pruning_methods[idx]
def compute_mask(self, t, default_mask):
r"""Applies the latest ``method`` by computing the new partial masks
and returning its combination with the ``default_mask``.
The new partial mask should be computed on the entries or channels
that were not zeroed out by the ``default_mask``.
Which portions of the tensor ``t`` the new mask will be calculated from
depends on the ``PRUNING_TYPE`` (handled by the type handler):
* for 'unstructured', the mask will be computed from the raveled
list of nonmasked entries;
* for 'structured', the mask will be computed from the nonmasked
channels in the tensor;
* for 'global', the mask will be computed across all entries.
Args:
t (torch.Tensor): tensor representing the parameter to prune
(of same dimensions as ``default_mask``).
default_mask (torch.Tensor): mask from previous pruning iteration.
Returns:
mask (torch.Tensor): new mask that combines the effects
of the ``default_mask`` and the new mask from the current
pruning ``method`` (of same dimensions as ``default_mask`` and
``t``).
"""
def _combine_masks(method, t, mask):
r"""
Args:
method (a BasePruningMethod subclass): pruning method
currently being applied.
t (torch.Tensor): tensor representing the parameter to prune
(of same dimensions as mask).
mask (torch.Tensor): mask from previous pruning iteration
Returns:
new_mask (torch.Tensor): new mask that combines the effects
of the old mask and the new mask from the current
pruning method (of same dimensions as mask and t).
"""
new_mask = mask # start off from existing mask
new_mask = new_mask.to(dtype=t.dtype)
# compute a slice of t onto which the new pruning method will operate
if method.PRUNING_TYPE == "unstructured":
# prune entries of t where the mask is 1
slc = mask == 1
# for struct pruning, exclude channels that have already been
# entirely pruned
elif method.PRUNING_TYPE == "structured":
if not hasattr(method, "dim"):
raise AttributeError(
"Pruning methods of PRUNING_TYPE "
'"structured" need to have the attribute `dim` defined.'
)
# find the channels to keep by removing the ones that have been
# zeroed out already (i.e. where sum(entries) == 0)
n_dims = t.dim() # "is this a 2D tensor? 3D? ..."
dim = method.dim
# convert negative indexing
if dim < 0:
dim = n_dims + dim
# if dim is still negative after subtracting it from n_dims
if dim < 0:
raise IndexError(
"Index is out of bounds for tensor with dimensions {}".format(
n_dims
)
)
# find channels along dim = dim that aren't already entirely zeroed out
keep_channel = mask.sum(
dim=[d for d in range(n_dims) if d != dim]) != 0
# create slice to identify what to prune
slc = [slice(None)] * n_dims
slc[dim] = keep_channel
elif method.PRUNING_TYPE == "global":
n_dims = len(t.shape) # "is this a 2D tensor? 3D? ..."
slc = [slice(None)] * n_dims
else:
raise ValueError(
"Unrecognized PRUNING_TYPE {}".format(method.PRUNING_TYPE)
)
# compute the new mask on the unpruned slice of the tensor t
partial_mask = method.compute_mask(t[slc], default_mask=mask[slc])
new_mask[slc] = partial_mask.to(dtype=new_mask.dtype)
return new_mask
method = self._pruning_methods[-1]
mask = _combine_masks(method, t, default_mask)
return mask
class Identity(BasePruningMethod):
r"""Utility pruning method that does not prune any units but generates the
pruning parametrization with a mask of ones.
"""
PRUNING_TYPE = "unstructured"
def compute_mask(self, t, default_mask):
mask = default_mask
return mask
@classmethod
def apply(cls, module, name):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
"""
return super(Identity, cls).apply(module, name)
class RandomUnstructured(BasePruningMethod):
r"""Prune (currently unpruned) units in a tensor at random.
Args:
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
"""
PRUNING_TYPE = "unstructured"
def __init__(self, amount):
# Check range of validity of pruning amount
_validate_pruning_amount_init(amount)
self.amount = amount
def compute_mask(self, t, default_mask):
# Check that the amount of units to prune is not > than the number of
# parameters in t
tensor_size = t.nelement()
# Compute number of units to prune: amount if int,
# else amount * tensor_size
nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
# This should raise an error if the number of units to prune is larger
# than the number of units in the tensor
_validate_pruning_amount(nparams_toprune, tensor_size)
mask = default_mask.clone()
if nparams_toprune != 0: # k=0 not supported by torch.kthvalue
prob = torch.rand_like(t)
topk = torch.topk(prob.view(-1), k=nparams_toprune)
mask.view(-1)[topk.indices] = 0
return mask
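# Note: taking `topk` of i.i.d. uniform noise selects `nparams_toprune` entries
# uniformly at random; those entries are the ones zeroed in the mask above.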
@classmethod
def apply(cls, module, name, amount):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
"""
return super(RandomUnstructured, cls).apply(module, name, amount=amount)
class L1Unstructured(BasePruningMethod):
r"""Prune (currently unpruned) units in a tensor by zeroing out the ones
with the lowest L1-norm.
Args:
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
"""
PRUNING_TYPE = "unstructured"
def __init__(self, amount):
# Check range of validity of pruning amount
_validate_pruning_amount_init(amount)
self.amount = amount
def compute_mask(self, t, default_mask):
# Check that the amount of units to prune is not > than the number of
# parameters in t
tensor_size = t.nelement()
# Compute number of units to prune: amount if int,
# else amount * tensor_size
nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
# This should raise an error if the number of units to prune is larger
# than the number of units in the tensor
_validate_pruning_amount(nparams_toprune, tensor_size)
mask = default_mask.clone()
if nparams_toprune != 0: # k=0 not supported by torch.kthvalue
# largest=True --> top k; largest=False --> bottom k
# Prune the smallest k
topk = torch.topk(torch.abs(t).view(-1),
k=nparams_toprune, largest=False)
# topk will have .indices and .values
mask.view(-1)[topk.indices] = 0
return mask
@classmethod
def apply(cls, module, name, amount):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
"""
return super(L1Unstructured, cls).apply(module, name, amount=amount)
class RandomStructured(BasePruningMethod):
r"""Prune entire (currently unpruned) channels in a tensor at random.
Args:
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
dim (int, optional): index of the dim along which we define
channels to prune. Default: -1.
"""
PRUNING_TYPE = "structured"
def __init__(self, amount, dim=-1):
# Check range of validity of amount
_validate_pruning_amount_init(amount)
self.amount = amount
self.dim = dim
def compute_mask(self, t, default_mask):
r"""Computes and returns a mask for the input tensor ``t``.
Starting from a base ``default_mask`` (which should be a mask of ones
if the tensor has not been pruned yet), generate a random mask to
apply on top of the ``default_mask`` by randomly zeroing out channels
along the specified dim of the tensor.
Args:
t (torch.Tensor): tensor representing the parameter to prune
default_mask (torch.Tensor): Base mask from previous pruning
iterations, that need to be respected after the new mask is
applied. Same dims as ``t``.
Returns:
mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
Raises:
IndexError: if ``self.dim >= len(t.shape)``
"""
# Check that tensor has structure (i.e. more than 1 dimension) such
# that the concept of "channels" makes sense
_validate_structured_pruning(t)
# Check that self.dim is a valid dim to index t, else raise IndexError
_validate_pruning_dim(t, self.dim)
        # Check that the amount of channels to prune is not greater than the
        # number of channels in t along the dim to prune
tensor_size = t.shape[self.dim]
# Compute number of units to prune: amount if int,
# else amount * tensor_size
nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
nparams_tokeep = tensor_size - nparams_toprune
# This should raise an error if the number of units to prune is larger
# than the number of units in the tensor
_validate_pruning_amount(nparams_toprune, tensor_size)
        # Compute binary mask by initializing it to all 0s and then filling in
        # 1s for the channels randomly selected to be kept along self.dim.
        # mask has the same shape as tensor t
def make_mask(t, dim, nchannels, nchannels_toprune):
# generate a random number in [0, 1] to associate to each channel
prob = torch.rand(nchannels)
# generate mask for each channel by 0ing out the channels that
# got assigned the k = nchannels_toprune lowest values in prob
threshold = torch.kthvalue(prob, k=nchannels_toprune).values
channel_mask = prob > threshold
mask = torch.zeros_like(t)
slc = [slice(None)] * len(t.shape)
slc[dim] = channel_mask
mask[slc] = 1
return mask
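        # Worked example (illustrative): with nchannels=4, nchannels_toprune=2
        # and prob=[0.9, 0.1, 0.7, 0.3], the 2nd smallest value is 0.3, so
        # channel_mask=[True, False, True, False]: only channels 0 and 2 are
        # kept along `dim`.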
if nparams_toprune == 0: # k=0 not supported by torch.kthvalue
mask = default_mask
else:
# apply the new structured mask on top of prior (potentially
# unstructured) mask
mask = make_mask(t, self.dim, tensor_size, nparams_toprune)
mask *= default_mask.to(dtype=mask.dtype)
return mask
@classmethod
def apply(cls, module, name, amount, dim=-1):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
dim (int, optional): index of the dim along which we define
channels to prune. Default: -1.
"""
return super(RandomStructured, cls).apply(module, name, amount=amount, dim=dim)
class LnStructured(BasePruningMethod):
r"""Prune entire (currently unpruned) channels in a tensor based on their
Ln-norm.
Args:
amount (int or float): quantity of channels to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
entries for argument ``p`` in :func:`torch.norm`.
dim (int, optional): index of the dim along which we define
channels to prune. Default: -1.
"""
PRUNING_TYPE = "structured"
def __init__(self, amount, n, dim=-1):
# Check range of validity of amount
_validate_pruning_amount_init(amount)
self.amount = amount
self.n = n
self.dim = dim
def compute_mask(self, t, default_mask):
r"""Computes and returns a mask for the input tensor ``t``.
Starting from a base ``default_mask`` (which should be a mask of ones
if the tensor has not been pruned yet), generate a mask to apply on
top of the ``default_mask`` by zeroing out the channels along the
specified dim with the lowest Ln-norm.
Args:
t (torch.Tensor): tensor representing the parameter to prune
default_mask (torch.Tensor): Base mask from previous pruning
iterations, that need to be respected after the new mask is
applied. Same dims as ``t``.
Returns:
mask (torch.Tensor): mask to apply to ``t``, of same dims as ``t``
Raises:
IndexError: if ``self.dim >= len(t.shape)``
"""
# Check that tensor has structure (i.e. more than 1 dimension) such
# that the concept of "channels" makes sense
_validate_structured_pruning(t)
# Check that self.dim is a valid dim to index t, else raise IndexError
_validate_pruning_dim(t, self.dim)
        # Check that the amount of channels to prune is not greater than the
        # number of channels in t along the dim to prune
tensor_size = t.shape[self.dim]
# Compute number of units to prune: amount if int,
# else amount * tensor_size
nparams_toprune = _compute_nparams_toprune(self.amount, tensor_size)
nparams_tokeep = tensor_size - nparams_toprune
# This should raise an error if the number of units to prune is larger
# than the number of units in the tensor
_validate_pruning_amount(nparams_toprune, tensor_size)
# Structured pruning prunes entire channels so we need to know the
# L_n norm along each channel to then find the topk based on this
# metric
norm = _compute_norm(t, self.n, self.dim)
# largest=True --> top k; largest=False --> bottom k
# Keep the largest k channels along dim=self.dim
        topk = torch.topk(norm, k=nparams_tokeep, largest=True)
# topk will have .indices and .values
# Compute binary mask by initializing it to all 0s and then filling in
# 1s wherever topk.indices indicates, along self.dim.
# mask has the same shape as tensor t
def make_mask(t, dim, indices):
# init mask to 0
mask = torch.zeros_like(t)
            # e.g.: slc = [slice(None), slice(None), slice(None)], if len(t.shape) = 3
            slc = [slice(None)] * len(t.shape)
            # replace the full slice at position=dim with indices
            # e.g.: slc = [slice(None), slice(None), [0, 2, 3]] if dim=2 & indices=[0,2,3]
            slc[dim] = indices
            # use slc to index mask and set the selected entries to 1
            # e.g.: mask[:, :, [0, 2, 3]] = 1
mask[slc] = 1
return mask
        if nparams_toprune == 0:  # nothing to prune: keep the existing mask
mask = default_mask
else:
mask = make_mask(t, self.dim, topk.indices)
mask *= default_mask.to(dtype=mask.dtype)
return mask
@classmethod
def apply(cls, module, name, amount, n, dim):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
entries for argument ``p`` in :func:`torch.norm`.
dim (int): index of the dim along which we define channels to
prune.
"""
return super(LnStructured, cls).apply(module, name, amount=amount, n=n, dim=dim)
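# Illustrative usage sketch (not in the original source): prune the 2 output
# channels (dim=0) of a Conv2d weight with the smallest L2-norm.
#
#   import torch.nn as nn
#   conv = nn.Conv2d(3, 8, kernel_size=3)
#   LnStructured.apply(conv, "weight", amount=2, n=2, dim=0)
#   # exactly 2 of the 8 output-channel slices are fully zeroed
#   assert int((conv.weight.view(8, -1).abs().sum(-1) == 0).sum()) == 2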
class CustomFromMask(BasePruningMethod):
PRUNING_TYPE = "global"
def __init__(self, mask):
self.mask = mask
def compute_mask(self, t, default_mask):
assert default_mask.shape == self.mask.shape
mask = default_mask * self.mask.to(dtype=default_mask.dtype)
return mask
@classmethod
def apply(cls, module, name, mask):
r"""Adds the forward pre-hook that enables pruning on the fly and
the reparametrization of a tensor in terms of the original tensor
and the pruning mask.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
"""
return super(CustomFromMask, cls).apply(module, name, mask)
def identity(module, name):
r"""Applies pruning reparametrization to the tensor corresponding to the
parameter called ``name`` in ``module`` without actually pruning any
units. Modifies module in place (and also return the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Note:
The mask is a tensor of ones.
Args:
module (nn.Module): module containing the tensor to prune.
name (str): parameter name within ``module`` on which pruning
will act.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> m = prune.identity(nn.Linear(2, 3), 'bias')
>>> print(m.bias_mask)
tensor([1., 1., 1.])
"""
Identity.apply(module, name)
return module
def random_unstructured(module, name, amount):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by removing the specified ``amount`` of (currently unpruned) units
selected at random.
Modifies module in place (and also return the modified module) by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter `name` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> m = prune.random_unstructured(nn.Linear(2, 3), 'weight', amount=1)
>>> torch.sum(m.weight_mask == 0)
tensor(1)
"""
RandomUnstructured.apply(module, name, amount)
return module
def l1_unstructured(module, name, amount):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by removing the specified `amount` of (currently unpruned) units with the
lowest L1-norm.
Modifies module in place (and also return the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> m = prune.l1_unstructured(nn.Linear(2, 3), 'weight', amount=0.2)
>>> m.state_dict().keys()
odict_keys(['bias', 'weight_orig', 'weight_mask'])
"""
L1Unstructured.apply(module, name, amount)
return module
def random_structured(module, name, amount, dim):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by removing the specified ``amount`` of (currently unpruned) channels
along the specified ``dim`` selected at random.
Modifies module in place (and also return the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
dim (int): index of the dim along which we define channels to prune.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> m = prune.random_structured(
nn.Linear(5, 3), 'weight', amount=3, dim=1
)
>>> columns_pruned = int(sum(torch.sum(m.weight, dim=0) == 0))
>>> print(columns_pruned)
3
"""
RandomStructured.apply(module, name, amount, dim)
return module
def ln_structured(module, name, amount, n, dim):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by removing the specified ``amount`` of (currently unpruned) channels
along the specified ``dim`` with the lowest L``n``-norm.
Modifies module in place (and also return the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
amount (int or float): quantity of parameters to prune.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
entries for argument ``p`` in :func:`torch.norm`.
dim (int): index of the dim along which we define channels to prune.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> m = prune.ln_structured(
nn.Conv2d(5, 3, 2), 'weight', amount=0.3, dim=1, n=float('-inf')
)
"""
LnStructured.apply(module, name, amount, n, dim)
return module
def global_unstructured(parameters, pruning_method, **kwargs):
r"""
Globally prunes tensors corresponding to all parameters in ``parameters``
by applying the specified ``pruning_method``.
Modifies modules in place by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
parameters (Iterable of (module, name) tuples): parameters of
the model to prune in a global fashion, i.e. by aggregating all
weights prior to deciding which ones to prune. module must be of
type :class:`nn.Module`, and name must be a string.
pruning_method (function): a valid pruning function from this module,
or a custom one implemented by the user that satisfies the
implementation guidelines and has ``PRUNING_TYPE='unstructured'``.
kwargs: other keyword arguments such as:
amount (int or float): quantity of parameters to prune across the
specified parameters.
If ``float``, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If ``int``, it represents the
absolute number of parameters to prune.
Raises:
TypeError: if ``PRUNING_TYPE != 'unstructured'``
Note:
Since global structured pruning doesn't make much sense unless the
norm is normalized by the size of the parameter, we now limit the
scope of global pruning to unstructured methods.
Examples:
>>> net = nn.Sequential(OrderedDict([
('first', nn.Linear(10, 4)),
('second', nn.Linear(4, 1)),
]))
>>> parameters_to_prune = (
(net.first, 'weight'),
(net.second, 'weight'),
)
>>> prune.global_unstructured(
parameters_to_prune,
pruning_method=prune.L1Unstructured,
amount=10,
)
>>> print(sum(torch.nn.utils.parameters_to_vector(net.buffers()) == 0))
tensor(10, dtype=torch.uint8)
"""
# ensure parameters is a list or generator of tuples
assert isinstance(parameters, Iterable)
# flatten parameter values to consider them all at once in global pruning
t = torch.nn.utils.parameters_to_vector([getattr(*p) for p in parameters])
# similarly, flatten the masks (if they exist), or use a flattened vector
# of 1s of the same dimensions as t
default_mask = torch.nn.utils.parameters_to_vector(
[
getattr(module, name + "_mask",
torch.ones_like(getattr(module, name)))
for (module, name) in parameters
]
)
# use the canonical pruning methods to compute the new mask, even if the
# parameter is now a flattened out version of `parameters`
container = PruningContainer()
container._tensor_name = "temp" # to make it match that of `method`
method = pruning_method(**kwargs)
method._tensor_name = "temp" # to make it match that of `container`
if method.PRUNING_TYPE != "unstructured":
raise TypeError(
'Only "unstructured" PRUNING_TYPE supported for '
"the `pruning_method`. Found method {} of type {}".format(
pruning_method, method.PRUNING_TYPE
)
)
container.add_pruning_method(method)
# use the `compute_mask` method from `PruningContainer` to combine the
# mask computed by the new method with the pre-existing mask
final_mask = container.compute_mask(t, default_mask)
# Pointer for slicing the mask to match the shape of each parameter
pointer = 0
for module, name in parameters:
param = getattr(module, name)
# The length of the parameter
num_param = param.numel()
# Slice the mask, reshape it
param_mask = final_mask[pointer: pointer + num_param].view_as(param)
# Assign the correct pre-computed mask to each parameter and add it
# to the forward_pre_hooks like any other pruning method
custom_from_mask(module, name, param_mask)
# Increment the pointer to continue slicing the final_mask
pointer += num_param
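# Sketch of a user-defined method usable with ``global_unstructured`` (an
# illustrative assumption, not part of the original API): any subclass of
# ``BasePruningMethod`` with ``PRUNING_TYPE = "unstructured"`` qualifies.
#
#   class ThresholdPruning(BasePruningMethod):
#       PRUNING_TYPE = "unstructured"
#
#       def __init__(self, threshold):
#           self.threshold = threshold
#
#       def compute_mask(self, t, default_mask):
#           # keep only entries whose magnitude exceeds the threshold
#           return default_mask * (t.abs() > self.threshold)
#
#   global_unstructured(parameters_to_prune,
#                       pruning_method=ThresholdPruning, threshold=0.1)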
def custom_from_mask(module, name, mask):
r"""Prunes tensor corresponding to parameter called ``name`` in ``module``
by applying the pre-computed mask in ``mask``.
Modifies module in place (and also return the modified module)
by:
1) adding a named buffer called ``name+'_mask'`` corresponding to the
binary mask applied to the parameter ``name`` by the pruning method.
2) replacing the parameter ``name`` by its pruned version, while the
original (unpruned) parameter is stored in a new parameter named
``name+'_orig'``.
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
mask (Tensor): binary mask to be applied to the parameter.
Returns:
module (nn.Module): modified (i.e. pruned) version of the input module
Examples:
>>> m = prune.custom_from_mask(
nn.Linear(5, 3), name='bias', mask=torch.Tensor([0, 1, 0])
)
>>> print(m.bias_mask)
tensor([0., 1., 0.])
"""
CustomFromMask.apply(module, name, mask)
return module
def remove(module, name):
r"""Removes the pruning reparameterization from a module and the
pruning method from the forward hook. The pruned
parameter named ``name`` remains permanently pruned, and the parameter
named ``name+'_orig'`` is removed from the parameter list. Similarly,
the buffer named ``name+'_mask'`` is removed from the buffers.
Note:
Pruning itself is NOT undone or reversed!
Args:
module (nn.Module): module containing the tensor to prune
name (str): parameter name within ``module`` on which pruning
will act.
Examples:
        >>> m = prune.random_unstructured(nn.Linear(5, 7), name='weight', amount=0.2)
        >>> m = prune.remove(m, name='weight')
"""
for k, hook in module._forward_pre_hooks.items():
if isinstance(hook, BasePruningMethod) and hook._tensor_name == name:
hook.remove(module)
del module._forward_pre_hooks[k]
return module
raise ValueError(
"Parameter '{}' of module {} has to be pruned "
"before pruning can be removed".format(name, module)
)
def is_pruned(module):
r"""Check whether ``module`` is pruned by looking for
``forward_pre_hooks`` in its modules that inherit from the
:class:`BasePruningMethod`.
Args:
module (nn.Module): object that is either pruned or unpruned
Returns:
binary answer to whether ``module`` is pruned.
Examples:
>>> m = nn.Linear(5, 7)
>>> print(prune.is_pruned(m))
False
        >>> prune.random_unstructured(m, name='weight', amount=0.2)
>>> print(prune.is_pruned(m))
True
"""
for _, submodule in module.named_modules():
for _, hook in submodule._forward_pre_hooks.items():
if isinstance(hook, BasePruningMethod):
return True
return False
def _validate_pruning_amount_init(amount):
r"""Validation helper to check the range of amount at init.
Args:
amount (int or float): quantity of parameters to prune.
If float, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If int, it represents the
absolute number of parameters to prune.
Raises:
ValueError: if amount is a float not in [0, 1], or if it's a negative
integer.
TypeError: if amount is neither a float nor an integer.
Note:
This does not take into account the number of parameters in the
tensor to be pruned, which is known only at prune.
"""
if not isinstance(amount, numbers.Real):
raise TypeError(
"Invalid type for amount: {}. Must be int or float." "".format(
amount)
)
if (isinstance(amount, numbers.Integral) and amount < 0) or (
not isinstance(amount, numbers.Integral) # so it's a float
and (amount > 1.0 or amount < 0.0)
):
raise ValueError(
"amount={} should either be a float in the "
"range [0, 1] or a non-negative integer"
"".format(amount)
)
def _validate_pruning_amount(amount, tensor_size):
r"""Validation helper to check that the amount of parameters to prune
    is meaningful with respect to the size of the data (`tensor_size`).
Args:
amount (int or float): quantity of parameters to prune.
If float, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If int, it represents the
absolute number of parameters to prune.
tensor_size (int): absolute number of parameters in the tensor
to prune.
"""
# TODO: consider removing this check and allowing users to specify
# a number of units to prune that is greater than the number of units
# left to prune. In this case, the tensor will just be fully pruned.
if isinstance(amount, numbers.Integral) and amount > tensor_size:
raise ValueError(
"amount={} should be smaller than the number of "
"parameters to prune={}".format(amount, tensor_size)
)
def _validate_structured_pruning(t):
r"""Validation helper to check that the tensor to be pruned is multi-
dimensional, such that the concept of "channels" is well-defined.
Args:
t (torch.Tensor): tensor representing the parameter to prune
Raises:
ValueError: if the tensor `t` is not at least 2D.
"""
shape = t.shape
if len(shape) <= 1:
raise ValueError(
"Structured pruning can only be applied to "
"multidimensional tensors. Found tensor of shape "
"{} with {} dims".format(shape, len(shape))
)
def _compute_nparams_toprune(amount, tensor_size):
r"""Since amount can be expressed either in absolute value or as a
percentage of the number of units/channels in a tensor, this utility
function converts the percentage to absolute value to standardize
the handling of pruning.
Args:
amount (int or float): quantity of parameters to prune.
If float, should be between 0.0 and 1.0 and represent the
fraction of parameters to prune. If int, it represents the
absolute number of parameters to prune.
tensor_size (int): absolute number of parameters in the tensor
to prune.
Returns:
int: the number of units to prune in the tensor
"""
# incorrect type already checked in _validate_pruning_amount_init
if isinstance(amount, numbers.Integral):
return amount
else:
return int(round(amount * tensor_size)) # int needed for Python 2
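# Illustrative examples (not in the original source): a fractional amount is
# rounded, an integral amount passes through:
#   _compute_nparams_toprune(0.5, 10) -> 5
#   _compute_nparams_toprune(3, 10) -> 3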
def _validate_pruning_dim(t, dim):
r"""
Args:
t (torch.Tensor): tensor representing the parameter to prune
dim (int): index of the dim along which we define channels to prune
"""
if dim >= t.dim():
raise IndexError(
"Invalid index {} for tensor of size {}".format(dim, t.shape))
def _compute_norm(t, n, dim):
r"""Compute the L_n-norm across all entries in tensor `t` along all dimension
except for the one identified by dim.
Example: if `t` is of shape, say, 3x2x4 and dim=2 (the last dim),
then norm will have Size [4], and each entry will represent the
`L_n`-norm computed using the 3x2=6 entries for each of the 4 channels.
Args:
t (torch.Tensor): tensor representing the parameter to prune
n (int, float, inf, -inf, 'fro', 'nuc'): See documentation of valid
entries for argument p in torch.norm
dim (int): dim identifying the channels to prune
Returns:
        norm (torch.Tensor): L_n norm computed across all dimensions except
            for `dim`. By construction, `norm.shape = (t.shape[dim],)`.
"""
# dims = all axes, except for the one identified by `dim`
dims = list(range(t.dim()))
# convert negative indexing
if dim < 0:
dim = dims[dim]
dims.remove(dim)
norm = torch.norm(t, p=n, dim=dims)
return norm
|
decodable_information_bottleneck-main
|
dib/utils/pruning.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
|
decodable_information_bottleneck-main
|
dib/utils/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import functools
import random
import numpy as np
import torch
from .helpers import channels_to_last_dim, indep_shuffle_, prod, ratio_to_int
__all__ = ["RandomMasker", "GetRandomIndcs"]
### INDICES SELECTORS ###
class GetRandomIndcs:
"""
Return random subset of indices.
Parameters
----------
min_n_indcs : float or int, optional
Minimum number of indices. If smaller than 1, represents a percentage of
points.
max_n_indcs : float or int, optional
Maximum number of indices. If smaller than 1, represents a percentage of
points.
is_batch_share : bool, optional
        Whether to use the same indices for all elements in the batch.
    range_indcs : tuple, optional
        Range tuple (min, max) for the indices.
"""
def __init__(
self, min_n_indcs=0.1, max_n_indcs=0.5, is_batch_share=False, range_indcs=None
):
self.min_n_indcs = min_n_indcs
self.max_n_indcs = max_n_indcs
self.is_batch_share = is_batch_share
self.range_indcs = range_indcs
def __call__(self, batch_size, n_possible_points):
if self.range_indcs is not None:
n_possible_points = self.range_indcs[1] - self.range_indcs[0]
min_n_indcs = ratio_to_int(self.min_n_indcs, n_possible_points)
max_n_indcs = ratio_to_int(self.max_n_indcs, n_possible_points)
        # make sure to select at least 1
n_indcs = random.randint(max(1, min_n_indcs), max(1, max_n_indcs))
if self.is_batch_share:
indcs = torch.randperm(n_possible_points)[:n_indcs]
indcs = indcs.unsqueeze(0).expand(batch_size, n_indcs)
else:
indcs = (
np.arange(n_possible_points)
.reshape(1, n_possible_points)
.repeat(batch_size, axis=0)
)
indep_shuffle_(indcs, -1)
indcs = torch.from_numpy(indcs[:, :n_indcs])
if self.range_indcs is not None:
            # adding is the same as shifting
indcs += self.range_indcs[0]
return indcs
### GRID AND MASKING ###
class RandomMasker(GetRandomIndcs):
"""
Return random subset mask.
Parameters
----------
min_nnz : float or int, optional
Minimum number of non zero values. If smaller than 1, represents a
percentage of points.
max_nnz : float or int, optional
Maximum number of non zero values. If smaller than 1, represents a
percentage of points.
is_batch_share : bool, optional
        Whether to use the same indices for all elements in the batch.
"""
def __init__(self, min_nnz=0.01, max_nnz=2 / 9, is_batch_share=False):
super().__init__(
min_n_indcs=min_nnz, max_n_indcs=max_nnz, is_batch_share=is_batch_share
)
def __call__(self, batch_size, mask_shape, **kwargs):
n_possible_points = prod(mask_shape)
nnz_indcs = super().__call__(batch_size, n_possible_points, **kwargs)
if self.is_batch_share:
# share memory
mask = torch.zeros(n_possible_points).bool()
mask = mask.unsqueeze(0).expand(batch_size, n_possible_points)
else:
mask = torch.zeros((batch_size, n_possible_points)).bool()
mask.scatter_(1, nnz_indcs, 1)
mask = mask.view(batch_size, *mask_shape).contiguous()
return mask
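# Illustrative usage sketch (not in the original source):
#
#   masker = RandomMasker(min_nnz=0.1, max_nnz=0.3)
#   mask = masker(batch_size=2, mask_shape=(28, 28))
#   # bool tensor of shape (2, 28, 28); between 10% and 30% of the 784
#   # entries are True, sampled independently for each batch element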
|
decodable_information_bottleneck-main
|
dib/utils/datasplit.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import math
import torch
from torch import nn
from torch.nn.init import _calculate_correct_fan
__all__ = ["weights_init"]
logger = logging.getLogger(__name__)
def get_min_shape(t1, t2):
"""return component wise minimum shape."""
return [min(el1, el2) for el1, el2 in zip(t1.shape, t2.shape)]
def set_normlayer_like_(norm, other):
"""Set the param of `norm` using `other` (normalization layers).
If not the same size, set the largest subset of the weights."""
assert isinstance(norm, type(other))
if isinstance(norm, nn.Identity):
return
norm_weight = norm.weight.detach()
other_weight = other.weight.detach()
rep_shape = get_min_shape(norm_weight, other_weight)
norm_weight[: rep_shape[0]] = other_weight[: rep_shape[0]]
norm.weight = nn.Parameter(norm_weight)
norm_bias = norm.bias.detach()
other_bias = other.bias.detach()
rep_shape = get_min_shape(norm_bias, other_bias)
norm_bias[: rep_shape[0]] = other_bias[: rep_shape[0]]
norm.bias = nn.Parameter(norm_bias)
def set_linear_like_(linear, other):
"""Set the parameters of `linear` using `other` (linear layers). If not the same size, set the largest subset of the weights."""
assert isinstance(linear, nn.Linear) and isinstance(other, nn.Linear)
linear_weight = linear.weight.detach()
other_weight = other.weight.detach()
rep_shape = get_min_shape(linear_weight, other_weight)
linear_weight[: rep_shape[0], : rep_shape[1]] = other_weight[
: rep_shape[0], : rep_shape[1]
]
linear.weight = nn.Parameter(linear_weight)
linear_bias = linear.bias.detach()
other_bias = other.bias.detach()
rep_shape = get_min_shape(linear_bias, other_bias)
linear_bias[: rep_shape[0]] = other_bias[: rep_shape[0]]
linear.bias = nn.Parameter(linear_bias)
def weights_init(module, **kwargs):
"""Initialize a module and all its descendents.
Parameters
----------
module : nn.Module
module to initialize.
"""
    # loop over direct children (not grandchildren)
for m in module.children():
# all standard layers
if isinstance(m, torch.nn.modules.conv._ConvNd):
# used in https://github.com/brain-research/realistic-ssl-evaluation/
nn.init.kaiming_normal_(m.weight, mode="fan_out", **kwargs)
elif isinstance(m, nn.Linear):
linear_init(m, **kwargs)
elif isinstance(m, nn.BatchNorm2d):
try:
m.weight.data.fill_(1)
m.bias.data.zero_()
except AttributeError: # affine = False
pass
# if has a specific reset
elif hasattr(m, "reset_parameters"):
m.reset_parameters()
        #! don't recurse into grandchildren because they might have specific weights you don't want to reset
        # otherwise recurse into the grandchildren
else:
weights_init(m, **kwargs)
def get_activation_name(activation):
"""Given a string or a `torch.nn.modules.activation` return the name of the activation."""
if isinstance(activation, str):
return activation
mapper = {
nn.LeakyReLU: "leaky_relu",
nn.ReLU: "relu",
nn.SELU: "selu",
nn.Tanh: "tanh",
nn.Sigmoid: "sigmoid",
nn.Softmax: "sigmoid",
}
for k, v in mapper.items():
if isinstance(activation, k):
return v
raise ValueError("Unkown given activation type : {}".format(activation))
def get_gain(activation):
"""Given an object of `torch.nn.modules.activation` or an activation name
return the correct gain."""
if activation is None:
return 1
activation_name = get_activation_name(activation)
param = None if activation_name != "leaky_relu" else activation.negative_slope
gain = nn.init.calculate_gain(activation_name, param)
return gain
def terrible_linear_init(module, **kwargs):
x = module.weight
if module.bias is not None:
nn.init.uniform_(module.bias.data, a=-100, b=100)
return nn.init.uniform_(x, a=-100, b=100)
def linear_init(module, activation="relu"):
"""Initialize a linear layer.
Parameters
----------
module : nn.Module
module to initialize.
activation : `torch.nn.modules.activation` or str, optional
Activation that will be used on the `module`.
"""
x = module.weight
if module.bias is not None:
module.bias.data.zero_()
try:
activation_name = get_activation_name(activation)
except ValueError:
activation_name = None
if activation_name == "leaky_relu":
a = 0 if isinstance(activation, str) else activation.negative_slope
return nn.init.kaiming_uniform_(x, a=a, nonlinearity="leaky_relu")
elif activation_name == "relu":
return nn.init.kaiming_uniform_(x, nonlinearity="relu")
elif activation_name == "selu":
fan_in = _calculate_correct_fan(x, "fan_in")
return torch.nn.init.normal_(x, std=1 / math.sqrt(fan_in))
elif activation_name in ["sigmoid", "tanh"]:
return nn.init.xavier_uniform_(x, gain=get_gain(activation))
else:
if activation is not None:
logger.info(
f"Uknown activation={activation}, using xavier uniform init")
return nn.init.xavier_uniform_(x)
def init_param_(param, activation=None, is_positive=False, bound=0.05, shift=0):
"""Initialize inplace some parameters of the model that are not part of a
children module.
Parameters
----------
param : nn.Parameters:
Parameters to initialize.
    activation : torch.nn.modules.activation or str, optional
Activation that will be used on the `param`.
is_positive : bool, optional
        Whether to initialize only with positive values.
bound : float, optional
        Maximum absolute value of the initialized values. By default `0.05`, which
        is Keras' default uniform bound.
shift : int, optional
Shift the initialisation by a certain value (same as adding a value after init).
"""
gain = get_gain(activation)
if is_positive:
nn.init.uniform_(param, 1e-5 + shift, bound * gain + shift)
return
nn.init.uniform_(param, -bound * gain + shift, bound * gain + shift)
|
decodable_information_bottleneck-main
|
dib/utils/initialization.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import math
import torch
from torch.distributions import Categorical, Independent, Normal
def MultivariateNormalDiag(loc, scale_diag):
"""Multi variate Gaussian with a diagonal covariance function."""
if loc.dim() < 1:
raise ValueError("loc must be at least one-dimensional.")
return Independent(Normal(loc, scale_diag), 1)
class NoDistribution:
def __init__(self, x):
self.x = x
def rsample(self):
return self.x
def straight_through(soft_samples, f):
"""
Take soft_samples and transform them with f(straight_through)
while keeping it differentiable.
"""
detached = soft_samples.detach()
    # compute f on the detached samples, then subtract the detached samples and
    # add back the real (differentiable) ones: the result equals f(detached) in
    # value, but its gradient flows through ``soft_samples``
detached_res = f(detached)
detached_diff = detached_res - detached
res = detached_diff + soft_samples
return res
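# Illustrative usage sketch (not in the original source): discretize softmax
# samples to one-hot vectors in the forward pass while keeping gradients of
# the soft samples (uses ``softmax_to_onehot`` defined below).
#
#   soft = torch.randn(2, 5).softmax(-1).requires_grad_()
#   hard = straight_through(soft, softmax_to_onehot)
#   hard.sum().backward()  # gradients flow as if ``hard`` were ``soft``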
def softmax_to_onehot(X, dim=1):
"""Moves a vector on the simplex to the closes vertex."""
max_idx = torch.argmax(X, dim, keepdim=True)
one_hot = torch.zeros_like(X)
one_hot.scatter_(dim, max_idx, 1)
return one_hot
def label_distribution(labels, n_classes):
"""Return a categorical distribution of the labels."""
probs = torch.zeros(n_classes, device=labels.device)
label, counts = labels.unique(return_counts=True)
probs[label] = counts.float() / counts.sum()
return Categorical(probs=probs)
def entropy_labels(labels, n_classes, base=math.exp(1)):
"""Computes the entropy of labels."""
probs = label_distribution(labels, n_classes)
return probs.entropy().mean(0) / math.log(base)
def rm_conditioning(p_yCx):
"""Remove the conditioning of a distributions p(Y|X) -> p(Y) by taking a Monte Carlo Expectation
of all besides current index.
Parameters
----------
q_yCx : torch.Tensor or torch.Distributions
Distribution to uncondition. Each batch should be from a sample of conditioning
random variable X. Note that this should already be in pbabilities, not logits.
"""
    #! here I'm actually removing the current index, so the estimate is slightly biased;
    #! to unbias, one should give weight 1/N instead of weight 0 to the current index
p_y = torch.zeros_like(p_yCx)
if isinstance(p_yCx, torch.Tensor):
batch_size = p_yCx.size(0)
for batch_idx in range(batch_size):
p_y[batch_idx] = p_yCx[
list(range(0, batch_idx)) +
list(range(batch_idx + 1, batch_size))
].mean(0)
return p_y
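# Illustrative worked example: for p_yCx = [[1, 0], [0, 1], [1, 0]], each row
# of p_y is the mean of the *other* rows, e.g. p_y[0] = mean([0, 1], [1, 0])
# = [0.5, 0.5] (hence the slight bias noted above).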
|
decodable_information_bottleneck-main
|
dib/utils/distributions.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import contextlib
import math
import operator
import random
import warnings
from functools import reduce
from itertools import zip_longest
import numpy as np
import skorch
import torch
import torch.nn as nn
import torch.nn.functional as F
from .initialization import weights_init
def to_numpy(X):
"""Generic function to convert array like to numpy."""
if isinstance(X, list):
X = np.array(X)
return skorch.utils.to_numpy(X)
def extract_target(targets, map_target_position):
"""Extract the real target."""
if isinstance(targets, (list, tuple)):
return targets[map_target_position["target"]]
else:
return targets[:, map_target_position["target"]]
class CrossEntropyLossGeneralize(nn.CrossEntropyLoss):
"""Cross entropy loss that forces (anti)-generalization.
Note
----
    - we want to find an empirical risk minimizer that maximizes (anti-generalization) or
      minimizes (generalization) the test loss. Using a Lagrangian relaxation of the problem,
      this can be written as `min trainLoss + gamma * testLoss`, where the sign of `gamma`
      determines whether or not to generalize.
    - Each target should contain `(label, is_train)`, where `is_train` says whether it is a
      training example or a test example: `is_train=1` or `is_train=0`.
    - When validating, uses standard cross entropy.
Parameters
----------
gamma : float, optional
        Lagrangian coefficient of the relaxed problem. If positive, forces generalization; if negative,
        forces anti-generalization. Its scale balances the training and testing loss. If `gamma=0`,
becomes standard cross entropy (in which case doesn't need to append `is_train`).
map_target_position : dict
Dictionary that maps the type of target (e.g. "index") to its position in the
target. Needs to have `"constant" corresponding to `"is_train"`.
cap_test_loss : float, optional
Value used to cap the test loss (i.e. don't backprop through it). This is especially useful
when gamma is negative (anti generalization). Indeed, cross entropy is not bounded and thus
the model could end up only focusing on maximizing the test loss to infinity regardless of
train.
kwargs :
Additional arguments to `torch.nn.CrossEntropyLoss`.
"""
def __init__(
self, gamma, map_target_position, reduction="mean", cap_test_loss=10, **kwargs
):
super().__init__(reduction="none", **kwargs)
self.gamma = gamma
self.map_target_position = map_target_position
self.final_reduction = reduction
self.cap_test_loss = cap_test_loss
def forward(self, inp, targets):
out = super().forward(inp, extract_target(targets, self.map_target_position))
if self.gamma == 0 and ("constant" not in self.map_target_position):
pass
elif self.training:
constant = targets[self.map_target_position["constant"]]
is_test = constant == 0
is_train = constant == 1
weights = (is_test.int() * self.gamma) + is_train.int()
# CAPPING : don't backprop if test and larger than cap (but still forward)
is_large_loss = out > self.cap_test_loss
to_cap = is_large_loss & is_test
out[to_cap] = out[to_cap] * 0 + out[to_cap].detach()
out = weights * out
elif len(self.map_target_position) == len(targets):
# when validating : either you have only access to the validation set, in which
# case return all or you have access to train U test
# in which case you want to filter only the training examples
is_train = targets[self.map_target_position["constant"]] == 1
out = out[is_train]
else:
            raise ValueError(
f"Not training but len({self.map_target_position})!={len(targets)}"
)
if self.final_reduction == "mean":
return out.mean()
elif self.final_reduction == "sum":
return out.sum()
else:
raise ValueError(f"Unkown reduction={self.final_reduction}")
class Identity:
def __init__(self, *args, **kwargs):
pass
def __call__(self, x):
return x
def __getitem__(self, x):
return x
def is_sorted(l):
"""Check whether a list is sorted"""
return all(l[i] <= l[i + 1] for i in range(len(l) - 1))
def get_idx_permuter(n_idcs, seed=123):
"""Return permuted indices.
    Parameters
----------
n_idcs : int or array-like of int
Number of indices. If list, it should be a partion of the real number of idcs.
Each partition will be permuted separately.
seed : int, optional
"""
if isinstance(n_idcs, int):
idcs = list(range(n_idcs))
else:
idcs = [list(range(partition)) for partition in n_idcs]
with tmp_seed(seed):
if isinstance(n_idcs, int):
random.shuffle(idcs)
idcs = torch.tensor(idcs)
else:
            # shuffle each partition separately
for partition_idcs in idcs:
random.shuffle(partition_idcs)
            idcs = torch.cat([torch.tensor(partition) for partition in idcs])
return idcs
# credits : https://stackoverflow.com/questions/312443/how-do-you-split-a-list-into-evenly-sized-chunks
def Nsize_chunk_iterable(iterable, n, padding=None):
"""Chunk an iterable into N sized ones."""
return zip_longest(*[iter(iterable)] * n, fillvalue=padding)
def Nchunk_iterable(iterable, n, padding=None):
"""Chunk an iterable into `n` of them."""
    return Nsize_chunk_iterable(iterable, math.ceil(len(iterable) / n), padding=padding)
def update_dict_copy(d, **updates):
"""Return an updated copy of the dictionary."""
d = d.copy()
d.update(updates)
return d
class BatchNorm1dLast(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.batch_norm = torch.nn.BatchNorm1d(*args, **kwargs)
def forward(self, x):
# flatten to make for normalizing layer => only 2 dim
x, shape = batch_flatten(x)
x = self.batch_norm(x)
return batch_unflatten(x, shape)
def wrap_batchnorm(Module):
# wrap a module by applying batchnorm1d to input
class BatchNormWrapper(torch.nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
self.batch_norm = BatchNorm1dLast(
num_features=args[0], affine=False)
self.module = Module(*args, **kwargs)
def forward(self, x):
x = self.batch_norm(x)
return self.module(x)
return BatchNormWrapper
def get_activation(activation):
"""Given a string returns a `torch.nn.modules.activation`."""
if not isinstance(activation, str):
return activation
mapper = {
"leaky_relu": nn.LeakyReLU(),
"relu": nn.ReLU(),
"selu": nn.SELU(),
"tanh": nn.Tanh(),
"sigmoid": nn.Sigmoid(),
"relu6": nn.ReLU6(),
"sin": torch.sin,
}
for k, v in mapper.items():
if activation == k:
return v
raise ValueError("Unkown given activation type : {}".format(activation))
class HyperparameterInterpolator:
"""Helper class to compute the value of a hyperparameter at each training step.
Parameters
----------
initial_value: float
Initial value of the hyperparameter.
final_value: float
Final value of the hyperparameter.
    n_steps_interpolate: int
        Number of training steps before reaching the `final_value`.
    start_step: int, optional
        Number of steps to wait for before starting annealing. During the waiting time,
        the hyperparameter will be `default`.
    default: float, optional
        Default hyperparameter value that will be used for the first `start_step`s. If
        `None` uses `initial_value`.
mode: {"linear", "exponential", "logarithmic"}, optional
Interpolation mode.
is_restart : bool, optional
Whether to restart the interpolator after n_steps_interpolate.
"""
def __init__(
self,
initial_value,
final_value,
n_steps_interpolate,
start_step=0,
default=None,
mode="linear",
is_restart=False,
):
self.initial_value = initial_value
self.final_value = final_value
self.n_steps_interpolate = n_steps_interpolate
self.start_step = start_step
self.default = default if default is not None else self.initial_value
self.mode = mode.lower()
self.is_restart = is_restart
if self.mode == "linear":
delta = self.final_value - self.initial_value
self.factor = delta / self.n_steps_interpolate
elif self.mode in ["exponential", "logarithmic"]:
delta = self.final_value / self.initial_value
self.factor = delta ** (1 / self.n_steps_interpolate)
else:
raise ValueError("Unkown mode : {}.".format(mode))
self.reset_parameters()
def reset_parameters(self):
"""Reset the interpolator."""
self.n_training_calls = 0
@property
def is_annealing(self):
return (self.start_step <= self.n_training_calls) and (
self.n_training_calls <= (
self.n_steps_interpolate + self.start_step)
)
def __call__(self, is_update):
"""Return the current value of the hyperparameter.
Parameters
----------
        is_update: bool
Whether to update the hyperparameter.
"""
if is_update:
self.n_training_calls += 1
if self.start_step >= self.n_training_calls:
return self.default
n_actual_training_calls = self.n_training_calls - self.start_step
if self.is_annealing:
current = self.initial_value
if self.mode == "linear":
current += self.factor * n_actual_training_calls
elif self.mode in ["logarithmic", "exponential"]:
if (self.mode == "logarithmic") ^ (
self.initial_value < self.final_value
):
current *= self.factor ** n_actual_training_calls
else:
current *= self.factor ** (
self.n_steps_interpolate - n_actual_training_calls
)
current = self.final_value - current
else:
if self.is_restart:
self.reset_parameters()
current = self.final_value
return current
def plot(self, n=None):
"""Plot n steps of interpolation."""
import matplotlib.pyplot as plt
if n is None:
n = self.n_steps_interpolate
out = [self(True) for _ in range(n)]
plt.plot(out)
self.reset_parameters()
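# Illustrative usage sketch (not in the original source): linearly anneal a
# coefficient from 1.0 down to 0.1 over 100 updates, then keep it at 0.1.
#
#   get_beta = HyperparameterInterpolator(1.0, 0.1, 100, mode="linear")
#   beta = get_beta(is_update=True)  # call once per training step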
def batch_flatten(x):
"""Batch wise flattenting of an array."""
shape = x.shape
return x.view(-1, shape[-1]), shape
def batch_unflatten(x, shape):
"""Revert `batch_flatten`."""
return x.view(*shape[:-1], -1)
def to_number(X):
"""Convert the input to a number."""
try:
return X.item()
except AttributeError:
return X
def tuple_cont_to_cont_tuple(tuples):
"""Converts a tuple of containers (list, tuple, dict) to a container of tuples."""
if isinstance(tuples[0], dict):
# assumes keys are correct
return {k: tuple(dic[k] for dic in tuples) for k in tuples[0].keys()}
elif isinstance(tuples[0], list):
return list(zip(*tuples))
elif isinstance(tuples[0], tuple):
return tuple(zip(*tuples))
else:
raise ValueError("Unkown conatiner type: {}.".format(type(tuples[0])))
def cont_tuple_to_tuple_cont(container):
"""Converts a container (list, tuple, dict) of tuple to a tuple of container."""
if isinstance(container, dict):
return tuple(dict(zip(container, val)) for val in zip(*container.values()))
elif isinstance(container, list) or isinstance(container, tuple):
return tuple(zip(*container))
else:
raise ValueError("Unkown conatiner type: {}.".format(type(container)))
def set_seed(seed):
"""Set the random seed."""
if seed is not None:
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
random.seed(seed)
np.random.seed(seed)
@contextlib.contextmanager
def tmp_seed(seed):
"""Context manager to use a temporary random seed with `with` statement."""
np_state = np.random.get_state()
torch_state = torch.get_rng_state()
random_state = random.getstate()
if torch.cuda.is_available():
torch_cuda_state = torch.cuda.get_rng_state()
set_seed(seed)
try:
yield
finally:
if seed is not None:
# if seed is None do as if no tmp_seed
np.random.set_state(np_state)
torch.set_rng_state(torch_state)
random.setstate(random_state)
if torch.cuda.is_available():
torch.cuda.set_rng_state(torch_cuda_state)
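# Illustrative usage sketch (not in the original source): draw reproducible
# random numbers without disturbing the surrounding global random state.
#
#   with tmp_seed(123):
#       idx = torch.randperm(10)  # same permutation on every run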
def clip_interval(x, bound1, bound2, warn_mssg=["x", "bound1", "bound2"]):
"""
    Clips x to [bound1, bound2] or [bound2, bound1]. If `warn_mssg` is a list of the 3
    variable names, it will be used to warn the user that a variable was clipped to the
    given interval (no warning if `None`).
"""
if bound2 < bound1:
bound1, bound2 = bound2, bound1
if warn_mssg is not None:
            # copy to avoid mutating the caller's (possibly default) list
            warn_mssg = [warn_mssg[0], warn_mssg[2], warn_mssg[1]]
    def get_txt(to):
        return "{}={} not in [{}, {}] = [{}, {}]. Setting it to {}.".format(
            warn_mssg[0], x, warn_mssg[1], warn_mssg[2], bound1, bound2, to
        )
if x < bound1:
if warn_mssg is not None:
warnings.warn(get_txt(bound1))
return bound1
if x > bound2:
if warn_mssg is not None:
            warnings.warn(get_txt(bound2))
return bound2
return x
def channels_to_2nd_dim(X):
"""
Takes a signal with channels on the last dimension (for most operations) and
returns it with channels on the second dimension (for convolutions).
"""
return X.permute(*([0, X.dim() - 1] + list(range(1, X.dim() - 1))))
def channels_to_last_dim(X):
"""
Takes a signal with channels on the second dimension (for convolutions) and
returns it with channels on the last dimension (for most operations).
"""
return X.permute(*([0] + list(range(2, X.dim())) + [1]))
def mask_and_apply(x, mask, f):
"""Applies a callable on a masked version of a input."""
    transformed_selected = f(x.masked_select(mask))
    return x.masked_scatter(mask, transformed_selected)
def indep_shuffle_(a, axis=-1):
"""
Shuffle `a` in-place along the given axis.
Apply `numpy.random.shuffle` to the given axis of `a`.
Each one-dimensional slice is shuffled independently.
Credits : https://github.com/numpy/numpy/issues/5173
"""
b = a.swapaxes(axis, -1)
# Shuffle `b` in-place along the last axis. `b` is a view of `a`,
# so `a` is shuffled in place, too.
shp = b.shape[:-1]
for ndx in np.ndindex(shp):
np.random.shuffle(b[ndx])
def ratio_to_int(percentage, max_val):
"""Converts a ratio to an integer if it is smaller than 1."""
if 1 <= percentage <= max_val:
out = percentage
elif 0 <= percentage < 1:
out = percentage * max_val
else:
raise ValueError(
"percentage={} outside of [0,{}].".format(percentage, max_val))
return int(out)
def prod(iterable):
"""Compute the product of all elements in an iterable."""
return reduce(operator.mul, iterable, 1)
def rescale_range(X, old_range, new_range):
"""Rescale X linearly to be in `new_range` rather than `old_range`."""
old_min = old_range[0]
new_min = new_range[0]
old_delta = old_range[1] - old_min
new_delta = new_range[1] - new_min
return (((X - old_min) * new_delta) / old_delta) + new_min
def clamp(
x,
minimum=-float("Inf"),
maximum=float("Inf"),
is_leaky=False,
negative_slope=0.01,
hard_min=None,
hard_max=None,
):
"""
Clamps a tensor to the given [minimum, maximum] (leaky) bound, with
an optional hard clamping.
"""
lower_bound = (
(minimum + negative_slope * (x - minimum))
if is_leaky
else torch.zeros_like(x) + minimum
)
upper_bound = (
(maximum + negative_slope * (x - maximum))
if is_leaky
else torch.zeros_like(x) + maximum
)
clamped = torch.max(lower_bound, torch.min(x, upper_bound))
if hard_min is not None or hard_max is not None:
if hard_min is None:
hard_min = -float("Inf")
elif hard_max is None:
hard_max = float("Inf")
        # apply the hard bounds on top of the (possibly leaky) clamped result
        clamped = clamp(clamped, minimum=hard_min, maximum=hard_max, is_leaky=False)
return clamped
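# Illustrative worked example: clamp(torch.tensor([-1.0, 0.5, 2.0]), 0, 1)
# -> tensor([0.0000, 0.5000, 1.0000]); with is_leaky=True, out-of-bound values
# are instead mapped to 0 + 0.01 * (-1 - 0) = -0.01 and 1 + 0.01 * (2 - 1) = 1.01.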
class ProbabilityConverter(nn.Module):
"""Maps floats to probabilites (between 0 and 1), element-wise.
Parameters
----------
min_p : float, optional
Minimum probability, can be useful to set greater than 0 in order to keep
gradient flowing if the probability is used for convex combinations of
different parts of the model. Note that maximum probability is `1-min_p`.
activation : {"sigmoid", "hard-sigmoid", "leaky-hard-sigmoid"}, optional
name of the activation to use to generate the probabilities. `sigmoid`
has the advantage of being smooth and never exactly 0 or 1, which helps
gradient flows. `hard-sigmoid` has the advantage of making all values
between min_p and max_p equiprobable.
is_train_temperature : bool, optional
        Whether to train the parameter controlling the steepness of the activation.
        This is useful when x is used for multiple tasks, and you don't want to
        constrain its magnitude.
    is_train_bias : bool, optional
        Whether to train the bias to shift the activation. This is useful when x is
        used for multiple tasks, and you don't want to constrain its scale.
    trainable_dim : int, optional
        Size of the trainable bias and temperature. If `1`, uses the same value
        across all dimensions; if not, it should be equal to the number of input
        dimensions, to use different trainable parameters for each dimension. Note
        that the initial value will still be the same for all dimensions.
    initial_temperature : int, optional
        Initial temperature; a higher temperature makes the activation steeper.
initial_probability : float, optional
Initial probability you want to start with.
initial_x : float, optional
First value that will be given to the function, important to make
`initial_probability` work correctly.
bias_transformer : callable, optional
Transformer function of the bias. This function should only take care of
the boundaries (e.g. leaky relu or relu).
temperature_transformer : callable, optional
Transformer function of the temperature. This function should only take
care of the boundaries (e.g. leaky relu or relu).
"""
def __init__(
self,
min_p=0.0,
activation="sigmoid",
is_train_temperature=False,
is_train_bias=False,
trainable_dim=1,
initial_temperature=1.0,
initial_probability=0.5,
initial_x=0,
bias_transformer=nn.Identity(),
temperature_transformer=nn.Identity(),
):
super().__init__()
self.min_p = min_p
self.activation = activation
self.is_train_temperature = is_train_temperature
self.is_train_bias = is_train_bias
self.trainable_dim = trainable_dim
self.initial_temperature = initial_temperature
self.initial_probability = initial_probability
self.initial_x = initial_x
self.bias_transformer = bias_transformer
self.temperature_transformer = temperature_transformer
self.reset_parameters()
def reset_parameters(self):
self.temperature = torch.tensor(
[self.initial_temperature] * self.trainable_dim)
if self.is_train_temperature:
self.temperature = nn.Parameter(self.temperature)
initial_bias = self._probability_to_bias(
self.initial_probability, initial_x=self.initial_x
)
self.bias = torch.tensor([initial_bias] * self.trainable_dim)
if self.is_train_bias:
self.bias = nn.Parameter(self.bias)
def forward(self, x):
        # ``Tensor.to`` is not in-place: use the returned, device-moved tensors
        temperature = self.temperature_transformer(self.temperature.to(x.device))
        bias = self.bias_transformer(self.bias.to(x.device))
if self.activation == "sigmoid":
full_p = torch.sigmoid((x + bias) * temperature)
elif self.activation in ["hard-sigmoid", "leaky-hard-sigmoid"]:
# uses 0.2 and 0.5 to be similar to sigmoid
y = 0.2 * ((x + bias) * temperature) + 0.5
if self.activation == "leaky-hard-sigmoid":
full_p = clamp(
y,
minimum=0.1,
maximum=0.9,
is_leaky=True,
negative_slope=0.01,
hard_min=0,
                    hard_max=1,  # hard bounds of a hard sigmoid are [0, 1]
)
elif self.activation == "hard-sigmoid":
full_p = clamp(y, minimum=0.0, maximum=1.0, is_leaky=False)
else:
raise ValueError("Unkown activation : {}".format(self.activation))
p = rescale_range(full_p, (0, 1), (self.min_p, 1 - self.min_p))
return p
def _probability_to_bias(self, p, initial_x=0):
"""Compute the bias to use to satisfy the constraints."""
assert p > self.min_p and p < 1 - self.min_p
range_p = 1 - self.min_p * 2
p = (p - self.min_p) / range_p
p = torch.tensor(p, dtype=torch.float)
if self.activation == "sigmoid":
bias = -(torch.log((1 - p) / p) /
self.initial_temperature + initial_x)
elif self.activation in ["hard-sigmoid", "leaky-hard-sigmoid"]:
bias = ((p - 0.5) / 0.2) / self.initial_temperature - initial_x
return bias
def make_abs_conv(Conv):
"""Make a convolution have only positive parameters."""
class AbsConv(Conv):
def forward(self, input):
return F.conv2d(
input,
self.weight.abs(),
self.bias,
self.stride,
self.padding,
self.dilation,
self.groups,
)
return AbsConv
def make_depth_sep_conv(Conv):
"""Make a convolution module depth separable."""
class DepthSepConv(nn.Module):
"""Make a convolution depth separable.
Parameters
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int
**kwargs :
Additional arguments to `Conv`
"""
def __init__(
self,
in_channels,
out_channels,
kernel_size,
confidence=False,
bias=True,
**kwargs,
):
super().__init__()
self.depthwise = Conv(
in_channels,
in_channels,
kernel_size,
groups=in_channels,
bias=bias,
**kwargs,
)
self.pointwise = Conv(in_channels, out_channels, 1, bias=bias)
self.reset_parameters()
def forward(self, x):
out = self.depthwise(x)
out = self.pointwise(out)
return out
def reset_parameters(self):
weights_init(self)
return DepthSepConv
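# Illustrative usage sketch (not in the original source):
#
#   DepthSepConv2d = make_depth_sep_conv(nn.Conv2d)
#   conv = DepthSepConv2d(16, 32, kernel_size=3, padding=1)
#   out = conv(torch.randn(8, 16, 28, 28))  # -> shape (8, 32, 28, 28)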
class ReturnNotTensor:
"""Helper class to allow non tensor outputs from skorch."""
def __init__(self, out):
self.out = out
def to(self, *args, **kwargs):
return self.out
def return_not_tensor(out):
if isinstance(out, torch.Tensor):
return out
else:
return ReturnNotTensor(out)
class Constant:
def __init__(self, c):
self.c = c
def __call__(self, *args):
return self.c
def set_requires_grad(module, val):
"""Set all the gradients of a given module to a certain value."""
for p in module.parameters():
p.requires_grad = val
@contextlib.contextmanager
def no_grad_modules(modules):
"""Context manager that deactivates the gradients of a list of modules."""
for module in modules:
set_requires_grad(module, False)
try:
yield
finally:
for module in modules:
set_requires_grad(module, True)
def mean_p_logits(logits, dim=0, eps=1e-8):
"""Take the mean in probability space given on some logits."""
if logits.size(dim) == 1:
return logits.squeeze(dim)
else:
#! SHOULD BE USING LOG SUM EXP
# have to put into probability space to take average
mean = logits.softmax(-1).mean(dim)
return (
mean + eps
).log() # put back in logit space making sure no nan (no zero)
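# Example (sketch, not from the original source): averaging 2 samples' logits in
# probability space; relies on this module's `torch` import.
#   logits = torch.tensor([[[0.9, 0.1]], [[0.5, 0.5]]]).log()  # [n_samples=2, batch=1, n_classes=2]
#   mean_p_logits(logits)  # ~= log([[0.7, 0.3]]): mean of the probabilities, back in logit space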
class BaseRepresentation:
"""Compute the base representation for a number in a certain base while memoizing."""
def __init__(self, base):
self.base = base
self.memoize = {0: []}
def __call__(self, number):
"""Return a list of the base representation of number."""
if number in self.memoize:
return self.memoize[number]
self.memoize[number] = self(number // self.base) + [number % self.base]
return self.memoize[number]
def get_ith_digit(self, number, i):
"""Return the ith digit pf the base representation of number."""
digits = self(number)
if i >= len(digits):
return 0 # implicit padding with zeroes
return digits[-i - 1]
class BaseRepIthDigits:
"""Compute the ith digit in a given base for torch batch of numbers while memoizing (in numpy)."""
def __init__(self, base):
base_rep = BaseRepresentation(base)
self.base_rep = np.vectorize(base_rep.get_ith_digit)
def __call__(self, tensor, i_digit):
return self.base_rep(tensor, i_digit)
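# Example (sketch, not from the original source): base-10 digit extraction over a
# numpy batch of indices; relies on this module's `np` import.
#   rep = BaseRepIthDigits(base=10)
#   rep(np.array([494, 7, 58]), i_digit=1)  # -> array([9, 0, 5]) (the tens digits)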
class BackwardPDB(torch.autograd.Function):
"""Run PDB in the backward pass."""
@staticmethod
def forward(ctx, input, name="debugger"):
ctx.name = name
ctx.save_for_backward(input)
return input
@staticmethod
def backward(ctx, grad_output):
(input,) = ctx.saved_tensors
if not torch.isfinite(grad_output).all() or not torch.isfinite(input).all():
import pdb
pdb.set_trace()
return grad_output, None # 2 args so return None for `name`
backward_pdb = BackwardPDB.apply
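# Usage sketch (assumption, not from the original source): wrap an intermediate
# activation so that pdb opens whenever a non-finite value or gradient passes
# through this point during backprop.
#   h = backward_pdb(h, "after_encoder")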
|
decodable_information_bottleneck-main
|
dib/utils/helpers.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from functools import partial
import torch.nn as nn
BATCHNORMS = [None, nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d]
def get_norm_layer(norm_layer, dim=2):
"""Return the correct normalization layer.
Parameters
----------
norm_layer : callable or {"batchnorm", "identity"}
Layer to return.
dim : int, optional
Number of dimension of the input (e.g. 2 for images).
"""
if norm_layer is None:
return None
elif "batch" in norm_layer:
Norm = BATCHNORMS[dim]
elif norm_layer == "identity":
Norm = nn.Identity
elif isinstance(norm_layer, str):
raise ValueError(f"Uknown normal_layer={norm_layer}")
else:
Norm = norm_layer
return Norm
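# Example (sketch, not part of the original file):
#   Norm = get_norm_layer("batchnorm", dim=2)  # -> nn.BatchNorm2d (uninitialized)
#   layer = Norm(64)  # batchnorm over 64 channels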
|
decodable_information_bottleneck-main
|
dib/predefined/helper_layers.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from functools import partial
import torch.nn as nn
from .cnn import get_Cnn
from .mlp import MLP
__all__ = ["get_predefined", "try_get_predefined"]
def try_get_predefined(d, **kwargs):
"""Tries to get a predefined module, given a dicttionary of all arguments, if not returns it."""
try:
return get_predefined(**d, **kwargs)
except TypeError:
return d
# TO DOC
def get_predefined(name, meta_kwargs={}, **kwargs):
"""Helper function which returns unitialized common neural networks."""
name = name.lower()
if name == "cnn":
Module = get_Cnn(**kwargs)
elif name == "mlp":
Module = partial(MLP, **kwargs)
elif name == "identity":
Module = nn.Identity
elif name == "linear":
Module = partial(nn.Linear, **kwargs)
elif name is None:
return None
elif not isinstance(name, str):
Module = name
else:
raise ValueError(name)
return Module
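# Example (sketch, not part of the original file; argument values are illustrative):
#   Module = get_predefined("mlp", hidden_size=64)
#   mlp = Module(10, 2)  # MLP(input_size=10, output_size=2, hidden_size=64)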
|
decodable_information_bottleneck-main
|
dib/predefined/predefined.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .cnn import *
from .imgs import *
from .mlp import *
from .predefined import *
|
decodable_information_bottleneck-main
|
dib/predefined/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import warnings
from functools import partial
import numpy as np
import torch
import torch.nn as nn
from torch.nn import functional as F
from dib.utils.helpers import (
channels_to_2nd_dim,
channels_to_last_dim,
make_depth_sep_conv,
)
from dib.utils.initialization import init_param_, weights_init
from .helper_layers import get_norm_layer
logger = logging.getLogger(__name__)
__all__ = ["get_Cnn"]
def get_Cnn(
dim=2,
mode="vanilla",
conv="vanilla",
block="res",
normalization=None,
is_chan_last=True,
pool=None,
**kwargs,
):
"""Helper function which returns a cnn, in a way callable by the CLI.
Parameters
----------
dim : int, optional
Grid input shape.
mode : {"vanilla", "unet"}, optional
conv : {"vanilla", "gauss"}, optional
block : {"simple", "res"}, optional
normalization : {"batchnorm", None}, optional
is_chan_last : bool, optional
pool : {"avg", "max", None}, optional
Returns
-------
Cnn : nn.Module
        Uninitialized CNN
kwargs : dict
Unused kwargs
"""
if mode == "vanilla":
Module = CNN
elif mode == "unet":
Module = UnetCNN
elif mode == "nin":
Module = NIN
if block == "simple":
Block = ConvBlock
elif block == "res":
Block = ResConvBlock
elif block == "nin":
Block = NINBlock
else:
Block = ResConvBlock
Norm = get_norm_layer(normalization, dim=dim)
if pool == "avg":
Pool = AVGPOOLINGS[dim]
elif pool == "max":
Pool = MAXPOOLINGS[dim]
elif pool is None:
Pool = nn.Identity
elif pool is None:
Pool = pool
if conv == "vanilla":
Conv = CONVS[dim]
elif conv == "gauss":
Conv = GAUSSIANCONVS[dim]
elif conv == "reverse":
Conv = REVCONVS[dim]
else:
Conv = conv
return partial(
Module,
ConvBlock=Block,
Conv=Conv,
is_chan_last=is_chan_last,
Normalization=Norm,
Pooling=partial(Pool, kernel_size=2),
**kwargs,
)
### BLOCKS ###
class ConvBlock(nn.Module):
"""Simple convolutional block with a single layer.
Parameters
----------
in_chan : int
Number of input channels.
out_chan : int
Number of output channels.
Conv : nn.Module
Convolutional layer (uninitialized). E.g. `nn.Conv1d`.
kernel_size : int or tuple, optional
Size of the convolving kernel.
dilation : int or tuple, optional
Spacing between kernel elements.
padding : int or tuple, optional
        Padding added to both sides of the input. If `-1`, uses padding that
        keeps the size the same. Currently only works if `kernel_size` is odd
        and only takes into account the kernel size and dilation, but not other
        arguments (e.g. stride).
activation: callable, optional
Activation object. E.g. `nn.ReLU`.
Normalization : nn.Module, optional
        Normalization layer (uninitialized). E.g. `nn.BatchNorm1d`.
Pooling : nn.Module, optional
Pooling layer to apply at the end of the block. The kernel size should be already defined.
    is_depth_sep : bool, optional
Whether to use depth separable convolutions.
kwargs :
Additional arguments to `Conv`.
References
----------
[1] He, K., Zhang, X., Ren, S., & Sun, J. (2016, October). Identity mappings
in deep residual networks. In European conference on computer vision
(pp. 630-645). Springer, Cham.
[2] Chollet, F. (2017). Xception: Deep learning with depthwise separable
convolutions. In Proceedings of the IEEE conference on computer vision
and pattern recognition (pp. 1251-1258).
"""
def __init__(
self,
in_chan,
out_chan,
Conv,
kernel_size=5,
dilation=1,
padding=-1,
activation=nn.ReLU(),
Normalization=nn.Identity,
is_depth_sep=False,
Pooling=nn.Identity,
**kwargs,
):
super().__init__()
if Normalization is None:
Normalization = nn.Identity
self.activation = activation
if padding == -1:
padding = (kernel_size // 2) * dilation
if kwargs.get("stride", 1) != 1:
warnings.warn(
"`padding == -1` but `stride != 1`. The output might be of different dimension "
"as the input depending on other hyperparameters."
)
if is_depth_sep:
Conv = make_depth_sep_conv(Conv)
self.conv = Conv(in_chan, out_chan, kernel_size,
padding=padding, **kwargs)
self.norm = Normalization(out_chan)
self.pool = Pooling()
self.reset_parameters()
def reset_parameters(self):
weights_init(self)
def forward(self, X):
return self.norm(self.activation(self.pool(self.conv(X))))
class ResConvBlock(nn.Module):
"""Convolutional block (2 layers) inspired by the pre-activation Resnet [1]
and depthwise separable convolutions [2].
Parameters
----------
in_chan : int
Number of input channels.
out_chan : int
Number of output channels.
Conv : nn.Module
Convolutional layer (uninitialized). E.g. `nn.Conv1d`.
kernel_size : int or tuple, optional
Size of the convolving kernel. Should be odd to keep the same size.
activation: callable, optional
        Activation object. E.g. `nn.ReLU()`.
Normalization : nn.Module, optional
Normalization layer (uninitialized). E.g. `nn.BatchNorm1d`.
Pooling : nn.Module, optional
Pooling layer to apply at the end of the block. The kernel size should be already defined.
is_bias : bool, optional
Whether to use a bias.
    is_depth_sep : bool, optional
Whether to use depth separable convolutions.
References
----------
[1] He, K., Zhang, X., Ren, S., & Sun, J. (2016, October). Identity mappings
in deep residual networks. In European conference on computer vision
(pp. 630-645). Springer, Cham.
[2] Chollet, F. (2017). Xception: Deep learning with depthwise separable
convolutions. In Proceedings of the IEEE conference on computer vision
and pattern recognition (pp. 1251-1258).
"""
def __init__(
self,
in_chan,
out_chan,
Conv,
kernel_size=5,
activation=nn.ReLU(),
Normalization=nn.Identity,
is_bias=True,
Pooling=nn.Identity,
is_depth_sep=False,
):
super().__init__()
if Normalization is None:
Normalization = nn.Identity
self.activation = activation
if kernel_size % 2 == 0:
raise ValueError(
"`kernel_size={}`, but should be odd.".format(kernel_size))
padding = kernel_size // 2
conv_args = (in_chan, in_chan, kernel_size)
conv_kwargs = dict(padding=padding, bias=is_bias)
self.norm1 = Normalization(in_chan)
if is_depth_sep:
self.conv1 = make_depth_sep_conv(Conv)(*conv_args, **conv_kwargs)
else:
self.conv1 = Conv(*conv_args, **conv_kwargs)
self.norm2 = Normalization(in_chan)
self.conv2_depthwise = Conv(*conv_args, groups=in_chan, **conv_kwargs)
self.conv2_pointwise = Conv(in_chan, out_chan, 1, bias=is_bias)
self.pool = Pooling()
self.reset_parameters()
def reset_parameters(self):
weights_init(self)
def forward(self, X):
out = self.conv1(self.activation(self.norm1(X)))
out = self.conv2_depthwise(self.activation(self.norm2(out)))
# adds residual before point wise => output can change number of channels
out = out + X
out = self.conv2_pointwise(out)
return self.pool(out)
class NINBlock(torch.nn.Module):
def __init__(self, chan, dropout, Normalization=nn.Identity, **kwargs):
super().__init__()
self.out = nn.Sequential(
nn.Conv2d(chan, chan, 3, 2, padding=1),
Normalization(chan),
nn.ReLU(inplace=True),
nn.Conv2d(chan, chan, 1, 1),
Normalization(chan),
nn.ReLU(inplace=True),
nn.Conv2d(chan, chan, 1, 1),
Normalization(chan),
nn.ReLU(inplace=True),
)
self.dropout = nn.Dropout(p=dropout)
def forward(self, x):
return self.dropout(self.out(x))
### MODULES ###
class NIN(torch.nn.Module):
def __init__(
self,
in_channels,
out_channels,
depth=2,
dropout=0,
width=2,
is_flatten=False,
**kwargs,
):
super().__init__()
self.chan = nn.Conv2d(in_channels, 96 * width, 1)
self.layers = nn.Sequential(
*[NINBlock(96 * width, dropout, **kwargs) for i in range(depth)]
)
self.out = nn.Conv2d(96 * width, out_channels, 1)
self.is_flatten = is_flatten
def forward(self, x):
x = torch.relu(self.chan(x))
x = self.layers(x)
x = F.adaptive_avg_pool2d(self.out(x), (1, 1))
if self.is_flatten:
return torch.flatten(x, start_dim=1)
else:
return x
class CNN(nn.Module):
"""Simple multilayer CNN.
Parameters
----------
in_channels : int
Number of input channels
out_channels : int
Number of output channels.
tmp_channels : int or list, optional
Number of temporary channels. If integer then uses always the same. If list then needs to
be of size `n_blocks - 1`, e.g. [16, 32, 64] means that you will have a
`[ConvBlock(in_channels,16), ConvBlock(16,32), ConvBlock(32,64), ConvBlock(64, out_channels)]`.
    ConvBlock : nn.Module, optional
        Convolutional block (uninitialized). Should be initializable as
        `ConvBlock(in_chan, out_chan)`.
n_blocks : int, optional
Number of convolutional blocks.
is_chan_last : bool, optional
Whether the channels are on the last dimension of the input.
is_flatten : bool, optional
Whether to flatten the output.
is_force_hid_smaller : bool, optional
        Whether to force the hidden channels to be at most the larger of the input and output
        channels. If not, it forces the hidden channels to be at least the smaller of the two.
kwargs :
Additional arguments to `ConvBlock`.
"""
def __init__(
self,
in_channels,
out_channels,
tmp_channels=32,
ConvBlock=partial(ConvBlock, Conv=nn.Conv2d),
n_blocks=3,
is_chan_last=False,
is_flatten=False,
is_force_hid_smaller=False,
**kwargs,
):
super().__init__()
self.n_blocks = n_blocks
self.is_chan_last = is_chan_last
new_tmp_channels = tmp_channels
if isinstance(tmp_channels, int):
if is_force_hid_smaller and tmp_channels > max(in_channels, out_channels):
new_tmp_channels = max(out_channels, in_channels)
txt = "tmp_channels={} larger than output={} and input={}. Setting it to {}."
warnings.warn(
txt.format(
tmp_channels, out_channels, in_channels, new_tmp_channels
)
)
elif tmp_channels < min(out_channels, in_channels):
new_tmp_channels = min(out_channels, in_channels)
txt = "tmp_channels={} smaller than output={} and input={}. Setting it to {}."
warnings.warn(
txt.format(
tmp_channels, out_channels, in_channels, new_tmp_channels
)
)
else:
n_blocks = len(tmp_channels) + 1
self.in_out_channels = self._get_in_out_channels(
in_channels, out_channels, new_tmp_channels, n_blocks
)
self.conv_blocks = nn.ModuleList(
[
ConvBlock(in_chan, out_chan, **kwargs)
for in_chan, out_chan in self.in_out_channels
]
)
self.is_return_rep = False # never return representation for vanilla conv
self.is_flatten = is_flatten
self.reset_parameters()
def reset_parameters(self):
weights_init(self)
def _get_in_out_channels(self, in_channels, out_channels, tmp_channels, n_blocks):
"""Return a list of tuple of input and output channels."""
if isinstance(tmp_channels, int):
tmp_channels = [tmp_channels] * (n_blocks - 1)
else:
tmp_channels = list(tmp_channels)
assert len(tmp_channels) == (n_blocks - 1), "tmp_channels: {} != {}".format(
len(tmp_channels), n_blocks - 1
)
channel_list = [in_channels] + tmp_channels + [out_channels]
return list(zip(channel_list, channel_list[1:]))
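    # Example (sketch, illustrative numbers): with in_channels=3, out_channels=10,
    # tmp_channels=32 and n_blocks=3, `_get_in_out_channels` returns
    # [(3, 32), (32, 32), (32, 10)].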
def forward(self, X):
if self.is_chan_last:
X = channels_to_2nd_dim(X)
X, representation = self.apply_convs(X)
if self.is_chan_last:
X = channels_to_last_dim(X)
if self.is_flatten:
X = torch.flatten(X, start_dim=1)
if self.is_return_rep:
return X, representation
return X
def apply_convs(self, X):
for conv_block in self.conv_blocks:
X = conv_block(X)
return X, None
class UnetCNN(CNN):
"""Unet [1].
Parameters
----------
in_channels : int
Number of input channels
out_channels : int
Number of output channels.
tmp_channels : int or list, optional
Number of temporary channels. If integer then uses always the same. If list then needs to
be of size `n_blocks - 1`, e.g. [16, 32, 64] means that you will have a
`[ConvBlock(in_channels,16), ConvBlock(16,32), ConvBlock(32,64), ConvBlock(64, out_channels)]`.
    ConvBlock : nn.Module, optional
        Convolutional block (uninitialized). Should be initializable as
        `ConvBlock(in_chan, out_chan)`.
Pool : nn.Module, optional
        Pooling layer (uninitialized). E.g. torch.nn.MaxPool1d.
upsample_mode : {'nearest', 'linear', bilinear', 'bicubic', 'trilinear'}, optional
The upsampling algorithm: nearest, linear (1D-only), bilinear, bicubic
(2D-only), trilinear (3D-only).
max_nchannels : int, optional
Bounds the maximum number of channels instead of always doubling them at
downsampling block.
pooling_size : int or tuple, optional
Size of the pooling filter.
factor_chan : float, optional
        The factor by which to multiply the number of channels after each down block. If it's a
        float, the resulting number of channels is rounded.
is_force_same_bottleneck : bool, optional
Whether to use the average bottleneck for the same functions sampled at
different context and target. If `True` the first and second halves
of a batch should contain different samples of the same functions (in order).
is_return_rep : bool, optional
Whether to return a summary representation, that corresponds to the
bottleneck + global mean pooling.
is_skip_resize : bool, optional
Whether to skip the resizing steps. Only possible if `in_channels==out_channels==tmp_channels`.
kwargs :
Additional arguments to `ConvBlock`.
References
----------
[1] Ronneberger, Olaf, Philipp Fischer, and Thomas Brox. "U-net: Convolutional
networks for biomedical image segmentation." International Conference on
Medical image computing and computer-assisted intervention. Springer, Cham, 2015.
"""
def __init__(
self,
in_channels,
out_channels,
tmp_channels=32,
ConvBlock=partial(ResConvBlock, Conv=nn.Conv2d),
Pool=nn.AvgPool2d,
upsample_mode="bilinear",
max_nchannels=256,
pooling_size=2,
factor_chan=2,
is_force_same_bottleneck=False,
is_return_rep=False,
is_skip_resize=False,
**kwargs,
):
self.is_skip_resize = is_skip_resize
self.factor_chan = factor_chan
self.max_nchannels = max_nchannels
super().__init__(
in_channels,
out_channels,
tmp_channels=tmp_channels,
ConvBlock=ConvBlock,
**kwargs,
)
self.pooling_size = pooling_size
self.pooling = Pool(self.pooling_size)
self.upsample_mode = upsample_mode
self.is_force_same_bottleneck = is_force_same_bottleneck
self.is_return_rep = is_return_rep
def apply_convs(self, X):
if self.is_skip_resize:
n_tmp_blocks = self.n_blocks
start_block = 0
else:
n_tmp_blocks = self.n_blocks - 2
# Input block
X = self.conv_blocks[0](X)
start_block = 1
n_down_blocks = n_tmp_blocks // 2
residuals = [None] * n_down_blocks
# Down
for i in range(n_down_blocks):
X = self.conv_blocks[start_block + i](X)
residuals[i] = X
X = self.pooling(X)
# Bottleneck
X = self.conv_blocks[n_down_blocks](X)
# Representation before forcing same bottleneck
representation = X.view(*X.shape[:2], -1).mean(-1)
if self.is_force_same_bottleneck and self.training:
# forces the u-net to use the bottleneck by giving additional information
# there. I.e. taking average between bottleneck of different samples
# of the same functions. Because bottleneck should be a global representation
# => should not depend on the sample you chose
batch_size = X.size(0)
batch_1 = X[: batch_size // 2, ...]
batch_2 = X[batch_size // 2:, ...]
X_mean = (batch_1 + batch_2) / 2
X = torch.cat([X_mean, X_mean], dim=0)
# Up
for i in range(n_down_blocks + 1, n_tmp_blocks):
X = F.interpolate(
X,
mode=self.upsample_mode,
scale_factor=self.pooling_size,
align_corners=True,
)
X = torch.cat(
(X, residuals[n_down_blocks - i]), dim=1
) # concat on channels
X = self.conv_blocks[i + start_block](X)
if not self.is_skip_resize:
# Output Block
X = self.conv_blocks[-1](X)
return X, representation
def _get_in_out_channels(self, in_channels, out_channels, tmp_channels, n_blocks):
"""Return a list of tuple of input and output channels for a Unet."""
if self.is_skip_resize:
assert in_channels == out_channels == tmp_channels
n_tmp_blocks = n_blocks
else:
n_tmp_blocks = n_blocks - 2 # removes last and first block for this part
assert n_blocks % 2 == 1, "n_blocks={} not odd".format(n_blocks)
# e.g. if tmp_channels=16, n_tmp_blocks=5: [16, 32, 64]
channel_list = [
round(self.factor_chan ** i * tmp_channels)
for i in range(n_tmp_blocks // 2 + 1)
]
# e.g.: [16, 32, 64, 64, 32, 16]
channel_list = channel_list + channel_list[::-1]
# bound max number of channels by self.max_nchannels
channel_list = [min(c, self.max_nchannels) for c in channel_list]
# e.g.: [(16, 32), (32,64), (64, 64), (64, 32), (32, 16)]
in_out_channels = list(zip(channel_list, channel_list[1:]))
# e.g.: [(16, 32), (32,64), (64, 64), (128, 32), (64, 16)] due to concat
idcs = slice(len(in_out_channels) // 2 + 1, len(in_out_channels))
in_out_channels[idcs] = [
(in_chan * 2, out_chan) for in_chan, out_chan in in_out_channels[idcs]
]
if not self.is_skip_resize:
# Adds in and out block
in_out_channels = (
[(in_channels, tmp_channels)]
+ in_out_channels
+ [(tmp_channels, out_channels)]
)
assert len(in_out_channels) == (n_blocks), "in_out_channels: {} != {}".format(
len(in_out_channels), n_blocks
)
return in_out_channels
### CONV ###
class GaussianConv2d(nn.Module):
def __init__(self, kernel_size=5, **kwargs):
super().__init__()
self.kwargs = kwargs
assert kernel_size % 2 == 1
self.kernel_sizes = (kernel_size, kernel_size)
self.exponent = -(
(torch.arange(0, kernel_size).view(-1, 1).float() - kernel_size // 2) ** 2
)
self.reset_parameters()
def reset_parameters(self):
self.weights_x = nn.Parameter(torch.tensor([1.0]))
self.weights_y = nn.Parameter(torch.tensor([1.0]))
def forward(self, X):
# only switch first time to device
self.exponent = self.exponent.to(X.device)
marginal_x = torch.softmax(self.exponent * self.weights_x, dim=0)
marginal_y = torch.softmax(self.exponent * self.weights_y, dim=0).T
in_chan = X.size(1)
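        # rank-1 (separable) kernel: softmax over the negated squared distances
        # gives a discrete Gaussian-like marginal per axis; their outer product
        # below forms the 2D filter, applied depthwise (groups=in_chan)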
filters = marginal_x @ marginal_y
filters = filters.view(1, 1, *self.kernel_sizes).expand(
in_chan, 1, *self.kernel_sizes
)
return F.conv2d(X, filters, groups=in_chan, **self.kwargs)
# GLOBAL VARIABLES
CONVS = [None, nn.Conv1d, nn.Conv2d, nn.Conv3d]
REVCONVS = [None, nn.ConvTranspose1d, nn.ConvTranspose2d, nn.ConvTranspose3d]
GAUSSIANCONVS = {2: GaussianConv2d} # at the end because defined in this file
FCONVS = [None, F.conv1d, F.conv2d, F.conv3d]
MAXPOOLINGS = [None, nn.MaxPool1d, nn.MaxPool2d, nn.MaxPool3d]
AVGPOOLINGS = [None, nn.AvgPool1d, nn.AvgPool2d, nn.AvgPool3d]
|
decodable_information_bottleneck-main
|
dib/predefined/cnn.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import warnings
import numpy as np
import torch
import torch.nn as nn
from skorch.utils import to_numpy
from dib.utils.helpers import batch_flatten, batch_unflatten, get_activation, tmp_seed
from dib.utils.initialization import (
linear_init,
set_linear_like_,
set_normlayer_like_,
weights_init,
)
from dib.utils.pruning import RandomUnstructured, global_unstructured, is_pruned, remove
from .helper_layers import get_norm_layer
__all__ = ["MLP"]
logger = logging.getLogger(__name__)
class MLP(nn.Module):
"""General MLP class.
Parameters
----------
input_size: int
output_size: int
hidden_size: int or list, optional
        Number of hidden neurons. If a list, `n_hidden_layers` will be `len(hidden_size)`.
n_hidden_layers: int, optional
Number of hidden layers.
activation: callable, optional
        Activation function. E.g. `nn.ReLU()`.
is_bias: bool, optional
        Whether to use biases in the hidden layers.
dropout: float, optional
Dropout rate.
is_force_hid_larger : bool, optional
        Whether to force the hidden dimension to be at least as large as the smaller of the input and output sizes.
n_skip : int, optional
Number of layers to skip with residual connection
norm_layer : nn.Module or {"identity","batch"}, optional
Normalizing layer to use.
k_prune : int, optional
        Number of times to apply 50% pruning on all weight matrices besides the last one.
seed : int, optional
Random seed, only used when pruning
previous_mlp : MLP, optional
Previous MLP to use as initialization. All the layers in common must have the same shapes.
is_rectangle : bool, optional
        Whether to use a rectangle scheme for the MLP. If True, uses
        `hidden_size = hidden_size * 2**n_hidden_layers`.
is_plot_activation : bool, optional
Whether to store all activations for plotting
is_mult_hid_input : bool, optional
        Whether `hidden_size` is a factor that should be multiplied by `input_size` rather than an
        absolute number.
"""
def __init__(
self,
input_size,
output_size,
hidden_size=32,
n_hidden_layers=1,
activation=nn.LeakyReLU(),
is_bias=True,
dropout=0,
is_force_hid_larger=False,
n_skip=0,
norm_layer="identity",
k_prune=0,
seed=123,
previous_mlp=None,
is_rectangle=False,
is_plot_activation=False,
is_mult_hid_input=False,
):
super().__init__()
self.input_size = input_size
self.output_size = output_size
self.hidden_size = hidden_size
self.n_hidden_layers = n_hidden_layers
self.n_skip = n_skip
self.norm_layer = get_norm_layer(norm_layer, dim=1)
self.k_prune = k_prune
self.seed = seed
self._to_plot_activation = dict()
self.is_rectangle = is_rectangle
self.is_plot_activation = is_plot_activation
self.is_mult_hid_input = is_mult_hid_input
if self.n_hidden_layers == 0:
self.hidden_size = []
self.to_hidden = nn.Linear(
self.input_size, self.output_size, bias=is_bias)
self.out = nn.Identity()
self.reset_parameters()
return
if self.is_mult_hid_input:
if isinstance(self.hidden_size, int):
self.hidden_size = self.hidden_size * self.input_size
else:
self.hidden_size = [
h * self.input_size for h in self.hidden_size]
if self.is_rectangle:
assert isinstance(self.hidden_size, int)
self.hidden_size = self.hidden_size * (2 ** self.n_hidden_layers)
if isinstance(self.hidden_size, int):
if is_force_hid_larger and self.hidden_size < min(
self.output_size, self.input_size
):
self.hidden_size = min(self.output_size, self.input_size)
txt = "hidden_size={} smaller than output={} and input={}. Setting it to {}."
logger.info(
txt.format(hidden_size, output_size,
input_size, self.hidden_size)
)
self.hidden_size = [self.hidden_size] * self.n_hidden_layers
else:
self.n_hidden_layers = len(self.hidden_size)
self.dropout = nn.Dropout(p=dropout) if dropout > 0 else nn.Identity()
self.activation = get_activation(activation)
self.to_hidden = nn.Linear(
self.input_size, self.hidden_size[0], bias=is_bias)
self.tohid_norm = self.norm_layer(self.hidden_size[0])
self.linears = nn.ModuleList(
[
nn.Linear(in_size, out_size, bias=is_bias)
for in_size, out_size in zip(
self.hidden_size[:][:-1], self.hidden_size[1:]
)
# dirty [:] because omegaconf does not accept [:-1] directly
]
)
self.norm_layers = nn.ModuleList(
[
self.norm_layer(out_size)
for _, out_size in zip(self.hidden_size[:][:-1], self.hidden_size[1:])
]
)
self.out = nn.Linear(
self.hidden_size[-1], self.output_size, bias=is_bias)
self.reset_parameters()
if previous_mlp is not None:
self.set_parameters_like_(previous_mlp)
self.prune_weights_(self.k_prune)
def forward(self, x):
# flatten to make for normalizing layer => only 2 dim
x, shape = batch_flatten(x)
x = self.to_hidden(x)
self.plot_activation_(dict(tohidout=x))
if self.n_hidden_layers == 0:
return batch_unflatten(x, shape)
x = self.tohid_norm(x)
self.plot_activation_(dict(tohinorm=x))
x = self.activation(x)
x = self.dropout(x)
old = x
for i, (linear, norm_layer) in enumerate(
zip(self.linears, self.norm_layers), start=1
):
x = linear(x)
self.plot_activation_({f"linout{i}": x})
x = norm_layer(x)
self.plot_activation_({f"linnorm{i}": x})
x = self.activation(x)
if self.n_skip != 0 and i % self.n_skip == 0:
                # dividing by 10 reduces the chance of NaNs at the start (keeps the order of magnitude independent of the number of layers)
x = old + x / 10
old = x
x = self.dropout(x)
out = self.out(x)
self.plot_activation_(dict(out=out))
out = batch_unflatten(out, shape)
return out
def reset_parameters(self):
init = linear_init
if self.n_hidden_layers == 0:
init(self.to_hidden)
else:
init(self.to_hidden, activation=self.activation)
for lin in self.linears:
init(lin, activation=self.activation)
init(self.out)
def set_parameters_like_(self, mlp):
"""Given an other MLP that has the same input and output size, set all the parameters in common."""
assert mlp.input_size == self.input_size and mlp.output_size == self.output_size
min_layers = min(len(self.linears), len(mlp.linears))
if self.n_hidden_layers == mlp.n_hidden_layers == 0:
self.to_hidden = mlp.to_hidden
elif self.n_hidden_layers != 0 and mlp.n_hidden_layers != 0:
set_linear_like_(self.to_hidden, mlp.to_hidden)
set_linear_like_(self.out, mlp.out)
for i in range(min_layers):
set_linear_like_(self.linears[i], mlp.linears[i])
set_normlayer_like_(self.norm_layers[i], mlp.norm_layers[i])
else:
            logger.info(
                "Cannot use `set_parameters_like` when only one of the 2 MLPs has 0 hidden layers."
            )
def prune_weights_(self, k_prune=1, sparsity_ratio=0.5):
"""Apply in place `k_prune` times a `sparsity_ratio` pruning."""
outs = [(self.to_hidden, "weight")] + [
(linear, "weight") for linear in self.linears
]
# don't put the last layer because it depends on whether X or Y as output
# first make sure that all previous pruning is removed
for m, name in outs:
            # `remove` does not actually remove, it just bakes the mask in (pruned weights stay 0)
            # => if the module was already pruned, add some jitter so the weights are nonzero and can learn
if is_pruned(m):
remove(m, name)
with torch.no_grad():
m.weight += torch.randn_like(m.weight) / 100
if sparsity_ratio == 0 or k_prune < 1:
return
with tmp_seed(self.seed):
for k in range(k_prune):
global_unstructured(
outs, pruning_method=RandomUnstructured, amount=sparsity_ratio
)
def plot_activation_(self, activations):
if not self.is_plot_activation:
return
for k, v in activations.items():
            # operation over batches
v = to_numpy(v)
self._to_plot_activation[k + "_mean"] = v.mean(0)
self._to_plot_activation[k + "_meanabs"] = np.abs(v).mean(0)
def tensorboard(self, writer, epoch, mode="on_grad_computed"):
name = type(self).__name__
if mode == "on_grad_computed":
for k, v in self._to_plot_activation.items():
writer.add_histogram(
f"activations/{name}/" + k, v, global_step=epoch)
self._to_plot_activation = dict()
writer.add_histogram(
f"weights/{name}/w_tohid", self.to_hidden.weight, global_step=epoch
)
writer.add_histogram(
f"weights/{name}/b_tohid", self.to_hidden.bias, global_step=epoch
)
writer.add_histogram(
f"grad/{name}/w_tohid", self.to_hidden.weight.grad, global_step=epoch
)
writer.add_histogram(
f"grad/{name}/b_tohid", self.to_hidden.bias.grad, global_step=epoch
)
if self.n_hidden_layers != 0:
for i, lin in enumerate(self.linears):
writer.add_histogram(
f"weights/{name}/w_lin{i}", lin.weight, global_step=epoch
)
writer.add_histogram(
f"weights/{name}/b_lin{i}", lin.bias, global_step=epoch
)
writer.add_histogram(
f"weights/{name}/w_out", self.out.weight, global_step=epoch
)
writer.add_histogram(
f"weights/{name}/b_out", self.out.bias, global_step=epoch
)
for i, lin in enumerate(self.linears):
writer.add_histogram(
f"grad/{name}/w_lin{i}", lin.weight.grad, global_step=epoch
)
writer.add_histogram(
f"grad/{name}/b_lin{i}", lin.bias.grad, global_step=epoch
)
writer.add_histogram(
f"grad/{name}/w_out", self.out.weight.grad, global_step=epoch
)
writer.add_histogram(
f"grad/{name}/b_out", self.out.bias.grad, global_step=epoch
)
"""
class MLP(nn.Module):
def __init__(self, input_size, output_size, hidden_size=32, **kwargs):
super().__init__()
if isinstance(hidden_size, list):
hidden_size = hidden_size[0]
hidden_size = 2
self.to_hidden = nn.Linear(input_size, hidden_size)
self.out = nn.Linear(hidden_size, output_size)
def forward(self, x):
# flatten to make for normalizing layer => only 2 dim
x, shape = batch_flatten(x)
out = torch.relu(self.to_hidden(x))
out = self.out(out)
out = batch_unflatten(out, shape)
return out
"""
|
decodable_information_bottleneck-main
|
dib/predefined/mlp.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .ib import *
from .img import *
|
decodable_information_bottleneck-main
|
dib/transformers/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
from functools import partial
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
import torchvision
from dib.predefined import MLP, WideResNet, get_Cnn
from dib.predefined.helper_layers import get_norm_layer
from dib.utils.helpers import prod
__all__ = ["get_img_encoder"]
logger = logging.getLogger(__name__)
# TODO: CLEAN AND DOCUMENT ALL THIS FILE !!!!
class CNNEncoder(nn.Module):
def __init__(self, x_shape=(1, 32, 32), z_dim=256, **kwargs):
super().__init__()
self.core = get_Cnn(is_flatten=True, **kwargs)(x_shape[0], z_dim)
def forward(self, x):
return self.core(x)
class MLPEncoder(nn.Module):
def __init__(self, x_shape=(1, 32, 32), z_dim=256, **kwargs):
super().__init__()
self.core = MLP(prod(x_shape), z_dim, **kwargs)
def forward(self, x):
x = x.flatten(start_dim=1)
return self.core(x)
class TorchvisionEncoder(nn.Module):
def __init__(
self,
TVM,
x_shape=(1, 32, 32),
n_classes=10,
z_dim=256,
norm_layer=None,
is_resnet_converter=False,
**kwargs,
):
super().__init__()
        # make every input to TVM have 3 channels
self.converter = nn.Sequential(
nn.Conv2d(x_shape[0], 3, 3, padding=1),
nn.ReLU(),
nn.Conv2d(3, 3, 3, padding=1),
nn.ReLU(),
)
self.tvm = TVM(
norm_layer=get_norm_layer(norm_layer, dim=2), **kwargs
        )  # the classification head will be removed/replaced in any case
if is_resnet_converter:
self.converter = nn.Identity()
self.tvm.conv1 = nn.Conv2d(
x_shape[0], 64, kernel_size=3, stride=1, padding=1, bias=False
)
self.tvm.maxpool = nn.Identity()
if z_dim == self.tvm.fc.in_features:
self.tvm.fc = nn.Identity()
else:
self.tvm.fc = nn.Linear(self.tvm.fc.in_features, z_dim)
def forward(self, x):
return self.tvm(self.converter(x))
def get_img_encoder(name):
name = name.lower()
if "mlp" in name or "width" in name:
return MLPEncoder
elif "cnn" in name:
return CNNEncoder
elif "resnet18" in name:
return partial(
TorchvisionEncoder,
TVM=torchvision.models.resnet18,
is_resnet_converter=True,
)
elif "resnet34" in name:
return partial(
TorchvisionEncoder,
TVM=torchvision.models.resnet34,
is_resnet_converter=True,
)
elif "resnet50" in name:
return partial(
TorchvisionEncoder,
TVM=torchvision.models.resnet50,
is_resnet_converter=True,
)
elif "resnet101" in name:
return partial(
TorchvisionEncoder,
TVM=torchvision.models.resnet101,
is_resnet_converter=True,
)
elif "wideresnet101" in name:
return partial(
TorchvisionEncoder,
TVM=torchvision.models.wide_resnet101_2,
is_resnet_converter=True,
)
elif "wideresnet" in name:
return partial(TorchvisionEncoder, TVM=WideResNet)
else:
raise ValueError(f"Unkown name={name}")
|
decodable_information_bottleneck-main
|
dib/transformers/img.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import math
import random
from itertools import zip_longest
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.stats import entropy
from sklearn.metrics import accuracy_score
from skorch.utils import to_numpy
from torch.distributions import Categorical
from torch.distributions.kl import kl_divergence
from torch.nn.parallel import parallel_apply
from dib.predefined import MLP
from dib.utils.distributions import MultivariateNormalDiag, entropy_labels
from dib.utils.helpers import (
BaseRepIthDigits,
Constant,
CrossEntropyLossGeneralize,
Identity,
Nchunk_iterable,
ReturnNotTensor,
extract_target,
get_idx_permuter,
is_sorted,
mean_p_logits,
return_not_tensor,
set_requires_grad,
tmp_seed,
update_dict_copy,
)
from dib.utils.initialization import weights_init
from .dib import IBEncoder
from .helpers import (
BASE_LOG,
CORR_GROUPS,
EPS_MIN,
N_CORR,
NotEnoughHeads,
detach,
mean_p_logits_parallel,
mean_std,
)
from .vib import VIBLoss
__all__ = ["ERMLoss"]
logger = logging.getLogger(__name__)
class ERMLoss(VIBLoss):
"""Empirical risk minimizer Loss.
Parameters
----------
ZYCriterion : nn.Module, optional
Criterion to compute the loss of Q_zy.
map_target_position : dict, optional
Dictionary that maps the type of target (e.g. "index") to its position in the
target.
"""
def forward(self, out, y):
y_pred, z_sample, p_zCx = out
if p_zCx.out is not None:
p_zCx_base = p_zCx.out.base_dist
self._store(
z_norm=z_sample.pow(2).mean(),
z_mean_norm=p_zCx_base.loc.abs().mean(),
z_std=p_zCx_base.scale.mean(),
)
zy_loss = self.compute_zy_loss(y_pred, y)
return zy_loss
|
decodable_information_bottleneck-main
|
dib/transformers/ib/erm.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .dib import *
from .erm import *
from .helpers import *
from .vib import *
|
decodable_information_bottleneck-main
|
dib/transformers/ib/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import copy
import logging
import math
import random
from functools import partial
from itertools import zip_longest
import numpy as np
import sklearn
import torch
import torch.nn as nn
import torch.nn.functional as F
from joblib import Parallel, delayed
from scipy.stats import entropy
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import accuracy_score, log_loss
from skorch.utils import to_numpy
from torch.distributions import Categorical
from torch.distributions.kl import kl_divergence
from torch.nn.parallel import parallel_apply
from dib.predefined import MLP
from dib.utils.distributions import MultivariateNormalDiag, label_distribution
from dib.utils.helpers import (
BaseRepIthDigits,
BatchNorm1dLast,
Constant,
CrossEntropyLossGeneralize,
Identity,
Nchunk_iterable,
ReturnNotTensor,
extract_target,
get_idx_permuter,
is_sorted,
mean_p_logits,
no_grad_modules,
return_not_tensor,
set_seed,
tmp_seed,
update_dict_copy,
wrap_batchnorm,
)
from dib.utils.initialization import weights_init
from .helpers import (
BASE_LOG,
CORR_GROUPS,
EPS_MIN,
N_CORR,
NotEnoughHeads,
SingleInputModuleList,
detach,
mean_p_logits_parallel,
mean_std,
)
try:
import higher
except ImportError:
pass
__all__ = [
"DIBLoss",
"IBEncoder",
"DIBLossAltern",
"DIBLossLinear",
"DIBLossAlternLinear",
"DIBLossAlternLinearExact",
"DIBLossAlternHigher",
"DIBLossZX",
]
logger = logging.getLogger(__name__)
class IBEncoder(nn.Module):
"""General class for *IB Encoders.
Parameters
----------
Encoder : nn.Module
Uninitialized module that takes `x_shape` and `z_dim` as input.
Q : nn.Module
Functional family of the classifier. I.e. for sufficiency.
x_shape : tuple, optional
Size of the inputs.
n_classes : int, optional
Number of output classes.
z_dim : int, optional
Size of the representation.
is_stochastic : bool, optional
Whether to use a stochastic encoder.
n_test_samples : int, optional
Number of samples of z to use if `is_stochastic` and testing.
is_avg_trnsf : bool, optional
        Whether to return the average representation or all of them. The former is
        useful for plotting.
kwargs:
Additional arguments to Encoder.
Return
------
if is_transform:
z_sample : torch.tensor, shape = [n_samples, batch_size, z_dim]
else :
y_pred : torch.tensor, shape = [batch_size, n_classes]
z_sample : torch.tensor, shape = [n_samples, batch_size, z_dim]
p_zCx : MultivariateNormalDiag, shape = [batch_size, z_dim].
            Distribution of p(z|x); None when the encoder is deterministic.
"""
def __init__(
self,
Encoder,
Q,
x_shape=(1, 32, 32),
n_classes=10,
z_dim=256,
is_stochastic=True,
n_test_samples=12,
is_avg_trnsf=False,
is_limit_growth=False,
is_wrap_batchnorm=False,
**kwargs,
):
super().__init__()
self.is_transform = False
self.z_dim = z_dim
self.is_stochastic = is_stochastic
self.n_test_samples = n_test_samples
self._to_plot_activation = {}
self.is_avg_trnsf = is_avg_trnsf
self.n_classes = n_classes
self.is_limit_growth = is_limit_growth
self.is_wrap_batchnorm = is_wrap_batchnorm
self.encoder = Encoder(
x_shape=x_shape, z_dim=z_dim * 2 if is_stochastic else z_dim
)
if self.is_wrap_batchnorm:
self.batch_norm = BatchNorm1dLast(num_features=z_dim, affine=False)
self.Q_zy = Q(z_dim, n_classes)
self.reset_parameters()
def reset_parameters(self):
weights_init(self)
def forward(self, X):
batch_size = X.size(0)
n_samples = 1 if self.training else self.n_test_samples
if self.is_stochastic:
z_suff_stat = self.encoder(X)
z_mean, z_std = z_suff_stat.view(batch_size, -1, 2).unbind(-1)
if self.is_limit_growth:
z_mean = torch.tanh(z_mean)
z_std = torch.sigmoid(z_std)
else:
# -5 as in vib + delta
z_std = EPS_MIN + F.softplus(z_std - 5)
p_zCx = MultivariateNormalDiag(z_mean, z_std)
z_sample = p_zCx.rsample([n_samples])
else:
z_sample = self.encoder(X).unsqueeze(0) # unsqueeze as if 1 sample
p_zCx = None
if self.is_wrap_batchnorm:
z_sample = self.batch_norm(z_sample)
if self.is_transform:
if self.is_avg_trnsf:
return z_sample.mean(0)
else:
return z_sample
y_pred = mean_p_logits(self.Q_zy(z_sample))
self.plot_activation_(dict(y_pred=y_pred, z_sample=z_sample.mean(0)))
# by passes the issue that skorch thinks it's tensor
return y_pred, z_sample, return_not_tensor(p_zCx)
def plot_activation_(self, activations):
for k, v in activations.items():
            # operation over batches
v = to_numpy(v)
self._to_plot_activation[k + "_mean"] = v.mean(0)
self._to_plot_activation[k + "_meanabs"] = np.abs(v).mean(0)
def tensorboard(self, writer, epoch, mode="on_grad_computed"):
name = type(self).__name__
if mode == "on_grad_computed":
for k, v in self._to_plot_activation.items():
writer.add_histogram(
f"activations/{name}/" + k, v, global_step=epoch)
self._to_plot_activation = dict()
class DIBLoss(nn.Module):
"""DIB Loss.
Parameters
----------
Q : nn.Module
Functional family for minimality.
n_per_target : dict
Number of examples for each target.
beta : float or callable, optional
Regularization weight. If callable, should return a float given `is_training`.
Importantly this only changes the gradients but not the model selection / loss plotting.
n_per_head : list, optional
        Number of resamplings of the optimal nuisance. In practice this permutes the indices and
        then applies the nuisance.
n_classes : int, optional
Number of output classes.
z_dim : int, optional
Size of the representation.
    conditional : {None, "H_Q[X|Z,Y]", "H_Q[X|Z]-H_Q[Y|Z]", "H_Q'[X|Z,Y]"}
        If None uses DIB. If `"H_Q[X|Z,Y]"` uses a different set of heads depending on the label;
        this is the most correct version of conditional IB but is computationally intensive.
        `"H_Q'[X|Z,Y]"` approximates the previous method by giving the label Y as input to the
        heads; as a result the heads have architecture Q' instead of the desired Q.
        `"H_Q[X|Z]-H_Q[Y|Z]"` indexes all X differently for each label, which is only correct if
        all possible labels are independent.
is_optimal_nuisance : bool, optional
        Whether to use optimal nuisances instead of randomly hashed ones, i.e. use the base-|Y|
        representation of the index of X. The number of heads is the same regardless (to enable comparison).
is_same_indices : bool, optional
Whether to use the same indices for each `n_per_head`.
ZYCriterion : nn.Module, optional
Criterion to compute the loss of Q_zy.
map_target_position : dict, optional
Dictionary that maps the type of target (e.g. "index") to its position in the
target.
warm_Q_zx : int, optional
        Number of steps during which to warm up Q_zx (i.e. only backprop through the heads).
z_norm_reg : float, optional
        Regularizer on the mean squared norm of the representations (applied when it is larger than 1).
        Note that we also use a hard boundary (the regularizer is multiplied by 100) when the mean
        squared norm is larger than 10. This is crucial to get good results: adversarial training
        makes the representation go to infinity without it.
seed : int, optional
Random seed.
"""
def __init__(
self,
Q,
n_per_target,
beta=1,
n_per_head=3,
n_classes=10,
z_dim=128,
conditional=None,
seed=123,
is_optimal_nuisance=True,
is_same_indices=False,
ZYCriterion=CrossEntropyLossGeneralize,
map_target_position={"target": 0, "index": 1},
warm_Q_zx=0,
z_norm_reg=0.0,
weight_kl=None, # dev
is_zx_only=False, # DEV
is_use_y_as_n=False, # DEV
threshold_suff=float("inf"), # DEV
is_wrap_batchnorm=False,
**kwargs,
):
super().__init__()
self.n_per_target = n_per_target
self.beta = beta if callable(beta) else Constant(beta)
self.n_per_head = n_per_head
self.n_classes = n_classes
self.z_dim = z_dim
self.conditional = conditional
self.seed = seed
self.is_optimal_nuisance = is_optimal_nuisance
self.is_same_indices = is_same_indices
self.map_target_position = map_target_position
self.is_zx_only = is_zx_only # DEV
self.is_use_y_as_n = is_use_y_as_n # DEV
self.threshold_suff = float(threshold_suff) # DEV
self.warm_Q_zx = warm_Q_zx
self.z_norm_reg = z_norm_reg
self.weight_kl = weight_kl
self.is_wrap_batchnorm = is_wrap_batchnorm
# all the Q heads
self.n_heads = self._get_n_heads()
if self.is_wrap_batchnorm:
Q = wrap_batchnorm(Q)
self.Q_zx = self.get_Q_zx(Q)
self.zy_criterion = ZYCriterion()
self.to_store = dict()
self.nuisances = []
self.precompute_random_labelings_()
self.compute_entropies_()
self.compute_probabilities()
self.reset_parameters()
set_seed(
self.seed
) # ensures same seed as VIB (different due to init of heads)
def get_Q_zx_helper(self, Q):
"""Return one set of classifiers from Z to {\Tilde{Y}_i}_i."""
input_dim = (
(self.z_dim +
1) if self.conditional == "H_Q'[X|Z,Y]" else self.z_dim
)
return nn.ModuleList(
[Q(input_dim, self.n_classes) for _ in range(self.n_heads)]
)
def get_Q_zx(self, Q):
if self.conditional == "H_Q[X|Z,Y]":
# different set of optimal (arg infimum) classifiers for each labels
return nn.ModuleList(
[self.get_Q_zx_helper(Q) for _ in range(self.n_classes)]
)
else:
# same set of optimal classifiers regardless of label
return self.get_Q_zx_helper(Q)
def idcs_to_baseK_nuisance(self, i, idcs, base, n_nuisance):
"""Return the ith nuisance, using the base |Y| decomposition. Computations in numpy"""
if not isinstance(self.nuisances, BaseRepIthDigits):
self.nuisances = BaseRepIthDigits(base)
self._max_idx = base ** n_nuisance
if not np.all(idcs < self._max_idx):
raise NotEnoughHeads(
f"Max idx is base^heads={base}^{n_nuisance}={self._max_idx}. These idcs do not satisfy that : {idcs[(idcs >= self._max_idx)]}"
)
# ith base B expansion of the indices. E.g. for [494,7,58] and i=1 and base=10 would return [9,0,5]
return self.nuisances(idcs, i)
def _store(self, **to_store):
for k, v in to_store.items():
# value and count
if isinstance(v, torch.Tensor):
v = v.item()
self.to_store[k] = self.to_store.get(k, np.array([0.0, 0])) + np.array(
[v, 1]
)
def precompute_random_labelings_(self):
"""Precompute the randomization of indices for different random labelings."""
if not is_sorted([int(k) for k in self.n_per_target.keys()]):
raise ValueError(
f"The keys of `n_per_target` need to be sorted but ={self.n_per_target.keys()}"
)
if self.conditional is None or not self.is_optimal_nuisance:
n_idcs = sum(self.n_per_target.values())
else:
n_idcs = list(self.n_per_target.values())
n_permuters = self.n_per_head if self.is_optimal_nuisance else self.n_heads
# precompute the permutations of indices
if self.is_same_indices:
self._rand_indcs = [get_idx_permuter(
n_idcs, seed=self.seed)] * n_permuters
else:
self._rand_indcs = [
get_idx_permuter(n_idcs, seed=self.seed + i) for i in range(n_permuters)
]
def _get_n_heads(self):
"""Compute the number of heads that will be needed."""
if self.conditional in ["H_Q[X|Z,Y]", "H_Q[X|Z]-H_Q[Y|Z]", "H_Q'[X|Z,Y]"]:
            # if concatenating, use the same heads for every label but concatenate the label
            # to the representation to make sure that you don't remove label information from
            # the representation
n_to_cover = max(self.n_per_target.values())
elif self.conditional is None:
            # if not conditional then you need to cover all of |X| regardless of how many fall in each label
n_to_cover = sum(self.n_per_target.values())
else:
raise ValueError(f"Unkown conditional={self.conditional}")
self.n_covering = math.ceil(math.log(n_to_cover, self.n_classes))
n_heads = self.n_per_head * self.n_covering
logger.info(f"nheads: {n_heads}")
return n_heads
def compute_probabilities(self):
n_per_target = np.array(list(self.n_per_target.values()))
n_idcs = n_per_target.sum()
# list of p_{\Tilde{Y}_i}
self.p_ni = []
for i in range(self.n_heads):
n_i = self.get_ith_nuisance(i, torch.arange(n_idcs).long())
self.p_ni.append(label_distribution(n_i, self.n_classes))
# list of list of p_{\Tilde{Y}_i|Y}
self.p_niCY = []
for n_for_target in n_per_target:
p_niCy = [] # for a specific target
for i in range(self.n_heads):
n_i = self.get_ith_nuisance(
i, torch.arange(n_for_target).long())
p_niCy.append(label_distribution(n_i, self.n_classes))
self.p_niCY.append(p_niCy)
def compute_entropies_(self):
# all of these assume uniform distribution
n_per_target = np.array(list(self.n_per_target.values()))
n_idcs = n_per_target.sum()
self.H_x = math.log(n_idcs, BASE_LOG)
self.H_y = entropy(n_per_target, base=BASE_LOG)
self.H_yCx = sum(
math.log(N, BASE_LOG) * N / n_per_target.sum() for N in n_per_target
)
def reset_parameters(self):
weights_init(self)
self._counter_warm_Q_zx = 0
def get_ith_nuisance(self, i, x_idx, is_torch=True):
if not self.is_optimal_nuisance:
if is_torch:
# making sure you are on correct device
self._rand_indcs[i] = self._rand_indcs[i].to(x_idx.device)
n_i = self._rand_indcs[i][x_idx] % self.n_classes
return n_i
        # i_rand_idx : which group of random indices (i.e. which of the `n_per_head` resamplings). If `n_per_head=1` then `i_rand_idx=0`
        # i_in_rand_idx : index within that group. If `n_per_head=1` then `i_in_rand_idx=i`
i_rand_idx, i_in_rand_idx = divmod(i, self.n_covering)
if is_torch:
# making sure you are on correct device
self._rand_indcs[i_rand_idx] = self._rand_indcs[i_rand_idx].to(
x_idx.device)
        # to better approximate H_Q[X|Z] we actually compute an (MC approximation of the)
        # expectation over possible permutations of the indices. Indeed, the indices could
        # have been assigned differently, which can change the optimization process. Each MC
        # sample corresponds to another possible such index labeling
if max(x_idx) >= len(self._rand_indcs[i_rand_idx]):
raise NotEnoughHeads(
f"max(x_idx)={max(x_idx)}>{len(self._rand_indcs[i_rand_idx])}=len(self._rand_indcs[i_rand_idx])"
)
permuted_x_idx = self._rand_indcs[i_rand_idx][x_idx]
n_i = self.idcs_to_baseK_nuisance(
i_in_rand_idx, to_numpy(
permuted_x_idx), self.n_classes, self.n_covering
)
if is_torch:
n_i = torch.from_numpy(n_i).to(x_idx.device)
return n_i
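    # Example (sketch, illustrative numbers): with n_covering=3 and n_classes=10,
    # head i=4 gives divmod(4, 3) = (1, 1), i.e. it predicts the 2nd base-10
    # digit of the example indices under the 2nd random permutation.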
def compute_zy_loss(self, y_pred, targets):
"""Compute loss for the classifier Z -> Y."""
# H_Q[Y|Z]
H_Q_yCz = self.zy_criterion(y_pred, targets) / math.log(BASE_LOG)
#! H[Y] is the training one (even at test time)
# DI_Q[Y;Z] = H[Y] - H_Q[Y|Z]
DIQ_yz = self.H_y - H_Q_yCz
self._store(H_Q_yCz=H_Q_yCz, DIQ_yz=DIQ_yz)
return H_Q_yCz
def _compute_H_Q_nCzy(self, z_sample, targets, is_cap):
"""Compute \sum_i \sum_y H_Q[\Tilde{Y}_i|z,y]"""
ys = extract_target(targets, self.map_target_position)
idcs = targets[self.map_target_position["index"]]
H_Q_nCzy = 0
DI_Q_nCzy = 0
heads_delta_acc_nCy = 0
present_labels = ys.unique()
for present_label in present_labels:
# select everything based on the label => conditional prediction
selector_cond = ys == present_label
x_idcs_cond = idcs[selector_cond]
z_sample_cond = z_sample[:, selector_cond, :]
Q_zx_cond = self.Q_zx[present_label]
(
H_Q_nCzy_cond,
heads_delta_acc_n_cond,
DI_Q_nCzy_cond,
) = self._compute_H_Q_nCz(
z_sample_cond,
x_idcs_cond,
Q_zx_cond,
is_cap,
targets[self.map_target_position["target"]],
)
H_Q_nCzy = H_Q_nCzy + H_Q_nCzy_cond / len(present_labels)
            heads_delta_acc_nCy = heads_delta_acc_nCy + heads_delta_acc_n_cond / len(
                present_labels
            )
DI_Q_nCzy = DI_Q_nCzy + DI_Q_nCzy_cond / len(present_labels)
return H_Q_nCzy, heads_delta_acc_nCy, DI_Q_nCzy
def _compute_H_Q_ni(self, n_i, i, q_niCz):
"""Estimate the worst case cross entropy (i.e. predicting with the marginal)."""
self.p_ni[i].logits = self.p_ni[i].logits.to(n_i.device)
p_ni = self.p_ni[i].logits.repeat(len(n_i), 1)
H_Q_ni = F.cross_entropy(p_ni, n_i) / math.log(BASE_LOG)
# also return the accuracy of marginal
marginal_acc_ni = accuracy_score(n_i.cpu(), p_ni.argmax(-1).cpu())
return H_Q_ni, marginal_acc_ni
def batch_predict_heads(self, z_sample, Q_zx):
if len(Q_zx) == 0:
return []
        #! not run in parallel because it was not working well; still have to see why it can't be parallelized
return [mean_p_logits(Q_zxi(z_sample)) for Q_zxi in Q_zx]
def _compute_H_Q_nCz(self, z_sample, x_idcs, Q_zx, is_cap, y):
"""Compute \sum_i H_Q[\Tilde{Y}_i|z]"""
# for all heads, predict, and average across num. samples
q_nCz = self.batch_predict_heads(z_sample, Q_zx)
H_Q_nCz = 0
DI_Q_nCz = 0
heads_delta_acc_n = 0
for i, q_niCz in enumerate(q_nCz):
n_i = self.get_ith_nuisance(i, x_idcs)
if self.is_use_y_as_n:
n_i = y
# H_Q[\Tilde{Y}_i|Z]
H_Q_niCz = F.cross_entropy(q_niCz, n_i) / math.log(BASE_LOG)
# H_Q[\Tilde{Y}_i]
H_Q_ni, marginal_acc_ni = self._compute_H_Q_ni(n_i, i, q_niCz)
if is_cap:
                # in case your loss is worse than the marginal, don't backprop through the encoder
                # but still improve the head (it means the internal optimization is not correct yet)
                # incidentally this also ensures positivity of the estimated decodable information
if H_Q_niCz > H_Q_ni:
H_Q_niCz = H_Q_niCz * 0 + H_Q_ni
# DI_Q[\Tilde{Y}_i <- Z] = H_Q_ni - H_Q_niCz
DI_Q_niz = H_Q_ni - H_Q_niCz
# H_Q[\Tilde{Y}|Z] = \sum_i H_Q[\Tilde{Y}_i|Z]
# DI_Q[\Tilde{Y} <- Z] = \sum_i DI_Q[\Tilde{Y}_i <- Z]
            # only divide by self.n_per_head: we sum over all heads but average over the n_per_head resamplings
H_Q_nCz = H_Q_nCz + H_Q_niCz / self.n_per_head
DI_Q_nCz = DI_Q_nCz + DI_Q_niz / self.n_per_head
# only save delta accuracy with marginal
heads_acc_ni = accuracy_score(n_i.cpu(), q_niCz.argmax(-1).cpu())
heads_delta_acc_ni = heads_acc_ni - marginal_acc_ni
heads_delta_acc_n = heads_delta_acc_n + \
heads_delta_acc_ni / len(q_nCz)
return H_Q_nCz, heads_delta_acc_n, DI_Q_nCz
def compute_zx_loss_helper(self, z_sample, y, is_cap, is_store=True):
if self.conditional == "H_Q[X|Z,Y]":
H_loss, head_delta_acc, DI_loss = self._compute_H_Q_nCzy(
z_sample, y, is_cap
)
else:
# if self.conditional == "H_Q'[X|Z,Y]" actually computing H_Q_nCzy
H_loss, head_delta_acc, DI_loss = self._compute_H_Q_nCz(
z_sample,
y[self.map_target_position["index"]],
self.Q_zx,
is_cap,
y[self.map_target_position["target"]], # DEV
)
if is_store:
# storing for plots
if self.conditional is None:
# H_Q[X|Z] := \sum_i H_Q[\Tilde{Y}_i|Z]
# I_Q[X<-Z] := \sum_i H[\Tilde{Y}_i] - H_Q[\Tilde{Y}_i|Z]
self._store(H_Q_xCz=H_loss,
h_delta_acc=head_delta_acc, DIQ_xz=DI_loss)
elif "[X|Z,Y]" in self.conditional:
# H_Q[X|Z,Y] := \sum_y \sum_{\Tilde{Y}_i \neq Y} H_Q[\Tilde{Y}_i|Z,y]
# I_Q[X<-Z|Y] := \sum_{\Tilde{Y}_i \neq Y} H[\Tilde{Y}_i | Y] - H_Q[X|Z,Y]
self._store(
H_Q_xCzy=H_loss, h_delta_acc=head_delta_acc, DIQ_xzCy=DI_loss
)
elif self.conditional == "H_Q[X|Z]-H_Q[Y|Z]":
# H_Q[X|Z] - H_Q[Y|Z] := \sum_{\Tilde{Y}_i \neq Y} H_Q[\Tilde{Y}_i|Z]
# I_Q[X<-Z] - I_Q[Y<-Z] := \sum_{\Tilde{Y}_i \neq Y} H[\Tilde{Y}_i] - H_Q[\Tilde{Y}_i|Z]
self._store(
d_H_Q_xCz=H_loss, h_delta_acc=head_delta_acc, d_DIQ_xz=DI_loss
)
return H_loss
def compute_zx_loss_encoder(self, z_sample, targets):
"""Compute all losses for the encoder Z -> X."""
curr_beta = self.beta(self.training) if self.beta(
False) is not None else 0
# capping : due to minimax one solution is to get infinitely bad (no need if no minimax)
is_cap = True if curr_beta >= 0 else False
H_loss = self.compute_zx_loss_helper(
z_sample, targets, is_cap=is_cap, is_store=True
)
        # the representation should do poorly at predicting the base expansion of the index.
        # Could use a gradient reversal layer, but that would not allow clamping to the random loss
zx_loss = -H_loss
        # add some regularization if the mean squared norm > 1; otherwise it explodes due to adversarial training
z_norm = z_sample.pow(2).mean()
if curr_beta >= 0 and self.z_norm_reg > 0 and z_norm > 1:
z_norm_reg = self.z_norm_reg
            if z_norm > 10:  # hard boundary at 10
z_norm_reg = 100 * z_norm_reg
# only if positive beta because this causes norm to explode
zx_loss = zx_loss + z_norm_reg * z_norm
zx_loss = curr_beta * zx_loss
return zx_loss
def compute_zx_loss(self, z_sample, targets):
"""Compute all losses Z -> X, for encoder and for the heads."""
zx_loss_heads = self.compute_zx_loss_helper(
z_sample.detach(), # don't backprop through encoder
targets,
is_cap=False,
is_store=False,
)
with no_grad_modules([self.Q_zx]): # don't backprop through heads
zx_loss_enc = self.compute_zx_loss_encoder(z_sample, targets)
if self._counter_warm_Q_zx < self.warm_Q_zx:
            zx_loss_enc = detach(zx_loss_enc)  # for logging you still want the value
        # - the detach makes sure gradients flow through `zx_loss_heads` while its value cancels out when plotting
        # also note that `zx_loss_heads` IS NOT SCALED BY BETA
zx_loss = zx_loss_enc + zx_loss_heads - detach(zx_loss_heads)
return zx_loss
def forward(self, out, targets):
if self.training:
self._counter_warm_Q_zx += 1
y_pred, z_sample, p_zCx = out
if p_zCx.out is not None:
p_zCx_base = p_zCx.out.base_dist
# z_norm : elementwise square, z_mean_norm : mean of absolute val, z_std : mean of standard dev
self._store(
z_norm=z_sample.pow(2).mean(),
z_mean_norm=p_zCx_base.loc.abs().mean(),
z_std=p_zCx_base.scale.mean(),
)
if self.conditional == "H_Q'[X|Z,Y]":
target = (
extract_target(targets, self.map_target_position)
.unsqueeze(0)
.unsqueeze(-1)
.float()
)
target = torch.repeat_interleave(target, z_sample.size(0), dim=0)
z_sample = torch.cat([z_sample, target], dim=-1)
try:
zx_loss = self.compute_zx_loss(z_sample, targets)
except NotEnoughHeads as e:
# if not training, don't raise an exception (the indexing might be off, in which
# case the predictor cannot compute zx_loss). But we don't want to skip this loss
# entirely: for evaluation we feed the training data with self.training=False
if self.training:
raise e
zx_loss = 0
if self.weight_kl is not None:
p_zCx = p_zCx.out
mean_0 = torch.zeros_like(p_zCx.base_dist.loc)
std_1 = torch.ones_like(p_zCx.base_dist.scale)
p_z = MultivariateNormalDiag(mean_0, std_1)
kl = kl_divergence(p_zCx, p_z).mean(0) / math.log(BASE_LOG)
zx_loss = zx_loss + self.weight_kl * kl
zy_loss = self.compute_zy_loss(y_pred, targets)
if zy_loss > self.threshold_suff: # DEV
zx_loss = zx_loss * 0 + detach(zx_loss)
if self._counter_warm_Q_zx <= self.warm_Q_zx:
# still return loss but no grad
zy_loss = zy_loss * 0 + detach(zy_loss)
if self.is_zx_only: # DEV
zy_loss = 0 * zy_loss
self._store(aux_loss=zx_loss)
if not self.training:
# when evaluating the loss should be log likelihood for checkpointing
return zy_loss
return zy_loss + zx_loss
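# Illustrative sketch (not part of the original repo) of the detach trick used in
# `compute_zx_loss` above; kept purely for documentation.
def _example_value_cancelling_detach():
    """Show that in `a + b - detach(b)` the value of `b` cancels numerically,
    while gradients still flow through `b` to its parameters.
    """
    w = torch.tensor(2.0, requires_grad=True)
    b = w ** 2                      # value 4.0
    loss = 1.0 + b - b.detach()     # value 1.0: b cancels out of the number
    loss.backward()
    assert loss.item() == 1.0
    assert w.grad.item() == 4.0     # ...but d(b)/dw = 2w = 4 still flows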
class DIBLossZX(DIBLoss):
def forward(self, z_sample, targets):
z_sample = z_sample.unsqueeze(
0
) # when only computing DIQ the samples will be squeezed
if self.conditional == "H_Q'[X|Z,Y]":
target = (
extract_target(targets, self.map_target_position)
.unsqueeze(0)
.unsqueeze(-1)
.float()
)
target = torch.repeat_interleave(target, z_sample.size(0), dim=0)
z_sample = torch.cat([z_sample, target], dim=-1)
try:
zx_loss = self.compute_zx_loss(z_sample, targets)
except NotEnoughHeads as e:
# if not training, don't raise an exception (the indexing might be off, in which
# case the predictor cannot compute zx_loss). But we don't want to skip this loss
# entirely: for evaluation we feed the training data with self.training=False
if self.training:
raise e
zx_loss = 0
self._store(aux_loss=zx_loss)
return zx_loss
class DIBLossAltern(DIBLoss):
def __init__(
self,
*args,
Optimizer=partial(torch.optim.Adam, lr=0.001),
altern_minimax=3,
**kwargs,
):
super().__init__(*args, **kwargs)
self.optimizer = Optimizer(self.Q_zx.parameters())
self.altern_minimax = altern_minimax
def optimize_heads(self, z_sample, targets):
z_sample = z_sample.detach() # don't backprop through encoder
def closure():
self.optimizer.zero_grad()
zx_loss = self.compute_zx_loss_helper(
z_sample, targets, is_cap=False, is_store=False
)
zx_loss.backward()
return zx_loss
for i in range(self.altern_minimax):
self.optimizer.step(closure)
def compute_zx_loss(self, z_sample, targets):
"""Compute all losses Z -> X, for encoder and for the heads."""
if not self.training:
# ensure no leaking of test information when fitting the heads on test data
Q_zx_old = copy.deepcopy(self.Q_zx)
try:
# make sure grad enabled even when testing (better estimation of H_Q[X|Z])
with torch.enable_grad():
self.optimize_heads(z_sample, targets)
with no_grad_modules([self.Q_zx]):
zx_loss_enc = self.compute_zx_loss_encoder(z_sample, targets)
finally:
if not self.training:
self.Q_zx = Q_zx_old
if self._counter_warm_Q_zx <= self.warm_Q_zx:
zx_loss_enc = zx_loss_enc * 0 + detach(zx_loss_enc)
return zx_loss_enc
class DIBLossAlternHigher(DIBLoss):
def __init__(
self,
*args,
Optimizer=partial(torch.optim.Adam, lr=0.001),
altern_minimax=3,
**kwargs,
):
super().__init__(*args, **kwargs)
self.Optimizer = Optimizer
self.altern_minimax = altern_minimax
def batch_predict_heads(self, z_sample, Q_zx):
if len(Q_zx) == 0:
return []
return [mean_p_logits(pred_i) for pred_i in Q_zx(z_sample)]
def get_Q_zx_helper(self, Q):
"""Return one set of classifiers from Z to {\Tilde{Y}_i}_i."""
input_dim = (
(self.z_dim +
1) if self.conditional == "H_Q'[X|Z,Y]" else self.z_dim
)
return SingleInputModuleList(
[Q(input_dim, self.n_classes) for _ in range(self.n_heads)]
)
def get_Q_zx(self, Q):
if self.conditional == "H_Q[X|Z,Y]":
# different set of optimal (arg infimum) classifiers for each labels
return SingleInputModuleList(
[self.get_Q_zx_helper(Q) for _ in range(self.n_classes)]
)
else:
# same set of optimal classifiers regardless of label
return self.get_Q_zx_helper(Q)
def compute_zx_loss(self, z_sample, targets):
"""Compute all losses Z -> X, for encoder and for the heads."""
Q_zx_curr, self.Q_zx = self.Q_zx, None
inner_opt = self.Optimizer(Q_zx_curr.parameters())
try:
with higher.innerloop_ctx(
Q_zx_curr, inner_opt, track_higher_grads=self.training,
) as (fnet, diffopt):
self.Q_zx = fnet # setting temporary attribute for computations
# make sure grad enabled even when testing (better estimation of H_Q[X|Z])
with torch.enable_grad():
# Take a few gradient steps to find good heads
for _ in range(self.altern_minimax):
zx_loss = self.compute_zx_loss_helper(
z_sample, targets, is_cap=False, is_store=False
)
diffopt.step(zx_loss)
zx_loss_enc = self.compute_zx_loss_encoder(z_sample, targets)
if self.training:
# reloading your weights so that you can do warm start for next step
# not when testing (avoiding leakage of test data)
Q_zx_curr.load_state_dict(fnet.state_dict())
finally:
self.Q_zx = Q_zx_curr
if self._counter_warm_Q_zx <= self.warm_Q_zx:
zx_loss_enc = zx_loss_enc * 0 + detach(zx_loss_enc)
return zx_loss_enc
class DIBLossLinear(DIBLoss):
def batch_predict_heads(self, z_sample, Q_zx):
outs = Q_zx(z_sample)
return [mean_p_logits(el) for el in torch.chunk(outs, self.n_heads, dim=-1)]
def get_Q_zx_helper(self, Q):
"""Return one set of classifiers from Z to {\Tilde{Y}_i}_i."""
input_dim = (
(self.z_dim +
1) if self.conditional == "H_Q'[X|Z,Y]" else self.z_dim
)
return nn.Linear(input_dim, self.n_classes * self.n_heads)
class DIBLossAlternLinear(DIBLossAltern):
batch_predict_heads = DIBLossLinear.batch_predict_heads
get_Q_zx_helper = DIBLossLinear.get_Q_zx_helper
class DIBLossAlternLinearExact(DIBLoss):
def __init__(self, *args, is_Q_zy=False, seed=123, **kwargs):
self.Q = partial(
SGDClassifier, loss="log", random_state=seed, warm_start=True, alpha=0.01,
) # small regularization for stability
super().__init__(*args, seed=seed, **kwargs)
def predict_head(self, z_sample, Q_z):
W = torch.from_numpy(Q_z.coef_).float().to(z_sample.device).T
b = torch.from_numpy(Q_z.intercept_).float().to(z_sample.device)
return z_sample @ W + b
def batch_predict_heads(self, z_sample, Q_zx):
return [mean_p_logits(self.predict_head(z_sample, Q_zxi)) for Q_zxi in Q_zx]
def get_Q_zx(self, Q):
if self.conditional == "H_Q[X|Z,Y]":
# different set of optimal (arg infimum) classifiers for each labels
return [self.get_Q_zx_helper(Q) for _ in range(self.n_classes)]
else:
# same set of optimal classifiers regardless of label
return self.get_Q_zx_helper(Q)
def get_Q_zx_helper(self, Q):
"""Return one set of classifiers from Z to {\Tilde{Y}_i}_i."""
return [self.Q() for _ in range(self.n_heads)]
def optimize_heads(self, z_sample, targets):
z_sample = np.squeeze(
to_numpy(z_sample).astype(np.float64), axis=0
) # squeeze sample (assume one during training)
x_idcs = to_numpy(targets[self.map_target_position["index"]])
for i, Q_zxi in enumerate(self.Q_zx):
n_i = self.get_ith_nuisance(i, x_idcs, is_torch=False)
if len(np.unique(n_i)) > 1:
# otherwise fit() errors out, and it would be useless anyway
self.Q_zx[i] = Q_zxi.fit(z_sample, n_i)
def compute_zx_loss(self, z_sample, targets):
"""Compute all losses Z -> X, for encoder and for the heads."""
if self.training:
self.optimize_heads(z_sample, targets)
zx_loss_enc = self.compute_zx_loss_encoder(z_sample, targets)
return zx_loss_enc
|
decodable_information_bottleneck-main
|
dib/transformers/ib/dib.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import math
import random
from itertools import zip_longest
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from scipy.stats import entropy
from sklearn.metrics import accuracy_score
from skorch.utils import to_numpy
from torch.distributions import Categorical
from torch.distributions.kl import kl_divergence
from torch.nn.parallel import parallel_apply
from dib.predefined import MLP
from dib.utils.distributions import MultivariateNormalDiag
from dib.utils.helpers import (
BaseRepIthDigits,
Constant,
Identity,
Nchunk_iterable,
ReturnNotTensor,
get_idx_permuter,
is_sorted,
mean_p_logits,
return_not_tensor,
set_requires_grad,
set_seed,
tmp_seed,
update_dict_copy,
)
from dib.utils.initialization import weights_init
from .dib import DIBLoss, IBEncoder
from .helpers import BASE_LOG, EPS_MIN
__all__ = ["VIBLoss"]
logger = logging.getLogger(__name__)
class VIBLoss(nn.Module):
"""VIB Loss.
Parameters
----------
beta : float or callable, optional
Regularization weight. If callable, should return a float given `is_training`.
Importanlty this only changes the gradients but not the model selection / loss plotting.
This is 1000x the multiple of beta in usual VIB (to make it comparable with DIB).
n_per_target : dict
Number of examples for each target.
ZYCriterion : nn.Module, optional
Criterion to compute the loss of Q_zy.
seed : int, optional
Random seed.
"""
def __init__(
self, n_per_target, beta=1, ZYCriterion=nn.CrossEntropyLoss, seed=123, **kwargs
):
super().__init__()
self.n_per_target = n_per_target
self.beta = beta if callable(beta) else Constant(beta)
self.to_store = dict()
self.zy_criterion = ZYCriterion()
self.compute_entropies_()
self._scale_beta = 1e-3
self.seed = seed
set_seed(self.seed) # ensure seed is same as for DIB
compute_zy_loss = DIBLoss.compute_zy_loss
_store = DIBLoss._store
def compute_entropies_(self):
# all of these assume uniform distribution
n_per_target = np.array(list(self.n_per_target.values()))
self.H_x = math.log(n_per_target.sum(), BASE_LOG)
self.H_y = entropy(n_per_target, base=BASE_LOG)
self.H_xCy = sum(
math.log(N, BASE_LOG) * N / n_per_target.sum() for N in n_per_target
)
def compute_z_loss(self, p_zCx):
"""Compute loss of Z."""
# H[Z|X]
H_zCx = p_zCx.entropy().mean(0) / math.log(BASE_LOG)
mean_0 = torch.zeros_like(p_zCx.base_dist.loc)
std_1 = torch.ones_like(p_zCx.base_dist.scale)
p_z = MultivariateNormalDiag(mean_0, std_1)
kl = kl_divergence(p_zCx, p_z).mean(0) / math.log(BASE_LOG)
# I[Z,X] \approx KL[p(Z|x) || r(z)]
# would be equal if r(z) (the prior) was replaced with the marginal p(z)
I_xz = kl
self._store(H_zCx=H_zCx, I_xz=I_xz)
curr_beta = self.beta(self.training) if self.beta(
False) is not None else 0
curr_beta = self._scale_beta * curr_beta
return curr_beta * I_xz
def forward(self, out, y):
#! dirty trick to get back the non tensor outputs
out = [el.out if isinstance(el, ReturnNotTensor) else el for el in out]
y_pred, z_sample, p_zCx = out
p_zCx_base = p_zCx.base_dist
self._store(
z_norm=z_sample.pow(2).mean(),
z_mean_norm=p_zCx_base.loc.abs().mean(),
z_std=p_zCx_base.scale.mean(),
)
z_loss = self.compute_z_loss(p_zCx)
zy_loss = self.compute_zy_loss(y_pred, y)
self._store(aux_loss=z_loss)
return zy_loss + z_loss
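# Illustrative sketch (not part of the original repo): the KL term in
# `compute_z_loss` has a closed form for diagonal Gaussians. It assumes that
# `MultivariateNormalDiag(loc, scale)` wraps an Independent Normal.
def _example_kl_to_standard_normal():
    """KL[N(mu, sigma^2) || N(0, I)] = 0.5 * sum(mu^2 + sigma^2 - 1 - 2*log sigma)."""
    mu = torch.tensor([0.5, -1.0])
    sigma = torch.tensor([1.5, 0.7])
    q = MultivariateNormalDiag(mu, sigma)
    p = MultivariateNormalDiag(torch.zeros(2), torch.ones(2))
    kl_closed = 0.5 * (mu ** 2 + sigma ** 2 - 1 - 2 * sigma.log()).sum()
    assert torch.allclose(kl_divergence(q, p), kl_closed)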
|
decodable_information_bottleneck-main
|
dib/transformers/ib/vib.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import numpy as np
import torch
from dib.utils.helpers import mean_p_logits
__all__ = ["BASE_LOG", "N_CORR"]
logger = logging.getLogger(__name__)
EPS_MIN = 1e-7
BASE_LOG = 2 # to convert to understandable measure of information
N_CORR = 3
CORR_GROUPS = ["lin", "q", "hid16"] # "lay3", "hid4lay1"
def mean_std(arr):
if len(arr) == 0:
return 0, 0
if len(arr) == 1:
return arr[0], 1
if isinstance(arr[0], torch.Tensor):
arr = torch.stack(arr, 0)
return arr.mean(), arr.std()
mean = np.mean(arr)
std = np.std(arr, ddof=1)
return mean, std
class NotEnoughHeads(Exception):
"""Raised when the input value is too small"""
pass
def detach(x):
try:
return x.detach()
except AttributeError:
return x
def mean_p_logits_parallel(heads, X):
"""
Compute all the heads in parallel and take the average across n samples in probability space.
"""
if len(heads) == 0:
return []
#! NOT IN PARALLEL BECAUSE IT WAS NOT WORKING WELL: still have to understand why it
# cannot be parallelized on a single GPU
return [mean_p_logits(head(X)) for head in heads]
# return [mean_p_logits(out) for out in parallel_apply(heads, [X] * len(heads))]
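# Illustrative sketch (not part of the original repo) of what averaging "in
# probability space" means here; the exact behaviour of `mean_p_logits`
# (defined in dib.utils.helpers) is an assumption.
def _example_mean_in_probability_space():
    """Average softmax probabilities over the sample dimension, then go back
    to log space, instead of naively averaging the logits themselves.
    """
    logits = torch.randn(5, 3, 10)  # (n_samples, batch, n_classes)
    avg = logits.softmax(-1).mean(0).log()
    assert avg.shape == (3, 10)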
class SingleInputModuleList(torch.nn.ModuleList):
"""Wrapper around `ModuleList` which takes a single input (i.e. has a forward).
Useful for `higher` monkey patching, which doesn't work with ModuleList."""
def forward(self, inp):
return [m(inp) for m in self]
|
decodable_information_bottleneck-main
|
dib/transformers/ib/helpers.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .mcclf import *
|
decodable_information_bottleneck-main
|
dib/classifiers/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
import torch
import torch.nn as nn
from dib.utils.helpers import mean_p_logits
__all__ = ["MCTrnsfClassifier"]
class MCTrnsfClassifier(nn.Module):
"""Class that merges a pretrained stochastic transformer with a classifier. It does
this by sampling multiple times from the transformer and passing it to the classifier.
Parameters
----------
transformer : nn.Module
Transformer that return the representation. It should have a property `is_transform.`
Classifier : nn.Module
Uninitialized classifier.
z_dim : int, optional
Output size of the transformer (input to classifier).
n_classes : int, optional
Number of output classes.
n_test_samples : int, optional
Number of Monte Carlo samples from the transformer during test. The transformer should have
an attribute `n_test_samples` and return all the samples in the first dimension (before batch).
is_freeze_transformer : bool, optional
Whether to freeze the transformer or train it.
kwargs : dict, optional
Additional arguments to the classifier.
"""
def __init__(
self,
transformer,
Classifier,
z_dim,
n_classes,
n_test_samples=1,
is_freeze_transformer=True,
**kwargs
):
super().__init__()
self.transformer = transformer
self.is_freeze_transformer = is_freeze_transformer
self.clf = Classifier(z_dim, n_classes, **kwargs)
self.transformer.n_test_samples = n_test_samples
def forward(self, X):
self.transformer.is_transform = True
if self.is_freeze_transformer:
with torch.no_grad():
Z = self.transformer(X)
else:
Z = self.transformer(X)
out = mean_p_logits(self.clf(Z)) # average over samples
return out
|
decodable_information_bottleneck-main
|
dib/classifiers/mcclf.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .data import *
from .evaluate import *
from .train import *
from .visualize import *
|
decodable_information_bottleneck-main
|
utils/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import os
import shutil
import skorch
import torch
from skorch.callbacks import EarlyStopping, LoadInitState, ProgressBar
from skorch.helper import predefined_split
from dib.training.helpers import FixRandomSeed
from dib.utils.helpers import set_seed
from .helpers import TensorBoard, clean_end_run, get_checkpoint
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
pass
__all__ = ["train_load"]
MOD_SUMM_FILENAME = "model_summary.txt"
logger = logging.getLogger(__name__)
def train_load(
Model,
datasets,
chckpnt_dirnames,
is_train=True,
is_continue_train=False,
is_continue_best=False,
patience=None,
is_progressbar=False,
checkpoint_epochs=None,
load_epoch=None,
tensorboard_dir=None,
seed=123,
device="cuda" if torch.cuda.is_available else "cpu",
callbacks=[],
clean_after_run="training",
monitor_best="valid_loss_best",
is_load_criterion=True, # DEV,
is_return_init=False,
**kwargs,
):
"""Train or load the model.
Parameters
----------
Model : sklearn.base.BaseEstimator
Uninitialized model to train.
datasets : dictionary of torch.utils.data.Dataset
Dictionary of the `"train"`, `"valid"`, and `"test"`.
chckpnt_dirnames : list of str, optional
Directories where checkpoints will be saved.
patience : int, optional
Patience for early stopping. Only used if a validation set is given.
is_train : bool, optional
Whether to train rather than load a pretrained model. If False, reverses the order of
chckpnt_dirnames, so that loading happens from the first directory.
is_continue_train : bool, optional
Whether to continue training from the last checkpoint of the previous run.
is_continue_best : bool, optional
Whether to continue training from the best model rather than last. If `is_continue_best`
continues from the first checkpoint directory (i.e. result dir), but last one if not
(i.e. tmp dir).
is_progressbar : bool, optional
Whether to train with a progressbar.
checkpoint_epochs : list of int, optional
List of int saves at each of these epochs.
tensorboard_dir : str, optional
Directory for saving tensorboard logs.
load_epoch : int, optional
What epoch to load if not `is_train` and multiple epochs were saved with a
suffix `_epoch{e}`. By default: the last one.
device : str, optional
Device on which to run the model.
seed : int, optional
Pseudo random seed.
callbacks : list, optional
Initial callbacks.
clean_after_run : ["training","all",None], optional
Cleans the directory. If "training" removes all the checkpoiting needed for training
(last epoch models and all the optimizer). If "all" also removes the best_epoch model.
monitor_best : {"valid_loss_best", "valid_acc_best", "train_loss_best", "last", int}, optional
What should be monitored for the best model. If int this is simply a given epoch.
kwargs :
Additional arguments to the model.
"""
set_seed(seed)
logger.info(f"Using {chckpnt_dirnames} for checkpoint.")
for chckpnt_dirname in chckpnt_dirnames:
os.makedirs(chckpnt_dirname, exist_ok=True)
if not is_train:
# to load reverse file order
chckpnt_dirnames = chckpnt_dirnames[::-1]
callbacks = get_callbacks(callbacks, chckpnt_dirnames, is_continue_train, is_continue_best,
checkpoint_epochs, datasets, monitor_best, patience, seed,
is_progressbar, tensorboard_dir, is_train)
train_split = predefined_split(
datasets["valid"]) if "valid" in datasets else None
trainer = Model(
callbacks=callbacks, train_split=train_split, device=device, **kwargs
)
if is_return_init:
trainer.initialize()
return trainer
if is_train:
trainer.fit(datasets["train"], y=None)
trainer = load_trainer(trainer, datasets, chckpnt_dirnames,
load_epoch, monitor_best, is_load_criterion=is_load_criterion)
with open(os.path.join(chckpnt_dirnames[0], MOD_SUMM_FILENAME), "w") as f:
f.write(str(trainer.module_))
clean_end_run(clean_after_run, chckpnt_dirnames)
return trainer
def get_callbacks(callbacks, chckpnt_dirnames, is_continue_train,
is_continue_best, checkpoint_epochs, datasets,
monitor_best, patience, seed, is_progressbar,
tensorboard_dir, is_train):
for chckpnt_dirname in chckpnt_dirnames:
chckpt_last = get_checkpoint(chckpnt_dirname, monitor="last")
callbacks.append(chckpt_last)
# loading from previous checkpoint to continue training
if is_continue_train:
if is_continue_best:
chckpt_cont = get_checkpoint(
chckpnt_dirnames[0], monitor=monitor_best)
else:
chckpt_cont = chckpt_last
# will continue from last dirname
load_state = LoadInitState(chckpt_cont)
callbacks.append(load_state)
# checkpoint from a given epoch
if checkpoint_epochs is not None:
for chckpnt_dirname in chckpnt_dirnames:
callbacks.append(
get_checkpoint(chckpnt_dirname, monitor=checkpoint_epochs)
)
# Nota Bene : the best checkpoint added will be the one logged with a "+"
if "valid" in datasets:
for chckpnt_dirname in chckpnt_dirnames:
chckpt_best = get_checkpoint(chckpnt_dirname, monitor=monitor_best)
callbacks.append(chckpt_best)
if patience is not None:
callbacks.append(EarlyStopping(patience=patience))
if seed is not None:
callbacks.append(FixRandomSeed(seed))
if is_progressbar:
callbacks.append(ProgressBar())
if tensorboard_dir is not None and is_train:
if os.path.exists(tensorboard_dir) and os.path.isdir(tensorboard_dir):
shutil.rmtree(tensorboard_dir)
writer = SummaryWriter(tensorboard_dir)
callbacks.append(TensorBoard(writer))
return callbacks
def load_trainer(trainer, datasets, chckpnt_dirnames, load_epoch,
monitor_best, is_load_criterion=True):
trainer.initialize()
# loads from last dirname
if load_epoch is not None:
# checkpoint at a specific epoch
chckpt = get_checkpoint(chckpnt_dirnames[-1], monitor=load_epoch)
elif "valid" in datasets:
# if the best checkpoint was saved then load it
chckpt = get_checkpoint(chckpnt_dirnames[-1], monitor=monitor_best)
else:
# load from first dirname for mode="last"
chckpt = get_checkpoint(chckpnt_dirnames[-1], monitor="last")
trainer = load_chkpnt_model_(
trainer, chckpt, is_load_criterion=is_load_criterion)
return trainer
def load_chkpnt_model_(trainer, chckpt, is_load_criterion=True):
# don't load optimizer
trainer.load_params(f_history=chckpt.f_history_)
trainer.load_params(
f_params=chckpt.get_formatted_files(trainer)["f_params"])
if is_load_criterion:
trainer.load_params(
f_criterion=chckpt.get_formatted_files(trainer)["f_criterion"])
return trainer
|
decodable_information_bottleneck-main
|
utils/train.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import copy
import glob
import logging
import math
import os
from functools import partial, partialmethod
import numpy as np
import pandas as pd
import scipy
import skorch
import torch
import torch.nn as nn
from sklearn.metrics import accuracy_score, classification_report, log_loss
from skorch.callbacks import Freezer, LRScheduler
from skorch.callbacks.scoring import ScoringBase
from skorch.dataset import unpack_data, uses_placeholder_y
from skorch.history import History
from torch.nn.utils.fusion import fuse_conv_bn_eval
from dib.predefined import MLP
from dib.training import NeuralNetTransformer
from dib.training.helpers import clone_trainer
from dib.training.trainer import _get_params_for_optimizer
from dib.transformers import BASE_LOG, DIBLoss, DIBLossZX
from dib.utils.helpers import (
CrossEntropyLossGeneralize,
extract_target,
set_seed,
to_numpy,
)
from utils.helpers import (
SFFX_TOAGG,
BatchNormConv,
StoreVarGrad,
add_noise_to_param_,
batchnorms2convs_,
clip_perturbated_param_,
cont_tuple_to_tuple_cont,
dict_none_toNaN,
get_device,
get_exponential_decay_gamma,
get_float_value,
merge_dicts,
save_pattern,
set_requires_grad,
)
try:
from torch.utils.tensorboard import SummaryWriter
except ImportError:
pass
__all__ = ["eval_loglike", "eval_clf"]
FILE_SCORE = "score.csv"
FILE_CLF_REP = "clf_report.csv"
logger = logging.getLogger(__name__)
def eval_corr_gen(trainer, dataset, mode="train"):
"""
Evaluates a classifier for correlation with generalization using some of the best predictors
of generalization from each section of "FANTASTIC GENERALIZATION MEASURES AND WHERE TO FIND THEM".
Also does the usual classifier evaluation.
"""
# measure usual classification (i.e. how well generalizes)
out_eval_clf = eval_clf(trainer, dataset)
if mode == "test":
# only do correlation measure if "train mode"
return out_eval_clf
trainer = clone_trainer(trainer)
logger.info(f"len(dataset)={len(dataset)}")
# Variance of gradients (for classifier and transformer)
logger.info("var_grad")
var_grad = get_var_grad(trainer, dataset)
logger.info(f"var_grad={var_grad}")
logger.info("d_H_Q_xCz")
# before freezing the net
d_H_Q_xCz = get_H_Q_xCz(
trainer, dataset, "d_H_Q_xCz", conditional="H_Q[X|Z]-H_Q[Y|Z]"
)
logger.info("H_Q_xCz")
# H_Q[X|Z]
H_Q_xCz = get_H_Q_xCz(trainer, dataset, "H_Q_xCz")
# H_Q+[X|Z]
logger.info("d_H_Q+_xCz")
d_H_Qp_xCz = get_H_Q_xCz(
trainer,
dataset,
"H_Q_xCz",
Q_zx=partial(
MLP, hidden_size=2048, n_hidden_layers=trainer.module_.clf.n_hidden_layers
),
)
# H_Q-[X|Z]
logger.info("d_H_Q-_xCz")
d_H_Qm_xCz = get_H_Q_xCz(
trainer,
dataset,
"H_Q_xCz",
Q_zx=partial(
MLP, hidden_size=2, n_hidden_layers=trainer.module_.clf.n_hidden_layers
),
)
# freezes all batchnorm layers by converting them to convolutions
trainer.module_.eval()
batchnorms2convs_(trainer.module_)
# Entropy of the logits
logger.info("entropy")
y_pred_proba = trainer.predict_proba(dataset)
y_pred_ent = scipy.stats.entropy(
y_pred_proba, axis=1, base=BASE_LOG).mean()
# Path Norm (for classifier and transformer)
logger.info("path_norm")
path_norm = get_path_norm(trainer, dataset)
# Sharpness magnitude => max (relative) change in weights that causes less than `target_deviation` difference in log likelihood
logger.info("sharp_mag")
sharp_mag = get_sharp_mag(trainer, dataset)
return dict(
y_pred_ent=y_pred_ent,
path_norm=path_norm,
var_grad=var_grad,
sharp_mag=sharp_mag,
H_Q_xCz=H_Q_xCz,
d_H_Qp_xCz=d_H_Qp_xCz,
d_H_Qm_xCz=d_H_Qm_xCz,
d_H_Q_xCz=d_H_Q_xCz,
**out_eval_clf,
)
def eval_clf(trainer, dataset, **kwargs):
"""Evaluates a classifier on a dateset."""
y_pred_proba = trainer.predict_proba(dataset)
loglike = -log_loss(dataset.targets, y_pred_proba)
y_pred = y_pred_proba.argmax(-1)
accuracy = accuracy_score(dataset.targets, y_pred)
top5_acc = top_n_accuracy_score(dataset.targets, y_pred_proba, n=5)
return dict(accuracy=accuracy, top5_acc=top5_acc, loglike=loglike)
def eval_trnsf(trainer, dataset, **kwargs):
"""
Evaluates a transformer on a dataset by returning everything in history that starts with `valid_`.
The difference with `eval_clf` is that this saves all temporary variables.
"""
trainer.check_data(dataset, None)
trainer.notify("on_epoch_begin", dataset_train=dataset,
dataset_valid=dataset)
trainer._single_epoch(dataset, training=False, epoch=0)
# don't call "on epoch end" because should not checkpoint (but still want scoring)
for _, cb in trainer.callbacks_:
# score everything on validation because you didn't train
if isinstance(cb, ScoringBase) and not cb.on_train:
cb.on_epoch_end(trainer, dataset_train=dataset,
dataset_valid=dataset)
return {
k.replace("valid_", ""): v
for k, v in trainer.history[-1].items()
if k.startswith("valid_")
and k not in ["valid_batch_count"]
and "_best" not in k
}
def eval_trainer_log(
trainer,
dataset,
csv_score_pattern,
chckpnt_dirnames,
hyperparameters,
evaluator=eval_clf,
tensorboard_dir=None,
is_append=False,
mode="test",
epoch="last",
file_score=FILE_SCORE,
file_clf_rep=FILE_CLF_REP,
**kwargs,
):
"""Evaluate a trainer and log it."""
to_log = evaluator(trainer, dataset, mode=mode, **kwargs)
# first save the header, then the actual scores
if not is_append:
save_pattern(chckpnt_dirnames, csv_score_pattern, file_score)
for metric_name, score in to_log.items():
save_pattern(
chckpnt_dirnames,
csv_score_pattern,
file_score,
formatting=dict(epoch=epoch, metric=metric_name,
mode=mode, score=score),
logger=logger,
)
if tensorboard_dir is not None:
with SummaryWriter(log_dir=tensorboard_dir + "hypopt/") as w:
w.add_hparams(
# tensorboard does not accept None
dict_none_toNaN(hyperparameters),
{f"hparam/{metric_name}": score},
)
def eval_loglike(trainer, dataset, seed=123, **kwargs):
"""Return the log likelihood for each image in order."""
set_seed(seed) # make sure same order and indices for context and target
trainer.module_.to(trainer.device)
trainer.criterion_.is_return_all = True
y_valid_is_ph = uses_placeholder_y(dataset)
all_losses = []
trainer.notify("on_epoch_begin", dataset_valid=dataset)
for data in trainer.get_iterator(dataset, training=False):
Xi, yi = unpack_data(data)
yi_res = yi if not y_valid_is_ph else None
trainer.notify("on_batch_begin", X=Xi, y=yi_res, training=False)
step = trainer.validation_step(Xi, yi, **kwargs)
trainer.notify("on_batch_end", X=Xi, y=yi_res, training=False, **step)
all_losses.append(-step["loss"]) # use log likelihood instead of NLL
trainer.criterion_.is_return_all = False
return torch.cat(all_losses, dim=0).detach().cpu().numpy()
# credits : https://github.com/scikit-learn/scikit-learn/pull/8234
def top_n_accuracy_score(y_true, y_pred, n=5, normalize=True):
"""top N Accuracy classification score.
For multiclass classification tasks, this metric returns the
number of times that the correct class was among the top N classes
predicted.
Parameters
----------
y_true : 1d array-like, or label indicator array / sparse matrix
Ground truth (correct) labels.
y_pred : array-like, where for each sample, each row represents the
likelihood of each possible label.
The number of columns must be at least as large as the set of possible
label values.
normalize : bool, optional (default=True)
If ``False``, return the number of top N correctly classified samples.
Otherwise, return the fraction of top N correctly classified samples.
Returns
-------
score : float
If ``normalize == True``, return the proportion of top N correctly
classified samples, (float), else it returns the number of top N
correctly classified samples (int.)
The best performance is 1 with ``normalize == True`` and the number
of samples with ``normalize == False``.
See also
--------
accuracy_score
Notes
-----
If n = 1, the result will be the same as the accuracy_score. If n is the
same as the number of classes, this score will be perfect and meaningless.
In cases where two or more classes are assigned equal likelihood, the
result may be incorrect if one of those classes falls at the threshold, as
one class must be chosen to be the nth class and the class chosen may not
be the correct one.
Examples
--------
>>> import numpy as np
>>> y_pred = np.array([[0.1, 0.3, 0.4, 0.2],
... [0.4, 0.3, 0.2, 0.1],
... [0.2, 0.3, 0.4, 0.1],
... [0.8, 0.1, 0.025, 0.075]])
>>> y_true = np.array([2, 2, 2, 1])
>>> top_n_accuracy_score(y_true, y_pred, n=1)
0.5
>>> top_n_accuracy_score(y_true, y_pred, n=2)
0.75
>>> top_n_accuracy_score(y_true, y_pred, n=3)
1.0
>>> top_n_accuracy_score(y_true, y_pred, n=2, normalize=False)
3
"""
num_obs, num_labels = y_pred.shape
idx = num_labels - n - 1
counter = 0
argsorted = np.argsort(y_pred, axis=1)
for i in range(num_obs):
if y_true[i] in argsorted[i, idx + 1:]:
counter += 1
if normalize:
return counter / num_obs
else:
return counter
def _load_something(
get_rows_cols_agg, pattern, base_dir="", mark_to_agg=lambda c: c + SFFX_TOAGG
):
"""Load the something from all the saved values .
Parameters
----------
get_rows_cols_agg : callable
Function that takes files, raw_files as input and returns all the rows as well as the names
of columns over which to aggregate.
pattern : str
Pattern of files to load. Needs to start with `tmp_results`.
base_dir : str, optional
Base directory to prepend to pattern.
mark_to_agg : callable, optional
Function that marks columns to aggregate.
"""
raw_files = glob.glob(base_dir + pattern, recursive=True)
# rm results/ and /score.csv and add experiment_ name
files = [
"/".join(
col if i > 0 else f"experiment_{col}"
for i, col in enumerate(f[len(base_dir):].split("/")[1:-1])
)
for f in raw_files
]
rows, columns, toaggreg = get_rows_cols_agg(files, raw_files)
# the first column (experiment) will be wrong if there are underscores in the experiment names
results = pd.DataFrame(rows, columns=columns)
results = results.apply(pd.to_numeric, errors="ignore")
results = results.apply(to_bool)
def fn_rename(col):
if col in toaggreg:
return mark_to_agg(col)
return col
return results.rename(columns=fn_rename)
def to_bool(s):
if s.dtypes != "object":
return s
return s.replace({"True": True, "False": False})
def load_results(
pattern="tmp_results/**/score.csv",
metrics=["test_acc", "test_loss"],
metric_column_name="{mode}_{metric}",
**kwargs,
):
"""Load the results from the folder.
Parameters
----------
pattern : str, optional
Pattern of files to load. Needs to start with `tmp_results`.
metrics : list of string, optional
Precomputed metrics to load. E.g. ["test_acc","test_loss","test_top5_acc","train_loss"].
metric_column_name : str, optional
Name of the column containing the metric.
"""
def get_rows_cols_agg(files, raw_files, metrics=metrics):
rows = []
for raw_file, file in zip(raw_files, files):
# hyperparameters
row = [file.split("/")[0][len("experiment") + 1:]]
# hyperparameters
row += [str_to_val(folder.split("_")[-1])
for folder in file.split("/")[1:]]
# metrics
score = pd.read_csv(raw_file)
# only keep numeric
score = score[pd.to_numeric(
score["{score}"], errors="coerce").notnull()]
score["{score}"] = pd.to_numeric(score["{score}"])
score = score.pivot_table(
columns=metric_column_name, values="{score}", index="{epoch}"
)
score = score.reindex(columns=metrics).reset_index()
for _, r in score.iterrows():
rows.append(row + list(r.values))
columns = (
["experiment"]
+ ["_".join(folder.split("_")[:-1])
for folder in files[0].split("/")][1:]
+ ["epochs"]
+ metrics
)
return rows, columns, metrics
results = _load_something(get_rows_cols_agg, pattern=pattern, **kwargs)
return results
def load_histories(
pattern="tmp_results/**/transformer/last_epoch_history.json", **kwargs
):
"""Load the history of a model (validation and train variables saved at every epoch).
Parameters
----------
pattern : str, optional
Pattern of files to load. Needs to start with `tmp_results`.
kwargs : list of string, optional
Additional arguments to `_load_something`.
"""
def is_plot(key, values):
"""Columns to plot from history."""
if not (key.startswith("train") or key.startswith("valid")):
return False
if len(set(values)) == 1:
return False
if not all(isinstance(v, float) for v in values):
return False
return True
def get_rows_cols_agg(files, raw_files):
to_plot_col = set()
rows = []
for raw_file, file in zip(raw_files, files):
const_row = dict()
const_row["experiment"] = file.split(
"/")[0][len("experiment") + 1:]
# hyperparameters
for col in file.split("/")[1:]:
key = "_".join(col.split("_")[:-1])
value = col.split("_")[-1]
const_row[key] = str_to_val(value)
history = History.from_file(raw_file)
# prepare for line plots
history_to_plot = {
key: history[:, key]
for key in history[0].keys()
if is_plot(key, history[:, key])
}
# check that all plotted keys have the same number of epochs
for i, (k, v) in enumerate(history_to_plot.items()):
if i == 0:
old_k = k
old_len = len(v)
if old_len != len(v):
raise ValueError(
f"Number of epochs not the same for (at least) {old_k} and {k}."
)
for epoch, history_per_epoch in enumerate(
cont_tuple_to_tuple_cont(history_to_plot)
):
row = const_row.copy()
row["epochs"] = epoch
for key, value in history_per_epoch.items():
row[key] = value
to_plot_col.add(key)
rows.append(row)
return rows, None, list(to_plot_col)
results = _load_something(get_rows_cols_agg, pattern=pattern, **kwargs)
return results
def str_to_val(s):
if s == "None":
return None
return s
def accuracy_filter_train(model, X, y, map_target_position, **kwargs):
"""
Helper function that computes the accuracy, but only on the training examples.
"""
target = to_numpy(extract_target(y, map_target_position))
try:
is_train = y[:, map_target_position["constant"]] == 1
target = target[is_train]
y_pred = model.predict_proba(X)[is_train]
out = accuracy_score(target, y_pred.argmax(-1), **kwargs)
except (IndexError, KeyError):
out = accuracy(model, X, target)
return out
def accuracy(model, X, y, **kwargs):
"""
Compute the accuracy score of a Sklearn Classifier on a given dataset.
"""
y_pred = model.predict_proba(X)
return accuracy_score(y, y_pred.argmax(-1), **kwargs)
def loglike(model, X, y, **kwargs):
"""Compute the loglikelihood (base e) score of a Sklearn Classifier on a given dataset."""
y_pred_proba = model.predict_proba(X)
return -log_loss(y, y_pred_proba, **kwargs)
def get_path_norm(trainer, dataset):
"""Compute the pathnorm as described in "FANTASTIC GENERALIZATION MEASURES AND WHERE TO FIND THEM".
I.e. square all parameters, forward-pass all ones, and take the square root of the summed output."""
trainer = clone_trainer(trainer)
# use the mean instead of sampling to make sure that not negative
trainer.module_.transformer.is_use_mean = True
# square all parameters
with torch.no_grad():
for name, W in trainer.module_.named_parameters():
W.pow_(2)
all_ones = dataset[0][0].unsqueeze(0).fill_(1)
logits = trainer.forward(all_ones)[0]
sum_logits = logits.sum().item()
return sum_logits ** 0.5
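# Illustrative sketch (not part of the original repo) of the path-norm recipe in
# `get_path_norm`: for a single bias-free linear layer it reduces to the
# Frobenius norm of the weights.
def _example_path_norm():
    lin = nn.Linear(4, 3, bias=False)
    frob = lin.weight.norm().item()  # ||W||_F of the original weights
    with torch.no_grad():
        lin.weight.pow_(2)  # square all parameters in place
        logits = lin(torch.ones(1, 4))
    path_norm = logits.sum().item() ** 0.5
    assert abs(path_norm - frob) < 1e-4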
def get_var_grad(trainer, dataset):
"""Compute the variance of the gradients."""
trainer = clone_trainer(trainer)
# compute also the gradients of the transformer
trainer.module_.is_freeze_transformer = False
# compute elementwise variance of parameters
trainer.callbacks_.append(("store_grad", StoreVarGrad()))
trainer.check_data(dataset, None)
trainer.notify("on_epoch_begin", dataset_train=dataset,
dataset_valid=dataset)
trainer._single_epoch(dataset, training=True, epoch=0)
last_epoch = trainer.history[-1]["epoch"]
# don't call "on epoch end" because should not checkpoint (but still want to compute variance)
for _, cb in trainer.callbacks_:
if isinstance(cb, StoreVarGrad):
cb.on_epoch_end(trainer, dataset_train=dataset,
dataset_valid=dataset)
var_grad = np.concatenate([v.flatten()
for v in cb.var_grads.values()])
return var_grad.mean() # take mean over all parameters
class NegCrossEntropyLoss(nn.CrossEntropyLoss):
def forward(self, input, target):
# select label from the targets
return -super().forward(input, target[0])
def get_sharp_mag(
trainer,
dataset,
sigma_min=0,
sigma_max=2,
target_deviation=0.1,
n_restart_perturbate=3,
max_binary_search=50,
is_relative=True,
):
"""
Compute the sharpness magnitude 1/alpha'^2 described in [1].
Notes
-----
- This is slightly different than [1] because the target deviation is on cross-entropy instead
of accuracy (as we don't care about accuracy in our paper).
Parameters
----------
trainer : skorch.NeuralNet
dataset : torch.utils.data.Dataset
sigma_min : float, optional
Minimum standard deviation of perturbation.
sigma_max : float, optional
Maximum standard deviation of perturbation.
is_relative : bool, optional
Whether the perturbation scale is relative to each weight's magnitude.
n_restart_perturbate : int, optional
Number of times restarting the perturbation (different initialization for adv perturbate).
target_deviation : float, optional
Maximum difference of log likelihood allowed.
max_binary_search : int, optional
Maximum number of binary search tries.
References
----------
[1] Jiang, Yiding, et al. "Fantastic Generalization Measures and Where to Find Them."
arXiv preprint arXiv:1912.02178 (2019).
"""
trainer = clone_trainer(trainer)
acc = accuracy(trainer, dataset, dataset.targets)
# compute also the gradients of the transformer
trainer.module_.is_freeze_transformer = False
# reverses cross entropy to MAXIMIZE (adversarial)
trainer.criterion = NegCrossEntropyLoss
for bin_search in range(max_binary_search):
sigma_min, sigma_max = get_sharp_mag_interval(
trainer,
acc,
dataset,
sigma_min,
sigma_max,
target_deviation,
n_restart_perturbate,
is_relative,
)
if sigma_min > sigma_max or math.isclose(sigma_min, sigma_max, rel_tol=1e-2):
# if interval for binary search is very small stop
break
if bin_search == max_binary_search - 1:
logger.info(
f"Stopped early beacuase reached max_binary_search={max_binary_search}. [sigma_min,sigma_max]=[{sigma_min},{sigma_max}]"
)
return 1 / (sigma_max ** 2)
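# Illustrative sketch (not part of the original repo) of the binary search driving
# `get_sharp_mag`: find the largest scale sigma whose induced deviation stays
# below `target_deviation`, then report 1 / sigma**2.
def _example_binary_search_sigma(target_deviation=0.1):
    def deviation(sigma):  # toy monotone stand-in for the true deviation
        return sigma ** 2
    sigma_min, sigma_max = 0.0, 2.0
    while not math.isclose(sigma_min, sigma_max, rel_tol=1e-2):
        sigma_new = (sigma_min + sigma_max) / 2
        if deviation(sigma_new) > target_deviation:
            sigma_max = sigma_new
        else:
            sigma_min = sigma_new
    return 1 / (sigma_max ** 2)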
def get_sharp_mag_interval(
unperturbated_trainer,
unperturbated_acc,
dataset,
sigma_min,
sigma_max,
target_deviation,
n_restart_perturbate,
is_relative,
):
sigma_new = (sigma_min + sigma_max) / 2
worst_acc = math.inf
unperturbated_params = {
name: param.detach()
for name, param in unperturbated_trainer.module_.named_parameters()
}
for _ in range(n_restart_perturbate):
trainer = clone_trainer(unperturbated_trainer,
is_reinit_besides_param=True)
# add half of the possible noise to give some space for gradient ascent
add_noise_to_param_(
trainer.module_, sigma=sigma_new / 2, is_relative=is_relative
)
for i, data in enumerate(trainer.get_iterator(dataset, training=True)):
Xi, yi = unpack_data(data)
step = trainer.train_step(Xi, yi)
# clipping perturbation value of added parameters to |w_i * sigma| or |sigma|
clip_perturbated_param_(
trainer.module_,
unperturbated_params,
sigma_new,
is_relative=is_relative,
)
if not torch.isfinite(step["loss"]) or step["loss"].abs() > (
abs(unperturbated_acc) + 10 * target_deviation
):
# if loss is very large for one batch then no need to finish this loop
return sigma_min, sigma_new
curr_acc = accuracy(trainer, dataset, dataset.targets)
worst_acc = min(worst_acc, curr_acc)
deviation = abs(unperturbated_acc - worst_acc)
if math.isclose(unperturbated_acc, worst_acc, rel_tol=1e-2):
# if the deviation is nearly zero we can stop searching
return sigma_new, sigma_new
if deviation > target_deviation:
sigma_max = sigma_new
else:
sigma_min = sigma_new
return sigma_min, sigma_max
def get_H_Q_xCz(
trainer,
dataset,
select,
n_per_head=1,
batch_size=256,
lr=1e-2,
max_epochs=100,
Q_zx=None,
**kwargs,
):
trainer = clone_trainer(trainer) # ensure not changing (shouldn't)
trainer.module_.transformer.is_transform = False # DIB Loss expects pred of label
model = trainer.module_.transformer
z_dim = model.z_dim
def get_Q(*args, **kwargs):
Q = copy.deepcopy(trainer.module_.clf)
Q.reset_parameters() # shouldn't be needed
return Q
count_targets = dataset.count_targets()
n_per_target = {str(k): int(v) for k, v in count_targets.items()}
dib = partial(
DIBLoss,
Q_zx if Q_zx is not None else get_Q,
n_per_target,
n_per_head=n_per_head,
n_classes=dataset.n_classes,
z_dim=z_dim,
map_target_position=dataset.map_target_position,
ZYCriterion=partial(
CrossEntropyLossGeneralize,
map_target_position=dataset.map_target_position,
gamma=0,
),
**kwargs,
)
# make sure that the criterion's parameters are also trained
NeuralNetTransformer._get_params_for_optimizer = partialmethod(
_get_params_for_optimizer, is_add_criterion=True
)
set_requires_grad(model, False)
net = NeuralNetTransformer(
module=model,
criterion=dib,
optimizer=torch.optim.Adam,
lr=lr,
max_epochs=max_epochs,
train_split=None,
batch_size=batch_size,
device=trainer.device,
iterator_valid__batch_size=batch_size * 2,
callbacks=[
LRScheduler(
torch.optim.lr_scheduler.ExponentialLR,
gamma=get_exponential_decay_gamma(100, max_epochs),
)
],
)
net.fit(dataset, y=None)
out = eval_trnsf(net, dataset)
return out[select]
class HQxCz:
def __init__(self, trainer, dataset):
trainer = clone_trainer(trainer) # ensure not changing (shouldn't)
self.Q_zy = copy.deepcopy(trainer.module_.clf)
trainer.module_ = trainer.module_.transformer
# squeeze n_z_samples to work with the forward call (working in a deterministic setting)
trainer.module_.is_avg_trnsf = True
self.trainer = trainer
Z = trainer.forward(dataset, training=False, device=trainer.device)
targets = torch.stack([torch.tensor(y) for _, y in dataset])
self.trnsf_dataset = skorch.dataset.Dataset(
Z, [targets[:, i] for i in range(targets.size(1))]
)  # compute the transformed data once
self.z_dim = self.trnsf_dataset[0][0].size(-1)
count_targets = dataset.count_targets()
self.n_per_target = {str(k): int(v) for k, v in count_targets.items()}
self.n_classes = dataset.n_classes
self.map_target_position = dataset.map_target_position
def __call__(
self, select, n_per_head=1, batch_size=256, lr=1e-2, max_epochs=100, **kwargs
):
def get_Q(*args, **kwargs):
Q = copy.deepcopy(self.Q_zy)
Q.reset_parameters() # shouldn't be needed
return Q
dib = partial(
DIBLossZX,
get_Q,
self.n_per_target,
n_per_head=n_per_head,
n_classes=self.n_classes,
z_dim=self.z_dim,
map_target_position=self.map_target_position,
ZYCriterion=partial(
CrossEntropyLossGeneralize,
map_target_position=self.map_target_position,
gamma=0,
),
**kwargs,
)
# make sure that the criterion's parameters are also trained
NeuralNetTransformer._get_params_for_optimizer = partialmethod(
_get_params_for_optimizer, is_add_criterion=True
)
net = NeuralNetTransformer(
module=nn.Identity,
criterion=dib,
optimizer=torch.optim.Adam,
lr=lr,
max_epochs=max_epochs,
train_split=None,
batch_size=batch_size,
device=self.trainer.device,
iterator_valid__batch_size=batch_size * 2,
callbacks=[
LRScheduler(
torch.optim.lr_scheduler.ExponentialLR,
gamma=get_exponential_decay_gamma(100, max_epochs),
)
],
)
net.fit(self.trnsf_dataset, y=None)
out = eval_trnsf(net, self.trnsf_dataset)
return out[select]
|
decodable_information_bottleneck-main
|
utils/evaluate.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import collections
import copy
import glob
import logging
import math
import os
import random
import shutil
import types
from collections import ChainMap, defaultdict
from contextlib import contextmanager, suppress
from multiprocessing import Pool, cpu_count
import numpy as np
import omegaconf
import pandas as pd
import skorch
import torch
import torch.nn as nn
from omegaconf import OmegaConf
from skorch.callbacks import Callback
from skorch.callbacks.scoring import check_scoring
from skorch.dataset import get_len, unpack_data, uses_placeholder_y
from torch.optim.optimizer import Optimizer, required
from dib import UNLABELLED_CLASS
from dib.training.helpers import Checkpoint
from dib.training.trainer import _single_epoch
from dib.utils.helpers import *
SKLEARN_MODEL = "model.joblib"
SFFX_TOAGG = "_toagg"
logger = logging.getLogger(__name__)
def get_float_value(x):
"""Convert to float"""
if isinstance(x, torch.Tensor):
x = x.item()
elif not isinstance(x, float):
x = float(x)
return x
def add_noise_to_param_(module, sigma, is_relative=True):
"""Add uniform noise with standard deviation `sigma` to each weight."""
with torch.no_grad():
for param in module.parameters():
if is_relative:
unif = torch.distributions.uniform.Uniform(
0, param.abs() * sigma)
noise = unif.sample()
else:
unif = torch.distributions.uniform.Uniform(0, sigma)
noise = unif.sample(param.shape)
param.add_(noise)
def force_generalization(datasets):
"""Force the (anti)-generalization of a model by adding the test set to the training set. It
also adds a label `is_train` that says whether the example is from the training `is_train=1` or
testing `is_train=0` set.
Parameters
----------
datasets : dictionary of torch.utils.data.Dataset
Dictionary containing at least the `"train"` and `"test"` set.
Returns
-------
train_data : torch.utils.data.Dataset
"""
# make sure we don't change the dataset used for eval and clf
datasets = copy.deepcopy(datasets)
datasets["test"].set_const_target_(0)
datasets["train"].set_const_target_(1)
datasets["train"].append_(datasets["test"])
return datasets["train"]
def clip_perturbated_param_(
module, unperturbated_params, clip_factor, is_relative=True
):
"""
Element wise clipping of the absolute value of the difference in weight. `unperturbated_params`
needs to be a dictionary of unperturbated params. Use `is_relative` if the clip factor should multiply
the unperturbated param.
"""
with torch.no_grad():
for name, param in module.named_parameters():
w = unperturbated_params[name]
# delta_i = (delta+w)_i - w_i
delta = param - w
max_abs_delta = clip_factor * w.abs() if is_relative else clip_factor
clipped_delta = torch.where(
delta.abs() > max_abs_delta, delta.sign() * max_abs_delta, delta
)
# inplace replace
param.fill_(0)
param.add_(w)
param.add_(clipped_delta)
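# Illustrative sketch (not part of the original repo) of the element-wise
# `torch.where` clipping used in `clip_perturbated_param_`: each delta is
# clamped to +/- max_abs while keeping its sign.
def _example_elementwise_clip():
    delta = torch.tensor([0.3, -0.9, 0.05])
    max_abs = torch.tensor([0.5, 0.5, 0.01])
    clipped = torch.where(delta.abs() > max_abs, delta.sign() * max_abs, delta)
    assert torch.equal(clipped, torch.tensor([0.3, -0.5, 0.01]))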
def get_device(module):
"""return device of module."""
return next(module.parameters()).device
def batchnorms2convs_(module):
"""Converts all the batchnorms to frozen convolutions."""
for name, m in module.named_children():
if isinstance(m, nn.modules.batchnorm._BatchNorm):
module._modules[name] = BatchNormConv(m)
else:
batchnorms2convs_(module._modules[name])
class BatchNormConv(nn.Module):
"""Replace a batchnorm layer with a frozen convolution."""
def __init__(self, batchnorm):
super().__init__()
if isinstance(batchnorm, nn.BatchNorm2d):
conv = nn.Conv2d(
batchnorm.num_features,
batchnorm.num_features,
1,
groups=batchnorm.num_features,
)
elif isinstance(batchnorm, nn.BatchNorm1d):
conv = nn.Conv1d(
batchnorm.num_features,
batchnorm.num_features,
1,
groups=batchnorm.num_features,
)
conv.eval()
nn.init.ones_(conv.weight)
nn.init.zeros_(conv.bias)
conv.to(get_device(batchnorm))
self.bn = nn.utils.fusion.fuse_conv_bn_eval(conv, batchnorm)
def forward(self, x):
return self.bn(x)
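# Illustrative sketch (not part of the original repo): in eval mode the fused
# depthwise conv inside `BatchNormConv` reproduces the batchnorm output.
def _example_batchnorm_freeze():
    bn = nn.BatchNorm2d(3)
    _ = bn(torch.randn(8, 3, 4, 4))  # populate running statistics
    bn.eval()
    x = torch.randn(2, 3, 4, 4)
    assert torch.allclose(bn(x), BatchNormConv(bn)(x), atol=1e-5)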
def rm_clf_experiment(experiment):
"""Remove all the classifier files for an experiment."""
for f in glob.glob(f"tmp_results/{experiment}/**/clf_*/**", recursive=True):
try:
shutil.rmtree(f)
except FileNotFoundError:
pass
def invert_dict(d):
return {v: k for k, v in d.items()}
def flip_nested_dict(nested_dict):
"""Flip nested dictionary inside out."""
flipped = dict()
for key, subdict in nested_dict.items():
for k, v in subdict.items():
flipped[k] = flipped.get(k, dict())
flipped[k][key] = v
return flipped
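# Tiny usage sketch (illustrative, not part of the original repo).
def _example_flip_nested_dict():
    nested = {"run1": {"acc": 0.9, "loss": 0.3}, "run2": {"acc": 0.8}}
    flipped = flip_nested_dict(nested)
    assert flipped == {"acc": {"run1": 0.9, "run2": 0.8}, "loss": {"run1": 0.3}}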
# Credits https://stackoverflow.com/questions/17215400/python-format-string-unused-named-arguments/17215533#17215533
class PartialFormatMap(dict):
"""Dictionary used to do partial formatting of string in python.
E.g. `"{keep} {modify}".format_map(SafeDict(modify='done')) == '{keep} done'`
"""
def __missing__(self, key):
return "{" + key + "}"
# credits : https://gist.github.com/simon-weber/7853144
@contextmanager
def all_logging_disabled(highest_level=logging.CRITICAL):
"""
A context manager that will prevent any logging messages
triggered during the body from being processed.
:param highest_level: the maximum logging level in use.
This would only need to be changed if a custom level greater than CRITICAL
is defined.
"""
# two kind-of hacks here:
# * can't get the highest logging level in effect => delegate to the user
# * can't get the current module-level override => use an undocumented
# (but non-private!) interface
previous_level = logging.root.manager.disable
logging.disable(highest_level)
try:
yield
finally:
logging.disable(previous_level)
# credits : https://stackoverflow.com/questions/5543651/computing-standard-deviation-in-a-stream
class OnlineVariance:
"""
Welford's algorithm computes the sample variance incrementally.
"""
def __init__(self, iterable=None, ddof=1):
self.ddof, self.n, self.mean, self.M2 = ddof, 0, 0.0, 0.0
if iterable is not None:
for datum in iterable:
self.include(datum)
def include(self, datum):
self.n += 1
self.delta = datum - self.mean
self.mean += self.delta / self.n
self.M2 += self.delta * (datum - self.mean)
@property
def variance(self):
return self.M2 / (self.n - self.ddof)
@property
def std(self):
return np.sqrt(self.variance)
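# Tiny check (illustrative, not part of the original repo) that Welford's
# algorithm matches the batch sample variance computed by numpy (ddof=1).
def _example_online_variance():
    data = [2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0]
    assert np.isclose(OnlineVariance(data).variance, np.var(data, ddof=1))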
class StoreVarGrad(Callback):
"""Callback which applies a function on all gradients, stores the variance during each epoch."""
def __init__(self):
self.online_vars = dict()
self.n = 0
self.var_grads = dict()
def initialize(self):
self.online_vars = dict()
self.n = 0
self.var_grads = dict()
def on_grad_computed(self, net, **kwargs):
for name, param in net.module_.named_parameters():
if param.grad is not None:
if name not in self.online_vars:
self.online_vars[name] = OnlineVariance()
self.online_vars[name].include(
param.grad.cpu().detach().flatten().numpy()
)
def on_epoch_end(self, net, parent=None, **kwargs):
epoch = net.history[-1]["epoch"]
self.n += 1
self.var_grads = {k: v.variance for k, v in self.online_vars.items()}
self.online_vars = dict()
class StoreGrad(Callback):
"""Callback which applies a function on all gradients, stores the output at each epoch and
then takes an average across epochs."""
def __init__(self, fn=Identity()):
self.curr_epoch = dict()
self.prev_epochs = dict()
self.fn = fn
def initialize(self):
self.curr_epoch = dict()
self.prev_epochs = dict()
def on_grad_computed(self, net, **kwargs):
for name, param in net.module_.named_parameters():
if param.grad is not None:
if name not in self.curr_epoch:
self.curr_epoch[name] = OnlineVariance()
self.curr_epoch[name].include(param.grad.cpu().flatten())
def on_epoch_end(self, net, parent=None, **kwargs):
epoch = net.history[-1]["epoch"]
self.prev_epochs[epoch] = {
k: v.variance for k, v in self.curr_epoch.items()}
self.curr_epoch = dict()
class StopAtThreshold(skorch.callbacks.Callback):
"""Callback for stopping training when `monitor` reaches threshold."""
def __init__(
self, monitor="train_loss", threshold=0.01, lower_is_better=True, sink=print
):
self.monitor = monitor
self.threshold = threshold
self.sink = sink
self.lower_is_better = lower_is_better
def on_epoch_end(self, net, **kwargs):
current_score = net.history[-1, self.monitor]
if self._is_score_improved(current_score):
self._sink(
"Stopping since {} reached {}".format(
self.monitor, self.threshold),
verbose=net.verbose,
)
raise KeyboardInterrupt
def _is_score_improved(self, score):
if self.lower_is_better:
return score < self.threshold
return score > self.threshold
def _sink(self, text, verbose):
# We do not want to be affected by verbosity if sink is not print
if (self.sink is not print) or verbose:
self.sink(text)
class TensorBoard(skorch.callbacks.TensorBoard):
def on_epoch_end(self, net, parent=None, **kwargs):
epoch = net.history[-1]["epoch"]
for m in net.module_.modules():
try:
m.tensorboard(self.writer, epoch, mode="on_epoch_end")
except AttributeError:
pass
super().on_epoch_end(net, **kwargs) # call super last
if parent is not None and hasattr(parent.criterion_, "to_store"):
for k, v in parent.criterion_.to_store.items():
with suppress(NotImplementedError):
# pytorch raises NotImplementedError on wrong types
self.writer.add_scalar(
tag=f"Loss/partial/{k}",
scalar_value=v[0] / v[1],  # running sum / count = average
global_step=epoch,
)
parent.criterion_.to_store = dict()
def on_grad_computed(self, net, **kwargs):
epoch = net.history[-1]["epoch"]
for m in net.module_.modules():
if hasattr(m, "tensorboard") and callable(m.tensorboard):
try:
m.tensorboard(self.writer, epoch, mode="on_grad_computed")
except NotImplementedError: # if frozen
pass
def clean_end_run(clean_after_run, chckpnt_dirnames):
"""Clean the checkpoiting directories after running.
Parameters
----------
clean_after_run : ["training","all",None]
Cleans the directory. If "training" removes all the checkpoiting needed for training
(last epoch models and all the optimizer). If "all" also removes the best_epoch model.
chckpnt_dirnames : list of str
Directories where checkpoints were saved.
"""
for chckpnt_dirname in chckpnt_dirnames:
if clean_after_run == "all":
patterns = ["*.pt"]
elif clean_after_run == "training":
patterns = ["*_optimizer.pt"]
elif clean_after_run is None:
continue
else:
raise ValueError(f"Unkown chckpnt_dirnames={chckpnt_dirnames}")
for pattern in patterns:
for f in glob.glob(os.path.join(chckpnt_dirname, pattern)):
os.remove(f)
def hyperparam_to_path(hyperparameters):
"""Return a string of all hyperparameters that can be used as a path extension."""
return "/".join([f"{k}_{v}" for k, v in hyperparameters.items()])
def format_container(to_format, formatter, k=None):
"""Format a container of string.
Parameters
----------
to_format : str, list, dict, or omegaconf.Config
(list of) strings to fromat.
formatter : dict
Dict of keys to replace and values with which to replace.
"""
if isinstance(to_format, str):
out = to_format.format(**formatter)
elif to_format is None:
out = None
else:
if isinstance(to_format, omegaconf.Config):
to_format = OmegaConf.to_container(to_format, resolve=True)
if isinstance(to_format, list):
out = [
format_container(path, formatter, k=i)
for i, path in enumerate(to_format)
]
elif isinstance(to_format, dict):
out = {
k: format_container(path, formatter, k=k)
for k, path in to_format.items()
}
else:
raise ValueError(f"Unkown to_format={to_format}")
return out
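# Tiny usage sketch (illustrative, not part of the original repo).
def _example_format_container():
    formatter = dict(run=3)
    out = format_container({"path": "results/run_{run}", "tags": ["r{run}"]}, formatter)
    assert out == {"path": "results/run_3", "tags": ["r3"]}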
# Change _scoring for computing validation only at certain epochs
def _scoring(self, net, X_test, y_test):
"""Resolve scoring and apply it to data. Use cached prediction
instead of running inference again, if available."""
scorer = check_scoring(net, self.scoring_)
if y_test is None:
return float(
"nan"
) # ! Only difference : make sure no issue if valid not computed
return scorer(net, X_test, y_test)
def _single_epoch_skipvalid(
self,
dataset,
training,
epoch,
save_epochs=(
list(range(10))
+ list(range(9, 100, 10))
+ list(range(99, 1000, 50))
+ list(range(999, 10000, 500))
),
**fit_params,
):
if not training and epoch not in save_epochs:
return
_single_epoch(self, dataset, training, epoch, **fit_params)
def get_checkpoint(chckpnt_dirname, monitor="valid_loss_best", **kwargs):
"""Return the correct checkpoint.
Parameters
----------
chckpnt_dirname : str
monitor : {"valid_loss_best", "valid_acc_best", "train_loss_best", "last"} or list of int or int
"*_best" saves the model with the best *. "last" saves the last model.
If a list of ints, saves at each of these epochs. If an int, saves at that specific epoch
(useful for loading).
"""
if monitor == "last":
return Checkpoint(
dirname=chckpnt_dirname, monitor=None, fn_prefix="last_epoch_", **kwargs
)
elif isinstance(monitor, str):
return Checkpoint(
dirname=chckpnt_dirname, monitor=monitor, fn_prefix="best_", **kwargs
)
else:
def _monitor_epoch(net):
epoch = net.history[-1]["epoch"]
return epoch in monitor
if isinstance(monitor, int):
f_params = f"params_epoch{monitor}.pt"
f_optimizer = f"optimizer_epoch{monitor}.pt"
f_criterion = f"criterion_epoch{monitor}.pt"
monitor = [monitor]
else:
f_params = "params_epoch{last_epoch[epoch]}.pt"
f_optimizer = "optimizer_epoch{last_epoch[epoch]}.pt"
f_criterion = "criterion_epoch{last_epoch[epoch]}.pt"
return Checkpoint(
dirname=chckpnt_dirname,
f_params=f_params,
f_optimizer=f_optimizer,
f_criterion=f_criterion,
monitor=_monitor_epoch,
**kwargs,
)
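# Minimal usage sketch (assumes a skorch net; names are illustrative):
# chckpnt = get_checkpoint("checkpoints/run1", monitor="valid_loss_best")
# net = skorch.NeuralNetClassifier(MyModule, callbacks=[chckpnt])  # MyModule is hypothetical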
def count_parameters(model):
"""Count the number of parameters in a model."""
return sum([p.numel() for p in model.parameters()])
def count_prune_parameters(model):
"""Count the number of parameters that were pruned out."""
return sum(torch.nn.utils.parameters_to_vector(model.buffers()) == 0)
def merge_dicts(*dicts):
"""Merge multiple dictionaries. If key is repeated, first appearance will be used."""
return dict(ChainMap(*dicts))
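# e.g. merge_dicts({"a": 1}, {"a": 2, "b": 3}) == {"a": 1, "b": 3}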
def rm_const_col(df):
"""Remove constant columns in a dataframe"""
# nan need specific dropping
df = df.dropna(axis=1, how="all")
return df.loc[:, (df != df.iloc[0]).any()].copy()
def update_prepending(to_update, new):
"""Update a dictionary with another. The difference with .update is that the new keys
are put before the old ones (prepended)."""
# make sure not to mutate the arguments
to_update = to_update.copy()
new = new.copy()
# update with the new values
to_update.update(new)
# remove all the new keys => keep only the old-only entries
to_update = {k: v for k, v in to_update.items() if k not in new}
# keep only values that ought to be prepended
new = {k: v for k, v in new.items() if k not in to_update}
# update the new dict with the old one => new values are at the beginning (prepended)
new.update(to_update)
return new
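# e.g. update_prepending({"a": 1, "b": 2}, {"b": 3, "c": 4}) == {"b": 3, "c": 4, "a": 1}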
def aggregate_table(table, aggregate=["mean", "std"], match_to_agg=SFFX_TOAGG):
"""Aggregates all the results in all columns containing `match_to_agg`."""
table = table.copy()
toaggreg = [c for c in table.columns if match_to_agg in c]
#! Hacky way of dealing with NaN in groupby while waiting for https://github.com/pandas-dev/pandas/pull/30584
hacky_nan = -7878 # has to be something that will not appear anywhere else
groupby_idx = [c for c in table.columns if c not in (["run"] + toaggreg)]
table[groupby_idx] = table[groupby_idx].fillna(hacky_nan)
table = table.groupby(groupby_idx).agg(
merge_dicts({k: aggregate for k in toaggreg}, {"run": "count"})
)  # make sure to also add the counts
table.columns = [
"_".join(col).strip().replace(match_to_agg, "") for col in table.columns.values
]
table = table.reset_index()
#! Reset the nan (careful when replacing due to float precision)
numeric_col = table.select_dtypes(np.number).columns
table[numeric_col] = table[numeric_col].mask(
np.isclose(table[numeric_col].values, hacky_nan)
)
# in case replacement was in object col
table = table.mask(table == hacky_nan)
return table
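# Sketch of the expected behaviour (illustrative column names): a column
# "acc_toagg" becomes "acc_mean" and "acc_std" after grouping on all the
# remaining non-"run" columns, and a "run_count" column records group sizes.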
def append_sffx(l, sffx):
"""Append a suffix to a list of strings."""
return [el + sffx for el in l]
def save_pattern(folders, pattern, filename, formatting={}, logger=None):
"""Save the pattern (formatted) to file. If `formatting` is not empty then append."""
for i, folder in enumerate(folders):
file = os.path.join(folder, filename)
with open(file, "w" if len(formatting) == 0 else "a") as f:
if len(formatting) > 0:
pattern = pattern.format(**formatting)
f.write(pattern + "\n")
if logger is not None and i == 0:
logger.info(f"Saving {pattern} to {file.split('/')[-1]}")
def get_exponential_decay_gamma(scheduling_factor, max_epochs):
"""Return the exponential learning rate factor gamma.
Parameters
----------
scheduling_factor : float
Factor by which the learning rate is divided over the whole training.
max_epochs : int
Maximum number of epochs.
"""
return (1 / scheduling_factor) ** (1 / max_epochs)
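# Worked example (illustrative numbers): scheduling_factor=100, max_epochs=100
# gives gamma = (1 / 100) ** (1 / 100) ≈ 0.955, so with
# torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma) the learning rate
# is divided by ~100 over the full run.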
def replace_None_with_all(df, column):
"""Duplicate rows with `None` in `columns` to have one for all unique values.
Parameters
----------
df : pd.DataFrame
Dataframe from which to replace the values.
column : str
Name of the column in which to search for None.
Return
------
df : pd.DataFrame
Dataframe with the replicated rows.
Examples
--------
>>> df = pd.DataFrame([["model1",2,0.01],["model1",3,0.1],["model2",5,None]], columns=["Model","N Layers","Col"])
>>> df
Model N Layers Col
0 model1 2 0.01
1 model1 3 0.10
2 model2 5 NaN
>>> replace_None_with_all(df, "Col")
Model N Layers Col
0 model1 2 0.01
1 model1 3 0.10
2 model2 5 0.01
3 model2 5 0.10
"""
to_replicate = df[df[column].isin([None])].copy()
df = df[~df[column].isin([None])]
replicated = []
for val in df[column].unique():
to_replicate[column] = val
replicated.append(to_replicate.copy())
df = pd.concat([df, *replicated], ignore_index=True)
return df
def dict_none_toNaN(d):
return {k: v if v is not None else float("nan") for k, v in d.items()}
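# e.g. dict_none_toNaN({"a": None, "b": 2}) -> {"a": float("nan"), "b": 2}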
class SetLR(torch.optim.lr_scheduler._LRScheduler):
"""Set the learning rate of each parameter group.
When last_epoch=-1, sets initial lr as lr.
Args:
optimizer (Optimizer): Wrapped optimizer.
lr_lambda (function or list): A function which computes the new lr
given an integer parameter epoch, the current lr and the base lr,
or a list of such functions, one for each group in optimizer.param_groups.
last_epoch (int): The index of last epoch. Default: -1.
Example:
>>> # Assuming optimizer has two groups.
>>> lmbda = lambda epoch, cur_lr, base_lr: 0.95
>>> scheduler = SetLR(optimizer, lr_lambda=lmbda)
>>> for epoch in range(100):
>>> train(...)
>>> validate(...)
>>> scheduler.step()
"""
def __init__(self, optimizer, lr_lambda, last_epoch=-1):
self.optimizer = optimizer
if not isinstance(lr_lambda, list) and not isinstance(lr_lambda, tuple):
self.lr_lambdas = [lr_lambda] * len(optimizer.param_groups)
else:
if len(lr_lambda) != len(optimizer.param_groups):
raise ValueError(
"Expected {} lr_lambdas, but got {}".format(
len(optimizer.param_groups), len(lr_lambda)
)
)
self.lr_lambdas = list(lr_lambda)
self.last_epoch = last_epoch
super().__init__(optimizer, last_epoch)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
The learning rate lambda functions will only be saved if they are callable objects
and not if they are functions or lambdas.
"""
state_dict = {
key: value
for key, value in self.__dict__.items()
if key not in ("optimizer", "lr_lambdas")
}
state_dict["lr_lambdas"] = [None] * len(self.lr_lambdas)
for idx, fn in enumerate(self.lr_lambdas):
if not isinstance(fn, types.FunctionType):
state_dict["lr_lambdas"][idx] = fn.__dict__.copy()
return state_dict
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
lr_lambdas = state_dict.pop("lr_lambdas")
self.__dict__.update(state_dict)
for idx, fn in enumerate(lr_lambdas):
if fn is not None:
self.lr_lambdas[idx].__dict__.update(fn)
def get_lr(self):
if self.last_epoch > 0:
return [
lmbda(self.last_epoch, group["lr"], base_lr)
for base_lr, lmbda, group in zip(
self.base_lrs, self.lr_lambdas, self.optimizer.param_groups
)
]
else:
return [base_lr for base_lr in self.base_lrs]
|
decodable_information_bottleneck-main
|
utils/helpers.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import logging
import sys
import warnings
from collections import defaultdict
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import LinearSegmentedColormap, ListedColormap
from dib.utils.helpers import tmp_seed
__all__ = ["plot_2D_decision_boundary"]
logger = logging.getLogger(__name__)
def make_cmap(cmap_dflt, alpha=1):
if isinstance(cmap_dflt, list):
colors = cmap_dflt
else:
colors = cmap_dflt(np.linspace(0, 1, 256), alpha=alpha)
cm = LinearSegmentedColormap.from_list("colormap", colors)
cm.set_under(alpha=0)
cm.set_over(alpha=0)
return cm
def get_sequential_colors(n):
"""
Return a list of n sequential color maps, the extreme color associated
with each (or a similar color), and a bright similar color.
"""
assert n <= 10
# for binary classification same as using plt.cm.RdBu
cmaps = [
make_cmap(plt.cm.Blues),
make_cmap(plt.cm.Reds),
make_cmap(plt.cm.Greens),
make_cmap(plt.cm.Purples),
make_cmap(["white", "xkcd:dark grey"]),
make_cmap(plt.cm.Oranges),
make_cmap(["white", "xkcd:olive"]),
make_cmap(["white", "xkcd:brown"]),
make_cmap(["white", "xkcd:dark turquoise"]),
make_cmap(["white", "xkcd:bordeaux"]),
]
extreme_colors = [
"xkcd:darkish blue",
"xkcd:darkish red",
"xkcd:darkish green",
"xkcd:indigo",
"xkcd:dark grey",
"xkcd:dark orange",
"xkcd:olive",
"xkcd:brown",
"xkcd:dark turquoise",
"xkcd:bordeaux",
]
bright_colors = [
"xkcd:bright blue",
"xkcd:bright red",
"xkcd:green",
"xkcd:bright purple",
"k",
"xkcd:bright orange",
"xkcd:bright olive",
"xkcd:golden brown",
"xkcd:bright turquoise",
"xkcd:purple red",
]
return cmaps[:n], extreme_colors[:n], bright_colors[:n]
def plot_2D_decision_boundary(
X,
y,
model,
title=None,
ax=None,
n_mesh=50,
is_only_wrong=False,
is_force_no_proba=False,
n_max_scatter=100,
scatter_unlabelled_kwargs={
"c": "whitesmoke",
"alpha": 0.4,
"linewidths": 0.5,
"s": 10,
"marker": "o",
},
scatter_labelled_kwargs={"linewidths": 0.7,
"s": 50, "marker": "o", "alpha": 0.7, },
seed=123,
delta=0.5,
test=None,
):
"""Plot the 2D decision boundaries of a sklearn classification model.
Parameters
----------
X: array-like
2D input data
y: array-like
Labels, with `-1` for unlabeled points. Currently works with max 10 classes.
model: sklearn.BaseEstimator
Trained model. If `None` plot the dataset only.
title: str, optional
Title to add.
ax: matplotlib.axes, optional
Axis on which to plot.
n_mesh: int, optional
Number of points in each axes of the mesh. Increase to increase the quality.
50 is a good value for nice quality, 10 is faster but still ok.
is_only_wrong : bool, optional
Whether to plot only the wrong data points for simplicity.
is_force_no_proba : bool, optional
Whether not to plot probabilistic decision boundaries even if could.
n_max_scatter : int, optional
Maximum number of points to plot.
seed : int, optional
Pseudorandom seed. E.g. for selecting which points to plot.
delta : float, optional
How much space to add on the side of each points.
test : tuple of array like, optional
(X_test, y_test). If given, will also plot some test datapoints (at most
`n_max_scatter` of them). Still in development.
"""
X = np.array(X)
y = np.array(y)
if ax is None:
F, ax = plt.subplots(1, 1, figsize=(7, 7))
x_min, x_max = X[:, 0].min(), X[:, 0].max()
y_min, y_max = X[:, 1].min(), X[:, 1].max()
if test is not None:
X_test = np.array(test[0])
y_test = np.array(test[1])
x_min = min(x_min, X_test[:, 0].min())
x_max = max(x_max, X_test[:, 0].max())
y_min = min(y_min, X_test[:, 1].min())
y_max = max(y_max, X_test[:, 1].max())
x_min, x_max = x_min - delta, x_max + delta
y_min, y_max = y_min - delta, y_max + delta
xx, yy = np.meshgrid(
np.linspace(x_min, x_max, num=n_mesh), np.linspace(
y_min, y_max, num=n_mesh)
)
cmaps, extreme_colors, bright_colors = get_sequential_colors(max(y) + 1)
if model is not None:
if is_force_no_proba or not hasattr(model, "predict_proba"):
y_hat = model.predict(
np.c_[xx.ravel(), yy.ravel()].astype("float32"))
contourf_kwargs = dict(alpha=1, antialiased=True)
cmaps = [ListedColormap(extreme_colors)]
y_hat = y_hat.reshape(xx.shape)
# contourf does not work well without proba
plt.pcolormesh(
xx, yy, y_hat, cmap=ListedColormap(extreme_colors), **contourf_kwargs
)
else:
y_hat = model.predict_proba(
np.c_[xx.ravel(), yy.ravel()].astype("float32"))
y_hat = y_hat.reshape(xx.shape + (-1,))
y_argmax = y_hat.argmax(-1)
vmin = y_hat.max(-1).min()
contourf_kwargs = dict(
vmin=vmin,
vmax=1,
extend="neither",
levels=y_hat.shape[-1],
antialiased=True,
)
for i in range(y_hat.shape[-1]):
mask_plot = y_argmax == i
y_i = y_hat[:, :, i]
y_i[~mask_plot] = np.nan # don't plot if not predicted
with warnings.catch_warnings():
# warnings because of nan
warnings.simplefilter("ignore")
ax.contourf(
xx, yy, y_hat[:, :, i], cmap=cmaps[i], **contourf_kwargs
)
args_scatter = [
n_max_scatter,
model,
is_only_wrong,
seed,
scatter_unlabelled_kwargs,
scatter_labelled_kwargs,
bright_colors,
]
ax = plot_scatter_data(X, y, ax, *args_scatter)
if test is not None:
scatter_labelled_kwargs["marker"] = "*"
ax = plot_scatter_data(test[0], test[1], ax, *args_scatter,)
ax.set_xlim(xx.min(), xx.max())
ax.set_ylim(yy.min(), yy.max())
ax.set_xticks(())
ax.set_yticks(())
if title is not None:
ax.set_title(title)
return ax
def plot_scatter_data(
X,
y,
ax,
n_max_scatter,
model,
is_only_wrong,
seed,
scatter_unlabelled_kwargs,
scatter_labelled_kwargs,
bright_colors,
):
if is_only_wrong:
y_pred = model.predict(X.astype("float32"))
wrong_pred = y_pred != np.array(y)
# randomly select n_max_scatter
mask_select = np.zeros_like(y).astype(bool)
mask_select[:n_max_scatter] = True
with tmp_seed(seed):
np.random.shuffle(mask_select)
for i in np.unique(y):
idx = y == i
if is_only_wrong:
idx = np.logical_and(idx, wrong_pred)
idx = np.logical_and(idx, mask_select)
if i == -1:
scatter_kwargs = scatter_unlabelled_kwargs
else:
scatter_kwargs = scatter_labelled_kwargs
scatter_kwargs["c"] = bright_colors[i]
ax.scatter(X[idx, 0], X[idx, 1], edgecolors="k", **scatter_kwargs)
return ax
|
decodable_information_bottleneck-main
|
utils/visualize/visualize_clf.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
from .visualize_clf import *
from .visualize_imgs import *
|
decodable_information_bottleneck-main
|
utils/visualize/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import numpy as np
# example : https://github.com/matplotlib/matplotlib/issues/7008
def kwargs_log_xscale(x_data, mode="equidistant", base=None):
"""Return arguments to set log_scale as one would wish. mode=["smooth","equidistant"]."""
# if constant diff don't use logscale
if base == 1 or np.diff(x_data).var() == 0:
return dict(value="linear")
# automatically compute base
if base is None:
# take the average multiplier between consecutive elements as the base,
# i.e. 2,8,32 gives 4 but 0.1,1,10 gives 10
base = int((x_data[x_data > 0][1:] /
x_data[x_data > 0][:-1]).mean().round())
if (x_data <= 0).any():
min_nnz_x = np.abs(x_data[x_data != 0]).min()
if mode == "smooth":
linscalex = np.log(np.e) / np.log(base) * (1 - (1 / base))
elif mode == "equidistant":
linscalex = 1 - (1 / base)
else:
raise ValueError(f"Unkown mode={mode}")
return dict(
value="symlog",
linthreshx=min_nnz_x,
basex=base,
subsx=list(range(base)),
linscalex=linscalex,
)
else:
return dict(value="log", basex=base, subsx=list(range(base)))
|
decodable_information_bottleneck-main
|
utils/visualize/helpers.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import random
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
import numpy as np
import seaborn as sns
import torch
from skorch.dataset import unpack_data
from torchvision.utils import make_grid
from dib.utils.helpers import prod, set_seed
__all__ = ["plot_dataset_samples_imgs"]
DFLT_FIGSIZE = (17, 9)
def remove_axis(ax, is_rm_ticks=True, is_rm_spines=True):
"""Remove all axis but not the labels."""
if is_rm_spines:
ax.spines["right"].set_visible(False)
ax.spines["top"].set_visible(False)
ax.spines["bottom"].set_visible(False)
ax.spines["left"].set_visible(False)
if is_rm_ticks:
ax.tick_params(bottom="off", left="off")
def plot_dataset_samples_imgs(
dataset, n_plots=4, figsize=DFLT_FIGSIZE, ax=None, pad_value=1, seed=123, title=None
):
"""Plot `n_samples` samples of the a datset."""
set_seed(seed)
if ax is None:
fig, ax = plt.subplots(figsize=figsize)
img_tensor = torch.stack(
[dataset[random.randint(0, len(dataset) - 1)][0] for i in range(n_plots)], dim=0
)
grid = make_grid(img_tensor, nrow=2, pad_value=pad_value)
ax.imshow(grid.permute(1, 2, 0).numpy())
ax.axis("off")
if title is not None:
ax.set_title(title, fontsize=18)
|
decodable_information_bottleneck-main
|
utils/visualize/visualize_imgs.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import glob
import logging
import os
import numpy as np
import torch
from PIL import Image
from torch.utils.data import Dataset
from torchvision import datasets, transforms
from dib.utils.helpers import to_numpy
from .base import BaseDataset
from .helpers import bw_to_color, get_masks_drop_features, overlay_save_datasets
COLOUR_BLACK = torch.tensor([0.0, 0.0, 0.0])
COLOUR_WHITE = torch.tensor([1.0, 1.0, 1.0])
COLOUR_BLUE = torch.tensor([0.0, 0.0, 1.0])
DATASETS_DICT = {
"mnist": "MNIST",
"cifar10": "CIFAR10",
"cifar100": "CIFAR100",
"bincifar100": "BinaryCIFAR100",
"binsvhn": "BinarySVHN",
"binmnist": "BinaryMNIST",
"coloredmnist": "ColoredMNIST",
"svhn": "SVHN",
"bincifar10mnist": "BinCifar10Mnist",
"cifar10mnist": "Cifar10Mnist",
"cifar10mnistshift": "Cifar10MnistShift",
"bincifar10mnistdep9": "BinCifar10MnistDep9",
"bincifar10mnistdep8": "BinCifar10MnistDep8",
"bincifar10mnistdep7": "BinCifar10MnistDep7",
"bincifar10mnistdep5": "BinCifar10MnistDep5",
"bincifar10mnistdep3": "BinCifar10MnistDep3",
"cifar10mnistdep9": "Cifar10MnistDep9",
"cifar10mnistdep8": "Cifar10MnistDep8",
"cifar10mnistdep7": "Cifar10MnistDep7",
"cifar10mnistdep5": "Cifar10MnistDep5",
"cifar10mnistdep3": "Cifar10MnistDep3",
}
DATASETS = list(DATASETS_DICT.keys())
logger = logging.getLogger(__name__)
# HELPERS
def get_Dataset(dataset):
"""Return the correct uninstantiated datasets."""
dataset = dataset.lower()
try:
# eval because stores name as string in order to put it at top of file
return eval(DATASETS_DICT[dataset])
except KeyError:
raise ValueError("Unkown dataset: {}".format(dataset))
def get_img_size(dataset):
"""Return the correct image size."""
return get_Dataset(dataset).shape
class ImgDataset(BaseDataset):
"""Image dataset wrapper that adds nice functionalitites.
Parameters
----------
is_augment : bool, optional
Whether to transform the training set (and thus validation).
split : {'train', 'test', ...}, optional
According dataset is selected.
translation : int or sequence of int, optional
Maximum translation (in pixels) applied to the images when using data augmentation.
is_flip : bool, optional
Whether to apply horizontal flipping. Not applied when the images contain numbers.
rotation : float or sequence of float, optional
Range of degrees to select from.
is_normalize : bool, optional
Whether to normalize the dataset.
target_transform : callable, optional
Transformation of the target.
"""
is_numbers = False
def __init__(
self,
*args,
is_augment=True,
split="train",
translation=4,
is_flip=True,
rotation=15,
is_normalize=True,
target_transform=None,
**kwargs,
):
super().__init__(*args, **kwargs)
self.is_augment = is_augment
self.split = split
self.is_drop_features = False # by default return all features
self.translation = translation
self.is_flip = is_flip and not self.is_numbers
self.rotation = rotation
self.is_normalize = is_normalize
self.target_transform = target_transform
if self.is_augment and self.split == "train":
self.transform = transforms.Compose(self.get_train_transforms())
else:
self.transform = transforms.Compose(self.get_test_transforms())
def get_train_transforms(self):
"""Return the training transformation."""
return [
transforms.Resize((self.shape[1], self.shape[2])),
# the following performs translation
transforms.RandomCrop(
(self.shape[1], self.shape[2]), padding=self.translation
),
# don't flip if working with numbers
transforms.RandomHorizontalFlip() if self.is_flip else torch.nn.Identity(),
transforms.RandomRotation(self.rotation),
transforms.ToTensor(),
transforms.Normalize(self.mean, self.std)
if self.is_normalize
else torch.nn.Identity(),
]
def get_test_transforms(self):
"""Return the testing transformation."""
return [
transforms.Resize((self.shape[1], self.shape[2])),
transforms.ToTensor(),
transforms.Normalize(self.mean, self.std)
if self.is_normalize
else torch.nn.Identity(),
]
def rm_transformations(self):
"""Completely remove transformation. Used to plot or compute mean and variance."""
self.transform = transforms.Compose([transforms.ToTensor()])
def make_test(self):
"""Make the data a test set."""
self.transform = transforms.Compose(self.get_test_transforms())
def drop_features_(self, drop_size):
"""Drop part of the features (pixels in images).
Note
----
- this function actually just precomputes `self.to_drop`, the values that should be
dropped; the dropping itself happens in `__getitem__`.
Parameters
----------
drop_size : float or int or tuple, optional
If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to
drop. If int, represents the number of datapoints to drop. If tuple, same as before
but give bounds (min and max). 0 means keep all.
"""
self.logger.info(f"drop_features_ {drop_size} features...")
assert not self.is_drop_features, "cannot drop multiple times the features"
self.is_drop_features = True
self.to_drop = get_masks_drop_features(
drop_size, [self.shape[1], self.shape[2]], len(self), seed=self.seed
)
def __getitem__(self, index):
if self.targets.ndim > 1:
multi_target = self.targets
# often datasets have code that can only deal with a single target
self.targets = multi_target[:, 0]
X, target = super().__getitem__(index)
self.targets = multi_target # set back multi targets
multi_target = (target,) + tuple(self.targets[index, 1:])
else:
X, target = super().__getitem__(index)
multi_target = (target,)
multi_target = self.add_index(multi_target, index)
if self.is_drop_features:
X[:, self.to_drop[index]] = float("nan")
return X, multi_target
# TORCHVISION DATASETS
class SVHN(ImgDataset, datasets.SVHN):
"""SVHN wrapper. Docs: `datasets.SVHN.`
Parameters
----------
kwargs:
Additional arguments to `ImgDataset`.
Examples
--------
>>> data = SVHN(split="train") #doctest:+ELLIPSIS
Using ...
>>> len(data)
73257
>>> len(data) == len(data.data) == len(data.targets)
True
>>> [type(i) for i in data[0]]
[<class 'torch.Tensor'>, <class 'int'>]
>>> from .helpers import get_mean_std
>>> mean, std = get_mean_std(data)
>>> (str(list(mean)) == str(data.mean)) and (str(list(std)) == str(data.std))
True
>>> train, valid = data.train_test_split(size=1000, is_stratify=True)
>>> len(valid)
1000
>>> data.drop_labels_(0.9)
>>> round(len([t for t in data.targets if t == -1]) / len(data), 1)
0.9
>>> data.balance_labels_()
>>> len(data)
131864
>>> data.drop_unlabelled_()
>>> len(data)
65932
>>> data.drop_features_(0.7)
>>> round((torch.isnan(data[0][0])).float().mean().item(), 1)
0.7
>>> data.set_test_transforms() # for replicability
>>> data[0][0][0] # showing image for one channel
tensor([[ nan, nan, 0.2850, ..., nan, nan, nan],
[ nan, nan, nan, ..., nan, nan, nan],
[ nan, 0.2652, nan, ..., nan, nan, nan],
...,
[ 0.1067, nan, 0.1860, ..., -0.4477, nan, nan],
[ nan, nan, nan, ..., nan, nan, nan],
[ nan, nan, nan, ..., nan, 0.1067, nan]])
>>> data[0][1]
1
>>> data.randomize_targets_()
>>> data[0][1]
8
"""
shape = (3, 32, 32)
missing_px_color = COLOUR_BLACK
n_classes = 10
n_train = 73257
mean = [0.43768448, 0.4437684, 0.4728041]
std = [0.19803017, 0.20101567, 0.19703583]
is_numbers = True
def __init__(self, **kwargs):
ImgDataset.__init__(self, **kwargs)
datasets.SVHN.__init__(
self,
self.root,
download=True,
split=self.split,
transform=self.transform,
target_transform=self.target_transform,
)
self.labels = to_numpy(self.labels)
if self.is_random_targets:
self.randomize_targets_()
@property
def targets(self):
# make compatible with CIFAR10 dataset
return self.labels
@targets.setter
def targets(self, values):
self.labels = values
class CIFAR10(ImgDataset, datasets.CIFAR10):
"""CIFAR10 wrapper. Docs: `datasets.CIFAR10.`
Parameters
----------
kwargs:
Additional arguments to `datasets.CIFAR10` and `ImgDataset`.
Examples
--------
See SVHN for more examples.
>>> data = CIFAR10(split="train") #doctest:+ELLIPSIS
Files ...
>>> from .helpers import get_mean_std
>>> mean, std = get_mean_std(data)
>>> list(std)
[0.24703279, 0.24348423, 0.26158753]
>>> (str(list(mean)) == str(data.mean)) and (str(list(std)) == str(data.std))
True
"""
shape = (3, 32, 32)
n_classes = 10
missing_px_color = COLOUR_BLACK
n_train = 50000
mean = [0.4914009, 0.48215896, 0.4465308]
std = [0.24703279, 0.24348423, 0.26158753]
def __init__(self, **kwargs):
ImgDataset.__init__(self, **kwargs)
datasets.CIFAR10.__init__(
self,
self.root,
download=True,
train=self.split == "train",
transform=self.transform,
target_transform=self.target_transform,
)
self.targets = to_numpy(self.targets)
if self.is_random_targets:
self.randomize_targets_()
class CIFAR100(ImgDataset, datasets.CIFAR100):
"""CIFAR100 wrapper. Docs: `datasets.CIFAR100.`
Parameters
----------
root : str, optional
Path to the dataset root. If `None` uses the default one.
split : {'train', 'test'}, optional
According dataset is selected.
kwargs:
Additional arguments to `datasets.CIFAR100` and `ImgDataset`.
Examples
--------
See SVHN for more examples.
>>> data = CIFAR100(split="train") #doctest:+ELLIPSIS
Files ...
>>> from .helpers import get_mean_std
>>> mean, std = get_mean_std(data)
>>> (str(list(mean)) == str(data.mean)) and (str(list(std)) == str(data.std))
True
"""
shape = (3, 32, 32)
n_classes = 100
n_train = 50000
missing_px_color = COLOUR_BLACK
mean = [0.5070754, 0.48655024, 0.44091907]
std = [0.26733398, 0.25643876, 0.2761503]
def __init__(self, **kwargs):
ImgDataset.__init__(self, **kwargs)
datasets.CIFAR100.__init__(
self,
self.root,
download=True,
train=self.split == "train",
transform=self.transform,
target_transform=self.target_transform,
)
self.targets = to_numpy(self.targets)
if self.is_random_targets:
self.randomize_targets_()
class MNIST(ImgDataset, datasets.MNIST):
"""MNIST wrapper. Docs: `datasets.MNIST.`
Parameters
----------
root : str, optional
Path to the dataset root. If `None` uses the default one.
split : {'train', 'test', "extra"}, optional
According dataset is selected.
kwargs:
Additional arguments to `datasets.MNIST` and `ImgDataset`.
Examples
--------
See SVHN for more examples.
>>> data = MNIST(split="train")
>>> from .helpers import get_mean_std
>>> mean, std = get_mean_std(data)
>>> (str(list(mean)) == str(data.mean)) and (str(list(std)) == str(data.std))
True
"""
shape = (1, 32, 32)
n_classes = 10
n_examples = 60000
missing_px_color = COLOUR_BLUE
mean = [0.13066062]
std = [0.30810776]
is_numbers = True
def __init__(self, **kwargs):
ImgDataset.__init__(self, **kwargs)
datasets.MNIST.__init__(
self,
self.root,
download=True,
train=self.split == "train",
transform=self.transform,
target_transform=self.target_transform,
)
self.targets = to_numpy(self.targets)
if self.is_random_targets:
self.randomize_targets_()
def append_(self, other):
"""Append a dataset to the current one."""
# mnist data is in torch format
self.data = torch.cat([self.data, other.data], dim=0)
self.targets = np.append(self.targets, other.targets, axis=0)
class ColoredMNIST(MNIST):
"""Colored binary MNIST where the color is a noisy predictor of the label. Binary label is larger
or smaller than 10.
Parameters
----------
root : str, optional
Path to the dataset root. If `None` uses the default one.
split : {'train', 'test', "extra"}, optional
According dataset is selected.
noise : float, optional
Probability of being wrong when only considering the color.
is_fixed : bool, optional
Whether to keep the sampled colors fixed across epochs (by seeding with the index)
instead of resampling the noise at every epoch.
is_noisy_test : bool, optional
Whether to use a test set where the color is useless (wrong 100% of the time).
kwargs:
Additional arguments to `datasets.MNIST` and `ImgDataset`.
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> n = 2000
>>> data = ColoredMNIST(split="train", noise=0.2)
>>> df = pd.DataFrame([(data[i][1][0], int(data[i][0][0].sum() == 0)) for i in range(n)], columns = ["lab","col"])
>>> out = df.groupby(["lab","col"]).size().values / n
>>> (np.array([0.37,0.07,0.07,0.37]) < out).all() and (out < np.array([0.43,0.13,0.13,0.43])).all()
True
>>> data = ColoredMNIST(split="test", noise=0.2, is_noisy_test=True)
>>> df = pd.DataFrame([(data[i][1][0], int(data[i][0][0].sum() == 0)) for i in range(n)], columns = ["lab","col"])
>>> out = df.groupby(["lab","col"]).size().values / n
>>> (np.array([0.00,0.47,0.47,0.00]) <= out).all() and (out < np.array([0.08,0.48,0.48,0.08])).all()
True
"""
shape = (3, 32, 32)
n_classes = 2
n_examples = 60000
def __init__(self, noise=0.1, is_fixed=True, is_noisy_test=True, **kwargs):
super().__init__(**kwargs)
self.targets = (self.targets < 5).astype(int)
self.is_fixed = is_fixed
self.noise = noise
self.is_noisy_test = is_noisy_test
if self.is_noisy_test and self.split == "test":
self.noise = 1
@property
def raw_folder(self):
# use mnist data
return os.path.join(self.root, "MNIST", "raw")
@property
def processed_folder(self):
# use mnist data
return os.path.join(self.root, "MNIST", "processed")
def __getitem__(self, index):
X, multi_target = super().__getitem__(index)
# by using the index for the seed ensures that same across epochs
seed = index if self.is_fixed else None
if multi_target[0] == 0:
X = bw_to_color(
X, rgb_proba=[1 - self.noise, self.noise, 0], seed=seed)
else:
X = bw_to_color(
X, rgb_proba=[self.noise, 1 - self.noise, 0], seed=seed)
return X, multi_target
# OTHER DATASETS
class BinaryCIFAR100(CIFAR100):
"""Like CIFAR100 but 2 class (odd or even)."""
n_classes = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.targets = self.targets % 2
class BinarySVHN(SVHN):
"""Like SVHN but 2 class (odd or even)."""
n_classes = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.targets = self.targets % 2
class BinaryMNIST(MNIST):
"""Like MNIST but 2 class (odd or even)."""
n_classes = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.targets = self.targets % 2
@property
def raw_folder(self):
# use mnist data
return os.path.join(self.root, "MNIST", "raw")
@property
def processed_folder(self):
# use mnist data
return os.path.join(self.root, "MNIST", "processed")
class OverlayedDatasetBase(ImgDataset, Dataset):
"""Overlays 2 other datasets.
Note
----
- Randomization of targets / stratification / ... will still be done on the actual
targets instead of the distractor.
- Only the train and test splits are overlaid, not the other splits.
- The second value of the target will be the distractor. The index is always last.
- This is a base class that should be inherited from. The only required changes are
adding class-generic attributes such as Overlayed, Background, is_shift.
See Cifar10Mnist for an example.
Parameters.
----------
kwargs :
Additional arguments to ImgDataset.
Attributes
----------
Background : ImgDataset
Dataset to use as background.
Overlayed : ImgDataset
Dataset to overlay on the background. Currently the following assumptions are made:
- the overlaid images have to be at most as big as the background ones (i.e.
`height_overlay <= height_bckgrnd` and `width_overlay <= width_bckgrnd`).
- The overlaid images are also used as masks. This is especially good for black
and white images: whiter pixels (~1) are the ones to be overlaid. In the case
of colored images, this still holds but channel-wise.
dependence : dict of float, optional
Whether to overlay such that there are dependencies between the background and the
overlaid data. A dictionary because the value can differ per split.
"""
add_dist = True # whether to add the distractor to the target
named_dir = None
# Whether to randomly shift all overlayed images or to keep them on the bottom right.
is_shift = False
dependence = dict(train=0, test=0)
def __init__(self, **kwargs):
ImgDataset.__init__(self, **kwargs)
name = self.named_dir if self.named_dir is not None else type(
self).__name__
self.dir = os.path.join(self.root, name)
if not os.path.isdir(self.dir):
self.make_dataset()
self.data = np.load(os.path.join(self.dir, f"{self.split}_x.npy"))
self.targets = np.load(os.path.join(self.dir, f"{self.split}_y.npy"))
self.distractor = np.load(
os.path.join(self.dir, f"{self.split}_y_distractor.npy")
)
if self.is_random_targets:
self.randomize_targets_() # doesn't randomize distractors
def __len__(self):
return len(self.targets)
@property
def map_target_position(self):
"""
Return a dictionary that maps the type of target (e.g. "index") to its position in the
outputted target.
"""
map_target_position = super().map_target_position
if self.add_dist:
map_target_position["distractor"] = len(map_target_position)
return map_target_position
def add_distractor(self, y, index):
"""Append the distractor to the targets."""
if self.add_dist:
try:
y = tuple(y) + (self.distractor[index],)
except TypeError:
y = [y, self.distractor[index]]
return y
def _switch_distractor_target(self):
"""Switch the distractor and the target."""
self.targets, self.distractor = self.distractor, self.targets
def keep_indcs_(self, indcs):
super().keep_indcs_(indcs)
self.distractor = self.distractor[indcs]
def __getitem__(self, index):
img = self.data[index]
target = self.targets[index]
img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
# make sure it's a tuple
try:
target = tuple(target)
except TypeError:
target = (target,)
target = self.add_index(target, index)
target = self.add_distractor(target, index)
if self.is_drop_features:
img[:, self.to_drop[index]] = float("nan")
return img, target
def make_dataset(self):
logger.info("Overlaying the datasets...")
overlay_train = self.Overlayed()
bckgrnd_train = self.Background()
overlay_train.rm_transformations()
bckgrnd_train.rm_transformations()
overlay_test = self.Overlayed(split="test")
bckgrnd_test = self.Background(split="test")
overlay_test.rm_transformations()
bckgrnd_test.rm_transformations()
bckgrnd_datasets = (
[bckgrnd_train.data, bckgrnd_train.targets],
[bckgrnd_test.data, bckgrnd_test.targets],
)
overlay_datasets = (
[overlay_train.data, overlay_train.targets],
[overlay_test.data, overlay_test.targets],
)
overlay_save_datasets(
bckgrnd_datasets,
overlay_datasets,
folder=self.dir,
is_shift=self.is_shift,
dependence=self.dependence,
)
def append_(self, other):
super().append_(other)
self.distractor = np.append(self.distractor, other.distractor, axis=0)
class Cifar10Mnist(OverlayedDatasetBase):
shape = (3, 32, 32)
n_classes = 10
missing_px_color = COLOUR_BLACK
n_train = 50000
mean = [0.52897614, 0.5220055, 0.49050677]
std = [0.2650898, 0.263235, 0.28332546]
Overlayed = MNIST
Background = CIFAR10
is_shift = False
class Cifar10MnistShift(Cifar10Mnist):
is_shift = True
class BinCifar10Mnist(Cifar10Mnist):
"""Like Cifar10Mnist but 2 class (odd or even)."""
named_dir = "Cifar10Mnist" # same dataset as Cifar10Mnist => don't recreate
n_classes = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.targets = self.targets % 2
# Correlated overlayed
class Cifar10MnistDep9(Cifar10Mnist):
"""Cifar10mnist where the mnist is correlated with cifar10."""
dependence = dict(train=0.9, test=0)
class Cifar10MnistDep8(Cifar10Mnist):
"""Cifar10mnist where the mnist is correlated with cifar10."""
dependence = dict(train=0.8, test=0)
class Cifar10MnistDep7(Cifar10Mnist):
"""Cifar10mnist where the mnist is correlated with cifar10."""
dependence = dict(train=0.7, test=0)
class Cifar10MnistDep5(Cifar10Mnist):
"""Cifar10mnist where the mnist is correlated with cifar10."""
dependence = dict(train=0.5, test=0)
class Cifar10MnistDep3(Cifar10Mnist):
"""Cifar10mnist where the mnist is correlated with cifar10."""
dependence = dict(train=0.3, test=0)
class BinCifar10MnistDep9(Cifar10MnistDep9):
"""Like Cifar10Mnist but 2 class (odd or even)."""
named_dir = "Cifar10MnistDep9" # same dataset as Cifar10Mnist => don't recreate
n_classes = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.targets = self.targets % 2
class BinCifar10MnistDep8(Cifar10MnistDep8):
"""Like Cifar10Mnist but 2 class (odd or even)."""
named_dir = "Cifar10MnistDep8" # same dataset as Cifar10Mnist => don't recreate
n_classes = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.targets = self.targets % 2
class BinCifar10MnistDep7(Cifar10MnistDep7):
"""Like Cifar10Mnist but 2 class (odd or even)."""
named_dir = "Cifar10MnistDep7" # same dataset as Cifar10Mnist => don't recreate
n_classes = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.targets = self.targets % 2
class BinCifar10MnistDep5(Cifar10MnistDep5):
"""Like Cifar10Mnist but 2 class (odd or even)."""
named_dir = "Cifar10MnistDep5" # same dataset as Cifar10Mnist => don't recreate
n_classes = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.targets = self.targets % 2
class BinCifar10MnistDep3(Cifar10MnistDep3):
"""Like Cifar10Mnist but 2 class (odd or even)."""
named_dir = "Cifar10MnistDep3" # same dataset as Cifar10Mnist => don't recreate
n_classes = 2
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.targets = self.targets % 2
|
decodable_information_bottleneck-main
|
utils/data/imgs.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
def get_train_dev_test_datasets(dataset, data_type, valid_size=0.1, **kwargs):
"""Return the correct instantiated train, validation, test dataset
Parameters
----------
dataset : str
Name of the dataset to load.
data_type : {"imgs"}
Type of dataset.
valid_size : float or int, optional
Size of the validation set. If float, should be between 0.0 and 1.0 and represent the
proportion of the dataset. If int, represents the absolute number of valid samples.
0 if no validation.
Returns
-------
datasets : dictionary of torch.utils.data.Dataset
Dictionary of the `"train"`, `"valid"`, and `"test"` datasets.
"""
datasets = dict()
if data_type == "imgs":
from .imgs import get_Dataset
Dataset = get_Dataset(dataset)
dataset = Dataset(split="train", **kwargs)
if valid_size != 0:
datasets["train"], datasets["valid"] = dataset.train_test_split(size=valid_size)
else:
datasets["train"], datasets["valid"] = dataset, None
datasets["test"] = Dataset(split="test", **kwargs)
return datasets
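# Minimal usage sketch (dataset name is illustrative):
# datasets = get_train_dev_test_datasets("cifar10", "imgs", valid_size=0.1)
# train, valid, test = datasets["train"], datasets["valid"], datasets["test"]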
|
decodable_information_bottleneck-main
|
utils/data/__init__.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import os
import numpy as np
import torch
from dib.utils.datasplit import RandomMasker
from dib.utils.helpers import tmp_seed, to_numpy
def get_masks_drop_features(drop_size, mask_shape, n_masks, n_batch=32, seed=123):
"""
Parameters
----------
drop_size : float or int or tuple, optional
If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to
drop. If int, represents the number of datapoints to drop. If tuple, same as before
but give bounds (min and max). 0 means keep all.
mask_shape : tuple of int or callable
Shape of the mask for one example. If callable, it is given the current index.
n_masks : int, optional
Number of masks to return.
n_batch : int, optional
Size of the batches of masks, i.e. the number of consecutive examples with the same
amount of kept features.
seed : int, optional
Random seed.
Returns
-------
to_drops : list of torch.BoolTensor
List of length n_masks where each element is a boolean tensor of shape `mask_shape` with
1s where features should be dropped.
Examples
--------
>>> get_masks_drop_features(0.5, (10,), 1, n_batch=1)
[tensor([ True, False, False, False, True, True, False, True, True, False])]
"""
try:
mask_shape(0)
except TypeError:
def mask_shape(_, ret=mask_shape):
return ret
if drop_size == 0:
# note: iterate over range(n_masks) so that one mask is returned per example
return [torch.zeros(*mask_shape(i)).bool() for i in range(n_masks)]
with tmp_seed(seed):
try:
droper = RandomMasker(
min_nnz=drop_size[0], max_nnz=drop_size[1], is_batch_share=False
)
except TypeError:
droper = RandomMasker(
min_nnz=drop_size, max_nnz=drop_size, is_batch_share=False
)
to_drops = []
for i in range(0, n_masks, n_batch):
to_drop = droper(n_batch, mask_shape(i))
to_drops.extend(torch.unbind(to_drop.bool(), dim=0))
return to_drops
def get_mean_std(dataset):
"""Return the mean and std of a datset.
Examples
--------
>>> from .imgs import get_Dataset
>>> import numpy as np
>>> cifar10 = get_Dataset("cifar10")(split="test")
Files already downloaded and verified
>>> get_mean_std(cifar10)
(array([0.49421427, 0.4851322 , 0.45040992], dtype=float32), array([0.24665268, 0.24289216, 0.2615922 ], dtype=float32))
"""
dataset.rm_transformations()
data = torch.stack([el[0] for el in dataset], dim=0)
return np.mean(data.numpy(), axis=(0, 2, 3)), np.std(data.numpy(), axis=(0, 2, 3))
def overlay_save_datasets(
bckgrnd_datasets,
to_overlay_datasets,
folder="data/",
split_names=["train", "test"],
dependence=dict(train=0, test=0),
**kwargs
):
"""Overlay corresponding train and test datasetsand save the output to file.
Parameters
----------
bckgrnd_datasets : tuple of tuple of array like
Background datasets on which to overlay the others. The exterior tuple corresponds to the
split (train, test, ...), the interior tuple is imgs/label: `((train_imgs, train_labels), ...)`,
image arrays should be of shape [n_bckgrnd, height_bckgrnd, width_bckgrnd, ...] and
dtype=uint8. Labels should be shape=[n_imgs, ...] and dtype=*.
to_overlay_datasets : tuple of array like
Datasets to overlay. Same shape and form as the previous argument `bckgrnd_datasets`.
folder : str, optional
Folder to which to save the images.
split_names : list of str, optional
Names of all the splits, should be at least as long as len(bckgrnd_datasets).
dependence : dict of float, optional
Whether to overlay such that there are dependencies between the background and the
overlaid data. A dictionary because the value can differ per split.
kwargs :
Additional arguments to `overlay_img`.
"""
is_missing_names = len(split_names) < len(bckgrnd_datasets)
if is_missing_names or len(bckgrnd_datasets) != len(to_overlay_datasets):
err = "Sizes don't agree `len(split_names)={}, len(bckgrnd_datasets)={}, len(to_overlay_datasets)={}`."
raise ValueError(
err.format(
len(split_names), len(bckgrnd_datasets), len(
to_overlay_datasets)
)
)
if not os.path.exists(folder):
os.makedirs(folder)
for i, (bckgrnd, to_overlay, name) in enumerate(
zip(bckgrnd_datasets, to_overlay_datasets, split_names)
):
if to_overlay[0] is not None and bckgrnd[0] is not None:
if dependence is not None and dependence[name] != 0:
out, idcs = overlay_img_dependencies(
bckgrnd[0],
to_overlay[0],
bckgrnd[1],
to_overlay[1],
dependence=dependence[name],
**kwargs
)
else:
# no dependencies between background and overlay
out, idcs = overlay_img(bckgrnd[0], to_overlay[0], **kwargs)
np.save(os.path.join(folder, name + "_x.npy"),
out, allow_pickle=False)
np.save(
os.path.join(folder, name + "_y.npy"),
to_numpy(bckgrnd[1]),
allow_pickle=False,
)
np.save(
os.path.join(folder, name + "_y_distractor.npy"),
to_numpy(to_overlay[1])[idcs],
allow_pickle=False,
)
def overlay_img_dependencies(
bckgrnd,
to_overlay,
bckgrnd_labels,
to_overlay_labels,
dependence=0.5,
seed=123,
**kwargs
):
"""Overlays an image `to_overlay` on a `bckgrnd` with dependencies between the labels.
Parameters
----------
bckgrnd : array like, shape=[n_bckgrnd, height_bckgrnd, width_bckgrnd, ...], dtype=uint8
Background images. Each image will have one random image from `to_overlay` overlayed on it.
to_overlay : array like, shape=[n_overlay, height_overlay, width_overlay, ...], dtype=uint8
Images to overlay. Currently the following assumptions are made:
- the overlaid images have to be at most as big as the background ones (i.e.
`height_overlay <= height_bckgrnd` and `width_overlay <= width_bckgrnd`).
- The overlaid images are also used as masks. This is especially good for black
and white images: whiter pixels (~1) are the ones to be overlaid. In the case
of colored images, this still holds but channel-wise.
bckgrnd_labels : array like, shape=[n_bckgrnd]
Labels of the background images.
to_overlay_labels : array like, shape=[n_overlay]
Labels of the images to overlay. The unique labels need to cover the unique labels
of the background images.
dependence : float, optional
Level of positive dependence in [0,1]. If 0, no dependence. If 1, the label of the
overlaid image always matches the label of the background.
seed : int, optional
Pseudo random seed.
kwargs :
Additional arguments to `overlay_img`.
"""
bckgrnd_labels = to_numpy(bckgrnd_labels)
to_overlay_labels = to_numpy(to_overlay_labels)
bckgrnd = to_numpy(bckgrnd)
to_overlay = to_numpy(to_overlay)
out_imgs = np.zeros_like(bckgrnd)
out_idcs = np.zeros_like(bckgrnd_labels)
for i in np.unique(bckgrnd_labels):
bckgrnd_i = bckgrnd[bckgrnd_labels == i]
to_overlay_i = to_overlay[to_overlay_labels == i]
n_bckgrnd_i = bckgrnd_i.shape[0]
n_overlay_i = to_overlay_i.shape[0]
with tmp_seed(seed):
n_dependent = int(dependence * n_bckgrnd_i)
idx_to_overlay_i = np.random.choice(
range(n_overlay_i), size=n_dependent)
to_overlay_i = to_overlay_i[idx_to_overlay_i]
out, idcs = overlay_img(
bckgrnd_i[:n_dependent], to_overlay_i, seed=seed, **kwargs
)
# indices in terms of the actual indices
idcs = idx_to_overlay_i[idcs]
idcs = np.flatnonzero(to_overlay_labels == i)[idcs]
if n_dependent < n_bckgrnd_i:
with tmp_seed(seed):
# sampling without dependency => from the entire set
n_independent = n_bckgrnd_i - n_dependent
idx_to_overlay_i = np.random.choice(
range(to_overlay.shape[0]), size=n_independent
)
out_indep, idcs_indep = overlay_img(
bckgrnd_i[n_dependent:],
to_overlay[idx_to_overlay_i],
seed=seed,
**kwargs
)
# indices in terms of the actual indices
idcs_indep = idx_to_overlay_i[idcs_indep]
out = np.concatenate([out, out_indep])
idcs = np.concatenate([idcs, idcs_indep])
# put it in order compared to initial order of background
out_imgs[bckgrnd_labels == i] = out
out_idcs[bckgrnd_labels == i] = idcs
return out_imgs, out_idcs
def overlay_img(bckgrnd, to_overlay, is_shift=False, seed=123):
"""Overlays an image with black background `to_overlay` on a `bckgrnd`
Parameters
----------
bckgrnd : array like, shape=[n_bckgrnd, height_bckgrnd, width_bckgrnd, ...], dtype=uint8
Background images. Each image will have one random image from `to_overlay` overlayed on it.
to_overlay : array like, shape=[n_overlay, height_overlay, width_overlay, ...], dtype=uint8
Images to overlay. Currently the following assumptions are made:
- the overlaid images have to be at most as big as the background ones (i.e.
`height_overlay <= height_bckgrnd` and `width_overlay <= width_bckgrnd`).
- The overlaid images are also used as masks. This is especially good for black
and white images: whiter pixels (~1) are the ones to be overlaid. In the case
of colored images, this still holds but channel-wise.
is_shift : bool, optional
Whether to randomly shift all overlayed images or to keep them on the bottom right.
seed : int, optional
Pseudo random seed.
Return
------
imgs : np.array, shape=[n_bckgrnd, height, width, 3], dtype=uint8
Overlayed images.
selected : np.array, shape=[n_bckgrnd], dtype=int64
Indices of the selected overlayed images.
"""
bckgrnd = to_numpy(bckgrnd)
to_overlay = to_numpy(to_overlay)
with tmp_seed(seed):
n_bckgrnd = bckgrnd.shape[0]
n_overlay = to_overlay.shape[0]
selected = np.random.choice(np.arange(n_overlay), size=n_bckgrnd)
to_overlay = to_overlay[selected, ...]
bckgrnd = ensure_color(bckgrnd).astype(np.float32)
to_overlay = ensure_color(to_overlay).astype(np.float32)
over_shape = to_overlay.shape[1:]
bck_shape = bckgrnd.shape[1:]
def get_margin(i): return (bck_shape[i] - over_shape[i]) // 2
def get_max_shift(i): return get_margin(i) + over_shape[i] // 3
get_shift = (
lambda i: np.random.randint(-get_max_shift(i), get_max_shift(i))
if is_shift
else get_max_shift(i) // 2
)
resized_overlay = np.zeros((n_bckgrnd,) + bck_shape[:2] + over_shape[2:])
resized_overlay[
:,
get_margin(0): -get_margin(0) or None,
get_margin(1): -get_margin(1) or None,
] = to_overlay
for i in range(2): # shift x and y
resized_overlay = np.stack(
[np.roll(im, get_shift(i), axis=i) for im in resized_overlay]
)
mask = resized_overlay / 255
return (mask * resized_overlay + (1 - mask) * bckgrnd).astype(np.uint8), selected
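# Hedged shape sketch (illustrative sizes): overlaying 100 MNIST-like 28x28
# grayscale digits on 100 CIFAR-like 32x32 color backgrounds:
# imgs, idcs = overlay_img(np.zeros((100, 32, 32, 3), dtype=np.uint8),
#                          np.zeros((100, 28, 28), dtype=np.uint8))
# gives imgs.shape == (100, 32, 32, 3) and idcs.shape == (100,).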
def at_least_ndim(arr, ndim):
"""Ensures that a numpy array is at least `ndim`-dimensional."""
padded_shape = arr.shape + (1,) * (ndim - len(arr.shape))
return arr.reshape(padded_shape)
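# e.g. at_least_ndim(np.zeros((5, 3)), 4).shape == (5, 3, 1, 1)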
def ensure_color(imgs):
"""
Ensures that a batch of colored (3-channel) or black and white (1-channel) numpy uint8
images is colored (3 channels).
"""
imgs = at_least_ndim(imgs, 4)
if imgs.shape[-1] == 1:
imgs = np.repeat(imgs, 3, axis=-1)
return imgs
def bw_to_color(img, rgb_proba=[1, 0, 0], seed=None):
"""Transform black and white image to red green or blue with a given probability."""
with tmp_seed(seed):
channel_to_color = np.random.choice(3, size=1, p=rgb_proba)
frame = torch.zeros_like(img.expand(3, -1, -1))
frame[channel_to_color] = img
return frame
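# Hedged sketch (hypothetical tensor): with img = torch.rand(1, 32, 32),
# bw_to_color(img, rgb_proba=[0, 1, 0]) returns a (3, 32, 32) image whose
# only nonzero channel is the green one.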
|
decodable_information_bottleneck-main
|
utils/data/helpers.py
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
This source code is licensed under the MIT license found in the
LICENSE file in the root directory of this source tree.
"""
import copy
import logging
import os
import random
import numpy as np
import torch
from sklearn.model_selection import train_test_split
from sklearn.utils import resample
from dib import UNLABELLED_CLASS
from dib.utils.helpers import tmp_seed
DIR = os.path.abspath(os.path.dirname(__file__))
class BaseDataset:
"""BaseDataset that should be inherited by the format-specific ones.
Parameters
----------
root : str, optional
Root to the data directory.
logger : logging.Logger, optional
Logger
is_return_index : bool, optional
Whether to return the index in addition to the labels.
is_random_targets : bool, optional
Whether to use random targets for the dataset.
seed : int, optional
Random seed.
"""
unlabelled_class = UNLABELLED_CLASS
def __init__(
self,
root=os.path.join(DIR, "../../../../data/"),
logger=logging.getLogger(__name__),
is_return_index=False,
is_random_targets=False,
seed=123,
):
self.seed = seed
self.logger = logger
self.root = root
self.is_return_index = is_return_index
self.is_random_targets = is_random_targets
self._is_return_constant = False
@property
def map_target_position(self):
"""
Return a dictionary that maps the type of target (e.g. "index") to its position in the
outputted target.
"""
target_names = {"target": 0}
if self._is_return_constant:
target_names["constant"] = len(target_names)
if self.is_return_index:
target_names["index"] = len(target_names)
return target_names
def randomize_targets_(self):
"""Randomize the targets in place"""
with tmp_seed(self.seed):
idcs = list(range(len(self.targets)))
random.shuffle(idcs)
self.targets = self.targets[idcs]
def rm_all_transformations_(self):
"""Completely remove transformation."""
pass
def make_test_(self):
"""Make the data a test set."""
pass
def append_(self, other):
"""Append a dataset to the current one."""
self.data = np.append(self.data, other.data, axis=0)
self.targets = np.append(self.targets, other.targets, axis=0)
def train_test_split(self, size=0.1, is_stratify=True, is_test_size=True):
"""Split the dataset into train and test (without data augmentation).
Parameters
----------
size : float or int, optional
If float, should be between 0.0 and 1.0 and represent the proportion of
the dataset to include in the test split. If int, represents the absolute
size of the test dataset.
is_stratify : bool, optional
Whether to stratify splits based on class label.
is_test_size : bool, optional
Whether size should be the test size or the training one.
Returns
-------
train : BaseDataset
Train dataset containing the complement of `test_size` examples.
test : BaseDataset
Test dataset containing `test_size` examples.
"""
idcs_all = list(range(len(self)))
stratify = self.targets if is_stratify else None
idcs_train, indcs_test = train_test_split(
idcs_all, stratify=stratify, test_size=size, random_state=self.seed
)
if not is_test_size:
indcs_test, idcs_train = idcs_train, indcs_test
train = self.clone()
train.keep_indcs_(idcs_train)
test = self.clone()
test.keep_indcs_(indcs_test)
test.make_test_()
return train, test
def drop_labels_(self, drop_size, is_stratify=True):
"""Drop part of the labels to make the dataset semisupervised.
Parameters
----------
drop_size : float or int or tuple, optional
If float, should be between 0.0 and 1.0 and represent the proportion of the labels to
drop. If int, represents the number of labels to drop. 0 means keep all.
is_stratify : bool, optional
Whether to stratify splits based on class label.
"""
if drop_size == 0:
return
self.logger.info(f"Dropping {drop_size} labels...")
idcs_all = list(range(len(self)))
stratify = self.targets if is_stratify else None
idcs_label, idcs_unlabel = train_test_split(
idcs_all, stratify=stratify, test_size=drop_size, random_state=self.seed
)
self.targets[idcs_unlabel] = self.unlabelled_class
def balance_labels_(self):
"""
Balances the number of labelled and unlabelled data by upsampling the labelled ones.
Only works if the number of labelled examples is smaller than the number of unlabelled ones.
"""
self.logger.info(f"Balancing the semi-supervised labels...")
idcs_unlab = [i for i, t in enumerate(
self.targets) if t == UNLABELLED_CLASS]
idcs_lab = [i for i, t in enumerate(
self.targets) if t != UNLABELLED_CLASS]
assert len(idcs_unlab) > len(idcs_lab)
resampled_idcs_lab = resample(
idcs_lab,
replace=True,
n_samples=len(idcs_unlab),
stratify=self.targets[idcs_lab],
random_state=self.seed,
)
self.keep_indcs_(idcs_unlab + resampled_idcs_lab)
def drop_unlabelled_(self):
"""Drop all the unlabelled examples."""
self.logger.info(f"Drop all the unlabelled examples...")
idcs_lab = [i for i, t in enumerate(
self.targets) if t != UNLABELLED_CLASS]
self.keep_indcs_(idcs_lab)
def get_subset(self, size, is_stratify=True):
"""Return a subset of `size` that does not share its memory.
Parameters
----------
size : float or int, optional
If float, should be between 0.0 and 1.0 and represent the proportion of
the dataset to include in the subset. If int, represents the absolute
size of the subset. If -1, return all.
is_stratify : bool, optional
Whether to stratify splits based on class label.
"""
if size == -1:
return self
subset, _ = self.train_test_split(
size=size, is_stratify=is_stratify, is_test_size=False
)
return subset
def clone(self):
"""Returns a deepcopy of the daatset."""
return copy.deepcopy(self)
def keep_indcs_(self, indcs):
"""Keep the given indices.
Parameters
----------
indcs : array-like int
Indices to keep. If the multiplicity of the indices is larger than 1 then will duplicate
the data.
"""
self.data = self.data[indcs]
self.targets = self.targets[indcs]
def drop_features_(self, drop_size):
"""Drop part of the features (e.g. pixels in images).
Parameters
----------
drop_size : float or int or tuple, optional
If float, should be between 0.0 and 1.0 and represent the proportion of the dataset to
drop. If int, represents the number of datapoints to drop. If tuple, same as before
but give bounds (min and max). 1 means drop all.
"""
if drop_size == 0:
return
raise NotImplementedError(
"drop_features_ not implemented for current dataset")
def add_index(self, y, index):
"""Append the index to the targets (if needed)."""
if self.is_return_index:
y = tuple(y) + (index,)
return y
def set_const_target_(self, target):
"""Set a constant target `target` to all targets."""
if self.targets.ndim == 1:
self.targets = np.expand_dims(self.targets, 1)
self.targets = np.append(
self.targets, self.targets * 0 + target, axis=1)
self._is_return_constant = True
def sort_data_(self, by="targets"):
"""Sort the data by {"targets"}. E.g. the first |X|/|Y| examples will be from the same target."""
targets = self.targets
if self.targets.ndim > 1:
targets = targets[:, 0]
if by == "targets":
idcs = list(np.argsort(targets))
self.targets = self.targets[idcs]
self.data = self.data[idcs]
else:
raise ValueError(f"Unkown by={by}")
def count_targets(self):
"""Return a dictionary where the keys are the targets and values the number of each target."""
targets, counts = np.unique(self.targets, return_counts=True)
return {t: c for t, c in zip(targets, counts)}
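# A minimal usage sketch of the helpers above (hypothetical subclass and
# values; assumes a concrete dataset defines `data`, `targets`, the logger
# and the unlabelled-class constant):
# dataset = MyDataset(...)
# train, test = dataset.train_test_split(size=0.2, is_stratify=True)
# train.drop_labels_(drop_size=0.9)  # keep labels for only 10% of examples
# train.balance_labels_()  # upsample the labelled minority
# print(train.count_targets())  # e.g. {-1: 45000, 0: 4500, 1: 4500, ...}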
|
decodable_information_bottleneck-main
|
utils/data/base.py
|
AVT-main
|
__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""Launch script to run arguments stored in txt files."""
import argparse
import subprocess
import os
import socket
import glob
from omegaconf import OmegaConf
import inquirer
import pathlib
from hydra.core.override_parser.overrides_parser import OverridesParser
from hydra._internal.core_plugins.basic_sweeper import BasicSweeper
CODE_DIR = str(pathlib.Path(__file__).parent.resolve())
BASE_RUN_DIR = f'{CODE_DIR}/OUTPUTS'
def parse_args():
"""Parse arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('-c',
'--cfg',
type=str,
required=True,
help='Overrides config file')
parser.add_argument('-l',
'--local',
action='store_true',
help='Run locally instead of launching to cluster')
parser.add_argument('-g',
'--debug',
action='store_true',
help='Run in debug mode: 1 GPU, when locally')
parser.add_argument('-t',
'--test',
action='store_true',
help='Run testing mode (will pick the last ckpt)')
parser.add_argument('-p',
'--partition',
type=str,
default=None,
help='Specify SLURM partition to run on')
parser.add_argument('--tb',
action='store_true',
help='Run tensorboard on this directory')
parser.add_argument('-f',
'--fl',
action='store_true',
help='View the folder (run a python server)')
parser.add_argument('-d',
'--delete',
action='store_true',
help='Delete the folder')
parser.add_argument('-k',
'--kill',
action='store_true',
help='Kill jobs running this config.')
parser.add_argument('--profile',
action='store_true',
help='Run with kernprof. Decorate fn with @profile')
parser.add_argument('--cls',
action='store_true',
help='Gen classification file and run that')
parser.add_argument('--run_id',
type=int,
default=None,
help='Run for this specific run_id, if known')
parser.add_argument('rest', nargs=argparse.REMAINDER)
args = parser.parse_args()
if args.debug:
args.local = True
return args
def get_sweep_param_from_combinations(clis):
"""
Returns:
[(run_id, overrides_dict)]. The run_id can be None if unsure what hydra
would use.
"""
sweeper = BasicSweeper(max_batch_size=None)
parser = OverridesParser.create()
overrides = parser.parse_overrides(clis)
run_args = sweeper.split_arguments(overrides, max_batch_size=None)[0]
res = []
for i, run_arg in enumerate(run_args):
res.append((i, dict([el.split('=') for el in run_arg])))
return res
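# Illustrative example (hypothetical overrides): hydra expands comma-separated
# choice sweeps into their cross product, so
# get_sweep_param_from_combinations(['opt.lr=0.1,0.01', 'model=resnet'])
# would return [(0, {'opt.lr': '0.1', 'model': 'resnet'}),
#               (1, {'opt.lr': '0.01', 'model': 'resnet'})].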
def get_sweep_param_from_runs(conf_path):
exp_path = os.path.join(BASE_RUN_DIR, conf_path)
run_dirs = glob.glob(os.path.join(exp_path, r'[0-9]*'))
if len(run_dirs) == 0:
return []
res = []
for run_dir in run_dirs:
run_id = int(os.path.basename(run_dir))
override_fpath = os.path.join(run_dir, '.hydra/overrides.yaml')
if not os.path.exists(override_fpath):
# Likely deleted, so run_dirs may not be useful..
# Happens when we delete the output folder but the run folders
# don't get deleted: the folder was opened in toplog, and the NFS
# files aren't deleted until the toplog on that folder is killed
return []
conf = OmegaConf.load(override_fpath)
res.append((run_id, dict([el.split('=') for el in conf])))
return res
def subselect_dict_keys_diff(run_id_param_dicts):
"""Select keys from the param_dicts that actually change between configs."""
key_vals = {}
for _, param_dict in run_id_param_dicts:
for key, val in param_dict.items():
if key not in key_vals:
key_vals[key] = []
key_vals[key].append(val)
keys_to_keep = [
key for key, vals in key_vals.items() if len(set(vals)) > 1
]
return [(el[0], {key: el[1][key]
for key in keys_to_keep}) for el in run_id_param_dicts]
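# E.g. (hypothetical dicts): given [(0, {'lr': '0.1', 'bs': '32'}),
# (1, {'lr': '0.01', 'bs': '32'})], only 'lr' varies across runs, so this
# returns [(0, {'lr': '0.1'}), (1, {'lr': '0.01'})].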
def escape_str(input_str):
return f"'{input_str}'"
def choose_single_run(clis, fpath, run_id):
"""
clis are a list of flags provided in the config overrides file.
Args:
clis: List of clis from the txt file
run_id: If known which model to run locally, the run_id of that sweep
"""
run_id_param_dicts = get_sweep_param_from_combinations(clis)
if len(run_id_param_dicts) == 1:
final_run_id, param_dict = run_id_param_dicts[0]
assert run_id is None or run_id == final_run_id
elif run_id is not None:
final_run_id = run_id
param_dicts = [el[1] for el in run_id_param_dicts if el[0] == run_id]
assert len(param_dicts) == 1, 'run_id not found, or multiple found'
param_dict = param_dicts[0]
else:
# Show options to the user and let her pick
run_id_param_dicts_diff = subselect_dict_keys_diff(run_id_param_dicts)
print('Choose from: \n' +
'\n'.join([str(el) for el in run_id_param_dicts_diff]))
qst = [
inquirer.List(
'r',
message='Which sweep config to use?',
choices=range(len(run_id_param_dicts)),
carousel=True,
),
]
final_run_id, param_dict = run_id_param_dicts[inquirer.prompt(qst)
['r']]
return final_run_id, [f'{key}={val}' for key, val in param_dict.items()]
def read_file_into_cli(fpath, running_local=False, run_id=None):
"""Read cli from file into a string."""
res = []
with open(fpath, 'r') as fin:
for line in fin:
args = line.split('#')[0].strip()
if len(args) == 0:
continue
res.append(args)
if running_local:
final_run_id, res = choose_single_run(res, fpath, run_id)
else:
final_run_id = None # not local, launch all, so run_id is irrelevant
return final_run_id, res
def get_models_dir(dpath):
"""Go inside the dpath to get the model dir."""
runs = sorted([el for el in next(os.walk(dpath))[1] if el.isdigit()])
if len(runs) > 1:
# Ask which run to use
question = [
inquirer.List(
'run',
message='Which run to use?',
choices=runs,
),
]
answers = inquirer.prompt(question)
else:
answers = dict(run=runs[0])
return dpath + '/' + answers['run']
def is_port_in_use(port):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
return s.connect_ex(('localhost', port)) == 0
def get_free_port():
# Make sure to forward these ports in et
potential_ports = range(30303, 30399)
for port in potential_ports:
if not is_port_in_use(port):
return port
raise ResourceWarning('No empty port found')
def num_gpus():
output = subprocess.run('nvidia-smi --query-gpu=name --format=csv,noheader'
'| wc -l', shell=True, capture_output=True)
return int(output.stdout.decode().strip())
def construct_cmd(args):
"""Construct the cmd as provided in args."""
if args.cfg:
assert args.cfg.startswith('expts'), 'Must be wrt this directory'
agent_folder = '{}/{}'.format(BASE_RUN_DIR,
args.cfg if args.cfg else 'default')
if args.kill:
slurm_ids = os.listdir(os.path.join(agent_folder, '.submitit/'))
shall = input("Kill %s (y/N) " % slurm_ids).lower() == 'y'
if shall:
return 'scancel {}'.format(' '.join(slurm_ids))
if args.tb: # Run tensorboard only
# Clear the cli and just run tensorboard
cli = ('cd {agent_folder}; tensorboard --logdir . --port {port} '
'--max_reload_threads 10 --window_title {name} ').format(
agent_folder=agent_folder,
port=get_free_port(),
name=args.cfg)
return cli
if args.fl: # Visualize the folder only
# Clear the cli and just run tensorboard
cli = 'cd {}; python -m http.server {}'.format(agent_folder,
get_free_port())
return cli
if args.delete:
cli = 'rm -r {f}/* {f}/.*'.format(f=agent_folder)
shall = input("Run %s (y/N) " % cli).lower() == 'y'
if shall:
return cli
return ''
# Else, it is the general train command
run_id, cli_stuff = read_file_into_cli(args.cfg,
running_local=args.local,
run_id=args.run_id)
cli_stuff = [escape_str(el) for el in cli_stuff]
cli_stuff = ' '.join(cli_stuff)
if args.debug:
if args.test:
# If args.test, then might be testing a model from other dir
agent_folder = os.path.join(agent_folder, str(run_id))
else:
agent_folder = os.path.join(agent_folder, 'local')
# Delete the sync file if it exists
clear_cmd = f'find {agent_folder} -iname sync_file_init -delete'
print(f'Clearing out the sync files using: {clear_cmd}')
subprocess.call(clear_cmd, shell=True)
cli = (
'export NCCL_SOCKET_IFNAME=; export GLOO_SOCKET_IFNAME=; '
' HYDRA_FULL_ERROR=1 '
' {} train_net.py hydra.run.dir={} ').format(
'kernprof -l ' if args.profile else 'python ', agent_folder)
cli += cli_stuff
if args.test:
cli += ' test_only=True '
if args.local:
cli += (' hydra.launcher.nodes=1 '
f' hydra.launcher.gpus_per_node={num_gpus()} '
' hydra/launcher=submitit_local ')
else:
cli += (' hydra.launcher.max_num_timeout=3 ')
if args.partition is not None and not args.local:
cli += f' +hydra.launcher.partition="{args.partition}" '
if args.debug:
cli += (' data_train.workers=0 data_eval.workers=0 ')
cli += ' ' + ' '.join(args.rest)
# This must go at the end, the other args must go before
if not args.debug:
cli += ' -m '
return cli
def main():
"""Main func."""
args = parse_args()
# if args.cls:
# args = gen_cls_override_file(args)
cmd = construct_cmd(args)
print('>> Running "{}"'.format(cmd))
subprocess.call(cmd, shell=True)
if __name__ == '__main__':
main()
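# Typical invocations (illustrative; the expts/*.txt config path is
# hypothetical, though construct_cmd asserts it starts with 'expts'):
#   python launch.py -c expts/my_expt.txt          # launch the full sweep
#   python launch.py -c expts/my_expt.txt -l -g    # debug one run locally
#   python launch.py -c expts/my_expt.txt -t       # test with the last ckpt
#   python launch.py -c expts/my_expt.txt --tb     # tensorboard for the dir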
|
AVT-main
|
launch.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""Main training entry."""
import os
import logging
import random
import subprocess
import torch
import hydra
from omegaconf import DictConfig, OmegaConf
import func
OmegaConf.register_new_resolver('minus', lambda x, y: x - y)
# Multiply and cast to integer
OmegaConf.register_new_resolver('times_int', lambda x, y: int(x * y))
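# E.g. (illustrative config keys), these resolvers can then be used inside
# the hydra YAML configs as:
#   warmup_epochs: '${times_int:${train.num_epochs},0.1}'  # 10% of epochs
#   eval_batch_size: '${minus:${train.batch_size},8}'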
@hydra.main(config_path='conf', config_name='config')
def main(cfg: DictConfig) -> None:
# Since future runs might corrupt the stored hydra config, copy it over
# for backup.
if not os.path.exists('.hydra.orig'):
subprocess.call('cp -r .hydra .hydra.orig', shell=True)
random.seed(cfg.seed)
torch.manual_seed(cfg.seed)
try:
print(subprocess.check_output('nvidia-smi'))
except subprocess.CalledProcessError:
print('Could not run nvidia-smi..')
# cudnn.deterministic = True # Makes it slow..
getattr(func, cfg.train.fn).main(cfg)
if __name__ == "__main__":
logging.basicConfig(format=('%(asctime)s %(levelname)-8s'
' {%(module)s:%(lineno)d} %(message)s'),
level=logging.DEBUG,
datefmt='%Y-%m-%d %H:%M:%S')
torch.multiprocessing.set_start_method('spawn')
main() # pylint: disable=no-value-for-parameter # Uses hydra
|
AVT-main
|
train_net.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""The Epic Kitchens dataset loaders."""
from typing import List, Dict, Sequence, Tuple, Union
from datetime import datetime, date
from collections import OrderedDict
import pickle as pkl
import csv
import logging
from pathlib import Path
import lmdb
import pandas as pd
import numpy as np
from omegaconf import OmegaConf
import torch
import torch.nn as nn
from .base_video_dataset import BaseVideoDataset, RULSTM_TSN_FPS
from .reader_fns import Reader
EGTEA_VERSION = -1 # This class also supports EGTEA Gaze+
EPIC55_VERSION = 0.1
EPIC100_VERSION = 0.2
class EPICKitchens(BaseVideoDataset):
"""EPICKitchens dataloader."""
def __init__(
self,
annotation_path: Sequence[Path],
only_keep_persons: str = None,
only_keep_videos: Path = None,
action_labels_fpath: Path = None,
annotation_dir: Path = None,
rulstm_annotation_dir: Path = None,
_precomputed_metadata: Path = None,
version: float = EPIC55_VERSION,
**other_kwargs,
):
"""
Args:
label_type (str): The type of label to return
only_keep_persons (str): If None, ignore. Else, will only keep
videos of persons P<start> to P<end> (both included), where this
string is "<start>-<end>". This is used to create
the train_minus_val and val sets, as per
https://arxiv.org/abs/1806.06157
only_keep_videos (Path): Path to a file with list of videos to keep.
This was used to define the val set as used in anticipation
in https://arxiv.org/abs/1905.09035
action_labels_fpath (Path): Path to map the verb and noun labels to
actions. It was used in the anticipation paper, that defines
a set of actions and train for action prediction, as opposed
to verb and noun prediction.
annotation_dir: Where all the other annotations are typically stored
"""
self.version = version
df = pd.concat([self._load_df(el) for el in annotation_path])
df.reset_index(inplace=True, drop=True) # to combine all of them
df = self._subselect_df_by_videos(
self._subselect_df_by_person(df, only_keep_persons),
only_keep_videos)
# If no specific annotation_dir specified, use the parent dir of
# the first annot path
if annotation_dir is None:
self.annotation_dir = Path(annotation_path[0]).parent
else:
self.annotation_dir = Path(annotation_dir)
self.rulstm_annotation_dir = rulstm_annotation_dir
epic_postfix = ''
if self.version == EPIC100_VERSION:
epic_postfix = '_100'
if self.version != EGTEA_VERSION:
verb_classes = self._load_class_names(
self.annotation_dir / f'EPIC{epic_postfix}_verb_classes.csv')
noun_classes = self._load_class_names(
self.annotation_dir / f'EPIC{epic_postfix}_noun_classes.csv')
else:
verb_classes, noun_classes = [], []
# Create action classes
if action_labels_fpath is not None:
load_action_fn = self._load_action_classes
if self.version == EGTEA_VERSION:
load_action_fn = self._load_action_classes_egtea
action_classes, verb_noun_to_action = (
load_action_fn(action_labels_fpath))
else:
action_classes, verb_noun_to_action = self._gen_all_actions(
verb_classes, noun_classes)
# Add the action classes to the data frame
if ('action_class' not in df.columns
and {'noun_class', 'verb_class'}.issubset(df.columns)):
df.loc[:, 'action_class'] = df.loc[:, (
'verb_class', 'noun_class')].apply(
lambda row: (verb_noun_to_action[
(row.at['verb_class'], row.at['noun_class'])]
if (row.at['verb_class'], row.at['noun_class']
) in verb_noun_to_action else -1),
axis=1)
elif 'action_class' not in df.columns:
df.loc[:, 'action_class'] = -1
df.loc[:, 'verb_class'] = -1
df.loc[:, 'noun_class'] = -1
num_undefined_actions = len(df[df['action_class'] == -1].index)
if num_undefined_actions > 0:
logging.error(
'Did not find a valid action label for %d/%d samples!',
num_undefined_actions, len(df))
assert _precomputed_metadata is None, 'Not supported yet'
other_kwargs['verb_classes'] = verb_classes
other_kwargs['noun_classes'] = noun_classes
other_kwargs['action_classes'] = action_classes
super().__init__(df, **other_kwargs)
# following is used in the notebooks for marginalization, so save it
self.verb_noun_to_action = verb_noun_to_action
logging.info('Created EPIC %s dataset with %d samples', self.version,
len(self))
@property
def primary_metric(self) -> str:
if self.version == EPIC100_VERSION:
# For EK100, we want to optimize for AR5
return 'final_acc/action/AR5'
return super().primary_metric
@property
def class_mappings(self) -> Dict[Tuple[str, str], torch.FloatTensor]:
num_verbs = len(self.verb_classes)
if num_verbs == 0:
num_verbs = len(
set([el[0] for el, _ in self.verb_noun_to_action.items()]))
num_nouns = len(self.noun_classes)
if num_nouns == 0:
num_nouns = len(
set([el[1] for el, _ in self.verb_noun_to_action.items()]))
num_actions = len(self.action_classes)
if num_actions == 0:
num_actions = len(
set([el for _, el in self.verb_noun_to_action.items()]))
verb_in_action = torch.zeros((num_actions, num_verbs),
dtype=torch.float)
noun_in_action = torch.zeros((num_actions, num_nouns),
dtype=torch.float)
for (verb, noun), action in self.verb_noun_to_action.items():
verb_in_action[action, verb] = 1.0
noun_in_action[action, noun] = 1.0
return {
('verb', 'action'): verb_in_action,
('noun', 'action'): noun_in_action
}
@property
def classes_manyshot(self) -> OrderedDict:
"""
In EPIC-55, the recall computation was done for "many shot" classes,
and not for all classes. So, for that version read the class names as
provided by RULSTM.
Function adapted from
https://github.com/fpv-iplab/rulstm/blob/57842b27d6264318be2cb0beb9e2f8c2819ad9bc/RULSTM/main.py#L386
"""
if self.version != EPIC55_VERSION:
return super().classes_manyshot
# read the list of many shot verbs
many_shot_verbs = {
el['verb']: el['verb_class']
for el in pd.read_csv(self.annotation_dir /
'EPIC_many_shot_verbs.csv').to_dict(
'records')
}
# read the list of many shot nouns
many_shot_nouns = {
el['noun']: el['noun_class']
for el in pd.read_csv(self.annotation_dir /
'EPIC_many_shot_nouns.csv').to_dict(
'records')
}
# create the list of many shot actions
# an action is "many shot" if at least one
# between the related verb and noun are many shot
many_shot_actions = {}
action_names = {val: key for key, val in self.action_classes.items()}
for (verb_id, noun_id), action_id in self.verb_noun_to_action.items():
if (verb_id in many_shot_verbs.values()) or (
noun_id in many_shot_nouns.values()):
many_shot_actions[action_names[action_id]] = action_id
return {
'verb': many_shot_verbs,
'noun': many_shot_nouns,
'action': many_shot_actions,
}
@staticmethod
def _load_action_classes(
action_labels_fpath: Path
) -> Tuple[Dict[str, int], Dict[Tuple[int, int], int]]:
"""
Given a CSV file with the actions (as from RULSTM paper), construct
the set of actions and mapping from verb/noun to action
Args:
action_labels_fpath: path to the file
Returns:
class_names: Dict of action class names
verb_noun_to_action: Mapping from verb/noun to action IDs
"""
class_names = {}
verb_noun_to_action = {}
with open(action_labels_fpath, 'r') as fin:
reader = csv.DictReader(fin, delimiter=',')
for lno, line in enumerate(reader):
class_names[line['action']] = lno
verb_noun_to_action[(int(line['verb']),
int(line['noun']))] = int(line['id'])
return class_names, verb_noun_to_action
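# The CSV is expected to have columns id,verb,noun,action (as in the RULSTM
# action annotations); illustrative rows (hypothetical values):
#   id,verb,noun,action
#   0,0,8,take_plate
#   1,3,8,put_plate
# which would yield class_names['take_plate'] == 0 and
# verb_noun_to_action[(0, 8)] == 0.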
@staticmethod
def _load_action_classes_egtea(
action_labels_fpath: Path
) -> Tuple[Dict[str, int], Dict[Tuple[int, int], int]]:
"""
Given a CSV file with the actions (as from RULSTM paper), construct
the set of actions and mapping from verb/noun to action
Args:
action_labels_fpath: path to the file
Returns:
class_names: Dict of action class names
verb_noun_to_action: Mapping from verb/noun to action IDs
"""
class_names = {}
verb_noun_to_action = {}
with open(action_labels_fpath, 'r') as fin:
reader = csv.DictReader(
fin,
delimiter=',',
# Assuming the order is verb/noun
# TODO check if that is correct
fieldnames=['id', 'verb_noun', 'action'])
for lno, line in enumerate(reader):
class_names[line['action']] = lno
verb, noun = [int(el) for el in line['verb_noun'].split('_')]
verb_noun_to_action[(verb, noun)] = int(line['id'])
return class_names, verb_noun_to_action
@staticmethod
def _gen_all_actions(
verb_classes: List[str], noun_classes: List[str]
) -> Tuple[Dict[str, int], Dict[Tuple[int, int], int]]:
"""
Given all possible verbs and nouns, construct all possible actions
Args:
verb_classes: All verbs
noun_classes: All nouns
Returns:
class_names: list of action class names
verb_noun_to_action: Mapping from verb/noun to action IDs
"""
class_names = {}
verb_noun_to_action = {}
action_id = 0
for verb_id, verb_cls in enumerate(verb_classes):
for noun_id, noun_cls in enumerate(noun_classes):
class_names[f'{verb_cls}:{noun_cls}'] = action_id
verb_noun_to_action[(verb_id, noun_id)] = action_id
action_id += 1
return class_names, verb_noun_to_action
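# Illustrative: with verb_classes=['take', 'put'] and noun_classes=['plate'],
# this yields class_names == {'take:plate': 0, 'put:plate': 1} and
# verb_noun_to_action == {(0, 0): 0, (1, 0): 1}.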
def _load_class_names(self, annot_path: Path):
res = {}
with open(annot_path, 'r') as fin:
reader = csv.DictReader(fin, delimiter=',')
for lno, line in enumerate(reader):
res[line['class_key' if self.version ==
EPIC55_VERSION else 'key']] = lno
return res
def _load_df(self, annotation_path):
if annotation_path.endswith('.pkl'):
return self._init_df_orig(annotation_path)
elif annotation_path.endswith('.csv'):
# Else, it must be the RULSTM annotations (which are a
# little different, perhaps due to quantization into frames)
return self._init_df_rulstm(annotation_path)
else:
raise NotImplementedError(annotation_path)
def _init_df_gen_vidpath(self, df):
# generate video_path
if self.version == EGTEA_VERSION:
df.loc[:, 'video_path'] = df.apply(
lambda x: Path(x.video_id + '.mp4'),
axis=1,
)
else: # For the EPIC datasets
df.loc[:, 'video_path'] = df.apply(
lambda x: (Path(x.participant_id) / Path(x.video_id + '.MP4')),
axis=1,
)
return df
def _init_df_rulstm(self, annotation_path):
logging.info('Loading RULSTM EPIC csv annotations %s', annotation_path)
df = pd.read_csv(
annotation_path,
names=[
'uid',
'video_id',
'start_frame_30fps',
'end_frame_30fps',
'verb_class',
'noun_class',
'action_class',
],
index_col=0,
skipinitialspace=True,
dtype={
'uid': str, # In epic-100, this is a str
'video_id': str,
'start_frame_30fps': int,
'end_frame_30fps': int,
'verb_class': int,
'noun_class': int,
'action_class': int,
})
# Make a copy of the UID column, since that will be needed to gen
# output files
df.reset_index(drop=False, inplace=True)
# Convert the frame number to start and end
df.loc[:, 'start'] = df.loc[:, 'start_frame_30fps'].apply(
lambda x: x / RULSTM_TSN_FPS)
df.loc[:, 'end'] = df.loc[:, 'end_frame_30fps'].apply(
lambda x: x / RULSTM_TSN_FPS)
# Participant ID from video_id
df.loc[:, 'participant_id'] = df.loc[:, 'video_id'].apply(
lambda x: x.split('_')[0])
df = self._init_df_gen_vidpath(df)
df.reset_index(inplace=True, drop=True)
return df
def _init_df_orig(self, annotation_path):
"""
Loading the original EPIC Kitchens annotations
"""
def timestr_to_sec(s, fmt='%H:%M:%S.%f'):
timeobj = datetime.strptime(s, fmt).time()
td = datetime.combine(date.min, timeobj) - datetime.min
return td.total_seconds()
# Load the DF from annot path
logging.info('Loading original EPIC pkl annotations %s',
annotation_path)
with open(annotation_path, 'rb') as fin:
df = pkl.load(fin)
# Make a copy of the UID column, since that will be needed to gen
# output files
df.reset_index(drop=False, inplace=True)
# parse timestamps from the video
df.loc[:, 'start'] = df.start_timestamp.apply(timestr_to_sec)
df.loc[:, 'end'] = df.stop_timestamp.apply(timestr_to_sec)
# original annotations have text in weird format - fix that
if 'noun' in df.columns:
df.loc[:, 'noun'] = df.loc[:, 'noun'].apply(
lambda s: ' '.join(s.replace(':', ' ').split(sep=' ')[::-1]))
if 'verb' in df.columns:
df.loc[:, 'verb'] = df.loc[:, 'verb'].apply(
lambda s: ' '.join(s.replace('-', ' ').split(sep=' ')))
df = self._init_df_gen_vidpath(df)
df.reset_index(inplace=True, drop=True)
return df
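# Illustrative: timestr_to_sec('00:01:30.50') == 90.5, so an annotation with
# start_timestamp '00:01:30.50' and stop_timestamp '00:01:32.00' yields
# start=90.5 and end=92.0 (in seconds).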
@staticmethod
def _subselect_df_by_person(df, only_keep_persons):
if only_keep_persons is None:
return df
start, end = [int(el) for el in only_keep_persons.split('-')]
df = df.loc[df['participant_id'].isin(
['P{:02d}'.format(el) for el in range(start, end + 1)]), :]
df.reset_index(inplace=True, drop=True)
return df
@staticmethod
def _subselect_df_by_videos(df, videos_fpath):
if videos_fpath is None:
return df
with open(videos_fpath, 'r') as fin:
videos_to_keep = [el.strip() for el in fin.read().splitlines()]
df = df.loc[df['video_id'].isin(videos_to_keep), :]
df.reset_index(inplace=True, drop=True)
return df
class EpicRULSTMFeatsReader(Reader):
def __init__(self,
lmdb_path: Union[Path, List[Path]] = None,
read_type: str = 'exact_rulstm',
warn_if_using_closeby_frame: bool = True):
"""
Args:
lmdb_path: LMDB path for RULSTM features. Must be
specified if using rulstm_tsn_feat input_type. Could be a
list, in which case it will concat all those features together.
read_type: [exact_rulstm/normal] This specifies the style of
feature reading for RULSTM features. Until Oct 22, I have been
exactly reading 11 frames at 0.25s, but that is not scalable to
learn language models, so making it more generic to read all
frames and let the base_video_dataset code figure out how to
re-sample to get the relevant frames. Not making it the default,
to be able to repro older results.
"""
super().__init__()
if OmegaConf.get_type(lmdb_path) != list:
lmdb_path = [lmdb_path]
self.lmdb_envs = [
lmdb.open(el, readonly=True, lock=False) for el in lmdb_path
]
self.read_type = read_type
self.warn_if_using_closeby_frame = warn_if_using_closeby_frame
def forward(self, *args, **kwargs):
return self._read_rulstm_features(*args, **kwargs)
@staticmethod
def get_frame_rate(video_path: Path) -> float:
del video_path
return RULSTM_TSN_FPS
def read_representations(self, frames, env, frame_format):
"""Reads a set of representations, given their frame names and an LMDB
environment.
From https://github.com/fpv-iplab/rulstm/blob/96e38666fad7feafebbeeae94952dba24771e512/RULSTM/dataset.py#L10
"""
features = []
# for each frame
for frame_id in frames:
# read the current frame
with env.begin() as e:
# Need to search for a frame that has features stored,
# the exact frame may not have.
# To avoid looking at the future when training/testing,
# (important for anticipation), look only for previous to
# current position.
dd = None
search_radius = 0
for search_radius in range(10):
dd = e.get(
frame_format.format(
frame_id - search_radius).strip().encode('utf-8'))
if dd is not None:
break
if dd is not None and search_radius > 0:
if self.warn_if_using_closeby_frame:
logging.warning('Missing %s, but used %d instead',
frame_format.format(frame_id),
frame_id - search_radius)
if dd is None:
logging.error(
'Missing %s, Only specific frames are stored in lmdb :(',
frame_format.format(frame_id))
features.append(None)
else:
# convert to numpy array
data = np.frombuffer(dd, 'float32')
# append to list
features.append(data)
# For any frames we didn't find a feature, use a series of 0s
features_not_none = [el for el in features if el is not None]
assert len(features_not_none) > 0, (
f'No features found in {frame_format} - {frames}')
feature_not_none = features_not_none[0] # any
features = [
np.zeros_like(feature_not_none) if el is None else el
for el in features
]
# convert list to numpy array
features = np.array(features)
# Add singleton dimensions to make it look like a video, so
# rest of the code just works
features = features[:, np.newaxis, np.newaxis, :]
# Make it torch Tensor to be consistent
features = torch.as_tensor(features)
return features
def _read_rulstm_features(self,
video_path: Path,
start_sec: float,
end_sec: float,
fps: float,
df_row: pd.DataFrame,
pts_unit='sec'):
del pts_unit # Not supported here
if self.read_type == 'exact_rulstm':
# get frames every 0.25s between start and end frames.
# 0.25 comes from their code, and they typically do 2.5s total
# observation time (11 frames 0.25s apart); 14 is the full
# sequence length they use.
time_stamps = end_sec - np.arange(0.0, 0.25 * 11, 0.25)[::-1]
frames = np.floor(time_stamps * fps).astype(int)
elif self.read_type == 'normal':
# Read every single frame between the start and end, the
# base_video_dataset code will deal with how to sample into 4fps
# (i.e. 0.25s steps)
# Rather than first computing the timestamps, just compute the
# frame ID of the start and end, and do a arange .. that avoids
# any repeated frames due to quantization/floor
time_stamps = None
start_frame = np.floor(start_sec * fps)
end_frame = np.floor(end_sec * fps)
frames = np.arange(end_frame, start_frame, -1).astype(int)[::-1]
else:
raise NotImplementedError(f'Unknown {self.read_type}')
# If the frames go below 1, replace them with the lowest time pt
assert frames.max() >= 1, (
f"The dataset shouldn't have cases otherwise. {video_path} "
f'{start_sec} {end_sec} {df_row} {frames} {time_stamps} ')
frames[frames < 1] = frames[frames >= 1].min()
# Get the features
all_feats = []
for lmdb_env in self.lmdb_envs:
all_feats.append(
self.read_representations(
frames, lmdb_env,
Path(video_path).stem + '_frame_{:010d}.jpg'))
final_feat = torch.cat(all_feats, dim=-1)
# Must return rgb, audio, info; so padding with empty dicts for those
return final_feat, {}, {}
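# A worked example (hypothetical numbers) for the 'exact_rulstm' branch above:
# with end_sec=10.0 and fps=30, time_stamps = 10.0 - [2.5, 2.25, ..., 0.0]
# = [7.5, 7.75, ..., 10.0] (11 steps, 0.25s apart), so
# frames = floor(time_stamps * 30) = [225, 232, ..., 300].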
|
AVT-main
|
datasets/epic_kitchens.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""The base dataset loader."""
from typing import Tuple, Union, Sequence, Dict
import logging
from pathlib import Path
from collections import OrderedDict
import operator
from multiprocessing import Manager
import math
import h5py
import pandas as pd
import numpy as np
import torch
import torch.nn as nn
import torchvision
from omegaconf import OmegaConf
import hydra
from hydra.types import TargetConf
from common.utils import get_video_info, get_world_size, get_rank
SAMPLE_STRAT_CNTR = 'center_clip'
SAMPLE_STRAT_RAND = 'random_clip'
SAMPLE_STRAT_LAST = 'last_clip'
SAMPLE_STRAT_FIRST = 'first_clip'
FUTURE_PREFIX = 'future' # to specify future videos
# This is specific to EPIC kitchens
RULSTM_TSN_FPS = 30.0 # The frame rate the feats were stored by RULSTM
# This is important for some datasets, like Breakfast, where reading using the
# pyAV reader leads to jerky videos for some reason. This requires torchvision
# to be compiled from source, instructions in the top level README
torchvision.set_video_backend('video_reader')
def convert_to_anticipation(df: pd.DataFrame,
root_dir: Sequence[Path],
tau_a: float,
tau_o: float,
future_clip_ratios: Sequence[float] = (1.0, ),
drop_style='correct'
) -> Tuple[pd.DataFrame, pd.DataFrame]:
"""
Based on the definition in the original paper
https://arxiv.org/pdf/1804.02748.pdf, convert the start and end
video times to as used in anticipation.
tau_a (float): Anticipation time in seconds: the model sees a clip
that finishes tau_a seconds before the action to be
anticipated starts. This is as per the defn
in https://arxiv.org/pdf/1804.02748.pdf (pg 15). The special
value -999 disables anticipation (plain action recognition).
tau_o (float): The amount of video to see before doing the
anticipation. In the original paper they used 1s
(https://arxiv.org/pdf/1804.02748.pdf), but in further ones
they use 3.5 (https://arxiv.org/pdf/1905.09035.pdf).
future_clip_ratios: A list of ratios (< 1.0) of tau_a, to define what clips
to set as the future clips. These will be used when returning future
clips. Ideally the labels should be adjusted to match this too, but
not doing that for now.
"""
del root_dir
if tau_a == -999:
# No anticipation, just simple recognition
# Still add the orig_start and orig_end, future etc., so the
# future prediction baseline can handle the case where no future
# is predicted.
# This will ensure the future clip ends up being the same as current
tau_a = df.loc[:, 'start'] - df.loc[:, 'end']
tau_o = df.loc[:, 'end'] - df.loc[:, 'start']
logging.debug(
'Converting data to anticipation with tau_a=%s and '
'tau_o=%s.', tau_a, tau_o)
# Copy over the current start and end times
df.loc[:, 'orig_start'] = df.start
df.loc[:, 'orig_end'] = df.end
# Convert using tau_o and tau_a
df.loc[:, 'end'] = df.loc[:, 'start'] - tau_a
df.loc[:, 'start'] = df.loc[:, 'end'] - tau_o
# Add the future clips
for i, future_clip_ratio in enumerate(future_clip_ratios):
if future_clip_ratio == -999:
# A spl number to use the exact current clip as the future
df.loc[:, f'{FUTURE_PREFIX}_{i}_start'] = df.loc[:, 'start']
df.loc[:, f'{FUTURE_PREFIX}_{i}_end'] = df.loc[:, 'end']
elif future_clip_ratio > -10 and future_clip_ratio < 10:
eff_tau_a = tau_a * future_clip_ratio
df.loc[:, f'{FUTURE_PREFIX}_{i}_start'] = (df.loc[:, 'end'] +
eff_tau_a)
df.loc[:, f'{FUTURE_PREFIX}_{i}_end'] = (
df.loc[:, f'future_{i}_start'] + tau_o)
else:
raise ValueError(f'Seems out of bound {future_clip_ratio}')
# first frame seconds
f1_sec = 1 / RULSTM_TSN_FPS
old_df = df
if drop_style == 'correct':
# at least 1 frame
df = df[df.end >= f1_sec]
elif drop_style == 'full_context_in':
# All frames should be in
df = df[df.start >= f1_sec]
elif drop_style == 'action_banks':
# Based on their dataset_anticipation:__get_snippet_features()
df = df[df.end >= 2]
else:
raise NotImplementedError(f'Unknown style {drop_style}')
discarded_df = pd.concat([old_df, df]).drop_duplicates(subset=['uid'],
keep=False)
df.reset_index(inplace=True, drop=True)
return df, discarded_df
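# A worked example (hypothetical times): an action labeled [start=10.0,
# end=12.0] with tau_a=1.0 and tau_o=3.5 becomes the observed clip
# [start=5.5, end=9.0], i.e. it ends 1s before the action starts, while
# orig_start/orig_end retain the action extent for label lookups.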
def break_segments_by_duration(duration, label, segment_len):
"""
Return a list of [(duration, label1, label2, ...), ...] such that each
duration is == segment_len if set.
Note label can be a scalar or vector (in case of multi-label cls)
"""
if not isinstance(label, list):
label = [label]
if segment_len is None:
return [[duration] + label], duration
nseg = int(round(duration / segment_len))
return [[segment_len] + label for _ in range(nseg)], nseg * segment_len
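# Illustrative: break_segments_by_duration(2.0, 5, 0.5) returns
# ([[0.5, 5], [0.5, 5], [0.5, 5], [0.5, 5]], 2.0), i.e. four 0.5s segments;
# with segment_len=None it returns ([[2.0, 5]], 2.0) unchanged.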
def dense_labels_to_segments(
dense_labels,
segment_start_time,
segment_end_time,
# -1 => get as many as possible
pred_steps=-1,
fixed_duration=None,
dummy_label=-1):
segments = []
for start, end, label in dense_labels:
if end < segment_start_time:
# Then this action is past, not relevant here
# should only happen for the pos-1 action being added
continue
if start > segment_end_time:
# This action starts after the segment, so leave this
continue
# should not look at anything beyond the segment end time
end = min(end, segment_end_time)
if start > segment_start_time:
# Add an empty slot of action, for the time where we don't know
# what happened. Setting the action itself to be -1, so the
# model can predict whatever and it won't be penalized
new_segments, duration_used = break_segments_by_duration(
start - segment_start_time, dummy_label, fixed_duration)
segments += new_segments
segment_start_time += duration_used
new_segments, duration_used = break_segments_by_duration(
end - segment_start_time, label, fixed_duration)
segments += new_segments
segment_start_time += duration_used
if fixed_duration is None:
assert segment_start_time == end
if pred_steps > 0 and len(segments) >= pred_steps:
break
if pred_steps > 0:
segments = segments[:pred_steps]
# Pad it with dummy intervals for batching, if lower
if not isinstance(dummy_label, list):
dummy_label = [dummy_label]
segments += [[-1] + dummy_label] * (pred_steps - len(segments))
return segments
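# Illustrative trace (hypothetical labels): dense_labels=[(0.0, 2.0, 3),
# (3.0, 5.0, 7)] over the window [1.0, 4.0] with fixed_duration=None and
# pred_steps=-1 yields [[1.0, 3], [1.0, -1], [1.0, 7]]: 1s of label 3,
# a 1s dummy gap, then 1s of label 7.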
def get_abs_path(root_dirs: Sequence[Path], fpath: Path):
"""
Combine the fpath with the first root_dir it exists in.
"""
res_fpath = None
for root_dir in root_dirs:
res_fpath = root_dir / fpath
if res_fpath.exists():
return res_fpath
logging.warning('Did not find any directory for %s [from %s]', fpath,
root_dirs)
return res_fpath # return the last one for now
def read_saved_results_uids(resfpath: Path):
if not resfpath.exists():
return set([])
with h5py.File(resfpath, 'r') as fin:
res = fin['uid'][()].tolist()
# For fast lookup when filtering (makes big difference)
return set([el.decode() for el in res])
def dense_clip_sampler(df: pd.DataFrame,
root_dir: Sequence[Path],
clip_len: Union[float, str] = 'mean_action_len',
stride: float = 1.0,
shard_per_worker: bool = False,
keep_orig_clips: bool = True,
featext_skip_done: bool = False):
"""
Add clips to the data frame sampling the videos densely from the video.
This function is also compatible with the convert_to_anticipation_fn
to extract features etc. The class label for those clips
is -1; it's mostly just used for SSL/feat ext.
Args:
stride (float): stride in seconds on how the clips are sampled.
shard_per_worker (bool): If true, create subset DF for this process
featext_skip_done (bool): Set this to true only when extracting
features. This will go through saved results files and check
what features have been stored and skip those from populating
into the dataset to the computed, hence continuing from what
has already been done.
"""
uniq_videos = sorted(list(df.video_path.unique()))
if shard_per_worker:
world_size = get_world_size()
rank = get_rank()
vids_per_shard = int(math.ceil(len(uniq_videos) / world_size))
uniq_videos = uniq_videos[(vids_per_shard * rank):min((
(rank + 1) * vids_per_shard), len(uniq_videos))]
skip_uids = []
if featext_skip_done:
# TODO replace with RESULTS_SAVE_DIR
skip_uids = read_saved_results_uids(Path(f'./results/{get_rank()}.h5'))
logging.info('Found %d done UIDs, skipping those', len(skip_uids))
if clip_len == 'mean_action_len':
clip_len = np.mean(df.end - df.start)
new_rows = []
total_possible_clips = 0
for vid_path in uniq_videos:
end_s = get_video_info(get_abs_path(root_dir, vid_path),
['len'])['len']
new_ends = np.arange(0, end_s, stride)
for new_end in new_ends:
total_possible_clips += 1
uid = f'{vid_path.stem}_{new_end}'
if uid in skip_uids:
continue
new_rows.append({
'participant_id': vid_path.stem.split('_')[0],
'narration': '',
'video_id': vid_path.stem,
'start': new_end - clip_len,
'end': new_end,
'verb_class': -1,
'noun_class': -1,
'action_class': -1,
'video_path': vid_path,
'uid': uid,
})
logging.info('Out of %d total potential clips, kept %d',
total_possible_clips, len(new_rows))
new_df = pd.DataFrame(new_rows)
if keep_orig_clips:
# Convert the uid to str since the new UIDs being added to the new DF
# are all strings
df.uid = df.uid.astype('str')
new_df = pd.concat([df, new_df])
new_df.reset_index(drop=True, inplace=True)
return new_df, pd.DataFrame([])
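# Illustrative: for a 100s video with stride=1.0 and clip_len=2.5, this adds
# clips ending at t = 0, 1, ..., 99 (each spanning [t - 2.5, t]) with all
# class labels set to -1; with keep_orig_clips=True the original labeled
# clips are kept alongside them.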
class BaseVideoDataset(torch.utils.data.Dataset):
"""Basic video dataset."""
def __init__(
self,
df,
root: Union[Sequence[Path], Path] = Path(''),
frames_per_clip: int = 32,
frame_rate: float = None,
subclips_options: Dict[str, float] = None,
load_seg_labels: bool = False,
load_long_term_future_labels: int = 0,
reader_fn: TargetConf = {
'_target_': 'datasets.reader_fns.DefaultReader'
},
transform: torchvision.transforms.Compose = None,
# verb, noun, action
label_type: Union[str, Sequence[str]] = 'verb',
return_future_clips_too: bool = False,
sample_strategy: str = SAMPLE_STRAT_RAND,
sample_strategy_future: str = SAMPLE_STRAT_FIRST,
conv_to_anticipate_fn: TargetConf = None,
conv_to_anticipate_fn_runtime: TargetConf = None,
process_df_before_read_fn: TargetConf = None,
sample_clips_densely: bool = False,
sample_clips_densely_fn: TargetConf = None,
random_seed: int = 42,
verb_classes: dict = {},
noun_classes: dict = {},
action_classes: dict = {},
repeat_data_times: float = 1.0,
dummy_label: Union[list, int] = -1,
class_balanced_sampling: bool = False,
return_unsampled_video: bool = False,
uid_subset: list = None):
"""
Args:
df: DataFrame of all the data (see a subclass for example/fmt).
Must be passed in through super() when init-ing the subclass
root: The path where all the videos are stored, will be
prepended to video path.
load_seg_labels: Set to true to load frame level segmentation
labels that can be jointly used to finetune the model for
classification as well.
load_long_term_future_labels: Set to the number of future labels
to also return, from where load_seg_labels stops. This is
used for long-term rollout visualization and getting GT for
those.
transform: The video transform function
return_future_clips_too: Set to true to also return future, actual
action clips along with the tau_o clips. This is used for SSL.
sample_strategy_future: Sampling strategy used to return future
clips, if return_future_clips_too is set.
conv_to_anticipate_fn: The function that converts to anticipation.
conv_to_anticipate_fn_runtime: A similar fn as ^, but applied in
the getitem function. Useful if you don't want to do the
conversion upfront, e.g. for large datasets like HowTo.
sample_clips_densely: Add clips to the data frame sampling the
videos densely between the first and the last labeled clip.
The class label for those clips is -1; it's mostly just
used for SSL.
sample_clips_densely_fn: If this function is set, then no need
to set the sample_clip_densely to true. It will use this fn
to densify.
process_df_before_read_fn: A function that is applied to the
data frame[idx] before it's used for reading the video etc.
repeat_data_times: Set to the number of times to repeat the data
in the DF. This is used if the epoch is too small, so we can
roll through the data more than once during a single epoch.
Also helps if the preprocessing at read time effectively means
each data item corresponds to > 1 data items, e.g. through
random cropping.
class_balanced_sampling: If true, sample from the data such that
each class appears approximately equally -- i.e., using the
distribution of labels, it will try to enforce uniformity.
This is independent of adding loss weights based on how
often a class appears, which is done in train_eval_ops.
return_unsampled_video (bool): If true, return the video clip
before it was sub-sampled to match the FPS requirements.
So if experimenting at 1FPS, this will also return the
original frame rate clip that could be used for visualization.
MUST use batch size = 1 if using this, since it will return
different length videos which won't be batch-able.
uid_subset: Make a dataset keeping only those UIDs. This is useful
for visualization code when I just want to visualize on
specific clips.
"""
super().__init__()
# Based on https://github.com/pytorch/pytorch/issues/13246#issuecomment-612396143,
# trying to avoid mem leaks by wrapping lists and dicts in this
# manager class objects
manager = Manager()
self.root = root
# Convert to list if not already
if OmegaConf.get_type(self.root) != list:
self.root = [self.root]
self.root = [Path(el) for el in self.root]
self.subclips_options = subclips_options
self.load_seg_labels = load_seg_labels
self.load_long_term_future_labels = load_long_term_future_labels
# TODO: Move away from DataFrames... based on
# https://github.com/pytorch/pytorch/issues/5902#issuecomment-374611523
# it seems data frames are not ideal and cause memory leaks...
self.df = df # Data frame that will contain all info
# To be consistent with EPIC, add a uid column if not already present
if 'uid' not in self.df.columns:
self.df.loc[:, 'uid'] = range(1, len(self.df) + 1)
if sample_clips_densely or sample_clips_densely_fn:
if sample_clips_densely_fn is None:
# Use the default parameters. Keeping this sample_clips_densely
# param to be backward compatible.
sample_clips_densely_fn = {
'_target_':
'datasets.base_video_dataset.dense_clip_sampler',
}
self.df, _ = hydra.utils.call(sample_clips_densely_fn, self.df,
self.root)
assert not (conv_to_anticipate_fn and conv_to_anticipate_fn_runtime), (
'At max only one of these should be set.')
self.conv_to_anticipate_fn = conv_to_anticipate_fn
self.discarded_df = None
if conv_to_anticipate_fn is not None:
self.df, self.discarded_df = hydra.utils.call(
conv_to_anticipate_fn, self.df, self.root)
logging.info('Discarded %d elements in anticipate conversion',
len(self.discarded_df))
# this is an alternate implementation of ^, run in getitem,
# useful for large datasets like HowTo, but won't work for
# any dataset where you want to run testing
self.conv_to_anticipate_fn_runtime = conv_to_anticipate_fn_runtime
# This is used in the output files for EPIC submissions
self.challenge_type = 'action_recognition'
if conv_to_anticipate_fn or conv_to_anticipate_fn_runtime:
# If either of these are set, this must be an anticipation setup
self.challenge_type = 'action_anticipation'
self.repeat_data_times = repeat_data_times
self.process_df_before_read_fn = process_df_before_read_fn
self.frames_per_clip = frames_per_clip
self.frame_rate = frame_rate
self.reader_fn = hydra.utils.instantiate(reader_fn)
self.transform = transform
self.label_type = label_type
if OmegaConf.get_type(self.label_type) != list:
# Will use the first one for the balancing etc
self.label_type = [self.label_type]
self.verb_classes = manager.dict(verb_classes)
self.noun_classes = manager.dict(noun_classes)
self.action_classes = manager.dict(action_classes)
self.return_future_clips_too = return_future_clips_too
self.sample_strategy = sample_strategy
self.sample_strategy_future = sample_strategy_future
self.random_seed = random_seed
self.rng = np.random.default_rng(self.random_seed)
self.dummy_label = dummy_label
if isinstance(self.dummy_label, list):
self.dummy_label = manager.list(self.dummy_label)
# Precompute some commonly useful stats
self.classes_counts = manager.dict(self._compute_stats_cls_counts())
self.class_balanced_sampling = class_balanced_sampling
if self.class_balanced_sampling:
# sort the data frame by labels, to allow for the runtime
# remapping of idx
assert len(self.label_type) == 1, 'Not supported more yet'
self.df.sort_values(by=self.label_type[0] + '_class', inplace=True)
self.return_unsampled_video = return_unsampled_video
if self.return_unsampled_video:
logging.warning('Make sure using batch size = 1 since '
'return_unsampled_videos is set to True.')
# store the full DF so far in df_before_subset, since I will now keep a
# subset that may be used for testing etc. df_before_subset will be
# used to get intermediate labels for L_cls etc still (even during
# visualizations sometimes I want to show that)
self.df_before_subset = self.df
if uid_subset is not None:
# Select a subset in the order of the list
self.df = self.df.iloc[pd.Index(
self.df.uid).get_indexer(uid_subset)].reset_index(drop=True)
def _compute_stats_cls_counts(self):
"""
Compute some stats that are useful, like ratio of classes etc.
"""
all_classes_counts = {}
for tname, tclasses in self.classes.items():
col_name = tname + '_class'
if col_name not in self.df:
logging.warning('Did not find %s column in %s', col_name,
self.df)
continue
lbls = np.array(self.df.loc[:, col_name].values)
# not removing the -1 labels, it's a dict so keep all of them.
classes_counts = {
cls_id: np.sum(lbls == cls_id)
for _, cls_id in [('', -1)] + list(tclasses.items())
}
assert sum(classes_counts.values()) == len(self.df)
all_classes_counts[tname] = classes_counts
logging.debug('Found %s classes counts', all_classes_counts)
return all_classes_counts
@property
def classes(self) -> OrderedDict:
return OrderedDict([(tname,
operator.attrgetter(tname + '_classes')(self))
for tname in self.label_type])
@property
def classes_manyshot(self) -> OrderedDict:
"""This is subset of classes that are labeled as "many shot".
These were used in EPIC-55 for computing recall numbers. By default
using all the classes.
"""
return self.classes
@property
def class_mappings(self) -> Dict[Tuple[str, str], torch.FloatTensor]:
return {}
@property
def primary_metric(self) -> str:
"""
The primary metric for this dataset. Datasets should override this
if top1 is not the metric to be used. This is the key to the dictionary
in the func/train.py when accuracies are computed. Some of these come
from the notebook utils.
"""
return 'final_acc/action/top1'
def _get_text(self, df_row, df_key='narration'):
if df_key in df_row:
text = df_row[df_key]
else:
text = ''
return text
def _get_label_from_df_row(self, df_row, tname):
col_name = tname + '_class'
if col_name not in df_row:
lbl = self.dummy_label
else:
lbl = df_row[col_name]
return lbl
def _get_labels(self, df_row) -> OrderedDict:
labels = OrderedDict()
for tname in self.label_type:
labels[tname] = self._get_label_from_df_row(df_row, tname)
return labels
@classmethod
def _sample(cls, video_path: Path, fps: float, start: float, end: float,
df_row: pd.DataFrame, frames_per_clip: int, frame_rate: float,
sample_strategy: str, reader_fn: nn.Module,
rng: np.random.Generator):
"""
Need this since VideoClip/RandomSampler etc are not quite compatible
with this dataset. So recreating that here. Gets the full clip and
crops out a fixed size region.
Args:
video_path: The path to read the video from
fps: What this video's natural FPS is.
start, end: floats of the start and end point in seconds
Returns:
video between start', end'; info of the video
"""
start = max(start, 0) # No way can read negative time anyway
end = max(end, 0) # No way can read negative time anyway
if fps <= 0:
logging.error('Found %f FPS video => likely empty [%s].', fps,
video_path)
fps = frame_rate # So code works, will anyway return black frames
req_fps = frame_rate
if req_fps is None:
req_fps = fps
nframes = int(fps * (end - start))
frames_to_ext = int(round(frames_per_clip * (fps / req_fps)))
# Find a point in the video and crop out
if sample_strategy == SAMPLE_STRAT_RAND:
start_frame = max(nframes - frames_to_ext, 0)
if start_frame > 0:
start_frame = rng.integers(start_frame)
elif sample_strategy == SAMPLE_STRAT_CNTR:
start_frame = max((nframes - frames_to_ext) // 2, 0)
elif sample_strategy == SAMPLE_STRAT_LAST:
start_frame = max(nframes - frames_to_ext, 0)
elif sample_strategy == SAMPLE_STRAT_FIRST:
start_frame = 0
else:
raise NotImplementedError(f'Unknown {sample_strategy}')
new_start = start + max(start_frame / fps, 0)
new_end = start + max((start_frame + frames_to_ext) / fps, 0)
# Do not bleed out.. since this function could be used for anticipation
# as well
new_end = max(min(end, new_end), 0)
# Start from the beginning of the video in case anticipation made it
# go even further back
new_start = min(max(new_start, 0), new_end)
args = [str(video_path), new_start, new_end, fps, df_row]
kwargs = dict(pts_unit='sec')
outputs = reader_fn(*args, **kwargs)
video, _, info = outputs
if new_start >= new_end:
video_frame_sec = new_start * torch.ones((video.size(0), ))
else:
video_frame_sec = torch.linspace(new_start, new_end, video.size(0))
assert video_frame_sec.size(0) == video.size(0)
# Subsample the video to the req_fps
if sample_strategy == SAMPLE_STRAT_LAST:
# From the back
frames_to_keep = range(
len(video))[::-max(int(round(fps / req_fps)), 1)][::-1]
else:
# Otherwise this is fine
frames_to_keep = range(len(video))[::max(int(round(fps /
req_fps)), 1)]
# Convert video to the required fps
video_without_fps_subsample = video
video = video[frames_to_keep]
video_frame_sec = video_frame_sec[frames_to_keep]
sampled_frames = torch.LongTensor(frames_to_keep)
info['video_fps'] = req_fps
# Ideally could have done the following operations only on the
# frames_to_keep and done the above slice after, but to avoid bugs
# and ensuring reproducibility (since earlier it was done separately),
# just doing on all separately
# Pad the video with the last frame, or crop out the extra frames
# so that it is consistent with the frames_per_clip
vid_t = video.size(0)
if video.ndim != 4 or (video.size(0) * video.size(1) * video.size(2) *
video.size(3)) == 0:
# Empty clip if any of the dims are 0, corrupted file likely
logging.warning('Generating empty clip...')
video = torch.zeros((frames_per_clip, 100, 100, 3),
dtype=torch.uint8)
video_frame_sec = -torch.ones((frames_per_clip, ))
sampled_frames = torch.arange(0, frames_per_clip, dtype=torch.int64)
elif vid_t < frames_per_clip:
# # Repeat the video
# video_reqfps = torch.cat([video_reqfps] *
# int(math.ceil(frames_per_clip / vid_t)),
# dim=0)
# Pad the last frame..
if sample_strategy == SAMPLE_STRAT_LAST:
# Repeat the first frame
def padding_fn(T, npad):
return torch.cat([T[:1]] * npad + [T], dim=0)
else:
# Repeat the last frame
def padding_fn(T, npad):
return torch.cat([T] + [T[-1:]] * npad, dim=0)
npad = frames_per_clip - vid_t
logging.debug('Too few frames read, padding with %d frames', npad)
video = padding_fn(video, npad)
video_frame_sec = padding_fn(video_frame_sec, npad)
sampled_frames = padding_fn(sampled_frames, npad)
if sample_strategy == SAMPLE_STRAT_LAST:
video = video[-frames_per_clip:]
video_frame_sec = video_frame_sec[-frames_per_clip:]
sampled_frames = sampled_frames[-frames_per_clip:]
else:
video = video[:frames_per_clip]
video_frame_sec = video_frame_sec[:frames_per_clip]
sampled_frames = sampled_frames[:frames_per_clip]
# TODO(rgirdhar): Resample the audio in the same way too..
return (video, video_frame_sec, video_without_fps_subsample,
sampled_frames, info)
def _get_video(self, df_row):
# While we only need the absolute path for certain reader_fns, worth
# doing it for all since some might still need it to read fps etc.
video_path = get_abs_path(self.root, df_row['video_path'])
fps = self.reader_fn.get_frame_rate(video_path)
video_dict = {}
(video, video_frame_sec, video_without_fps_subsample,
frames_subsampled,
info) = self._sample(video_path, fps, df_row['start'], df_row['end'],
df_row, self.frames_per_clip, self.frame_rate,
self.sample_strategy, self.reader_fn, self.rng)
if 'audio_fps' not in info:
# somehow this is missing in some elements.. it causes issues with
# batching... anyway we're not using it, so this default is fine
info['audio_fps'] = 0
# Assuming no temporal transformation is done here (except moving the
# dimension around), so no need to change the video_frame_sec
video = self._apply_vid_transform(video)
video_dict['video'] = video
if self.return_unsampled_video:
video_without_fps_subsample = self._apply_vid_transform(
video_without_fps_subsample)
video_dict[
'video_without_fps_subsample'] = video_without_fps_subsample
video_dict['video_frames_subsampled'] = frames_subsampled
# Using video.size(-3) since at test there is a #crops dimension too
# in the front, so from back it will always work
assert video_frame_sec.size(0) == video.size(-3), (
'nothing should have changed temporally')
video_dict['video_frame_sec'] = video_frame_sec
video_dict['video_info'] = info
if self.return_future_clips_too:
assert 'orig_start' in df_row, 'Has to be anticipation data'
nfutures = len([
el for el in df_row.keys() if el.startswith(FUTURE_PREFIX)
]) // 2 # Since start and end for each
for future_id in range(nfutures):
video_future, _, _, _, _ = self._sample(
video_path, fps,
df_row[f'{FUTURE_PREFIX}_{future_id}_start'],
df_row[f'{FUTURE_PREFIX}_{future_id}_end'], df_row,
self.frames_per_clip, self.frame_rate,
self.sample_strategy_future, self.reader_fn, self.rng)
video_future = self._apply_vid_transform(video_future)
video_dict[f'{FUTURE_PREFIX}_{future_id}_video'] = video_future
video_dict['start'] = df_row['start']
video_dict['end'] = df_row['end']
return video_dict
def _get_subclips(self, video: torch.Tensor, num_frames: int, stride: int):
"""
Args:
video (C, T, *): The original read video
num_frames: Number of frames in each clip
stride: stride to use when getting clips
Returns:
video (num_subclips, C, num_frames, *)
"""
total_time = video.size(1)
subclips = []
for i in range(0, total_time, stride):
subclips.append(video[:, i:i + num_frames, ...])
return torch.stack(subclips)
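# Illustrative: a video of shape (3, 64, 224, 224) with num_frames=16 and
# stride=16 becomes (4, 3, 16, 224, 224), i.e. four non-overlapping 16-frame
# subclips. Note that if stride does not evenly tile T, the last slice is
# shorter and torch.stack would raise, so T is expected to tile evenly.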
def _get_vidseg_labels(self, df_row, video_frame_sec: torch.Tensor):
"""
Args:
video_frame_sec (#clips, T): The time point each frame in the video
comes from.
"""
this_video_df = self.df_before_subset[self.df_before_subset.video_path
== df_row.video_path]
assert video_frame_sec.ndim == 2
labels = OrderedDict()
for tname in self.label_type:
labels[tname] = -torch.ones_like(video_frame_sec, dtype=torch.long)
for clip_id in range(video_frame_sec.size(0)):
for t in range(video_frame_sec[clip_id].size(0)):
cur_t = video_frame_sec[clip_id][t].tolist()
matching_rows = this_video_df[
(this_video_df.orig_start <= cur_t)
& (this_video_df.orig_end >= cur_t)]
if len(matching_rows) == 0:
continue # Nothing labeled at this point
elif len(matching_rows) > 1:
# logging.warning(
# 'Found multiple labels for a given time. '
# 'Should not happen.. overlapping labels. '
# '%f %s %s', t, df_row, matching_rows)
# Apparently ^ happens often in epic100, so lets take the
# label closest to the center
closest_row = np.argmin(
np.abs(cur_t - np.array((
(matching_rows.orig_start +
matching_rows.orig_end) / 2.0).tolist())))
matching_row = matching_rows.iloc[closest_row]
else:
matching_row = matching_rows.iloc[0]
for tname in self.label_type:
labels[tname][clip_id][t] = self._get_label_from_df_row(
matching_row, tname)
return labels
def _apply_vid_transform(self, video):
# Only apply the transform to normal videos, not if features are
# being read
if video.nelement() == 0: # Placeholder
return video
if self.transform:
assert video.ndim == 4
if video.size(1) > 1 and video.size(2) > 1:
# Normal video with spatial dimension
video = self.transform(video)
else:
# Make sure the video is in the right permutation as expected
# Esp important when video is the RULSTM features
# TxHxWxC -> CxTxHxW
# No other transformation to be applied in this case
video = video.permute(3, 0, 1, 2)
return video
def addl_df_proc_for_dense(self, df_row):
"""
This function allows processing the DF row after it is passed through
the `process_df_before_read_fn` function, so it's like 2 layers of
processing. This is a function that a specific dataset can override.
Used by HowTo100M to convert narrations to classes
"""
return df_row
def __getitem__(self, idx):
idx = self._class_balance_data_idx(idx) # Must be run before repeat
idx = self._repeat_process_idx(idx)
df_row = self.df.loc[idx, :]
if self.conv_to_anticipate_fn_runtime is not None:
df_row = hydra.utils.call(self.conv_to_anticipate_fn_runtime,
df_row, self.df, self.root,
self.addl_df_proc_for_dense)
if df_row is None:
return None
if self.process_df_before_read_fn is not None:
df_row = hydra.utils.call(self.process_df_before_read_fn, df_row,
self.root, self.rng, self.label_type,
self.frames_per_clip, self.frame_rate,
self.sample_strategy, self.dummy_label)
if df_row is None:
return None
video_dict = self._get_video(df_row)
video = video_dict['video']
orig_video_shape = video.shape
if len(orig_video_shape) == 5:
# #ncrops, C, T, H, W -- flatten first 2 dims for subclips
video = video.flatten(0, 1)
# #ncrops * C, T, H, W -> #clips, #ncrops * C, T', H, W
video = self._get_subclips(video, **self.subclips_options)
if len(orig_video_shape) == 5:
# unflatten back
video = video.reshape((video.size(0), ) + orig_video_shape[:2] +
video.shape[-3:])
video_dict['video'] = video
video_dict['video_frame_sec'] = self._get_subclips(
video_dict['video_frame_sec'].unsqueeze(0),
# squeeze(1) because the 0th dim now will be the clips
**self.subclips_options).squeeze(1)
sentence = self._get_text(df_row) # Not used at the moment
label_idx = self._get_labels(df_row)
video_dict.update({
'idx':
idx,
'text':
sentence,
'target':
label_idx,
'audio': [], # TODO?
'orig_vid_len':
df_row.video_len if 'video_len' in df_row else -1,
'uid':
df_row.uid,
})
if self.load_seg_labels:
video_dict.update({
'target_subclips':
self._get_vidseg_labels(df_row, video_dict['video_frame_sec'])
})
if self.load_long_term_future_labels > 0:
# This is only really used for visualization for now
last_frame = video_dict['video_frame_sec'][-1].item()
gap_in_frames = (video_dict['video_frame_sec'][-1].item() -
video_dict['video_frame_sec'][-2].item())
video_dict.update({
'future_subclips':
self._get_vidseg_labels(
df_row,
torch.FloatTensor([
last_frame + gap_in_frames * i
for i in range(1, self.load_long_term_future_labels +
1)
]).reshape(-1, 1))
})
return video_dict
def _repeat_process_idx(self, idx):
"""
Depending on repeat_data_times, convert to the idx to actual idx.
"""
total_len = len(self.df)
scaled_idx = idx / self.repeat_data_times
if self.repeat_data_times < 1:
# Add some jitter, since it is being mapped to a bigger space
scaled_idx += self.rng.integers(int(1 / self.repeat_data_times))
scaled_idx = int(scaled_idx)
scaled_idx %= total_len
return scaled_idx
def _class_balance_data_idx(self, idx):
"""
If asked for balanced sampling based on labels, remap the idx to try to
follow a uniform distribution over the dataset, based on classes.
This must be run before repeating the df etc, since it assumes values
based on self.df (not repeated versions) (can be done, but this is
how it's currently implemented).
"""
if not self.class_balanced_sampling:
return idx
classes_counts = OrderedDict(self.classes_counts)
# if there is > 0 elements with -1, then keep it, else remove it
if classes_counts[-1] == 0:
del classes_counts[-1]
# By equal distribution, the idx should land in this class
# Counts sorted in the same way class IDs are sorted in the DF
cls_counts = [classes_counts[i] for i in sorted(classes_counts.keys())]
cls_cumsum = np.cumsum(cls_counts).tolist()
cls_firstelt = [0] + cls_cumsum[:-1]
share_per_class = max(cls_counts)
# effective idx, given that we would have replicated each class to have
# same number of elements
new_total_len = len(cls_counts) * share_per_class
old_total_len = sum(cls_counts)
# inflation_per_idx = (new_total_len - old_total_len) // len(old_total_len)
# Any random position in the scaled up indexing space
# eff_idx = (int(idx * (new_total_len / old_total_len)) +
# self.rng.integers(inflation_per_idx))
eff_idx = int(round(idx * ((new_total_len - 1) / (old_total_len - 1))))
assert eff_idx <= new_total_len
cls_idx = eff_idx // share_per_class
new_idx = self.rng.integers(cls_firstelt[cls_idx], cls_cumsum[cls_idx])
# Make sure it doesn't go over
new_idx = new_idx % len(self.df)
return new_idx
def __len__(self):
return int(len(self.df) * self.repeat_data_times)
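# A minimal, self-contained sketch (not part of the original file) of the
# subclip extraction `_get_subclips` performs: slide a window of
# `num_frames` over the time axis with a given `stride`, and stack the
# windows into a new leading dimension. Toy sizes; unlike the method
# above, the range here is clipped so every window is full length.
def _demo_subclip_extraction():
    import torch
    video = torch.arange(2 * 8).reshape(2, 8)  # toy (C=2, T=8) "video"
    num_frames, stride = 4, 4
    subclips = [
        video[:, i:i + num_frames]
        for i in range(0, video.size(1) - num_frames + 1, stride)
    ]
    stacked = torch.stack(subclips)  # (num_subclips=2, C=2, num_frames=4)
    assert stacked.shape == (2, 2, 4)
    return stacked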
|
AVT-main
|
datasets/base_video_dataset.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""Implementation of reader functions."""
import logging
from pathlib import Path
import torch
import torch.nn as nn
import torchvision
from common.utils import get_video_info
# An abstract class to keep track of all reader type classes
class Reader(nn.Module):
pass
class DefaultReader(Reader):
def forward(self, video_path, start, end, fps, df_row, **kwargs):
del df_row, fps # Not needed here
video_info = torchvision.io.read_video(video_path, start, end,
**kwargs)
# DEBUG see what is breaking
logging.debug('Read %s from %s', video_info[0].shape, video_path)
return video_info
@staticmethod
def get_frame_rate(video_path: Path) -> float:
return get_video_info(video_path, ['fps'])['fps']
class VideoAsLabelOnehotReader(Reader):
@staticmethod
def get_frame_rate(video_path: Path) -> float:
raise NotImplementedError('Not sure what it is here... TODO')
def forward(self,
video_path,
start,
end,
fps,
df_row,
pts_unit='sec',
num_classes=1000):
"""
Return the video as a 1-hot representation of the actual labels.
Args:
video_path
start: start time in sec
end: end time in sec
fps: frame rate of this video
df_row: The data frame row corresponding to this video. Includes
labels
num_classes: Total number of classes for the 1-hot representation.
Could just be a large number, should work too.
Returns:
video_feature of shape T x 1 x 1 x num_classes
"""
del pts_unit, video_path, start, fps
assert abs(end -
df_row['end']) < 0.1, 'For now just supporting last_clip'
labels = df_row['obs_action_class'][:, 1]
# Convert to 1-hot, TxC shape
feats = nn.functional.one_hot(torch.LongTensor(labels), num_classes)
return feats.unsqueeze(1).unsqueeze(1).float(), {}, {}
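# A minimal sketch (illustrative only, toy values) of the 1-hot "video"
# that VideoAsLabelOnehotReader returns: per-frame class labels become a
# T x 1 x 1 x num_classes float tensor shaped like a video.
def _demo_onehot_video(num_classes=10):
    import torch
    import torch.nn as nn
    labels = torch.LongTensor([1, 3, 3, 7])  # hypothetical per-frame labels
    feats = nn.functional.one_hot(labels, num_classes)
    feats = feats.unsqueeze(1).unsqueeze(1).float()
    assert feats.shape == (4, 1, 1, num_classes)
    return feats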
|
AVT-main
|
datasets/reader_fns.py
|
AVT-main
|
datasets/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""The Breakfast/50Salads dataset loader.
"""
from pathlib import Path
import logging
import pandas as pd
from tqdm import tqdm
import gzip
import numpy as np
import torch
import torch.nn as nn
import hydra
from hydra.types import TargetConf
from common.utils import get_video_info
from .base_video_dataset import BaseVideoDataset
from .reader_fns import Reader, DefaultReader
def load_mappings_file(fpath: Path) -> list:
"""
Read the mappings file shared by Abu Farha (CVPR'18) to get the class
names.
"""
res = []
with open(fpath, 'r') as fin:
for line in fin:
res.append(line.rpartition(' ')[-1].strip())
# convert to dict
return dict(zip(res, range(len(res))))
def bundle_entry_to_video_fname_50salads(bundle_entry, root):
del root # not needed here
# remove the "rgb-"
video_id = bundle_entry.strip()[len('rgb-'):-(len('.txt'))]
video_fname = f'rgb-{video_id}.avi'
annot_fname = f'{video_id}-activityAnnotation.txt'
return video_fname, annot_fname
def read_orig_50salads_annotations(videos: list, root: Path,
action_classes: dict, annots_dir: Path,
timestamps_dir: Path):
all_segments = []
for video in videos:
video_fname, annot_fname = bundle_entry_to_video_fname_50salads(
video.strip(), root)
video_id = video.strip()[len('rgb-'):-(len('.txt'))]
ts_fpath = f'timestamps-{video_id}.txt'
frame_rate = get_video_info(Path(root) / video_fname, ['fps'])['fps']
frame_ts = []
# Read the "timestamp" of each frame
with open(Path(timestamps_dir) / ts_fpath, 'r') as fin:
for line in fin:
frame_ts.append(int(line.partition(' ')[0]))
first_start = len(frame_ts)
last_end = 0
with open(Path(annots_dir) / annot_fname, 'r') as fin:
for line in fin:
start_ts, end_ts, activity = line.split(' ')
act_pre, _, act_post = activity.strip().rpartition('_')
if act_post not in ['prep', 'core', 'post']:
# This is a coarse grained label, so ignore it
continue
label = action_classes[act_pre]
start = frame_ts.index(int(start_ts)) / frame_rate
first_start = min(first_start, start)
end = frame_ts.index(int(end_ts) + 1) / frame_rate
last_end = max(last_end, end)
all_segments.append((video_fname, start, end, label))
return all_segments
def bundle_entry_to_video_fname_breakfast(bundle_entry, root):
# Parse person/camera/topic out of the bundle entry name
person, camera, _, topic = bundle_entry.strip()[:-len('.txt')].split('_')
channels = ['']
if camera.startswith('stereo'):
channels = ['_ch0', '_ch1'] # ch0 is not always available
camera = 'stereo'
video_fname = f'{person}/{camera}/{person}_{topic}{{channel}}.avi'
annot_fname = f'{video_fname}.labels'
# Try both, if defined
for channel in channels:
if (Path(root) / annot_fname.format(channel=channel)).exists():
video_fname = video_fname.format(channel=channel)
annot_fname = annot_fname.format(channel=channel)
break
return video_fname, annot_fname
def read_orig_breakfast_annotations(videos: list, root: Path,
action_classes: dict):
all_segments = []
for video in videos:
video_fname, annot_fname = bundle_entry_to_video_fname_breakfast(
video.strip(), root)
# All videos are 15fps as says here:
# https://serre-lab.clps.brown.edu/resource/breakfast-actions-dataset/
# though can read out from the video if needed..
video_fps = 15
with open(Path(root) / annot_fname, 'r') as fin:
lines = [el.strip() for el in fin.readlines()]
# No longer removing SIL -- based on email conversation
# with Fadime, they keep everything, and drop any action segments
# they don't have any context for prediction; so the SIL from the
# beginning will be removed anyway.
# # Ignore the lead and end SIL (silence) segments as per
# # Sec 5.4 https://serre-lab.clps.brown.edu/wp-content/uploads/2014/05/paper_cameraReady-2.pdf (Unit recognition)
# if lines[0].endswith('SIL'):
# lines = lines[1:]
# if lines[-1].endswith('SIL'):
# lines = lines[:-1]
for line in lines:
start_end, activity = line.split(' ')
start, end = start_end.split('-')
if activity in action_classes:
label = action_classes[activity]
else:
logging.warning(
'Did not find %s. Ignoring... Ideally '
'should merge with the next action, or '
'just use abu_farha annotations which '
'already does that.', activity)
continue
start = int(start) / video_fps
end = int(end) / video_fps
all_segments.append((video_fname, start, end, label))
return all_segments
def read_abu_farha_annotations(videos: list,
root: Path,
action_classes: dict,
annots_dir: Path,
bundle_entry_to_vname_fn: TargetConf,
frame_rate: int = None):
all_segments = []
for video in tqdm(videos, desc='Loading Abu Farha annots'):
video_fname, _ = hydra.utils.call(bundle_entry_to_vname_fn,
video.strip(), root)
if frame_rate is None:
frame_rate = get_video_info(Path(root) / video_fname,
['fps'])['fps']
with open(Path(annots_dir) / video.strip(), 'r') as fin:
cur_action = '' # dummy, will fire to insert action first
for lno, line in enumerate(fin):
if line == cur_action:
# Update the end time
# Using lno + 1 to avoid any gaps between the clips, which
# would lead to clips labeled -1 (unlabeled), making it
# harder for the model to learn
# Update the last added segment's end time point to this
# frame
all_segments[-1][-2] = (lno + 1) / frame_rate
continue
# Else a new action is starting, add to the segments
cur_action = line
label = action_classes[cur_action.strip()]
all_segments.append([
video,
video_fname,
lno / frame_rate, # start
(lno + 1) / frame_rate, # end
label,
])
return all_segments
def init_df(bundle_fpath: Path, annot_reader_fn: TargetConf, root: Path,
action_classes: dict):
with open(bundle_fpath, 'r') as fin:
videos = fin.readlines()
# Remove the "#bundle.txt" line from top
assert videos[0].startswith('#')
videos = videos[1:]
all_segments = hydra.utils.call(annot_reader_fn,
videos,
root,
action_classes,
_recursive_=False)
dataframe = pd.DataFrame(all_segments,
columns=[
'video_bundle_name', 'video_path', 'start',
'end', 'action_class'
])
dataframe = dataframe.astype(dtype={
'start': 'float16',
'end': 'float16',
'video_path': 'object',
})
return dataframe
class Breakfast50Salads(BaseVideoDataset):
"""Wrapper for Univ of Dundee 50Salads, or Bonn Breakfast dataset."""
def __init__(
self,
which: str, # specify which of BF or 50S
root: Path,
splits_dir: Path,
classes_fpath: Path,
is_train: bool = True,
fold: int = 1,
annot_reader_fn: TargetConf = None,
**kwargs):
bundle_fpath = (
Path(splits_dir) /
f'{"train" if is_train else "test"}.split{fold}.bundle')
self.which = which
if self.which == '50Salads':
assert 1 <= fold <= 5
elif self.which == 'Breakfast':
assert 1 <= fold <= 4
else:
raise NotImplementedError(f'Unknown type {which}')
action_classes = load_mappings_file(classes_fpath)
dataframe = init_df(bundle_fpath, annot_reader_fn, root,
action_classes)
kwargs['action_classes'] = action_classes
kwargs['label_type'] = 'action'
super().__init__(dataframe, root=root, **kwargs)
class FormatReader(nn.Module):
pass
class GZFormatReader(FormatReader):
def forward(self, path, start_frame, end_frame):
feats = []
with gzip.open(str(path).replace('.txt', '.gz'), 'r') as fin:
for lno, line in enumerate(fin):
if start_frame <= lno <= end_frame:
feats.append(
[float(el) for el in line.strip().split(b' ')])
feats = torch.FloatTensor(feats)
return feats
class NPYFormatReader(FormatReader):
def forward(self, path, start_frame, end_frame):
feats = np.load(str(path).replace('.txt', '.npy'))
start_frame = max(start_frame, 0)
end_frame = min(end_frame, feats.shape[1])
feats_subset = feats[:, start_frame:(end_frame + 1)]
return torch.from_numpy(feats_subset.transpose())
class SenerFeatsReader(Reader):
def __init__(self, feat_dir: Path, format_reader: FormatReader):
super().__init__()
self.feat_dir = Path(feat_dir)
# No need to init the reader again, will be done recursively
self.format_reader = format_reader
def get_frame_rate(self, *args, **kwargs) -> float:
# Use the actual frame rate, since I guess that's what is used in the
# Abu Farha annotations, which is what the features here correspond
# to as well.
return DefaultReader.get_frame_rate(*args, **kwargs)
def forward(self,
video_path: Path,
start_sec: float,
end_sec: float,
fps: float,
df_row: pd.DataFrame,
pts_unit='sec'):
"""
Returns:
feats: (T, 1, 1, C) -- features shaped like a video
"""
del pts_unit, video_path # Not supported here
vidname = df_row['video_bundle_name'].strip()
start_frame = int(round(start_sec * fps - 1))
end_frame = int(round(end_sec * fps - 1))
feats = self.format_reader(self.feat_dir / vidname, start_frame,
end_frame)
return feats.unsqueeze(1).unsqueeze(1), {}, {}
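# A minimal sketch (not part of the original file; labels and frame rate
# are made up) of the run-length grouping `read_abu_farha_annotations`
# performs: consecutive identical per-frame labels are merged into
# (start_sec, end_sec, label) segments.
def _demo_frame_labels_to_segments(frame_labels=('SIL', 'SIL', 'cut', 'cut',
                                                 'cut', 'stir'),
                                   frame_rate=15.0):
    segments = []
    cur = None
    for lno, label in enumerate(frame_labels):
        if cur is not None and label == cur[2]:
            cur[1] = (lno + 1) / frame_rate  # extend current segment's end
        else:
            cur = [lno / frame_rate, (lno + 1) / frame_rate, label]
            segments.append(cur)
    return [tuple(seg) for seg in segments]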
|
AVT-main
|
datasets/breakfast_50salads.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import os
import torch
from importlib import import_module
from tqdm import tqdm
import omegaconf
import hydra
from common import utils
__all__ = [
"get_dataset",
]
def get_dataset(dataset_cfg, data_cfg, transform, logger):
# If there is _precomputed_metadata file passed in, load that in
kwargs = {}
precomp_metadata_fpath = None
if '_precomputed_metadata_file' in dataset_cfg:
precomp_metadata_fpath = dataset_cfg._precomputed_metadata_file
# Remove from the config since otherwise can't init the obj
with omegaconf.open_dict(dataset_cfg):
del dataset_cfg['_precomputed_metadata_file']
if os.path.exists(precomp_metadata_fpath):
_precomputed_metadata = torch.load(precomp_metadata_fpath)
kwargs['_precomputed_metadata'] = _precomputed_metadata
kwargs['transform'] = transform
kwargs['frame_rate'] = data_cfg.frame_rate
kwargs['frames_per_clip'] = data_cfg.num_frames
# Have to call dict() here since relative interpolation somehow doesn't
# work once I get the subclips object
kwargs['subclips_options'] = dict(data_cfg.subclips)
kwargs['load_seg_labels'] = data_cfg.load_seg_labels
logger.info('Creating the dataset object...')
# Not recursive since many of the sub-instantiations would need positional
# arguments
_dataset = hydra.utils.instantiate(dataset_cfg,
_recursive_=False,
**kwargs)
try:
logger.info('Computing clips...')
_dataset.video_clips.compute_clips(data_cfg.num_frames,
1,
frame_rate=data_cfg.frame_rate)
logger.info('Done')
except AttributeError: # if video_clips not in _dataset
logger.warning('No video_clips present')
logger.info(f'Created dataset with {len(_dataset)} elts')
if precomp_metadata_fpath and not os.path.exists(precomp_metadata_fpath):
utils.save_on_master(_dataset.metadata, precomp_metadata_fpath)
return _dataset
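# A minimal sketch (with a hypothetical compute_fn) of the metadata
# caching pattern `get_dataset` relies on: reuse a torch-serialized
# metadata file when it exists, otherwise compute it once and save it
# for later runs.
def _demo_metadata_cache(fpath, compute_fn):
    import os
    import torch
    if os.path.exists(fpath):
        return torch.load(fpath)
    metadata = compute_fn()
    torch.save(metadata, fpath)
    return metadata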
|
AVT-main
|
datasets/data.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Implementation of the future features prediction models.
Input: (B, C)
Output: (B, C)
"""
import torch
import torch.nn as nn
import transformers
import logging
import hydra
from common.cluster import KmeansAssigner
class Identity(nn.Module):
"""Wrapper around the Identity fn to drop target_shape etc."""
def __init__(self, in_features):
super().__init__()
self.in_features = in_features
def forward(self, feats, target_shape=None):
del target_shape # not needed here
return feats, feats, {}, {}
@property
def output_dim(self):
return self.in_features
class MLP(nn.Module):
def __init__(self, in_features, num_layers=2):
super().__init__()
self.in_features = in_features
layers = [[nn.Linear(in_features, in_features),
nn.ReLU(inplace=True)] for _ in range(num_layers)]
# Flatten, remove the last ReLU, and create a sequential
self.model = nn.Sequential(
*([item for sublist in layers for item in sublist][:-1]))
def forward(self, feats, target_shape=None):
del target_shape
return feats, self.model(feats), {}, {}
@property
def output_dim(self):
return self.in_features
class AVTh(nn.Module):
"""AVT head architecture."""
def __init__(
self,
in_features: int,
output_len: int = -1,
output_len_eval: int = -1, # Same as output_len, used during eval
avg_last_n: int = -1,
inter_dim: int = 768,
future_pred_loss: hydra.types.TargetConf = None,
return_past_too: bool = False,
drop_last_n: int = 0,
quantize_before_rollout: bool = False,
# This is only relevant when in_features=1 and input is
# clustered, or if on the fly cluster assgn is requested
assign_to_centroids: str = None,
num_cluster_centers: int = 50000,
freeze_encoder_decoder: bool = False,
**kwargs):
super().__init__()
self.assign_to_centroids = assign_to_centroids
if self.assign_to_centroids:
# Since we will be assigning the features to centroids here
assert in_features != 1
self.assigner = KmeansAssigner(assign_to_centroids)
assert self.assigner.num_clusters == num_cluster_centers
if in_features == 1 or assign_to_centroids:
self.encoder = nn.Embedding(num_cluster_centers, inter_dim)
else:
self.encoder = nn.Linear(in_features, inter_dim, bias=False)
self.decoder = nn.Linear(inter_dim, in_features, bias=False)
# If encoder is an embedding, then tie up the weights
if isinstance(self.encoder, nn.Embedding):
self.decoder.weight = self.encoder.weight
if freeze_encoder_decoder:
self.encoder.weight.requires_grad = False
self.decoder.weight.requires_grad = False
# This already has the LayerNorm inside residual, as Naman suggested.
self.gpt_model = transformers.GPT2Model(
transformers.GPT2Config(n_embd=inter_dim,
vocab_size=in_features,
use_cache=True,
**kwargs))
# Not needed, encoder will take care of it.
del self.gpt_model.wte
self.output_len = output_len
self.output_len_eval = output_len_eval
self.avg_last_n = avg_last_n
self.inter_dim = inter_dim
self.in_features = in_features
if future_pred_loss is not None:
self.future_pred_loss = hydra.utils.instantiate(future_pred_loss,
reduction='none')
else:
self.future_pred_loss = None
self.return_past_too = return_past_too
self.drop_last_n = drop_last_n
# Set this, if want to quantize the prediction (using top-1) and
# re-encode, as opposed to using the soft predicted feature
self.quantize_before_rollout = quantize_before_rollout
def forward(self, feats, target_shape):
"""
Args:
feats: tensor of shape (B, T, C)
target_shape: shape of the output (B, T', n_output)
"""
addl_endpoints = {}
if feats.ndim == 2:
# add back the temporal dimension, which was likely mean pooled
feats = feats.unsqueeze(1)
# Decide the output len based on the target_shape
if len(target_shape) == 3:
output_len = target_shape[1]
elif self.training or self.output_len_eval < 0:
# If training mode or output_len for eval has not been set
output_len = self.output_len
else: # eval mode
output_len = self.output_len_eval
# Keep track
full_inp_feats = feats
if self.assign_to_centroids:
# Unsqueeze only to be compatible with the 1 channel inputs -- that
# will get squeezed out later
feats = self.assigner(feats).unsqueeze(-1)
# The time dimension in already in the middle -> B, T, C
# That's what huggingface version needs:
# (batch_size, sequence_length, hidden_size)
if self.in_features == 1 or self.assign_to_centroids:
# This is a quantized input, so cast it to long, and remove the
# last singleton dimension
assert feats.size(-1) == 1
feats = feats.squeeze(-1).long()
# Keep only the first N frames. This is used when the model is given
# more input frames than it should use for prediction. The extra
# future frames are used to incur loss during training, but shouldn't
# otherwise be used, so drop those features.
full_orig_feats = feats
inp_feats = full_inp_feats
if self.drop_last_n != 0:
logging.warning('This should be used very carefully, ideally only '
'for debugging. The padding can lead to some '
'frames from the actual clip to leak into the '
'past clip, even after dropping last n. So even '
'after dropping the model might end up seeing '
'frames that are beyond the tau_a.')
feats = feats[:, :-self.drop_last_n]
inp_feats = inp_feats[:, :-self.drop_last_n]
# Keep track
orig_feats_len = feats.size(1)
# Reduce the dimensionality, since not using the GPT encoding matrix,
# since I don't have a "token" representation
feats = self.encoder(feats)
orig_feats_encoded = feats
past = None
all_outputs = []
all_outputs_decoded = []
for output_id in range(output_len):
pred_so_far = sum([el.size(1) for el in all_outputs])
position_ids = torch.arange(pred_so_far,
pred_so_far + feats.size(1),
dtype=torch.long,
device=feats.device)
# The past output will encode the previous past AND the new input
# (you can check the output, it keeps increasing)
# Got this from
# https://huggingface.co/transformers/quickstart.html#using-the-past
outputs = self.gpt_model(inputs_embeds=feats,
past_key_values=past,
position_ids=position_ids)
last_hidden_state = outputs.last_hidden_state
past = outputs.past_key_values
all_outputs.append(last_hidden_state)
# For visualization later, if output_attentions was passed into gpt
if outputs.attentions is not None:
# dimensions will be (batch_size, nlayers, nheads, seqlen, seqlen)
addl_endpoints[f'gpt2_att_{output_id}'] = torch.stack(
outputs.attentions).transpose(0, 1)
# Map back to the original feature dimension
all_outputs_decoded.append(self.decoder(last_hidden_state))
# hidden_states[-1] or last_hidden_state is the embedding from the
# final layer. Not using logits (earlier was using the LMHead model
# that returned logits) since that is already decoded to vocab size
# and I want to have control over the weights of that final matrix
# Also, the input for the next would be encodings, so need to
# access the encodings directly
if self.quantize_before_rollout:
assert isinstance(self.encoder, nn.Embedding)
feats = self.encoder(
all_outputs_decoded[-1][:, -1:, :].argmax(dim=-1))
else:
feats = last_hidden_state[:, -1:, :]
all_outputs = torch.cat(all_outputs, dim=1)
all_outputs_decoded = torch.cat(all_outputs_decoded, dim=1)
# Compute a loss on future prediction (teacher forced)
losses = {}
if self.future_pred_loss is not None:
num_elts_for_loss = min(full_orig_feats.size(1),
all_outputs_decoded.size(1))
losses = {
'feat':
self.future_pred_loss(
all_outputs_decoded[:, :num_elts_for_loss - 1],
full_orig_feats[:, 1:num_elts_for_loss])
}
# Set all_output as the final output features, and prev as the
# structure to use to get the original features of past
if self.in_features == 1:
prev = orig_feats_encoded
# all_outputs contains the hidden states, the best we will get
# anyway, so that doesn't change
elif self.assign_to_centroids:
prev = inp_feats # For this, I have the orig feats, so use that
# For prediction, use the predicted cluster centers, but use
# features from the original kmeans, not the embeddings
# that were learnt; it didn't work with them
all_outputs = self.assigner(all_outputs_decoded.argmax(dim=-1))
else:
prev = inp_feats
all_outputs = all_outputs_decoded
# Return the actual predictions
if self.return_past_too:
# Pad in the GT past (no point using the predicted past when
# we have the actual past)
final = torch.cat((prev, all_outputs[:, orig_feats_len - 1:, :]),
dim=1)
elif output_len > 0:
final = all_outputs[:, -output_len:]
else:
final = all_outputs
if self.avg_last_n > 0:
final = torch.mean(final[:, -self.avg_last_n:, :], dim=1)
# compute the past feature.
assert prev.size(1) == orig_feats_len, (
'If not, need to figure how to deal')
# Now keep the old feature for the first one, and return the predicted
# features shifted by 1 for the rest -- which are as predicted by
# GPT
updated_past_feat = torch.cat(
[prev[:, :1, :], all_outputs[:, :(orig_feats_len - 1)]], dim=1)
return updated_past_feat, final, losses, addl_endpoints
@property
def output_dim(self):
if self.in_features == 1:
return self.inter_dim # since it will return encoded features
# else, it will decode it back to the original feat dimension
return self.in_features
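# A minimal, self-contained sketch (toy sizes; not the trained AVT head)
# of the incremental GPT-2 rollout used in `AVTh.forward`: feed the full
# encoded past once, then feed only the newest predicted embedding while
# reusing `past_key_values` as the cache.
def _demo_gpt2_rollout(steps=3):
    import torch
    import transformers
    inter_dim = 64
    gpt = transformers.GPT2Model(
        transformers.GPT2Config(n_embd=inter_dim, n_layer=2, n_head=4,
                                vocab_size=1, use_cache=True))
    feats = torch.randn(2, 5, inter_dim)  # (B, T, C) encoded past features
    past = None
    preds = []
    for _ in range(steps):
        pred_so_far = sum(p.size(1) for p in preds)
        position_ids = torch.arange(pred_so_far, pred_so_far + feats.size(1))
        out = gpt(inputs_embeds=feats, past_key_values=past,
                  position_ids=position_ids)
        past = out.past_key_values
        preds.append(out.last_hidden_state)
        feats = out.last_hidden_state[:, -1:, :]  # roll out one step
    return torch.cat(preds, dim=1)  # (B, T + steps - 1, C)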
|
AVT-main
|
models/future_prediction.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Implementation of the temporal aggregation algorithms.
Input: (B, C, T)
Output: (B, C)
"""
import math
import torch
import torch.nn as nn
import logging
import warnings
try:
from external.rulstm.RULSTM.models import RULSTM
except ImportError:
RULSTM = object
logging.warning('No RULSTM found.')
class Identity(nn.Identity):
def __init__(self, in_features):
super().__init__()
self.in_features = in_features
def forward(self, *args, **kwargs):
return super().forward(*args, **kwargs), {}
@property
def output_dim(self):
return self.in_features
class Mean(nn.Module):
def __init__(self, in_features):
super().__init__()
self.in_features = in_features
def forward(self, feats):
"""
feats: B, T, C dimensional input
"""
return torch.mean(feats, dim=1), {}
@property
def output_dim(self):
return self.in_features
class PositionalEncoding(nn.Module):
"""For now, just using simple pos encoding from language.
https://pytorch.org/tutorials/beginner/transformer_tutorial.html
"""
def __init__(self, d_model, dropout=0.1, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2).float() *
(-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
class Transformer(nn.Module):
""" Using a transformer encoder and simple decoder. """
def __init__(self,
in_features,
inter_rep=512,
nheads=8,
nlayers=6,
agg_style='mean',
cloze_loss_ratio=0.0,
cloze_loss_wt=0.0):
super().__init__()
self.in_features = in_features
self.inter_rep = inter_rep
self.downproject = nn.Linear(in_features, inter_rep)
layer = nn.TransformerEncoderLayer(d_model=inter_rep, nhead=nheads)
# Don't think I'll ever consider longer than 1000 features?
self.pos_encoder = PositionalEncoding(inter_rep, max_len=1000)
self.transformer_encoder = nn.TransformerEncoder(
layer, num_layers=nlayers, norm=nn.LayerNorm(inter_rep))
self.agg_style = agg_style
self.cloze_loss_ratio = cloze_loss_ratio
self.cloze_loss_wt = cloze_loss_wt
self.cloze_loss_fn = nn.MSELoss(reduction='none')
# The embedding for the [MASK] token
if self.cloze_loss_ratio > 0:
self.extra_embeddings = nn.Embedding(1, in_features)
def forward(self, feats):
"""
Args:
feats (B, T, C)
Returns:
aggregated features (B, C')
"""
# Convert to the format used by transformer: T, B, C
feats = feats.transpose(0, 1)
kwargs = {}
if self.training and self.cloze_loss_ratio > 0:
# Mask out certain positions, so when doing attention these
# positions will be ignored
key_padding_mask = torch.rand((feats.size(0), feats.size(1)),
device=feats.device)
# Mark a cloze_loss_ratio fraction as True, so those will be ignored
key_padding_mask = key_padding_mask <= self.cloze_loss_ratio
# Set the features to MASK embedding, for the ones that are masked
key_padding_mask_rep = key_padding_mask.unsqueeze(-1).expand(
-1, -1, feats.size(2))
# Set the masked elements to 0, and add the MASK embedding
replaced_feats = (
feats * (~key_padding_mask_rep) +
key_padding_mask_rep * self.extra_embeddings(
torch.tensor([0], dtype=torch.long,
device=feats.device)).unsqueeze(0))
feats = replaced_feats
# Transpose since the function takes in B, T
kwargs['src_key_padding_mask'] = key_padding_mask.t()
feats = self.pos_encoder(self.downproject(feats))
feats_encoded = self.transformer_encoder(feats, **kwargs)
aux_losses = {}
if self.training and self.cloze_loss_ratio > 0:
dist = self.cloze_loss_fn(feats_encoded, feats)
dist_masked_elts = self.cloze_loss_wt * torch.mean(
torch.mean(dist, dim=-1) * key_padding_mask)
aux_losses['tx_mlm'] = dist_masked_elts
if self.agg_style == 'mean':
res = torch.mean(feats_encoded, dim=[0])
elif self.agg_style == 'last':
res = feats_encoded[-1]
else:
raise NotImplementedError(f'Unknown agg style {self.agg_style}')
return res, aux_losses
@property
def output_dim(self):
return self.inter_rep
class RULSTMAggregation(RULSTM):
def __init__(self,
in_features: int,
intermediate_featdim: int = 1024,
dropout: float = 0.8,
num_pad_feats: int = 0):
"""
Args:
num_pad_feats (int): Pad the features with zero feats for this
many times on the time axis. This is because the unrolling
LSTM unrolls forward as many times as input, and since original
models were trained for 14 steps unrolling (upto 0.25s
before the action), and I usually test for 11 steps (1s before
action), need to pad 3 times to get the same output when
testing pre-trained models.
"""
super().__init__(1, in_features, intermediate_featdim, dropout)
# Remove the classifier, since the outside code will deal with that
self.classifier = nn.Sequential()
self.output_dim = intermediate_featdim
self.num_pad_feats = num_pad_feats
# Ignore warnings due to the "UserWarning: RNN module weights are not
# part of single contiguous chunk of memory. This means they need to be
# compacted at every call, possibly greatly increasing memory usage.
# To compact weights again call flatten_parameters()."
# Not sure how to fix this; adding the flatten didn't really fix it.
# Happens only with DataParallel, not DDP
# Using https://github.com/pytorch/pytorch/issues/24155#issuecomment-604474511
# Just ignoring the warning
warnings.filterwarnings('ignore')
def forward(self, feats):
"""
Args:
feats (B, T, C)
Returns:
aggregated (B, C)
"""
if self.num_pad_feats > 0:
empty_feats = torch.zeros(
(feats.size(0), self.num_pad_feats, feats.size(-1)),
dtype=feats.dtype,
device=feats.device)
feats = torch.cat([feats, empty_feats], dim=1)
res = super().forward(feats)
# Return output corresponding to the last input frame. Note that in
# original RULSTM they do -4 since they predict 3 steps further into
# the anticipation time, whereas I stop when the anticipation time
# starts here.
# Subtract num_pad_feat as that would mean it predicted further into
# the future
return res[:, -1 - self.num_pad_feats, :], {}
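# A minimal shape-check sketch (toy dimensions) for the Transformer
# aggregator above: (B, T, C) features are reduced to (B, inter_rep)
# via the mean over encoded time steps.
def _demo_transformer_aggregation():
    import torch
    agg = Transformer(in_features=128, inter_rep=64, nheads=4, nlayers=2)
    agg.eval()  # the cloze masking path is off by default anyway
    feats = torch.randn(2, 10, 128)  # (B, T, C)
    with torch.no_grad():
        out, aux_losses = agg(feats)
    assert out.shape == (2, 64) and not aux_losses
    return out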
|
AVT-main
|
models/temporal_aggregation.py
|
AVT-main
|
models/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Model architectures.
"""
import torch.nn as nn
from torchvision.models.video.resnet import (
BasicBlock,
Bottleneck,
R2Plus1dStem,
_video_resnet,
)
from pretrainedmodels import bninception
import timm
__all__ = [
'r2plus1d_34',
'r2plus1d_152',
'ir_csn_152',
'ip_csn_152',
'ip_csn_50',
'BNInceptionVideo',
]
class BasicStem_Pool(nn.Sequential):
def __init__(self):
super(BasicStem_Pool, self).__init__(
nn.Conv3d(
3,
64,
kernel_size=(3, 7, 7),
stride=(1, 2, 2),
padding=(1, 3, 3),
bias=False,
),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True),
nn.MaxPool3d(kernel_size=(1, 3, 3),
stride=(1, 2, 2),
padding=(0, 1, 1)),
)
class Conv3DDepthwise(nn.Conv3d):
def __init__(self,
in_planes,
out_planes,
midplanes=None,
stride=1,
padding=1):
assert in_planes == out_planes
super(Conv3DDepthwise, self).__init__(
in_channels=in_planes,
out_channels=out_planes,
kernel_size=(3, 3, 3),
stride=stride,
padding=padding,
groups=in_planes,
bias=False,
)
@staticmethod
def get_downsample_stride(stride):
return (stride, stride, stride)
class IPConv3DDepthwise(nn.Sequential):
def __init__(self, in_planes, out_planes, midplanes, stride=1, padding=1):
assert in_planes == out_planes
super(IPConv3DDepthwise, self).__init__(
nn.Conv3d(in_planes, out_planes, kernel_size=1, bias=False),
nn.BatchNorm3d(out_planes),
# nn.ReLU(inplace=True),
Conv3DDepthwise(out_planes, out_planes, None, stride),
)
@staticmethod
def get_downsample_stride(stride):
return (stride, stride, stride)
class Conv2Plus1D(nn.Sequential):
def __init__(self, in_planes, out_planes, midplanes, stride=1, padding=1):
midplanes = (in_planes * out_planes * 3 * 3 *
3) // (in_planes * 3 * 3 + 3 * out_planes)
super(Conv2Plus1D, self).__init__(
nn.Conv3d(
in_planes,
midplanes,
kernel_size=(1, 3, 3),
stride=(1, stride, stride),
padding=(0, padding, padding),
bias=False,
),
nn.BatchNorm3d(midplanes),
nn.ReLU(inplace=True),
nn.Conv3d(
midplanes,
out_planes,
kernel_size=(3, 1, 1),
stride=(stride, 1, 1),
padding=(padding, 0, 0),
bias=False,
),
)
@staticmethod
def get_downsample_stride(stride):
return (stride, stride, stride)
def _set_bn_params(model, bn_eps=1e-3, bn_mom=0.1):
"""
Set the BN parameters to the defaults: Du's models were trained
with 1e-3 and 0.9 for eps and momentum resp.
Ref: https://github.com/facebookresearch/VMZ/blob/f4089e2164f67a98bc5bed4f97dc722bdbcd268e/lib/models/r3d_model.py#L208
"""
for module in model.modules():
if isinstance(module, nn.BatchNorm3d):
module.eps = bn_eps
module.momentum = bn_mom
def r2plus1d_34(pretrained=False,
progress=False,
bn_eps=1e-3,
bn_mom=0.1,
**kwargs):
model = _video_resnet("r2plus1d_34",
False,
False,
block=BasicBlock,
conv_makers=[Conv2Plus1D] * 4,
layers=[3, 4, 6, 3],
stem=R2Plus1dStem,
**kwargs)
_set_bn_params(model, bn_eps, bn_mom)
return model
def r2plus1d_152(pretrained=False,
progress=False,
bn_eps=1e-3,
bn_mom=0.1,
**kwargs):
model = _video_resnet("r2plus1d_152",
False,
False,
block=Bottleneck,
conv_makers=[Conv2Plus1D] * 4,
layers=[3, 8, 36, 3],
stem=R2Plus1dStem,
**kwargs)
_set_bn_params(model, bn_eps, bn_mom)
return model
def ir_csn_152(pretrained=False,
progress=False,
bn_eps=1e-3,
bn_mom=0.1,
**kwargs):
model = _video_resnet("ir_csn_152",
False,
False,
block=Bottleneck,
conv_makers=[Conv3DDepthwise] * 4,
layers=[3, 8, 36, 3],
stem=BasicStem_Pool,
**kwargs)
_set_bn_params(model, bn_eps, bn_mom)
return model
def ip_csn_152(pretrained=False,
progress=False,
bn_eps=1e-3,
bn_mom=0.1,
**kwargs):
model = _video_resnet("ip_csn_152",
False,
False,
block=Bottleneck,
conv_makers=[IPConv3DDepthwise] * 4,
layers=[3, 8, 36, 3],
stem=BasicStem_Pool,
**kwargs)
_set_bn_params(model, bn_eps, bn_mom)
return model
def ip_csn_50(pretrained=False,
progress=False,
bn_eps=0.3,
bn_mom=0.1,
**kwargs):
model = _video_resnet("ip_csn_50",
False,
False,
block=Bottleneck,
conv_makers=[IPConv3DDepthwise] * 4,
layers=[3, 8, 6, 3],
stem=BasicStem_Pool,
**kwargs)
_set_bn_params(model, bn_eps, bn_mom)
return model
def process_each_frame(model, video, *args, **kwargs):
"""
Pass in each frame separately
Args:
video (B, C, T, H, W)
Returns:
feats: (B, C', T, 1, 1)
"""
batch_size = video.size(0)
time_dim = video.size(2)
video_flat = video.transpose(1, 2).flatten(0, 1)
feats_flat = model(video_flat, *args, **kwargs)
return feats_flat.view((batch_size, time_dim) +
feats_flat.shape[1:]).transpose(
1, 2).unsqueeze(-1).unsqueeze(-1)
class FrameLevelModel(nn.Module):
"""Runs a frame level model on all the frames."""
def __init__(self, num_classes: int, model: nn.Module = None):
del num_classes
super().__init__()
self.model = model
def forward(self, video, *args, **kwargs):
return process_each_frame(self.model, video, *args, **kwargs)
class BNInceptionVideo(FrameLevelModel):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.model = bninception(*args, **kwargs)
self.model.last_linear = nn.Identity()
self.model.global_pool = nn.AdaptiveAvgPool2d(1)
class TIMMModel(FrameLevelModel):
def __init__(self,
num_classes,
model_type='vit_base_patch16_224',
drop_cls=True):
super().__init__(num_classes)
model = timm.create_model(model_type,
num_classes=0 if drop_cls else num_classes)
self.model = model
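# A minimal sketch (toy per-frame model) of the shape contract in
# `process_each_frame`: a frame-level (B*T, C, H, W) -> (B*T, C') model
# is lifted to video input (B, C, T, H, W) -> (B, C', T, 1, 1).
def _demo_process_each_frame():
    import torch
    import torch.nn as nn
    frame_model = nn.Sequential(nn.AdaptiveAvgPool2d(1), nn.Flatten(),
                                nn.Linear(3, 8))
    video = torch.randn(2, 3, 5, 16, 16)  # (B, C, T, H, W)
    feats = process_each_frame(frame_model, video)
    assert feats.shape == (2, 8, 5, 1, 1)
    return feats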
|
AVT-main
|
models/video_classification.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
The overall base model.
"""
from typing import Dict, Tuple
import operator
import torch
import torch.nn as nn
import hydra
from omegaconf import OmegaConf
CLS_MAP_PREFIX = 'cls_map_'
PAST_LOGITS_PREFIX = 'past_'
class BaseModel(nn.Module):
def __init__(self, model_cfg: OmegaConf, num_classes: Dict[str, int],
class_mappings: Dict[Tuple[str, str], torch.FloatTensor]):
super().__init__()
# Takes as input (B, T, H, W, C) -> (B, T', H', W', C')
_backbone_full = hydra.utils.instantiate(
model_cfg.backbone,
# Add dummy value for num_cls
# will be removed next anyway
num_classes=1)
if model_cfg.backbone_last_n_modules_to_drop > 0:
self.backbone = nn.Sequential()
for name, child in list(_backbone_full.named_children(
))[:-model_cfg.backbone_last_n_modules_to_drop]:
self.backbone.add_module(name, child)
else:
self.backbone = _backbone_full
# Map the (B, T', H', W', C') -> (B, T', H', W', C*)
# to the intermediate feature dimensions
# IMP: this is only used if C' != C*
if (model_cfg.backbone_last_n_modules_to_drop == 0
and 'output_dim' in dir(self.backbone)):
backbone_dim = self.backbone.output_dim
else:
backbone_dim = model_cfg.backbone_dim # TODO: Figure out automatically
self.mapper_to_inter = None
if model_cfg.intermediate_featdim is None:
model_cfg.intermediate_featdim = backbone_dim
if backbone_dim != model_cfg.intermediate_featdim:
self.mapper_to_inter = nn.Linear(backbone_dim,
model_cfg.intermediate_featdim,
bias=False)
# Takes as input (B, T', H', W', C*) -> (B, C**)
self.temporal_aggregator = hydra.utils.instantiate(
model_cfg.temporal_aggregator,
in_features=model_cfg.intermediate_featdim)
self.reset_temp_agg_feat_dim = nn.Sequential()
temp_agg_output_dim = self.temporal_aggregator.output_dim
if model_cfg.same_temp_agg_dim and (temp_agg_output_dim !=
model_cfg.intermediate_featdim):
# Ideally want to maintain it so that the same project_mlp
# can be used for the temporally aggregated features, or the
# original features.
self.reset_temp_agg_feat_dim = nn.Linear(
temp_agg_output_dim, model_cfg.intermediate_featdim)
temp_agg_output_dim = model_cfg.intermediate_featdim
# Transforms the current features to future ones
# (B, C**) -> (B, C**)
self.future_predictor = hydra.utils.instantiate(
model_cfg.future_predictor,
in_features=temp_agg_output_dim,
_recursive_=False)
# Projection layer
self.project_mlp = nn.Sequential()
if model_cfg.project_dim_for_nce is not None:
self.project_mlp = nn.Sequential(
nn.Linear(temp_agg_output_dim, temp_agg_output_dim),
nn.ReLU(inplace=True),
nn.Linear(temp_agg_output_dim, model_cfg.project_dim_for_nce))
# 2nd round of temporal aggregation, if needed
self.temporal_aggregator_after_future_pred = hydra.utils.instantiate(
model_cfg.temporal_aggregator_after_future_pred,
self.future_predictor.output_dim)
# Dropout
self.dropout = nn.Dropout(model_cfg.dropout)
# Takes as input (B, C**) -> (B, num_classes)
cls_input_dim = self.temporal_aggregator_after_future_pred.output_dim
# Make a separate classifier for each output
self.classifiers = nn.ModuleDict()
self.num_classes = num_classes
for i, (cls_type, cls_dim) in enumerate(num_classes.items()):
if model_cfg.use_cls_mappings and i > 0:
# In this case, rely on the class mappings to generate the
# other predictions, rather than creating a new linear layer
break
self.classifiers.update({
cls_type:
hydra.utils.instantiate(model_cfg.classifier,
in_features=cls_input_dim,
out_features=cls_dim)
})
# Store the class mappings as buffers
for (src, dst), mapping in class_mappings.items():
self.register_buffer(f'{CLS_MAP_PREFIX}{src}_{dst}', mapping)
self.regression_head = None
if model_cfg.add_regression_head:
self.regression_head = nn.Linear(cls_input_dim, 1)
# Init weights, as per the video resnets
self._initialize_weights()
# Set the BN momentum and eps here; Du uses different values and it's important
self._set_bn_params(model_cfg.bn.eps, model_cfg.bn.mom)
self.cfg = model_cfg
def _initialize_weights(self):
# Copied over from
# https://github.com/pytorch/vision/blob/75f5b57e680549d012b3fc01b356b2fb92658ea7/torchvision/models/video/resnet.py#L261
# Making sure all layers get init to good video defaults
for m in self.modules():
if isinstance(m, nn.Conv3d):
nn.init.kaiming_normal_(m.weight,
mode='fan_out',
nonlinearity='relu')
if m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm3d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
nn.init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def _set_bn_params(self, bn_eps=1e-3, bn_mom=0.1):
"""
Set the BN parameters to the defaults: Du's models were trained
with 1e-3 and 0.9 for eps and momentum resp.
Ref: https://github.com/facebookresearch/VMZ/blob/f4089e2164f67a98bc5bed4f97dc722bdbcd268e/lib/models/r3d_model.py#L208
"""
for module in self.modules():
if isinstance(module, nn.BatchNorm3d):
module.eps = bn_eps
module.momentum = bn_mom
def forward_singlecrop(self, video, target_shape=None):
"""
Args:
video (torch.Tensor, Bx#clipsxCxTxHxW)
target_shape: The shape of the target. Some of these layers might
be able to use this information.
"""
outputs = {}
aux_losses = {}
batch_size = video.size(0)
num_clips = video.size(1)
# Fold the clips dimension into the batch for feature extraction, upto
# temporal aggregation
video = video.flatten(0, 1)
feats = self.backbone(video)
outputs['backbone'] = feats
# Spatial mean
feats = torch.mean(feats, [-1, -2])
# store temporal mean as well
outputs['backbone_mean'] = torch.mean(feats, [-1])
# If the projection MLP is non-empty and matches this feat dim, apply it here
if len(self.project_mlp) > 0 and (outputs['backbone_mean'].size(-1) ==
self.project_mlp[0].in_features):
outputs['backbone_mean_projected'] = self.project_mlp(
outputs['backbone_mean'])
# Move the time dimension inside: B,C,T -> B,T,C
feats = feats.permute((0, 2, 1))
# Map the feats to intermediate dimension, that rest of the code
# will operate on. Only if the original feature is not already
if feats.shape[-1] != self.cfg.intermediate_featdim:
assert self.mapper_to_inter is not None, (
f'The backbone feat does not match intermediate {feats.shape} '
f'and {self.cfg.intermediate_featdim}. Please set '
f'model.backbone_dim correctly.')
feats = self.mapper_to_inter(feats)
feats_agg, agg_losses = self.temporal_aggregator(feats)
aux_losses.update(agg_losses)
feats_agg = self.reset_temp_agg_feat_dim(feats_agg)
outputs['temp_agg'] = feats_agg
# For the contrastive loss, I need a projected version of this feature
outputs['temp_agg_projected'] = self.project_mlp(feats_agg)
# Now before future prediction, need to unfold the clips back out,
# and concat on the temporal dimension
if num_clips > 1:
assert (
(feats_agg.ndim == 2)
or (feats_agg.ndim == 3 and feats_agg.size(1) == 1)
), ('Should be using some temporal aggregation when using clips')
feats_agg = feats_agg.reshape((batch_size, num_clips) +
feats_agg.shape[1:])
if feats_agg.ndim == 4:
feats_agg = torch.flatten(feats_agg, 1, 2)
# now feats_agg back to 3D (B, T, F)
feats_past = feats_agg
# Now the future prediction, also it might update the past features
# like the GPT style models would
(feats_past, feats_future, future_losses,
endpoints) = self.future_predictor(feats_past, target_shape)
aux_losses.update(future_losses)
outputs.update(endpoints)
outputs['future'] = feats_future
outputs['past'] = feats_past
# Apply a classifier on the past features, might be supervising that
if self.cfg.classifier_on_past:
feats_past_drop = self.dropout(feats_past)
outputs.update(
self._apply_classifier(feats_past_drop,
outputs_prefix=PAST_LOGITS_PREFIX))
# For the contrastive loss, I need a projected version of this feature
outputs['future_projected'] = self.project_mlp(feats_agg)
# Aggregate again, if asked for
feats_future_agg, future_agg_losses = (
self.temporal_aggregator_after_future_pred(feats_future))
aux_losses.update(future_agg_losses)
outputs['future_agg'] = feats_future_agg
feats_future_agg_drop = self.dropout(feats_future_agg)
outputs.update(self._apply_classifier(feats_future_agg_drop))
if self.regression_head:
outputs['logits_regression'] = self.regression_head(
feats_future_agg_drop)
return outputs, aux_losses
def _apply_classifier(self, input_feat, outputs_prefix=''):
outputs = {}
for key in self.num_classes.keys():
if key in self.classifiers:
outputs[f'{outputs_prefix}logits/{key}'] = self.classifiers[
key](input_feat)
else:
# A mapping must exist, in order to compute this, and must
# have been computed already (so ordering in the config
# matters)
src_key = next(iter(self.classifiers.keys()))
src_tensor = outputs[f'{outputs_prefix}logits/{src_key}']
mapper = operator.attrgetter(
f'{CLS_MAP_PREFIX}{key}_{src_key}')(self)
outputs[f'{outputs_prefix}logits/{key}'] = torch.mm(
src_tensor, mapper)
return outputs
def forward(self, video, *args, **kwargs):
"""
Args: video (torch.Tensor)
Could be (B, #clips, C, T, H, W) or
(B, #clips, #crops, C, T, H, W)
Returns:
Final features
And any auxiliarly losses produced by the model
"""
if video.ndim == 6:
video_crops = [video]
elif video.ndim == 7 and video.size(2) == 1:
video_crops = [video.squeeze(2)]
elif video.ndim == 7:
video_crops = torch.unbind(video, dim=2)
else:
raise NotImplementedError('Unsupported size %s' % video.shape)
feats_losses = [
self.forward_singlecrop(el, *args, **kwargs) for el in video_crops
]
feats, losses = zip(*feats_losses)
# Convert to dict of lists
feats = {k: [dic[k] for dic in feats] for k in feats[0]}
losses = {k: [dic[k] for dic in losses] for k in losses[0]}
# Average over the crops
feats = {
k: torch.mean(torch.stack(el, dim=0), dim=0)
for k, el in feats.items()
}
losses = {
k: torch.mean(torch.stack(el, dim=0), dim=0)
for k, el in losses.items()
}
return feats, losses
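# A minimal sketch (made-up class counts) of the class-mapping trick in
# `_apply_classifier`: logits in a second label space are derived from
# the source logits with a fixed (num_src x num_dst) mapping matrix,
# instead of training a separate classifier.
def _demo_class_mapping_logits():
    import torch
    num_src, num_dst, batch = 6, 3, 2
    src_logits = torch.randn(batch, num_src)
    # e.g. map each fine-grained source class to one coarse target class
    mapping = torch.zeros(num_src, num_dst)
    mapping[torch.arange(num_src), torch.arange(num_src) % num_dst] = 1.0
    dst_logits = torch.mm(src_logits, mapping)
    assert dst_logits.shape == (batch, num_dst)
    return dst_logits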
|
AVT-main
|
models/base_model.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch.nn as nn
class MLP(nn.Module):
def __init__(self, in_features, out_features, nlayers, **kwargs):
super().__init__()
layers = [[nn.Linear(in_features, in_features, **kwargs),
nn.ReLU()] for _ in range(nlayers - 1)]
# flatten out the pairs
layers = [item for sublist in layers for item in sublist]
layers.append(nn.Linear(in_features, out_features))
self.cls = nn.Sequential(*layers)
def forward(self, inp):
return self.cls(inp)
|
AVT-main
|
models/classifiers.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import numbers
import random
from torchvision.transforms import (
RandomCrop,
RandomResizedCrop,
ColorJitter,
ToPILImage,
ToTensor,
)
__all__ = [
"RandomCropVideo",
"RandomResizedCropVideo",
"CenterCropVideo",
"NormalizeVideo",
"ToTensorVideo",
"RandomHorizontalFlipVideo",
"Resize",
"TemporalCenterCrop",
"ColorJitterVideo",
]
def _is_tensor_video_clip(clip):
if not torch.is_tensor(clip):
raise TypeError("clip should be Tensor. Got %s" % type(clip))
if not clip.ndimension() == 4:
raise ValueError("clip should be 4D. Got %dD" % clip.dim())
return True
def crop(clip, i, j, h, w):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
"""
assert len(clip.size()) == 4, "clip should be a 4D tensor"
return clip[..., i:i + h, j:j + w]
def temporal_center_crop(clip, clip_len):
"""
Args:
clip (torch.tensor): Video clip to be
cropped along the temporal axis. Size is (C, T, H, W)
"""
assert len(clip.size()) == 4, "clip should be a 4D tensor"
assert clip.size(1) >= clip_len, "clip is shorter than the proposed length"
middle = int(clip.size(1) // 2)
start = middle - clip_len // 2
return clip[:, start:start + clip_len, ...]
def resize(clip, target_size, interpolation_mode):
"""
Args:
target_size can be a
integer: Which is the length of the smaller side
string: with format <min>-<max>: will randomly pick a size from
min and max (included) to be the smaller side
or tuple of either integers and/or string
"""
def _convert_size_to_integer(size_str):
if isinstance(size_str, int):
return size_str
size_min, size_max = [int(el) for el in size_str.split('-')]
return random.randint(size_min, size_max)
if isinstance(target_size, (list, tuple)):
target_size = (_convert_size_to_integer(target_size[0]),
_convert_size_to_integer(target_size[1]))
else:
target_size = _convert_size_to_integer(target_size)
if isinstance(target_size, int):
clip_h, clip_w = clip.shape[-2], clip.shape[-1]
scale_factor = target_size * 1.0 / min(clip_h, clip_w)
# Make sure the new sizes definitely respect target_size; sometimes the
# scale ratio lands a couple of pixels below, which can lead to errors
new_h = max(int(clip_h * scale_factor), target_size)
new_w = max(int(clip_w * scale_factor), target_size)
target_size = (new_h, new_w)
return torch.nn.functional.interpolate(clip,
size=target_size,
mode=interpolation_mode)
def resized_crop(clip, i, j, h, w, size, interpolation_mode="bilinear"):
"""
Do spatial cropping and resizing to the video clip
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
i (int): i in (i,j) i.e coordinates of the upper left corner.
j (int): j in (i,j) i.e coordinates of the upper left corner.
h (int): Height of the cropped region.
w (int): Width of the cropped region.
size (tuple(int, int)): height and width of resized clip
Returns:
clip (torch.tensor): Resized and cropped clip. Size is (C, T, H, W)
"""
assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor"
clip = crop(clip, i, j, h, w)
clip = resize(clip, size, interpolation_mode)
return clip
def center_crop(clip, crop_size):
assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor"
h, w = clip.size(-2), clip.size(-1)
th, tw = crop_size
assert h >= th and w >= tw, "height and width must be >= than crop_size"
i = int(round((h - th) / 2.0))
j = int(round((w - tw) / 2.0))
return crop(clip, i, j, th, tw)
def to_tensor(clip):
"""
Convert tensor data type from uint8 to float, divide value by 255.0 and
permute the dimensions of the clip tensor
Args:
clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
Return:
clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
try:
_is_tensor_video_clip(clip)
except (TypeError, ValueError):
# Needed to add this since this happens when using Miao's transforms
clip = torch.as_tensor(clip)
if not clip.dtype == torch.uint8:
raise TypeError("clip tensor should have data type uint8. Got %s" %
str(clip.dtype))
return clip.float().permute(3, 0, 1, 2) / 255.0
def normalize(clip, mean, std, inplace=False):
"""
Args:
clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
mean (tuple): pixel RGB mean. Size is (3)
std (tuple): pixel standard deviation. Size is (3)
Returns:
normalized clip (torch.tensor): Size is (C, T, H, W)
"""
assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor"
if not inplace:
clip = clip.clone()
mean = torch.as_tensor(mean, dtype=clip.dtype, device=clip.device)
std = torch.as_tensor(std, dtype=clip.dtype, device=clip.device)
clip.sub_(mean[:, None, None, None]).div_(std[:, None, None, None])
return clip
def hflip(clip):
"""
Args:
clip (torch.tensor): Video clip to be normalized. Size is (C, T, H, W)
Returns:
flipped clip (torch.tensor): Size is (C, T, H, W)
"""
assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor"
return clip.flip((-1))
class RandomCropVideo(RandomCrop):
def __init__(self, size):
if isinstance(size, numbers.Number):
self.size = (int(size), int(size))
else:
self.size = size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, OH, OW)
"""
i, j, h, w = self.get_params(clip, self.size)
return crop(clip, i, j, h, w)
def __repr__(self):
return self.__class__.__name__ + '(size={0})'.format(self.size)
class RandomResizedCropVideo(RandomResizedCrop):
def __init__(
self,
size,
scale=(0.08, 1.0),
ratio=(3.0 / 4.0, 4.0 / 3.0),
interpolation_mode="bilinear",
):
if isinstance(size, tuple):
assert len(size) == 2, "size should be tuple (height, width)"
self.size = size
else:
self.size = (size, size)
self.interpolation_mode = interpolation_mode
self.scale = scale
self.ratio = ratio
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: randomly cropped/resized video clip.
size is (C, T, H, W)
"""
i, j, h, w = self.get_params(clip, self.scale, self.ratio)
return resized_crop(clip, i, j, h, w, self.size,
self.interpolation_mode)
def __repr__(self):
return self.__class__.__name__ + \
'(size={0}, interpolation_mode={1}, scale={2}, ratio={3})'.format(
self.size, self.interpolation_mode, self.scale, self.ratio
)
class CenterCropVideo(object):
def __init__(self, crop_size):
if isinstance(crop_size, numbers.Number):
self.crop_size = (int(crop_size), int(crop_size))
else:
self.crop_size = crop_size
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(C, T, crop_size, crop_size)
"""
return center_crop(clip, self.crop_size)
def __repr__(self):
r = self.__class__.__name__ + '(crop_size={0})'.format(self.crop_size)
return r
def multi_crop(video, crop_size, num_crops, flips):
"""
Returns a list of video crops of crop_size, num_crops * 2 in length
(including flipped versions)
"""
assert _is_tensor_video_clip(video), "clip should be a 4D torch.tensor"
h, w = video.size(-2), video.size(-1)
th, tw = crop_size
assert h >= th and w >= tw, "height and width must be >= than crop_size"
if num_crops == 1:
# Center crop, as used in the CenterCrop function
pos = [(int(round((h - th) / 2.0)), int(round((w - tw) / 2.0)))]
elif num_crops == 3:
# top left, center, and bottom right
pos = [(0, 0), (int(round((h - th) / 2.0)), int(round(
(w - tw) / 2.0))), (h - th, w - tw)]
else:
raise NotImplementedError('Not supported')
cropped = [crop(video, i, j, th, tw) for i, j in pos]
if flips:
cropped += [hflip(el) for el in cropped]
return cropped
class MultiCropVideo(object):
def __init__(self, crop_size, num_crops, flips=False):
if isinstance(crop_size, numbers.Number):
self.crop_size = (int(crop_size), int(crop_size))
else:
self.crop_size = crop_size
self.num_crops = num_crops
self.flips = flips
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Video clip to be cropped. Size is (C, T, H, W)
Returns:
torch.tensor: central cropping of video clip. Size is
(num_crops, C, T, crop_size, crop_size)
"""
return torch.stack(
multi_crop(clip, self.crop_size, self.num_crops, self.flips), 0)
def __repr__(self):
return (self.__class__.__name__ +
f'(crop_size={self.crop_size},num_crops={self.num_crops})')
class TemporalCenterCrop(object):
def __init__(self, clip_len):
self.clip_len = clip_len
def __call__(self, clip):
return temporal_center_crop(clip, self.clip_len)
class UnfoldClips(object):
def __init__(self, clip_len, overlap):
self.clip_len = clip_len
assert overlap > 0 and overlap <= 1
self.step = round(clip_len * overlap)
def __call__(self, clip):
if clip.size(1) < self.clip_len:
return clip.unfold(1, clip.size(1),
clip.size(1)).permute(1, 0, 4, 2, 3)
# Use the overlap-derived step, so consecutive clips can overlap
results = clip.unfold(1, self.clip_len,
self.step).permute(1, 0, 4, 2, 3)
return results
class NormalizeVideo(object):
"""
Normalize the video clip by mean subtraction
and division by standard deviation
Args:
mean (3-tuple): pixel RGB mean
std (3-tuple): pixel RGB standard deviation
inplace (boolean): whether do in-place normalization
"""
def __init__(self, mean, std, inplace=False):
self.mean = mean
self.std = std
self.inplace = inplace
def __call__(self, clip):
"""
Args:
clip (torch.tensor): video clip to be
normalized. Size is (C, T, H, W)
"""
return normalize(clip, self.mean, self.std, self.inplace)
def __repr__(self):
return self.__class__.__name__ + '(mean={0}, std={1}, inplace={2})'.format(
self.mean, self.std, self.inplace)
class ToTensorVideo(object):
"""
Convert tensor data type from uint8 to float, divide value by 255.0 and
permute the dimensions of the clip tensor
"""
def __init__(self):
pass
def __call__(self, clip):
"""
Args:
clip (torch.tensor, dtype=torch.uint8): Size is (T, H, W, C)
Return:
clip (torch.tensor, dtype=torch.float): Size is (C, T, H, W)
"""
return to_tensor(clip)
def __repr__(self):
return self.__class__.__name__
class RandomHorizontalFlipVideo(object):
"""
Flip the video clip along the horizontal direction with a given probability
Args:
p (float): probability of the clip being flipped. Default value is 0.5
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Size is (C, T, H, W)
Return:
clip (torch.tensor): Size is (C, T, H, W)
"""
if random.random() < self.p:
clip = hflip(clip)
return clip
def __repr__(self):
return self.__class__.__name__ + "(p={0})".format(self.p)
class ColorJitterVideo():
"""
Randomly add color jitter to video
Args:
Same as original ColorJitter
"""
def __init__(self, *args, **kwargs):
self.frame_color_jitter = ColorJitter(*args, **kwargs)
def __call__(self, clip):
"""
Args:
clip (torch.tensor): Size is (C, T, H, W)
Return:
clip (torch.tensor): Size is (C, T, H, W)
"""
assert _is_tensor_video_clip(clip), "clip should be a 4D torch.tensor"
# Stack the frames on height dimension
stacked_frames = clip.view((clip.size(0), -1, clip.size(-1)))
stacked_frames_pil = ToPILImage()(stacked_frames)
output_stacked_frames = ToTensor()(
self.frame_color_jitter(stacked_frames_pil))
return output_stacked_frames.view(clip.shape)
class Resize(object):
def __init__(self, size):
self.size = size
def __call__(self, vid):
return resize(vid, self.size, interpolation_mode="bilinear")
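# Illustrative usage sketch (not part of the original file; assumes the
# helper functions used above -- to_tensor, resize, normalize, hflip -- are
# defined earlier in this module, and uses placeholder normalization stats).
if __name__ == '__main__':
    from torchvision.transforms import Compose
    dummy_clip = torch.randint(0, 256, (16, 128, 171, 3), dtype=torch.uint8)
    pipeline = Compose([
        ToTensorVideo(),                  # (T, H, W, C) uint8 -> (C, T, H, W) float
        Resize((112, 112)),               # bilinear resize of every frame
        RandomHorizontalFlipVideo(p=0.5),
        NormalizeVideo(mean=(0.43, 0.40, 0.38), std=(0.15, 0.14, 0.14)),
    ])
    print(pipeline(dummy_clip).shape)     # torch.Size([3, 16, 112, 112])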
|
AVT-main
|
common/transforms.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from collections import defaultdict, deque
import datetime
import time
import logging
import torch
import torch.distributed as dist
from common.utils import is_dist_avail_and_initialized, is_main_process
__all__ = [
    'SmoothedValue', 'MetricLogger', 'get_default_loggers'
]
EPS = 0.000001
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size=20, fmt=None):
if fmt is None:
fmt = "{median:.4f} ({global_avg:.4f})"
self.deque = deque(maxlen=window_size)
self.total = 0.0
self.count = 0
self.fmt = fmt
self.ws = window_size
def reset(self):
self.__init__(window_size=self.ws, fmt=self.fmt)
def update(self, value, n=1):
self.deque.append(value)
self.count += n
self.total += value * n
def synchronize_between_processes(self):
"""
Warning: does not synchronize the deque!
"""
if not is_dist_avail_and_initialized():
return
t = torch.tensor([self.count, self.total],
dtype=torch.float64,
device='cuda')
dist.barrier()
dist.all_reduce(t)
t = t.tolist()
self.count = int(t[0])
self.total = t[1]
@property
def median(self):
d = torch.tensor(list(self.deque))
return d.median().item()
@property
def avg(self):
d = torch.tensor(list(self.deque), dtype=torch.float32)
return d.mean().item()
@property
def global_avg(self):
return self.total / (self.count + EPS)
@property
def max(self):
return max(self.deque)
@property
def value(self):
return self.deque[-1]
def __str__(self):
return self.fmt.format(median=self.median,
avg=self.avg,
global_avg=self.global_avg,
max=self.max,
value=self.value)
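# Illustrative behavior note (added sketch, not in the original file):
# `median`/`avg`/`max` are computed over the last `window_size` raw values,
# while `global_avg` uses the running total over *all* updates, e.g.:
#   sv = SmoothedValue(window_size=2)
#   for v in [1.0, 2.0, 3.0]:
#       sv.update(v)
#   sv.avg         # 2.5  (mean of the last two values)
#   sv.global_avg  # ~2.0 (6.0 / 3, up to the EPS in the denominator)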
class MetricLogger(object):
def __init__(self,
delimiter="\t",
writer=None,
stat_set="train",
epoch=0,
logger=None):
self.meters = defaultdict(SmoothedValue)
self.delimiter = delimiter
self.metric_set = stat_set
self.epoch = epoch
self.logger = logger.info if logger is not None else logging.info
self.writer = writer
self.writer_step = 0
        # Prefix all logs from this class with tbd_header, so other metrics
        # can be plotted cleanly alongside them
        self.tbd_header = 'metric_logger/'
self.meters["iter_time"] = SmoothedValue(fmt='{avg:.4f}')
self.meters["data_time"] = SmoothedValue(fmt='{avg:.4f}')
def update(self, **kwargs):
for k, v in kwargs.items():
if isinstance(v, torch.Tensor):
v = v.item()
assert isinstance(v, (float, int))
self.meters[k].update(v)
def __getattr__(self, attr):
if attr in self.meters:
return self.meters[attr]
if attr in self.__dict__:
return self.__dict__[attr]
raise AttributeError("'{}' object has no attribute '{}'".format(
type(self).__name__, attr))
def __str__(self):
loss_str = []
for name, meter in self.meters.items():
loss_str.append("{}: {}".format(name, str(meter)))
return self.delimiter.join(loss_str)
def synchronize_between_processes(self):
for meter in self.meters.values():
meter.synchronize_between_processes()
def add_meter(self, name, meter):
self.meters[name] = meter
def reset_meters(self):
        self.logger("Logging: resetting all meters")
        for name, meter in self.meters.items():
            meter.reset()
        # %-style args are forwarded to logging.info / logger.info
        self.logger(
            "Logging: resetting all meters done, updating epoch to %d",
            self.epoch + 1)
self.epoch += 1
def log_every(self, iterable, print_freq, header=None):
i = 0
if not header:
header = ''
start_time = time.time()
end = time.time()
space_fmt = ':' + str(len(str(len(iterable)))) + 'd'
if torch.cuda.is_available():
log_msg = self.delimiter.join([
header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}',
'time: {time}', 'data: {data}', 'max mem: {memory:.0f}'
])
else:
log_msg = self.delimiter.join([
header, '[{0' + space_fmt + '}/{1}]', 'eta: {eta}', '{meters}',
'time: {time}', 'data: {data}'
])
MB = 1024.0 * 1024.0
for obj in iterable:
self.meters["data_time"].update(time.time() - end)
yield obj
self.meters["iter_time"].update(time.time() - end)
if i % print_freq == 0:
self._write_meters()
eta_seconds = self.meters["iter_time"].global_avg * (
len(iterable) - i)
eta_string = str(datetime.timedelta(seconds=int(eta_seconds)))
if torch.cuda.is_available():
self.logger(
log_msg.format(
i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(self.meters["iter_time"]),
data=str(self.meters["data_time"]),
memory=torch.cuda.max_memory_allocated() / MB))
else:
self.logger(
log_msg.format(i,
len(iterable),
eta=eta_string,
meters=str(self),
time=str(self.meters["iter_time"]),
data=str(self.meters["data_time"])))
i += 1
end = time.time()
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
self.logger('{} Total time: {}'.format(header, total_time_str))
self._write_epoch(total_time_str)
def _write_meters(self):
if self.writer is not None:
for name, meter in self.meters.items():
self.writer.add_scalar(
f"{self.tbd_header}iter/{self.metric_set}_{name}",
meter.avg, self.writer_step)
self.writer_step += 1
def _write_epoch(self, total_time_string):
if self.writer is not None:
for name, meter in self.meters.items():
self.writer.add_scalar(
f"{self.tbd_header}epoch/{self.metric_set}_{name}",
meter.avg, self.epoch)
self.writer.add_text(
f"{self.tbd_header}epoch/{self.metric_set}_totaltime",
total_time_string, self.epoch)
def setup_tbx(save_dir, SummaryWriter):
if not is_main_process():
return None
writer = SummaryWriter(save_dir)
return writer
def get_default_loggers(writer, epoch, logger):
stat_loggers = dict()
stat_loggers["train"] = MetricLogger(delimiter=" ",
writer=writer,
stat_set="train",
epoch=epoch,
logger=logger)
stat_loggers["train"].add_meter(
'lr', SmoothedValue(window_size=1, fmt='{value}'))
stat_loggers["train"].add_meter(
'clips/s', SmoothedValue(window_size=10, fmt='{value:.3f}'))
stat_loggers["val"] = MetricLogger(delimiter=" ",
writer=writer,
stat_set="val",
epoch=epoch,
logger=logger)
return stat_loggers
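# Illustrative wiring of these helpers (added sketch, not from the repo).
# SummaryWriter comes from torch.utils.tensorboard; the loop below stands in
# for a real training loop, and the metric values are dummies.
if __name__ == '__main__':
    from torch.utils.tensorboard import SummaryWriter
    writer = setup_tbx('./tb_logs', SummaryWriter)
    train_logger = get_default_loggers(writer, epoch=0, logger=None)['train']
    for _ in train_logger.log_every(range(100), print_freq=10,
                                    header='Epoch [0]'):
        train_logger.update(loss=0.5, lr=0.01)  # dummy metric values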
|
AVT-main
|
common/log.py
|
from .log import *
|
AVT-main
|
common/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from __future__ import print_function
from typing import List, Dict
import errno
import os
from pathlib import Path
import logging
import submitit
import cv2
import torch
import torch.distributed as dist
def accuracy(output, target, topk=(1, )):
"""Computes the accuracy over the k top predictions
for the specified values of k
Args:
output (*, K) predictions
target (*, ) targets
"""
if torch.all(target < 0):
return [
torch.zeros([], device=output.device) for _ in range(len(topk))
]
with torch.no_grad():
# flatten the initial dimensions, to deal with 3D+ input
output = output.flatten(0, -2)
target = target.flatten()
# Now compute the accuracy
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target[None])
res = []
for k in topk:
correct_k = correct[:k].flatten().sum(dtype=torch.float32)
res.append(correct_k * (100.0 / batch_size))
return res
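# Quick sanity check of `accuracy` (added illustration, not in the original
# file). With logits whose argmax always equals the target, both top-1 and
# top-2 accuracy come out to 100:
#   >>> accuracy(torch.eye(4) * 10, torch.arange(4), topk=(1, 2))
#   [tensor(100.), tensor(100.)]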
def mkdir(path):
try:
os.makedirs(path)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def setup_for_distributed(is_master, logger):
"""
This function disables printing when not in master process
"""
import builtins as __builtin__
builtin_print = __builtin__.print
def print(*args, **kwargs):
force = kwargs.pop('force', False)
if is_master or force:
builtin_print(*args, **kwargs)
__builtin__.print = print
if not is_master:
# Don't print anything except FATAL
logger.setLevel(logging.ERROR)
logging.basicConfig(level=logging.ERROR)
else:
logger.setLevel(logging.INFO)
logging.basicConfig(level=logging.INFO)
def is_dist_avail_and_initialized():
if not dist.is_available():
return False
if not dist.is_initialized():
return False
return True
def get_world_size():
if not is_dist_avail_and_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not is_dist_avail_and_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def save_on_master(*args, **kwargs):
if is_main_process():
torch.save(*args, **kwargs)
def init_distributed_mode(logger, dist_backend='nccl'):
dist_info = dict(
distributed=False,
rank=0,
world_size=1,
gpu=0,
dist_backend=dist_backend,
dist_url=get_init_file(None).as_uri(),
)
# If launched using submitit, get the job_env and set using those
try:
job_env = submitit.JobEnvironment()
except RuntimeError:
job_env = None
if job_env is not None:
dist_info['rank'] = job_env.global_rank
dist_info['world_size'] = job_env.num_tasks
dist_info['gpu'] = job_env.local_rank
if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
dist_info['rank'] = int(os.environ["RANK"])
dist_info['world_size'] = int(os.environ['WORLD_SIZE'])
dist_info['gpu'] = int(os.environ['LOCAL_RANK'])
elif 'SLURM_PROCID' in os.environ:
dist_info['rank'] = int(os.environ['SLURM_PROCID'])
dist_info['gpu'] = dist_info['rank'] % torch.cuda.device_count()
elif 'rank' in dist_info:
pass
else:
print('Not using distributed mode')
dist_info['distributed'] = False
return dist_info
dist_info['distributed'] = True
torch.cuda.set_device(dist_info['gpu'])
dist_info['dist_backend'] = dist_backend
print('| distributed init (rank {}): {}'.format(dist_info['rank'],
dist_info['dist_url']),
flush=True)
torch.distributed.init_process_group(backend=dist_info['dist_backend'],
init_method=dist_info['dist_url'],
world_size=dist_info['world_size'],
rank=dist_info['rank'])
setup_for_distributed(dist_info['rank'] == 0, logger)
return dist_info
def get_shared_folder(name) -> Path:
# Since using hydra, which figures the out folder
return Path('./').absolute()
def get_init_file(name):
    # Init file must not exist, but its parent dir must exist.
os.makedirs(str(get_shared_folder(name)), exist_ok=True)
init_file = get_shared_folder(name) / 'sync_file_init'
return init_file
def gather_tensors_from_all(tensor: torch.Tensor) -> List[torch.Tensor]:
"""
Wrapper over torch.distributed.all_gather for performing
'gather' of 'tensor' over all processes in both distributed /
non-distributed scenarios.
"""
if tensor.ndim == 0:
# 0 dim tensors cannot be gathered. so unsqueeze
tensor = tensor.unsqueeze(0)
if is_dist_avail_and_initialized():
gathered_tensors = [
torch.zeros_like(tensor)
for _ in range(torch.distributed.get_world_size())
]
torch.distributed.all_gather(gathered_tensors, tensor)
else:
gathered_tensors = [tensor]
return gathered_tensors
def gather_from_all(tensor: torch.Tensor) -> torch.Tensor:
gathered_tensors = gather_tensors_from_all(tensor)
gathered_tensor = torch.cat(gathered_tensors, 0)
return gathered_tensor
def get_video_info(video_path: Path, props: List[str]) -> Dict[str, float]:
"""
Given the video, return the properties asked for
"""
output = {}
cam = cv2.VideoCapture(str(video_path))
if 'fps' in props:
output['fps'] = cam.get(cv2.CAP_PROP_FPS)
if 'len' in props:
fps = cam.get(cv2.CAP_PROP_FPS)
if fps <= 0:
output['len'] = 0
else:
output['len'] = (cam.get(cv2.CAP_PROP_FRAME_COUNT) / fps)
cam.release()
return output
|
AVT-main
|
common/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
import torch.nn as nn
class KmeansAssigner(nn.Module):
def __init__(self, centroids_fpath, norm=False):
super().__init__()
# NxC dimension
# Not converting this to linear layer as then the weights get
# overwriten during random init, and these cluster centers are lost.
self.register_buffer('centroids',
torch.load(centroids_fpath)['weight'])
self.norm = norm
@property
def num_clusters(self):
return self.centroids.size(0)
@staticmethod
def feat2cluster(feats, centroids, norm):
"""
Compute index for the feats, w.r.t centroids.
Args:
feats *xC
centroids KxC
Returns:
assignments *
"""
feats_flat = feats.flatten(0, -2)
if norm:
feats_flat = nn.functional.normalize(feats_flat, dim=-1, p=2)
dists = torch.cdist(feats_flat.unsqueeze(0), centroids.unsqueeze(0))
assgns = torch.argmin(dists[0], dim=-1)
assgns = assgns.reshape(feats.shape[:-1])
return assgns
@staticmethod
def cluster2feat(idx, centroids):
"""
Get features for cluster ids
Args:
idx *
centroids KxC
Returns:
assignments *xC
"""
idx_flat = idx.reshape((-1, ))
feats = centroids[idx_flat, :]
return feats.reshape(list(idx.shape) + [feats.size(-1)])
def forward(self, inp):
"""
If inp is torch.float, then find the nearest assignments.
If torch.long, return the corresponding features.
"""
if inp.dtype == torch.long:
return self.cluster2feat(inp, self.centroids)
return self.feat2cluster(inp, self.centroids, self.norm)
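# Illustrative round trip (added sketch, not part of the original file). The
# constructor loads centroids from disk, so a fake centroid file is written
# first; the path and sizes below are placeholders.
if __name__ == '__main__':
    torch.save({'weight': torch.randn(8, 16)}, '/tmp/centroids.pth')
    assigner = KmeansAssigner('/tmp/centroids.pth', norm=True)
    feats = torch.randn(4, 10, 16)   # (*, C) float features
    ids = assigner(feats)            # float input -> cluster ids, shape (4, 10)
    recon = assigner(ids)            # long ids -> centroid features (4, 10, 16)
    print(ids.shape, recon.shape, assigner.num_clusters)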
|
AVT-main
|
common/cluster.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Sequence
import torch
from bisect import bisect_right
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer: torch.optim.Optimizer,
milestone_epochs: Sequence[int],
gamma: float = 0.1,
warmup_factor: float = 1.0 / 3,
warmup_epochs: int = 5,
warmup_method: str = 'linear',
last_epoch: int = -1,
iters_per_epoch: int = None, # Must be set by calling code
world_size: int = None,
):
del world_size
        if not milestone_epochs == sorted(milestone_epochs):
            raise ValueError(
                "Milestones should be a list of"
                " increasing integers. Got {}".format(milestone_epochs))
        if warmup_method not in ("constant", "linear"):
            raise ValueError(
                "Only 'constant' or 'linear' warmup_method accepted, "
                "got {}".format(warmup_method))
self.milestones = [iters_per_epoch * m for m in milestone_epochs]
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = max(warmup_epochs * iters_per_epoch, 1)
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr * warmup_factor *
self.gamma**bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
]
class CosineLR(torch.optim.lr_scheduler.CosineAnnealingLR):
def __init__(self,
optimizer,
num_epochs,
iters_per_epoch=None,
world_size=None,
**kwargs):
kwargs['eta_min'] *= world_size
super().__init__(optimizer,
T_max=num_epochs * iters_per_epoch,
**kwargs)
def get_lr(self, *args, **kwargs):
if self.last_epoch < self.T_max:
return super().get_lr(*args, **kwargs)
else:
# Adding this if I train the model longer than the T_max set in
# this. Happens when I sweep over different amounts of warmup.
return [0.0 for _ in self.optimizer.param_groups]
class ReduceLROnPlateau(torch.optim.lr_scheduler.ReduceLROnPlateau):
def __init__(self,
optimizer,
iters_per_epoch=None,
world_size=None,
**kwargs):
del iters_per_epoch, world_size
super().__init__(optimizer, **kwargs)
class Warmup(torch.optim.lr_scheduler._LRScheduler):
"""Wrap the scheduler for warmup before it kicks in."""
def __init__(
self,
optimizer: torch.optim.Optimizer,
scheduler: torch.optim.lr_scheduler._LRScheduler,
init_lr_ratio: float = 0.0,
num_epochs: int = 5,
last_epoch: int = -1,
iters_per_epoch: int = None, # Must be set by calling code
world_size: int = None,
):
"""
Args:
            init_lr_ratio (float in [0, 1]): Ratio of the original LR to start
                from. If 0.1, warmup starts at 0.1x the original LRs and goes
                up to 1.0x over the warmup epochs. By default it starts
                from 0.
num_epochs (int): Num of epochs to take to warmup.
last_epoch (int): Which was the last epoch to init from (not really
used anymore since we store the state_dict when loading
scheduler from disk.)
"""
del world_size
self.base_scheduler = scheduler
self.warmup_iters = max(num_epochs * iters_per_epoch, 1)
if self.warmup_iters > 1:
self.init_lr_ratio = init_lr_ratio
else:
self.init_lr_ratio = 1.0 # Don't go from 0 to 1 in 1 iteration
super().__init__(optimizer, last_epoch)
def get_lr(self):
# Epoch is iters for me, since I step after each iteration
# (not after each epoch)
# Based on logic in step, this should only be called for the warmup
# iters. After that it should go to the base scheduler
assert self.last_epoch < self.warmup_iters # since it increments
return [
el * (self.init_lr_ratio + (1 - self.init_lr_ratio) *
(float(self.last_epoch) / self.warmup_iters))
for el in self.base_lrs
]
def step(self, *args, **kwargs):
if self.last_epoch < (self.warmup_iters - 1):
super().step(*args, **kwargs)
else:
self.base_scheduler.step(*args, **kwargs)
def state_dict(self):
"""Returns the state of the scheduler as a :class:`dict`.
It contains an entry for every variable in self.__dict__ which
is not the optimizer.
"""
base_sched_dict = self.base_scheduler.state_dict()
other_stuff = {
key: value
for key, value in self.__dict__.items() if key not in [
'base_scheduler', 'optimizer']
}
return {'base_sched_dict': base_sched_dict, 'other_stuff': other_stuff}
def load_state_dict(self, state_dict):
"""Loads the schedulers state.
Arguments:
state_dict (dict): scheduler state. Should be an object returned
from a call to :meth:`state_dict`.
"""
self.base_scheduler.__dict__.update(state_dict['base_sched_dict'])
self.__dict__.update(state_dict['other_stuff'])
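# Minimal usage sketch (added illustration; in the repo these arguments come
# from the hydra config). Note the schedulers here are stepped once per
# *iteration*, not per epoch.
if __name__ == '__main__':
    opt = torch.optim.SGD(torch.nn.Linear(4, 4).parameters(), lr=0.1)
    base = CosineLR(opt, num_epochs=10, iters_per_epoch=100, world_size=1,
                    eta_min=0.0)
    sched = Warmup(opt, base, init_lr_ratio=0.1, num_epochs=1,
                   iters_per_epoch=100, world_size=1)
    for _ in range(200):
        opt.step()
        sched.step()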
|
AVT-main
|
common/scheduler.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
import math
import torch
from torch.utils.data import Sampler
import torch.distributed as dist
import torchvision.datasets.video_utils
class DistributedSampler(Sampler):
"""
Extension of DistributedSampler, as discussed in
https://github.com/pytorch/pytorch/issues/23430
"""
def __init__(self, dataset, num_replicas=None, rank=None, shuffle=False):
if num_replicas is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
num_replicas = dist.get_world_size()
if rank is None:
if not dist.is_available():
raise RuntimeError("Requires distributed package to be available")
rank = dist.get_rank()
self.dataset = dataset
self.num_replicas = num_replicas
self.rank = rank
self.epoch = 0
self.num_samples = int(math.ceil(len(self.dataset) * 1.0 / self.num_replicas))
self.total_size = self.num_samples * self.num_replicas
self.shuffle = shuffle
def __iter__(self):
# deterministically shuffle based on epoch
g = torch.Generator()
g.manual_seed(self.epoch)
if self.shuffle:
indices = torch.randperm(len(self.dataset), generator=g).tolist()
else:
indices = list(range(len(self.dataset)))
# add extra samples to make it evenly divisible
indices += indices[:(self.total_size - len(indices))]
assert len(indices) == self.total_size
# subsample
indices = indices[self.rank:self.total_size:self.num_replicas]
assert len(indices) == self.num_samples
if isinstance(self.dataset, Sampler):
orig_indices = list(iter(self.dataset))
indices = [orig_indices[i] for i in indices]
return iter(indices)
def __len__(self):
return self.num_samples
def set_epoch(self, epoch):
self.epoch = epoch
class UniformClipSampler(torch.utils.data.Sampler):
"""
    Samples at most `max_clips_per_video` clips for each video, equally spaced
Arguments:
video_clips (VideoClips): video clips to sample from
max_clips_per_video (int): maximum number of clips to be sampled per video
"""
def __init__(self, video_clips, max_clips_per_video):
if not isinstance(video_clips, torchvision.datasets.video_utils.VideoClips):
raise TypeError("Expected video_clips to be an instance of VideoClips, "
"got {}".format(type(video_clips)))
self.video_clips = video_clips
self.max_clips_per_video = max_clips_per_video
def __iter__(self):
idxs = []
s = 0
# select at most max_clips_per_video for each video, uniformly spaced
for c in self.video_clips.clips:
length = len(c)
step = max(length // self.max_clips_per_video, 1)
sampled = torch.arange(length)[::step] + s
s += length
idxs.append(sampled)
idxs = torch.cat(idxs).tolist()
return iter(idxs)
def __len__(self):
return sum(min(len(c), self.max_clips_per_video) for c in self.video_clips.clips)
class RandomClipSampler(torch.utils.data.Sampler):
"""
    Samples at most `max_clips_per_video` clips for each video randomly
Arguments:
video_clips (VideoClips): video clips to sample from
max_clips_per_video (int): maximum number of clips to be sampled per video
"""
def __init__(self, video_clips, max_clips_per_video):
if not isinstance(video_clips, torchvision.datasets.video_utils.VideoClips):
raise TypeError("Expected video_clips to be an instance of VideoClips, "
"got {}".format(type(video_clips)))
self.video_clips = video_clips
self.max_clips_per_video = max_clips_per_video
def __iter__(self):
idxs = []
s = 0
# select at most max_clips_per_video for each video, randomly
for c in self.video_clips.clips:
length = len(c)
size = min(length, self.max_clips_per_video)
sampled = torch.randperm(length)[:size] + s
s += length
idxs.append(sampled)
idxs = torch.cat(idxs)
# shuffle all clips randomly
perm = torch.randperm(len(idxs))
idxs = idxs[perm].tolist()
return iter(idxs)
def __len__(self):
return sum(min(len(c), self.max_clips_per_video) for c in self.video_clips.clips)
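# Illustrative note (added, not part of the original file): unlike the stock
# torch.utils.data.DistributedSampler, the variant above also accepts another
# Sampler as its "dataset" (see the isinstance check in __iter__), so the
# clip samplers can themselves be sharded across GPUs, e.g.:
#   clip_sampler = RandomClipSampler(video_clips, max_clips_per_video=10)
#   dist_sampler = DistributedSampler(clip_sampler, num_replicas=8, rank=0,
#                                     shuffle=True)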
|
AVT-main
|
common/sampler.py
|
AVT-main
|
external/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""Variants of MSE loss."""
import torch.nn as nn
class NormedMSE(nn.MSELoss):
def forward(self, inp, tgt, *args, **kwargs):
"""
Args:
inp: (*, C)
tgt: (*, C)
Will normalize the input before the loss
"""
inp = nn.functional.normalize(inp, dim=-1, p=2)
tgt = nn.functional.normalize(tgt, dim=-1, p=2)
return super().forward(inp, tgt, *args, **kwargs)
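# Illustrative note (added, not in the original file): since both inputs are
# L2-normalized first, the per-pair summed squared error equals
# 2 * (1 - cosine_similarity); the default 'mean' reduction then divides by
# the number of elements (B * C), e.g.:
#   loss = NormedMSE()(torch.randn(2, 8), torch.randn(2, 8))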
|
AVT-main
|
loss_fn/mse.py
|
AVT-main
|
loss_fn/__init__.py
|
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""Cross entropy loss, that works with multi-dim input."""
import torch
import torch.nn as nn
from common.cluster import KmeansAssigner
class MultiDimCrossEntropy(nn.CrossEntropyLoss):
def forward(self, inp, tgt, *args, **kwargs):
"""
Args:
inp: (*, C)
tgt: (*, )
        Will flatten the initial dimensions and then incur the loss
"""
assert inp.ndim == tgt.ndim + 1
assert inp.shape[:-1] == tgt.shape
res = super().forward(inp.reshape(-1, inp.size(-1)), tgt.reshape(
(-1, )), *args, **kwargs)
if torch.numel(res) == torch.numel(tgt):
# Reduction was not done, so reshape back to orig shape
res = res.reshape(tgt.shape)
return res
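# Shape illustration (added, not part of the original file). For per-frame
# logits of shape (B, T, C) and targets (B, T), the loss is computed over the
# flattened B * T rows:
#   loss = MultiDimCrossEntropy()(torch.randn(2, 5, 10),
#                                 torch.randint(10, (2, 5)))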
class QuantizeAndCrossEntropy(MultiDimCrossEntropy):
"""Given a set of cluster centers, project the features to that before
incurring the loss."""
def __init__(self, centroids_fpath, norm=True, *args, **kwargs):
super().__init__(*args, **kwargs)
self.assigner = KmeansAssigner(centroids_fpath)
self.norm = norm
def forward(self, inp, tgt):
"""
Args:
inp: (*, C)
tgt: (*, C)
        Will flatten the initial dimensions and then incur the loss
"""
# Normalize L2 both target and input, since that's how I'm computing
# centroids
if self.norm:
inp = nn.functional.normalize(inp, dim=-1, p=2)
tgt = nn.functional.normalize(tgt, dim=-1, p=2)
        # assign the GT and predictions to the centroids; the centroids live
        # on the KmeansAssigner buffer
        inp_proj = torch.mm(inp.flatten(0, 1),
                            self.assigner.centroids.t()).view(
                                inp.shape[:-1] +
                                self.assigner.centroids.shape[:1])
# the weights of project layer are the centroids, so pick from there
tgt_proj_q = self.assigner(tgt)
return super().forward(inp_proj, tgt_proj_q)
|
AVT-main
|
loss_fn/multidim_xentropy.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""The SimCLR InfoNCE loss."""
import torch
import torch.nn as nn
from common import utils
LARGE_NUM = 1e9
class MILCrossEntropyLoss(nn.Module):
def __init__(self, mil_type='sum', reduction='mean'):
super().__init__()
self.mil_type = mil_type
self.reduction = reduction
def forward(self, *args, **kwargs):
if self.mil_type == 'sum':
return self.forward_sum(*args, **kwargs)
elif self.mil_type == 'max':
return self.forward_max(*args, **kwargs)
else:
raise NotImplementedError(f'Unknown type {self.mil_type}')
def forward_sum(self, pred, labels_onehot):
"""
Args:
pred: BxC is the output
labels: BxC is 1s for positive, and 0s for negatives
Based on https://github.com/antoine77340/MIL-NCE_HowTo100M/blob/master/loss.py
Or the MIL-NCE paper Eq 1 (https://arxiv.org/pdf/1912.06430.pdf)
"""
assert pred.shape == labels_onehot.shape
# In the MILNCE code there is a sum, followed by logsumexp. I think
# using the labels to select the positive samples and then doing
# logsumexp will have the same effect.
pos_pred = pred[labels_onehot.bool()].reshape((pred.size(0), -1))
numerator = torch.logsumexp(pos_pred, dim=1)
        denominator = torch.logsumexp(pred, dim=1)
        loss = denominator - numerator
if self.reduction == 'mean':
loss = torch.mean(loss)
elif self.reduction == 'none':
pass
else:
raise NotImplementedError(f'Unknown reduction {self.reduction}')
return loss
def forward_max(self, pred, labels_onehot):
"""
Args:
pred: BxC is the output
labels: BxC is 1s for positive, and 0s for negatives
Based on Appendix A (https://arxiv.org/pdf/1912.06430.pdf)
"""
assert pred.shape == labels_onehot.shape
# Do max before, and then logsumexp. Works since exp is monotonic fn
# so the max with exp or without will be the same.
pos_pred = pred[labels_onehot.bool()].reshape((pred.size(0), -1))
pos_pred = torch.max(pos_pred, dim=1, keepdim=True)[0]
neg_pred = pred[~labels_onehot.bool()].reshape((pred.size(0), -1))
numerator = torch.logsumexp(pos_pred, dim=1)
        denominator = torch.logsumexp(torch.cat([pos_pred, neg_pred], dim=1),
                                      dim=1)
        return torch.mean(denominator - numerator)
class DistributedSimclrInfoNCELoss(nn.Module):
def __init__(self,
temperature: float = 0.1,
target_to_output_loss=True,
mil_type='sum',
reduction='mean'):
super().__init__()
self.temperature = temperature
self.criterion = MILCrossEntropyLoss(mil_type, reduction=reduction)
# This defines whether the reverse part of the loss, from target to
# the output features, is incurred.
self.target_to_output_loss = target_to_output_loss
def forward(self, output: torch.Tensor,
target: torch.Tensor) -> torch.Tensor:
"""
Args:
output: BxC
target: BxC or BxKxC <-- In case of MIL NCE, K is the number of
positives for each batch element.
Following https://github.com/google-research/simclr/blob/master/objective.py
"""
# Normalize first, before the gather -- so that all the features I get
# are normalized
output = nn.functional.normalize(output, dim=-1, p=2)
target = nn.functional.normalize(target, dim=-1, p=2)
# To be consistent with MIL-NCE input, convert K to batch dim,
# and repeat the output to same value for each repeated target
elt_for_back_loss = 0
if target.ndim == 3:
num_matching = target.size(1)
target_flat = target.reshape((-1, target.size(-1)))
# Keep the first one for the back loss
target = target[:, elt_for_back_loss]
else:
num_matching = 1
target_flat = target
# Gather all the outputs and all the targets
output_all = self.gather_embeddings(output)
target_flat_all = self.gather_embeddings(target_flat)
batch_size = output.size(0)
replica_id = utils.get_rank()
# -> (B, B_full * num_matching)
labels_onehot = torch.zeros((batch_size, output_all.size(0)),
dtype=output.dtype,
device=output.device)
extra_zeros = torch.zeros((batch_size, output_all.size(0)),
dtype=output.dtype,
device=output.device)
ones_diag = torch.eye(batch_size,
batch_size,
dtype=output.dtype,
device=output.device)
labels_onehot[:, replica_id * batch_size:(replica_id + 1) *
batch_size] = ones_diag
labels_onehot_interleaved = labels_onehot.repeat_interleave(
num_matching, dim=1)
# (B, C) * (B_full, C) -> (B, B_full)
logits_aa = torch.mm(output, output_all.t() / self.temperature)
# (B, C) * (B_full * num_matching, C) -> (B, B_full * num_matching)
logits_ab = torch.mm(output, target_flat_all.t() / self.temperature)
logits_aa = logits_aa - labels_onehot * LARGE_NUM
loss = self.criterion(
torch.cat([logits_ab, logits_aa], 1),
torch.cat([labels_onehot_interleaved, extra_zeros], 1))
if self.target_to_output_loss:
# Keep only the first prediction, since that is what I will incur
# reverse loss with
target_all = target_flat_all[elt_for_back_loss::num_matching]
logits_bb = torch.mm(target, target_all.t() / self.temperature)
logits_bb = logits_bb - labels_onehot * LARGE_NUM
logits_ba = torch.mm(target, output_all.t() / self.temperature)
loss = loss + self.criterion(
torch.cat([logits_ba, logits_bb], 1),
torch.cat([labels_onehot, extra_zeros], 1))
return loss
def gather_embeddings(self, embedding: torch.Tensor) -> torch.Tensor:
"""
Do a gather over all embeddings, so we can compute the loss.
Final shape is like: (batch_size * num_gpus) x embedding_dim
"""
if torch.distributed.is_available(
) and torch.distributed.is_initialized():
# gather all embeddings.
embedding_gathered = utils.gather_from_all(embedding)
else:
embedding_gathered = embedding
return embedding_gathered
class MultiDimDistributedSimclrInfoNCELoss(DistributedSimclrInfoNCELoss):
"""
Fold in the initial dimensions and run simple NCE.
"""
def forward(self, output: torch.Tensor, target: torch.Tensor, *args,
**kwargs) -> torch.Tensor:
return super().forward(output.flatten(0, -2), target.flatten(0, -2),
*args, **kwargs)
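# Single-process smoke test (added sketch, not part of the original file).
# Without torch.distributed initialized, gather_embeddings is a no-op and the
# loss reduces to plain SimCLR InfoNCE over the local batch.
if __name__ == '__main__':
    loss_fn = DistributedSimclrInfoNCELoss(temperature=0.1)
    print(loss_fn(torch.randn(4, 128), torch.randn(4, 128)))
    # MIL-NCE style target: 3 positives per batch element, shape (B, K, C)
    print(loss_fn(torch.randn(4, 128), torch.randn(4, 3, 128)))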
|
AVT-main
|
loss_fn/simclr_infonce.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""Utils for notebook."""
import sys
import os
import os.path as osp
import glob
from collections import OrderedDict
from collections.abc import Iterable
import json
import subprocess
import pickle as pkl
import logging
import h5py
import math
import operator
import pathlib
import pandas as pd
import moviepy.editor as mpy
from tqdm import tqdm
import proglog
import numpy as np
from scipy.special import softmax
import torch
# from omegaconf import OmegaConf
import hydra
from hydra.experimental import initialize as hydra_initialize, compose as hydra_compose
import matplotlib
from matplotlib import pylab
import matplotlib.pyplot as plt
from matplotlib import rcParams
import seaborn as sns
from tqdm.notebook import tqdm
sys.path.append('..')
from external.rulstm.RULSTM.utils import topk_recall
from launch import subselect_dict_keys_diff
from datasets import epic_kitchens
CODE_DIR = str(pathlib.Path(__file__).parent.resolve() / '../')
OUTPUT_DIR = f'{CODE_DIR}/OUTPUTS/'
RESULTS_SAVE_DIR_PREFIX = 'results' # This is the prefix, can have multiple, if >1 eval datasets
DATASET_EVAL_CFG_KEY = 'dataset_eval'
DATASET_EVAL_CFG_KEY_SUFFIX = ''
proglog.notebook() # so moviepy uses notebook tqdm
SQRT2 = math.sqrt(2)
sns.set_style("whitegrid")
rcParams['mathtext.fontset'] = 'custom'
rcParams['mathtext.rm'] = 'Bitstream Vera Sans'
rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic'
rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold'
rcParams['mathtext.fontset'] = 'stix'
rcParams['font.family'] = 'STIXGeneral'
matplotlib.rc('axes', edgecolor='k')
matplotlib.rc('font', size=30)
def save_graph(fig, outfpath, root_dir='./', **kwargs):
# Any postprocessing of the graphs
sns.despine(top=True, right=True, left=False, bottom=False)
# Save code
    final_outfpath = os.path.join(root_dir, outfpath)
    os.makedirs(osp.dirname(final_outfpath), exist_ok=True)
    fig.savefig(final_outfpath,
bbox_inches='tight',
transparent=True,
pad_inches=0,
**kwargs)
def allkeys(obj):
    """Recursively find all leaf keys in an h5 file/group."""
    keys = []
for key in obj.keys():
if isinstance(obj[key], h5py.Group):
keys += [f'{key}/{el}' for el in allkeys(obj[key])]
else:
keys.append(key)
return keys
class EmptyResdirError(ValueError):
pass
def gen_load_resfiles(resdir):
resfiles = glob.glob(osp.join(resdir, '*.pth'))
if len(resfiles) == 0:
resfiles = glob.glob(osp.join(resdir, '*.h5'))
if len(resfiles) == 0:
        raise EmptyResdirError(f"Didn't find any resfiles in {resdir}")
for resfile in resfiles:
if resfile.endswith('.pth'):
output_dict = {
                key: val.numpy() if torch.is_tensor(val) else val
for key, val in torch.load(resfile).items()
}
else:
output_dict = {}
with h5py.File(resfile, 'r') as fin:
for key in allkeys(fin):
try:
output_dict[key] = fin[key][()]
except AttributeError as err:
# Happens for the string keys... need to figure what
# to do here
logging.warning('Unable to load %s (%s)', key, err)
yield output_dict
def read_results(conf_path, run_id=0, results_dir='results/'):
resdir = osp.join(OUTPUT_DIR, conf_path, str(run_id), results_dir)
data = next(gen_load_resfiles(resdir))
# TODO allow to read only certain keys, eg some times we only need logits
# which would be faster to read
res_per_layer = {
key: OrderedDict()
for key in data if key not in ['epoch']
}
if len(res_per_layer) == 0:
raise ValueError('No logits found in the output. Note that code was '
'changed Aug 26 2020 that renames "output" to '
'"logits" etc. So might need to rerun testing.')
logging.info('Reading from resfiles')
for data in gen_load_resfiles(resdir):
for i, idx in enumerate(data['idx']):
idx = int(idx)
for key in res_per_layer:
if idx not in res_per_layer[key]:
res_per_layer[key][idx] = []
res_per_layer[key][idx].append(data[key][i])
# Mean over all the multiple predictions per key
final_res = {}
for key in res_per_layer:
if len(res_per_layer[key]) == 0:
continue
max_idx = max(res_per_layer[key].keys())
key_output = np.zeros([
max_idx + 1,
] + list(res_per_layer[key][0][0].shape))
for idx in res_per_layer[key]:
key_output[idx] = np.mean(np.stack(res_per_layer[key][idx]),
axis=0)
final_res[key] = key_output
return final_res
def get_epoch_from_resdir(conf_path, run_id=0, results_dir='results/'):
resdir = osp.join(OUTPUT_DIR, conf_path, str(run_id), results_dir)
data = next(gen_load_resfiles(resdir))
if 'epoch' not in data:
return None
return np.min(data['epoch'])
def read_all_results(conf_path, run_id=0):
resdirs = glob.glob(
osp.join(OUTPUT_DIR, conf_path, str(run_id),
RESULTS_SAVE_DIR_PREFIX + '*'))
all_res = {}
for resdir in resdirs:
resdir_bname = osp.basename(resdir)
all_res[resdir_bname] = read_results(conf_path,
run_id,
results_dir=resdir_bname)
return all_res
def read_file_into_list(fpath):
"""Read cli from file into a string."""
# TODO: Ideally reuse this from the launch script
args_lst = []
with open(fpath, 'r') as fin:
for line in fin:
args = line.split('#')[0].strip()
if not args: # Empty
continue
args_lst.append(args)
# Importing this on the global scope does not work .. gives the
# super(cls, self).. error
# https://thomas-cokelaer.info/blog/2011/09/382/
# Probably some issue with auto package reload in notebooks for py2.7
# packages..
from hydra._internal.core_plugins.basic_sweeper import BasicSweeper
from hydra.core.override_parser.overrides_parser import OverridesParser
sweeper = BasicSweeper(max_batch_size=None)
parser = OverridesParser.create()
overrides = parser.parse_overrides(args_lst)
run_args = sweeper.split_arguments(overrides, max_batch_size=None)[0]
return run_args
def get_config(cfg_fpath, run_id=0):
# outdir = osp.join(OUTPUT_DIR, cfg_fpath, str(run_id))
overrides_all = read_file_into_list('../' + cfg_fpath)
# https://github.com/facebookresearch/hydra/issues/716 should fix the issue
# with interpolation not working in notebook etc.
# However it can't handle ":" style custom interpolation, so need to
# override those.
cfg_all = []
for overrides in overrides_all:
overrides.append('cwd="../"')
with hydra_initialize(config_path='../conf'):
cfg = hydra_compose(config_name='config.yaml',
return_hydra_config=True,
overrides=overrides)
cfg_all.append(cfg)
if run_id is None:
return cfg_all
else:
return cfg_all[run_id]
def get_dataset(cfg_fpath,
run_id=0,
dataset_cfg_key=DATASET_EVAL_CFG_KEY,
dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX):
cfg = get_config(cfg_fpath, run_id)
sys.path.append('../')
dataset = hydra.utils.instantiate(getattr(
cfg, dataset_cfg_key + dataset_key_suffix),
frames_per_clip=1,
_recursive_=False)
return dataset
def overlay_text(clip, texts):
"""
Args:
clip: Moviepy clip
texts: List of 2 strings (corr to GT and pred) to overlay onto the clip
"""
bg_color = 'white' if texts[0] == texts[1] else 'pink'
texts[0] = 'GT: ' + texts[0]
texts[1] = 'Pred: ' + texts[1]
textclip = (mpy.TextClip(str(texts), bg_color=bg_color).set_duration(
clip.duration).set_pos(("right", "top")))
return mpy.CompositeVideoClip([clip, textclip])
def compute_topk(predictions, labels, k, classes=None):
"""
Args:
predictions (N, K)
labels (N,)
classes: (C', ): Set of classes to compute over. By default, uses
all classes
"""
if classes is None:
classes = np.unique(labels)
# Subselect items that belong to the classes
# Converting to list since classses are at times dict_values and that
# doesn't directly convert to np.array
reqd_elts = np.isin(labels, list(classes))
predictions = predictions[reqd_elts]
labels = labels[reqd_elts]
top_predictions = np.argpartition(predictions, -k, axis=-1)[:, -k:]
ratio_solved = np.mean(
np.any(labels[:, np.newaxis] == top_predictions, axis=-1))
return ratio_solved * 100.0
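# Tiny worked example (added, not part of the original file). With
#   predictions = np.array([[0.1, 0.9], [0.8, 0.2]]) and labels = np.array([1, 1]),
# compute_topk(predictions, labels, k=1) returns 50.0: only the first row
# ranks the true class (1) at the top.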
def combine_verb_noun_preds(res_verb, res_noun):
"""
Args:
res_verb (matrix with NxC1 dims)
res_noun (matrix with NxC2 dims)
Returns:
res_action (matrix with Nx(C1 * C2) dims)
"""
num_elts = res_verb.shape[0]
# normalize the predictions using softmax
res_verb = softmax(res_verb, axis=-1)
res_noun = softmax(res_noun, axis=-1)
# Cross product to get the combined score
return np.einsum('ij,ik->ijk', res_verb, res_noun).reshape((num_elts, -1))
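# Illustrative note (added): for EPIC-Kitchens-100 this maps verb logits
# (N, 97) and noun logits (N, 300) to joint action scores of shape
# (N, 97 * 300), where entry [i, v * 300 + n] = p(verb=v) * p(noun=n) --
# consistent with the verb_class * num_nouns + noun_class indexing used
# elsewhere in this file.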
def compute_conf_mat(predictions, target):
def to_onehot(indices, num_classes):
onehot = torch.zeros(indices.shape[0],
num_classes,
*indices.shape[1:],
device=indices.device)
        # rgirdhar: When testing on the test set, there will be some data
        # points where we don't have the labels
        return onehot.scatter_(1, indices[indices >= 0].unsqueeze(1), 1)
num_classes = predictions.shape[1]
assert predictions.shape[0] == target.shape[0]
with torch.no_grad():
target_1hot = to_onehot(target, num_classes)
target_1hot_t = target_1hot.transpose(0, 1).float()
pred_idx = torch.argmax(predictions, dim=1)
pred_1hot = to_onehot(pred_idx.reshape(-1), num_classes)
pred_1hot = pred_1hot.float()
confusion_matrix = torch.matmul(target_1hot_t, pred_1hot)
return confusion_matrix
def mean_class_accuracy(conf_mat):
# Increase floating point precision similar to forecasting HOI
conf_mat = conf_mat.type(torch.float64)
cls_cnt = conf_mat.sum(dim=1) + 1e-15
cls_hit = conf_mat.diag()
cls_acc = (cls_hit / cls_cnt).mean().item()
return cls_acc
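# Illustrative note (added): mean_class_accuracy returns a fraction in [0, 1]
# (per-class recall averaged over classes); compute_accuracy below multiplies
# it by 100 to report a percentage.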
def compute_accuracy(predictions, labels, classes=None):
"""
Args:
predictions: (B, C) logits
labels: (B, )
classes: OrderedDict[name (str), cls_id (int)]
"""
# This can happen when computing tail class accuracies and it's not
# specified for the test set
if predictions.size == 0:
return [float('nan')] * 5
labels = labels.astype(np.int64)
if classes is not None:
classes_to_keep = list(classes.values())
else:
classes_to_keep = range(max(labels) + 1)
top_1 = compute_topk(predictions, labels, 1, classes=classes_to_keep)
top_5 = compute_topk(predictions, labels, 5, classes=classes_to_keep)
try:
ar_outputs = topk_recall(predictions,
labels,
k=5,
classes=classes_to_keep)
if isinstance(ar_outputs, tuple):
# This happens if RULSTM code is modified to return per-class AR
# values
ar5, ar5_per_cls = ar_outputs
ar5_per_cls = {k: v * 100.0 for k, v in ar5_per_cls.items()}
else:
ar5 = ar_outputs
ar5_per_cls = {c: float('nan') for c in classes_to_keep}
except ZeroDivisionError:
# This happens when it can't find any true classes, the code
# can't deal with that
ar5 = float('nan')
ar5_per_cls = {c: float('nan') for c in classes_to_keep}
# Compute a mean class accuracy (used in EGTEA) -- accuracy per class and
# then mean over the classes
conf_mat = compute_conf_mat(torch.from_numpy(predictions),
torch.from_numpy(labels))
# Make sure conf mat makes sense
top_1_confmat = 100.0 * (conf_mat.diag()[classes_to_keep].sum() /
conf_mat[classes_to_keep].sum())
if (not np.isnan(top_1) and not np.isnan(top_1_confmat)
and not np.isclose(top_1, top_1_confmat, atol=1.0)):
        # Using a large atol margin because the conf_mat computation happens
        # on GPUs and can be non-deterministic, so the two may not match
        # exactly.
# Save the outputs for analysis
with open('debug_acc.pkl', 'wb') as fout:
pkl.dump(predictions, fout)
pkl.dump(labels, fout)
pkl.dump(conf_mat, fout)
        raise ValueError(f"top1 ({top_1}) doesn't match what I get from "
f'conf_mat ({top_1_confmat}). This could happen '
f'if the model predicts all 0s for some data points '
f'and hence argmax is not defined and behaves '
f'differently in numpy and torch '
f'(https://github.com/pytorch/pytorch/issues/14147)')
top1_meancls = 100.0 * mean_class_accuracy(conf_mat)
return top_1, top_5, ar5 * 100, top1_meancls, ar5_per_cls
def print_accuracies_epic(metrics: dict, prefix: str = ''):
print(f"[{prefix}] Accuracies verb/noun/action: "
f"{metrics['vtop1']:.1f} {metrics['vtop5']:.1f} "
f"{metrics['ntop1']:.1f} {metrics['ntop5']:.1f} "
f"{metrics['atop1']:.1f} {metrics['atop5']:.1f} ")
print(f"[{prefix}] Mean class top-1 accuracies verb/noun/action: "
f"{metrics['vtop1_meancls']:.1f} "
f"{metrics['ntop1_meancls']:.1f} "
f"{metrics['atop1_meancls']:.1f} ")
print(f"[{prefix}] Recall@5 verb/noun/action: "
f"{metrics['vrec5']:.1f} {metrics['nrec5']:.1f} "
f"{metrics['arec5']:.1f} ")
print(f"[{prefix}] Recall@5 many shot verb/noun/action: "
f"{metrics['vrec5_ms']:.1f} {metrics['nrec5_ms']:.1f} "
f"{metrics['arec5_ms']:.1f} ")
if 'vrec5_tail' in metrics:
# assuming the others for tail/unseen will be in there too, since
# they are all computed at one place for ek100
print(f"[{prefix}] Recall@5 tail verb/noun/action: "
f"{metrics['vrec5_tail']:.1f} {metrics['nrec5_tail']:.1f} "
f"{metrics['arec5_tail']:.1f} ")
print(f"[{prefix}] Recall@5 unseen verb/noun/action: "
f"{metrics['vrec5_unseen']:.1f} {metrics['nrec5_unseen']:.1f} "
f"{metrics['arec5_unseen']:.1f} ")
def get_logits_from_results(results):
if 'logits' in results:
return results['logits']
# Newer version, as of Nov 3 2020
logits_keys = [key for key in results.keys() if key.startswith('logits/')]
if len(logits_keys) == 1:
return results[logits_keys[0]]
# Else, return all of them in a dict
return {key: results[key] for key in logits_keys}
def get_epic_action_accuracy(run_info_verb, run_info_noun):
# Compute action accuracies implicitly from verb and noun
# TODO also compute with many-shot classes for EPIC 55
res_verb = get_logits_from_results(read_results(*run_info_verb))
res_noun = get_logits_from_results(read_results(*run_info_noun))
dataset_verb = get_dataset(*run_info_verb)
vtop1, vtop5, vrec5, vtop1_meancls, vrec5_per_cls = compute_accuracy(
res_verb, dataset_verb.df['verb_class'].values)
dataset_noun = get_dataset(*run_info_noun)
ntop1, ntop5, nrec5, ntop1_meancls, nrec5_per_cls = compute_accuracy(
res_noun, dataset_noun.df['noun_class'].values)
assert (len(dataset_verb.df) == len(res_verb) == len(dataset_noun.df) ==
len(res_noun))
res_action = combine_verb_noun_preds(res_verb, res_noun)
true_action = (
dataset_verb.df['verb_class'].values * len(dataset_noun.classes) +
dataset_noun.df['noun_class'].values)
atop1, atop5, arec5, atop1_meancls, arec5_per_cls = compute_accuracy(
res_action, true_action)
print_accuracies_epic({
'vtop1': vtop1,
'vtop5': vtop5,
'vrec5': vrec5,
'vrec5_ms': float('nan'), # TODO
'vtop1_meancls': vtop1_meancls,
'vrec5_per_cls': vrec5_per_cls,
'ntop1': ntop1,
'ntop5': ntop5,
'nrec5': nrec5,
'nrec5_ms': float('nan'), # TODO
'ntop1_meancls': ntop1_meancls,
'nrec5_per_cls': nrec5_per_cls,
'atop1': atop1,
'atop5': atop5,
'arec5': arec5,
'arec5_ms': float('nan'), # TODO
'atop1_meancls': atop1_meancls,
'arec5_per_cls': arec5_per_cls,
})
def epic100_unseen_tail_eval(probs, dataset):
"""
probs: contains 3 elements: predictions for verb, noun and action
"""
# based on https://github.com/fpv-iplab/rulstm/blob/d44612e4c351ff668f149e2f9bc870f1e000f113/RULSTM/main.py#L379
unseen_participants_ids = pd.read_csv(osp.join(
dataset.rulstm_annotation_dir,
'validation_unseen_participants_ids.csv'),
names=['id'],
squeeze=True)
tail_verbs_ids = pd.read_csv(osp.join(dataset.rulstm_annotation_dir,
'validation_tail_verbs_ids.csv'),
names=['id'],
squeeze=True)
tail_nouns_ids = pd.read_csv(osp.join(dataset.rulstm_annotation_dir,
'validation_tail_nouns_ids.csv'),
names=['id'],
squeeze=True)
tail_actions_ids = pd.read_csv(osp.join(dataset.rulstm_annotation_dir,
'validation_tail_actions_ids.csv'),
names=['id'],
squeeze=True)
# Now based on https://github.com/fpv-iplab/rulstm/blob/d44612e4c351ff668f149e2f9bc870f1e000f113/RULSTM/main.py#L495
unseen_bool_idx = dataset.df.narration_id.isin(
unseen_participants_ids).values
tail_verbs_bool_idx = dataset.df.narration_id.isin(tail_verbs_ids).values
tail_nouns_bool_idx = dataset.df.narration_id.isin(tail_nouns_ids).values
tail_actions_bool_idx = dataset.df.narration_id.isin(
tail_actions_ids).values
# For tail
_, _, vrec5_tail, _, _ = compute_accuracy(
probs[0][tail_verbs_bool_idx],
dataset.df.verb_class.values[tail_verbs_bool_idx])
_, _, nrec5_tail, _, _ = compute_accuracy(
probs[1][tail_nouns_bool_idx],
dataset.df.noun_class.values[tail_nouns_bool_idx])
_, _, arec5_tail, _, _ = compute_accuracy(
probs[2][tail_actions_bool_idx],
dataset.df.action_class.values[tail_actions_bool_idx])
# for unseen
_, _, vrec5_unseen, _, _ = compute_accuracy(
probs[0][unseen_bool_idx],
dataset.df.verb_class.values[unseen_bool_idx])
_, _, nrec5_unseen, _, _ = compute_accuracy(
probs[1][unseen_bool_idx],
dataset.df.noun_class.values[unseen_bool_idx])
_, _, arec5_unseen, _, _ = compute_accuracy(
probs[2][unseen_bool_idx],
dataset.df.action_class.values[unseen_bool_idx])
return dict(
vrec5_tail=vrec5_tail,
nrec5_tail=nrec5_tail,
arec5_tail=arec5_tail,
vrec5_unseen=vrec5_unseen,
nrec5_unseen=nrec5_unseen,
arec5_unseen=arec5_unseen,
)
def compute_accuracies_epic(probs, dataset):
manyshot_classes = dataset.classes_manyshot
vtop1, vtop5, vrec5, vtop1_meancls, vrec5_per_cls = compute_accuracy(
probs[0], dataset.df.verb_class.values)
vrec5_ms, nrec5_ms, arec5_ms = float('nan'), float('nan'), float('nan')
if 'verb' in manyshot_classes:
_, _, vrec5_ms, _, _ = compute_accuracy(
probs[0],
dataset.df.verb_class.values,
classes=manyshot_classes['verb'])
ntop1, ntop5, nrec5, ntop1_meancls, nrec5_per_cls = compute_accuracy(
probs[1], dataset.df.noun_class.values)
if 'noun' in manyshot_classes:
_, _, nrec5_ms, _, _ = compute_accuracy(
probs[1],
dataset.df.noun_class.values,
classes=manyshot_classes['noun'])
atop1, atop5, arec5, atop1_meancls, arec5_per_cls = compute_accuracy(
probs[2], dataset.df.action_class.values)
if 'action' in manyshot_classes:
_, _, arec5_ms, _, _ = compute_accuracy(
probs[2],
dataset.df.action_class.values,
classes=manyshot_classes['action'])
res = {
'vtop1': vtop1,
'vtop5': vtop5,
'vrec5': vrec5,
'vrec5_ms': vrec5_ms,
'vtop1_meancls': vtop1_meancls,
'vrec5_per_cls': vrec5_per_cls,
'ntop1': ntop1,
'ntop5': ntop5,
'nrec5': nrec5,
'nrec5_ms': nrec5_ms,
'ntop1_meancls': ntop1_meancls,
'nrec5_per_cls': nrec5_per_cls,
'atop1': atop1,
'atop5': atop5,
'arec5': arec5,
'arec5_ms': arec5_ms,
'atop1_meancls': atop1_meancls,
'arec5_per_cls': arec5_per_cls,
}
if dataset.version == epic_kitchens.EPIC100_VERSION:
res.update(epic100_unseen_tail_eval(probs, dataset))
return res
def get_epic_marginalize_verb_noun(
run_info, dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX):
res_action = get_logits_from_results(
read_results(*run_info, results_dir=f'results{dataset_key_suffix}'))
dataset = get_dataset(*run_info, dataset_key_suffix=dataset_key_suffix)
if isinstance(res_action, dict):
print(f'Found logits outputs for verb noun as well [{run_info}]')
# It has multiple heads for verb/noun as well
res_verb = res_action['logits/verb']
res_noun = res_action['logits/noun']
res_action = res_action['logits/action']
else:
res_action_probs = softmax(res_action, axis=-1)
# Marginalize the other dimension, using the mapping matrices I store
# in the dataset obj
res_verb = np.matmul(
res_action_probs,
dataset.class_mappings[('verb', 'action')].numpy())
res_noun = np.matmul(
res_action_probs,
dataset.class_mappings[('noun', 'action')].numpy())
accuracies = compute_accuracies_epic([res_verb, res_noun, res_action],
dataset)
# Returning the actual scores for actions instead of the probs. Found
# better results with this, and Sener et al. ECCV'20 does the same.
scores = [res_verb, res_noun, res_action]
return accuracies, scores, dataset
def read_scores_from_pkl(pkl_fpath):
"""
This is to read the data as I dump in the ActionBanks code
"""
with open(pkl_fpath, 'rb') as fin:
scores = pkl.load(fin)
return [
scores['verb_scores'], scores['noun_scores'], scores['action_scores']
]
def load_json(fpath, verb_noun_to_action, nclasses):
"""
Args:
fpath: Path to the json
verb_noun_to_action: Dict from (verb_id, noun_id) to action_id
nclasses: A list of 3 elements, with the label space for verb/noun/act
Returns: a dict with
{uid1: score1, uid2: score2 ...}
"""
assert len(nclasses) == 3, 'One for verb/noun/action'
with open(fpath, 'r') as fin:
preds = json.load(fin)
# Res for verb/noun/action
all_res = []
for j, space in enumerate(['verb', 'noun', 'action']):
# Convert to a {uid: <scores>} format
res = {}
for key, val in preds['results'].items():
# Will be using 0 for all the scores not defined. Should be fine given
# top 100 should be enough for late fusion etc, metrics are like top-5
# anyway.
scores = np.zeros((nclasses[j], ))
for i, score in val[space].items():
if space == 'action':
# Since for actions the "key" is (verb, noun) tuple,
# need to convert it to an action index by
# verb_id * noun_count + noun_id
idx = tuple(int(el) for el in i.split(','))
idx = verb_noun_to_action[idx]
else:
idx = int(i)
scores[idx] = score
res[key] = scores
all_res.append(res)
return all_res
def _concat_with_uids(scores, dataset, uid_key):
# Make a dict with the IDs from the dataset
# There will be 3 elements in scores -- verb, noun, action
return [
dict(
zip([str(el)
for el in dataset.df[uid_key].values], scores_per_space))
for scores_per_space in scores
]
def _normalize_scores(scores, p):
"""This brings the scores between 0 to 1, and normalizes by """
res = []
for scores_per_space in scores:
res.append({
uid: val / (np.linalg.norm(val, ord=p, axis=-1) + 0.000001)
for uid, val in scores_per_space.items()
})
return res
def _get_avg_norm_scores(scores, p):
"""Remove the UID keys etc, and then compute."""
scores = np.array([val for _, val in scores.items()])
return np.mean(np.linalg.norm(scores, ord=p, axis=-1), axis=0)
def get_epic_marginalize_late_fuse(
run_infos,
weights=1.0,
dataset_key_suffix=DATASET_EVAL_CFG_KEY_SUFFIX,
uid_key='uid',
eventual_fname='seen.json',
normalize_before_combine=None):
"""
Args:
eventual_fname: This is used to read prepackaged outputs from result
files, and using the filename to know which file to look for
when a directory is passed in as run info.
normalize_before_combine: Set to non-None to normalize the features
by that p-norm, and then combine. So the weights would have to be
defined w.r.t normalized features.
"""
all_scores = []
all_datasets = []
for run_info_id, run_info in enumerate(run_infos):
if isinstance(run_info[0], dict):
            # These are likely pre-computed scores (e.g. from a nested
            # get_epic_marginalize.. call), so use the scores as is.
scores = run_info
elif os.path.isdir(run_info[0]):
assert len(all_datasets) > 0, (
                'Need at least 1 dataset to be read before reading from json '
'to figure the verb/noun -> action_id and '
'to figure the total number of classes to gen feat vectors')
scores = load_json(
os.path.join(run_info[0], eventual_fname),
all_datasets[-1].verb_noun_to_action,
[list(el.values())[0].shape[-1] for el in all_scores[-1]])
elif run_info[0].endswith('.pkl'):
# This is the input used to read predictions from the action_banks
# codebase, where I dump output into pkl and read here for late
# fusion.
scores = read_scores_from_pkl(run_info[0])
assert len(
all_datasets) > 0, 'At least one run_info must be passed in'
scores = _concat_with_uids(scores, all_datasets[-1], uid_key)
else:
accuracies, scores, dataset = get_epic_marginalize_verb_noun(
run_info, dataset_key_suffix=dataset_key_suffix)
scores = _concat_with_uids(scores, dataset, uid_key)
print_accuracies_epic(accuracies, prefix=run_info)
all_datasets.append(dataset)
if normalize_before_combine is not None:
scores = _normalize_scores(scores, p=normalize_before_combine)
logging.warning(
'Adding scores from run_info %d with avg action L1 norm of %f',
run_info_id, _get_avg_norm_scores(scores[-1], p=1))
all_scores.append(scores)
# Late fuse
if isinstance(weights, float):
weights = [weights] * len(run_infos)
else:
assert len(weights) == len(run_infos)
# broadcastable_weights = np.array(weights)[:, np.newaxis, np.newaxis]
# Combined scores by combining the corresponding score for each uid.
combined = []
for space_id in range(3): # verb/noun/action
scores_for_space = [scores[space_id] for scores in all_scores]
# Take the union of all the UIDs we have score for
total_uids = set.union(*[set(el.keys()) for el in scores_for_space])
logging.warning('Combined UIDs: %d. UIDs in the runs %s',
len(total_uids),
[len(el.keys()) for el in scores_for_space])
combined_for_space = {}
for uid in total_uids:
combined_for_space[uid] = []
for run_id, scores_for_space_per_run in enumerate(
scores_for_space):
if uid in scores_for_space_per_run:
combined_for_space[uid].append(
scores_for_space_per_run[uid] * weights[run_id])
combined_for_space[uid] = np.sum(np.stack(combined_for_space[uid]),
axis=0)
combined.append(combined_for_space)
# Now to compute accuracies, need to convert back to np arrays from dict.
# Would only work for parts that are in the dataset
combined_np = []
for combined_for_space in combined:
combined_np.append(
np.array([
combined_for_space[str(uid)]
for uid in all_datasets[-1].df[uid_key].values
]))
accuracies = compute_accuracies_epic(combined_np, all_datasets[-1])
return accuracies, combined, all_datasets[-1]
def summarize_results(cfg_name, metric='arec5'):
"""
Read all runs corr to cfg_name, and show the results in a human readable
form with the config overrides (unique) that were active. It averages
over runs too.
"""
run_cfgs = read_file_into_list('../' + cfg_name)
run_cfgs_hydra = get_config(cfg_name, run_id=None)
# Convert to dicts
run_cfgs = [(i, dict([el.split('=') for el in conf]))
for i, conf in enumerate(run_cfgs)]
# Keep only the stuff that changes across them
run_cfgs = subselect_dict_keys_diff(run_cfgs)
all_res = {}
for (run_id, params), cfg_hydra in tqdm(zip(run_cfgs, run_cfgs_hydra),
total=len(run_cfgs),
desc='Loading results'):
try:
accuracies, _, _ = get_epic_marginalize_verb_noun(
(cfg_name, run_id))
epoch = get_epoch_from_resdir(cfg_name, run_id)
except (EmptyResdirError, OSError): # H5 didn't let it read
continue
if epoch != cfg_hydra.train.num_epochs:
# This training has not finished
continue
run_id = 0
if 'run_id' in params:
run_id = int(params['run_id'])
del params['run_id']
params_hash = tuple(sorted(list(params.items())))
if params_hash not in all_res:
all_res[params_hash] = {}
all_res[params_hash][run_id] = accuracies[metric]
for params_hash in all_res:
run_ids, values = zip(*all_res[params_hash].items())
print(f'{params_hash} [{run_ids}]: [{values}] '
f'mean: {np.mean(values)}, std: {np.std(values)}')
def plot_per_cls_perf(run_infos_all: list,
names: list,
metrics: list = ['vrec5_per_cls', 'nrec5_per_cls'],
cls_types: list = ['verb', 'noun'],
show_topn: int = 10,
xticks_rotation: float = 0,
show_subset: callable = None,
outfpath: str = 'figs/improved/'):
"""
Args:
run_infos_all: [[(cfg, sweep_id), (cfg, sweep_id)...],
[(cfg, sweep_id), (cfg, sweep_id)...], ...]
names: The name for each run_info group
metrics: There will be 1 graph for each
"""
assert len(run_infos_all) == len(names)
assert len(metrics) == len(cls_types)
final_accs = {cls_type: [] for cls_type in cls_types}
for i, run_infos in enumerate(tqdm(run_infos_all, desc='Reading acc')):
for run_id, run_info in enumerate(run_infos):
cfg_fpath, sweep_id = run_info
all_accuracies, _, dataset = get_epic_marginalize_verb_noun(
(cfg_fpath, sweep_id))
for metric, cls_type in zip(metrics, cls_types):
accuracies = all_accuracies[metric]
assert isinstance(accuracies,
dict), 'Supports per-class for now'
classes = operator.attrgetter(f'{cls_type}_classes')(dataset)
cls_id_to_name = {v: k for k, v in classes.items()}
for cls_id, score in accuracies.items():
final_accs[cls_type].append({
'method':
names[i],
'run_id':
run_id,
'cls_name':
cls_id_to_name[cls_id],
'accuracy':
score,
})
for cls_type in final_accs:
accs = pd.DataFrame(final_accs[cls_type])
# Print logs
for method in names:
for run_id in accs.run_id.unique():
                this_acc = (accs[(accs.method == method)
                                 & (accs.run_id == run_id)].accuracy.mean())
print(f'Check {method} {run_id}: {this_acc}')
mean_acc_by_cls = accs.groupby(['method',
'cls_name']).mean().reset_index()
first_col = mean_acc_by_cls[mean_acc_by_cls.method == names[0]]
last_col = mean_acc_by_cls[mean_acc_by_cls.method == names[-1]]
merged = first_col[['cls_name', 'accuracy'
]].merge(last_col[['cls_name', 'accuracy']],
on='cls_name',
how='outer',
suffixes=['_first', '_last'])
# get the largest gains
gains = (merged['accuracy_last'] -
merged['accuracy_first']).sort_values()
gained_labels = merged.loc[gains.index].cls_name.tolist()
if show_subset is not None:
gained_labels = [el for el in gained_labels if show_subset(el)]
gained_labels = gained_labels[-show_topn:]
accs_largegains = accs[accs.cls_name.isin(gained_labels)]
fig = plt.figure(num=None,
figsize=(2 * len(gained_labels), 4),
dpi=300)
ax = sns.barplot(x='cls_name',
y='accuracy',
hue='method',
data=accs_largegains,
order=gained_labels,
errwidth=1.0)
ax.set_xlabel('Classes')
ax.set_ylabel('Recall @ 5')
ax.set_xticklabels(ax.get_xticklabels(),
rotation=xticks_rotation,
ha='center')
plt.show()
save_graph(fig, os.path.join(outfpath, cls_type + '.pdf'))
def get_struct_outputs_per_dataset(run_infos,
weights,
dataset_key_suffix,
uid_key='uid',
eventual_fname='seen.json',
normalize_before_combine=None):
_, combined, dataset = get_epic_marginalize_late_fuse(
run_infos,
weights,
dataset_key_suffix=dataset_key_suffix,
uid_key=uid_key,
eventual_fname=eventual_fname,
normalize_before_combine=normalize_before_combine)
results = {}
    # The following may not hold: if a run_info points to an actual json,
    # it may contain more rows than the dataset.
# assert len(combined[0]) == len(dataset)
action_to_verb_noun = {
val: key
for key, val in dataset.verb_noun_to_action.items()
}
for uid in tqdm(combined[0].keys(), desc='Computing res'):
verb_res = {f'{j}': val for j, val in enumerate(combined[0][uid])}
noun_res = {f'{j}': val for j, val in enumerate(combined[1][uid])}
top_100_actions = sorted(np.argpartition(combined[2][uid],
-100)[-100:],
key=lambda x: -combined[2][uid][x])
action_res = {
','.join((str(el)
for el in action_to_verb_noun[j])): combined[2][uid][j]
for j in top_100_actions
}
results[f'{uid}'] = {
'verb': verb_res,
'noun': noun_res,
'action': action_res,
}
# Add in all the discarded dfs with uniform distribution
if dataset.discarded_df is not None:
for _, row in dataset.discarded_df.iterrows():
if str(row[uid_key]) in results:
continue
results[f'{row[uid_key]}'] = {
'verb':
{f'{j}': 0.0
for j in range(len(dataset.verb_classes))},
'noun':
{f'{j}': 0.0
for j in range(len(dataset.noun_classes))},
'action': {f'0,{j}': 0.0
for j in range(100)},
}
output_dict = {
'version': f'{dataset.version}',
'challenge': dataset.challenge_type,
'results': results
}
return output_dict
def package_results_for_submission(run_infos,
weights,
normalize_before_combine=None):
res_s1 = get_struct_outputs_per_dataset(
run_infos,
weights,
dataset_key_suffix='',
eventual_fname='seen.json',
normalize_before_combine=normalize_before_combine)
res_s2 = get_struct_outputs_per_dataset(
run_infos,
weights,
dataset_key_suffix='_s2',
eventual_fname='unseen.json',
normalize_before_combine=normalize_before_combine)
# write it out in the first run's output dir
output_dir = osp.join(OUTPUT_DIR, run_infos[0][0], str(run_infos[0][1]),
'challenge')
print(f'Saving outputs to {output_dir}')
os.makedirs(output_dir, exist_ok=True)
with open(osp.join(output_dir, 'seen.json'), 'w') as fout:
json.dump(res_s1, fout, indent=4)
with open(osp.join(output_dir, 'unseen.json'), 'w') as fout:
json.dump(res_s2, fout, indent=4)
subprocess.check_output(
f'zip -j {output_dir}/submit.zip '
f'{output_dir}/seen.json '
f'{output_dir}/unseen.json ',
shell=True)
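# Illustrative usage (hypothetical paths and sweep ids): late-fuse two runs
# with 2:1 weights and write out the seen/unseen submission zip.
# package_results_for_submission(
#     run_infos=[('expts/01_rulstm.txt', 0), ('expts/02_avt.txt', 3)],
#     weights=[2.0, 1.0])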
def package_results_for_submission_ek100(run_infos, weights, sls=[1, 4, 4]):
res = get_struct_outputs_per_dataset(run_infos,
weights,
dataset_key_suffix='',
uid_key='narration_id',
eventual_fname='test.json')
res['sls_pt'] = sls[0]
res['sls_tl'] = sls[1]
res['sls_td'] = sls[2]
# write it out in the first run's output dir
output_dir = osp.join(OUTPUT_DIR, run_infos[0][0], str(run_infos[0][1]),
'challenge')
print(f'Saving outputs to {output_dir}')
os.makedirs(output_dir, exist_ok=True)
with open(osp.join(output_dir, 'test.json'), 'w') as fout:
json.dump(res, fout, indent=4)
subprocess.check_output(
f'zip -j {output_dir}/submit.zip '
f'{output_dir}/test.json ',
shell=True)
|
AVT-main
|
notebooks/utils.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Modular implementation of the basic train ops
"""
from typing import Dict, Union, Tuple
import torch
import torch.nn as nn
import hydra
from hydra.types import TargetConf
from common import utils
from datasets.base_video_dataset import FUTURE_PREFIX
from models.base_model import PAST_LOGITS_PREFIX
from loss_fn.multidim_xentropy import MultiDimCrossEntropy
class NoLossAccuracy(nn.Module):
def __init__(self, *args, **kwargs):
super().__init__()
def forward(self, *args, **kwargs):
return {}, {}
class BasicLossAccuracy(nn.Module):
def __init__(self, dataset, device, balance_classes=False):
super().__init__()
kwargs = {'ignore_index': -1}
if balance_classes:
assert dataset.class_balanced_sampling is False, (
'Do not re-weight the losses, and do balanced sampling')
            weight = torch.zeros(len(dataset.classes),
                                 device=device,
                                 dtype=torch.float)
for cls_id, count in dataset.classes_counts.items():
weight[cls_id] = count
weight = weight / torch.sum(weight) # To get ratios for non -1 cls
weight = 1 / (weight + 0.00001)
kwargs['weight'] = weight
kwargs['reduction'] = 'none' # to get batch level output
self.cls_criterion = MultiDimCrossEntropy(**kwargs)
def forward(self, outputs, target, target_subclips):
"""
Args:
outputs['logits'] torch.Tensor (B, num_classes) or
(B, T, num_classes)
Latter in case of dense prediction
target: {type: (B) or (B, T')}; latter in case of dense prediction
target_subclips: {type: (B, #clips, T)}: The target for each input
frame
"""
losses = {}
accuracies = {}
for tgt_type, tgt_val in target.items():
logits = outputs[f'logits/{tgt_type}']
assert logits.ndim == tgt_val.ndim + 1
loss = self.cls_criterion(logits, tgt_val)
dataset_max_classes = logits.size(-1)
acc1, acc5 = utils.accuracy(logits,
tgt_val,
topk=(1, min(5, dataset_max_classes)))
# Don't use / in loss since I use the config to set weights, and
# can't use / there.
losses[f'cls_{tgt_type}'] = loss
accuracies[f'acc1/{tgt_type}'] = acc1
accuracies[f'acc5/{tgt_type}'] = acc5
# Incur past losses
past_logits_key = f'{PAST_LOGITS_PREFIX}logits/{tgt_type}'
# If this key exists, means we asked for classifier on the last
# layer, so the loss should be incurred.
if past_logits_key in outputs and target_subclips is not None:
past_logits = outputs[past_logits_key]
# Take mode over the frames to get the subclip level loss
past_target = torch.mode(target_subclips[tgt_type], -1)[0]
assert past_logits.shape[:-1] == past_target.shape, (
f'The subclips should be set such that the past logits '
f'and past targets match in shape. Currently they are '
f'{past_logits.shape} and {past_target.shape}')
losses[f'past_cls_{tgt_type}'] = self.cls_criterion(
past_logits, past_target)
# Else likely not using subclips, so no way to do this loss
return losses, accuracies
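# Shape sketch for the dense-prediction case above (illustrative only).
# MultiDimCrossEntropy is assumed to behave like an unreduced cross entropy
# over the last dim; plain F.cross_entropy on flattened dims mimics it here:
# import torch.nn.functional as F
# B, T, C = 4, 8, 97                       # batch, time steps, classes
# logits = torch.randn(B, T, C)            # outputs['logits/verb']
# tgt = torch.randint(0, C, (B, T))        # target['verb'], one label per step
# loss = F.cross_entropy(logits.reshape(-1, C), tgt.reshape(-1),
#                        reduction='none').reshape(B, T)  # (B, T) per-element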
class Basic:
def __init__(self,
model,
device,
dataset,
cls_loss_acc_fn: TargetConf,
reg_criterion: TargetConf = None):
super().__init__()
self.model = model
self.device = device
self.cls_loss_acc_fn = hydra.utils.instantiate(cls_loss_acc_fn,
dataset, device)
del reg_criterion # not used here
def _basic_preproc(self, data, train_mode):
if not isinstance(data, dict):
video, target = data
# Make a dict so that later code can use it
data = {}
data['video'] = video
data['target'] = target
data['idx'] = -torch.ones_like(target)
if train_mode:
self.model.train()
else:
self.model.eval()
return data
def __call__(
self,
data: Union[Dict[str, torch.Tensor], # If dict
Tuple[torch.Tensor, torch.Tensor]], # vid, target
train_mode: bool = True):
"""
Args:
data (dict): Dictionary of all the data from the data loader
"""
data = self._basic_preproc(data, train_mode)
video = data['video'].to(self.device, non_blocking=True)
target = {}
target_subclips = {}
for key in data['target'].keys():
target[key] = data['target'][key].to(self.device,
non_blocking=True)
outputs, aux_losses = self.model(video,
target_shape=next(
iter(target.values())).shape)
if 'target_subclips' in data:
for key in data['target_subclips'].keys():
target_subclips[key] = data['target_subclips'][key].to(
self.device, non_blocking=True)
else:
target_subclips = None
losses, accuracies = self.cls_loss_acc_fn(outputs, target,
target_subclips)
losses.update(aux_losses)
return data, outputs, losses, accuracies
class PredFutureFeat(Basic):
def __init__(self,
*args,
reg_criterion: TargetConf = None,
future_target: str = 'temp_agg_projected',
incur_loss_style: str = 'separately',
combine_future_losses: TargetConf = {'_target_': 'torch.min'},
cumulative_future: bool = False,
**kwargs):
'''
Args:
incur_loss_style (str): Defines how to incur losses for multiple
futures. Could do 'separately', and then combine using
`combine_future_losses`. Or 'together', such as for MIL-NCE.
'''
super().__init__(*args, **kwargs)
self.reg_criterion = hydra.utils.instantiate(reg_criterion)
self.future_target = future_target
self.incur_loss_style = incur_loss_style
self.combine_future_losses = combine_future_losses
self.cumulative_future = cumulative_future
def __call__(
self,
data: Union[Dict[str, torch.Tensor], # If dict
Tuple[torch.Tensor, torch.Tensor]], # vid, target
train_mode: bool = True):
data = self._basic_preproc(data, train_mode)
video = data['video'].to(self.device, non_blocking=True)
target = {
key: val.to(self.device, non_blocking=True)
for key, val in data['target'].items()
}
batch_size = video.size(0)
if train_mode:
# At test time, I don't sample the extra future video, since
# that is only used during training
all_videos = [video]
nfutures = len(
[key for key in data.keys() if key.startswith(FUTURE_PREFIX)])
for i in range(nfutures):
future_vid = data[f'{FUTURE_PREFIX}_{i}_video'].to(
self.device, non_blocking=True)
all_videos.append(future_vid)
video = torch.cat(all_videos, dim=0) # Add to batch dim
outputs_full, aux_losses = self.model(video)
# Just the actual video for outputs
outputs = {key: val[:batch_size] for key, val in outputs_full.items()}
        # NOTE: Conditioning this on a non-zero cls loss weight makes some
        # layers not receive gradients and raises errors, so always incur
        # the loss; the gradient is 0 anyway when the weight is 0.
        losses, accuracies = self.cls_loss_acc_fn(outputs, target, None)
        losses.update(aux_losses)
if train_mode:
# Incur the regression losses, for each of the futures
reg_losses = []
if self.incur_loss_style == 'separately':
for i in range(nfutures):
future_feats = outputs_full[self.future_target][
(i + 1) * batch_size:(i + 2) * batch_size]
if self.cumulative_future:
future_feats = torch.cumsum(future_feats, 0)
# Divide by the position to get mean of features until then
                        future_feats = future_feats / (torch.arange(
                            1,
                            future_feats.size(0) + 1,
                            device=future_feats.device,
                            dtype=future_feats.dtype).unsqueeze(1))
loss = self.reg_criterion(outputs['future_projected'],
future_feats)
reg_losses.append(loss)
final_reg_loss = hydra.utils.call(self.combine_future_losses,
torch.stack(reg_losses))
elif self.incur_loss_style == 'together':
future_feats = outputs_full[self.future_target][batch_size:]
future_feats = future_feats.reshape(
(-1, batch_size, future_feats.size(-1))).transpose(0, 1)
final_reg_loss = self.reg_criterion(
outputs['future_projected'], future_feats)
else:
raise NotImplementedError(self.incur_loss_style)
losses['reg'] = final_reg_loss
return data, outputs, losses, accuracies
|
AVT-main
|
func/train_eval_ops.py
|
from . import train
|
AVT-main
|
func/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
"""Training code."""
from typing import Union, Sequence
import datetime
import os
import time
import sys
import logging
import itertools
import operator
import psutil
import h5py
import subprocess
from tqdm import tqdm
import numpy as np
# Need to import this here, as with pytorch 1.7.1 (or some other CLIP dep)
# it's giving a segmentation fault
# https://github.com/pytorch/pytorch/issues/30651
# Needs to be imported before torchvision, it seems
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.utils.data
from torch.utils.data.dataloader import default_collate
from torch import nn
import torchvision
import torchvision.datasets.video_utils
from torchvision.datasets.samplers import (DistributedSampler,
UniformClipSampler,
RandomClipSampler)
import torch.distributed as dist
import hydra
from omegaconf import OmegaConf
from models import base_model
from common import scheduler, utils, transforms as T
from common.log import MetricLogger, setup_tbx, get_default_loggers
from datasets.data import get_dataset
from notebooks import utils as nb_utils
__all__ = ['main', 'evaluate', 'train_one_epoch', 'initial_setup']
RESULTS_SAVE_DIR = 'results' # Don't put a "/" at the end, will add later
CKPT_FNAME = 'checkpoint.pth'
DATASET_TRAIN_CFG_KEY = 'dataset_train'
DATASET_EVAL_CFG_KEY = 'dataset_eval'
STR_UID_MAXLEN = 64 # Max length of the string UID stored in H5PY
def store_checkpoint(fpaths: Union[str, Sequence[str]], model, optimizer,
lr_scheduler, epoch):
"""
Args:
fpaths: List of paths or a single path, where to store.
model: the model to be stored
optimizer, lr_scheduler
epoch: How many epochs have elapsed when this model is being stored.
"""
model_without_ddp = model
if isinstance(model, nn.parallel.DistributedDataParallel):
model_without_ddp = model.module
checkpoint = {
"model": model_without_ddp.state_dict(),
"optimizer": optimizer.state_dict(),
"lr_scheduler": lr_scheduler.state_dict(),
"epoch": epoch,
}
if not isinstance(fpaths, list):
fpaths = [fpaths]
for fpath in fpaths:
logging.info('Storing ckpt at epoch %f to %s', epoch, fpath)
utils.save_on_master(checkpoint, fpath)
def _store_video_logs(data, key, step_id, print_large_freq, metric_logger):
"""
Args:
data[key] -> video (B, #clips, 3, T, H, W)
"""
if metric_logger.writer is None:
return
if step_id % print_large_freq != 0:
return
if key not in data:
return
video = data[key]
if video.ndim != 6:
return
## Store the videos
# Swap dims to get N*#clips,T,C,H,W format used by tensorboard
video = torch.flatten(video, 0, 1)
vid_log = torch.transpose(video, 1, 2)
vid_log = vid_log - vid_log.min()
vid_log = vid_log / vid_log.max()
kwargs = {}
if 'video_info' in data:
        # Can't specify different frame rates per video, so use the min (at least 4)
kwargs['fps'] = max(
data['video_info']['video_fps'].min().cpu().numpy().tolist(), 4)
metric_logger.writer.add_video(key, vid_log, step_id, **kwargs)
def _store_scalar_logs(name, val, step_id, print_freq, metric_logger):
if metric_logger.writer is None:
return
if step_id % print_freq != 0:
return
metric_logger.writer.add_scalar(name, val, step_id)
def _get_memory_usage_gb():
mem = psutil.virtual_memory()
return mem.used / (1024**3)
def _compute_final_acc_from_stored(results_dir, dataset):
results = nb_utils.read_results(os.getcwd(), '', results_dir)
accs = {}
for key in results.keys():
if not key.startswith('logits/'):
continue
base_key = key[len('logits/'):]
top1, top5, ar5, top1_meancls, _ = nb_utils.compute_accuracy(
results[key], results[f'target/{base_key}'])
_, _, ar5_ms, _, _ = nb_utils.compute_accuracy(
results[key], results[f'target/{base_key}'],
dataset.classes_manyshot[base_key])
accs[f'final_acc/{base_key}/top1'] = top1
accs[f'final_acc/{base_key}/top1_meanOverClasses'] = top1_meancls
accs[f'final_acc/{base_key}/top5'] = top5
accs[f'final_acc/{base_key}/AR5'] = ar5
accs[f'final_acc/{base_key}/AR5_manyshot'] = ar5_ms
return accs
def train_one_epoch(
train_eval_op,
optimizer,
lr_scheduler,
data_loader,
epoch: int,
partial_epoch: float,
metric_logger,
logger,
last_saved_time,
# kwargs:
print_freq,
print_large_freq,
grad_clip_params,
loss_wts, # All the loss wts go here
save_freq: float, # num epochs to save at. Could be fractional.
save_freq_min: float, # Save a checkpoint every this many minutes
save_intermediates: bool,
):
"""
Args:
epoch (int) defines how many full epochs have finished
partial_epoch (float): Defines the ratio of the last epoch that was
finished before the current model was written out
"""
header = 'Epoch: [{}]'.format(epoch)
batches_per_epoch = len(data_loader)
# Run the data loader for the partial epochs
partial_iters = int(batches_per_epoch * partial_epoch)
if partial_iters > 0:
        # TODO: Figure out a better way to do this ... too slow
for i, _ in tqdm(enumerate(data_loader),
desc=(f'Loading and throwing data for '
f'{partial_epoch:0.8f} epochs or '
f'{partial_iters} iters'),
total=partial_iters):
if i >= partial_iters:
break
if save_freq:
save_freq_steps = int(save_freq * batches_per_epoch)
logger.info('Storing checkpoints every %0.8f epochs, or '
'%d steps', save_freq, save_freq_steps)
if save_freq_min:
logger.info('Storing checkpoints every %0.2f mins', save_freq_min)
for i, data in enumerate(
metric_logger.log_every(data_loader, print_freq, header),
partial_iters):
step_id = epoch * batches_per_epoch + i
cur_epoch = step_id / batches_per_epoch # Fractional value
time_now = datetime.datetime.now()
mins_since_last_saved = (time_now -
last_saved_time).total_seconds() / 60.0
if (save_freq and step_id % save_freq_steps == 0) or (
save_freq_min and (mins_since_last_saved >= save_freq_min)):
# Not storing in the main checkpoint, keeping that only for the
# models at full epoch boundaries. So set save_intermediates true
# to save models at these points
ckpt_names = []
if save_intermediates:
ckpt_names.append(f'checkpoint_ep{cur_epoch:.8f}.pth')
store_checkpoint(ckpt_names, train_eval_op.model, optimizer,
lr_scheduler, cur_epoch)
last_saved_time = time_now
start_time = time.time()
data, _, losses, accuracies = train_eval_op(data, train_mode=True)
        # Reduce the losses here; they are kept unreduced by default so the
        # per-element outputs can be stored.
losses = {key: torch.mean(val) for key, val in losses.items()}
# Weight the losses
losses_wtd = []
for key, val in losses.items():
this_loss_wt = operator.attrgetter(key)(loss_wts)
# This will ensure only non 0 loss wts contribute, else otherwise
# the weight decay will still be associated with this loss.
if this_loss_wt > 0:
losses_wtd.append(this_loss_wt * val)
# Use the total loss to backprop etc
loss = torch.sum(torch.stack(losses_wtd))
if torch.isnan(loss):
raise ValueError('The loss is NaN!')
optimizer.zero_grad()
loss.backward()
# Clip the gradients if asked for
if grad_clip_params['max_norm'] is not None:
params_being_optimized = []
for param_group in optimizer.param_groups:
params_being_optimized += param_group['params']
            assert len(params_being_optimized) > 0, (
                "Shouldn't be training otherwise")
torch.nn.utils.clip_grad_norm_(params_being_optimized,
**grad_clip_params)
optimizer.step()
batch_size = data_loader.batch_size
metric_logger.update(loss=loss.item(),
lr=optimizer.param_groups[0]['lr'])
metric_logger.meters['clips/s'].update(batch_size /
(time.time() - start_time))
# Store logs in a sane way
for acc_key, acc_val in accuracies.items():
metric_logger.meters[acc_key].update(acc_val.item(), n=batch_size)
for loss_key, loss_val in losses.items():
_store_scalar_logs(f'train_per_iter/loss/{loss_key}', loss_val,
step_id, print_freq, metric_logger)
_store_scalar_logs('train_per_iter/loss', loss, step_id, print_freq,
metric_logger)
_store_scalar_logs('train_per_iter/lr',
optimizer.param_groups[0]['lr'], step_id,
print_freq, metric_logger)
_store_scalar_logs('train_per_iter/sys/cpu_mem_use_gb',
_get_memory_usage_gb(), step_id, print_freq,
metric_logger)
# Store video logs for all videos (future, current etc)
[
_store_video_logs(data, key, step_id, print_large_freq,
metric_logger) for key in data
if key.endswith('video')
]
if not isinstance(lr_scheduler.base_scheduler,
scheduler.ReduceLROnPlateau):
# If it is, then that is handled in the main training loop,
# since it uses the validation accuracy to step down
lr_scheduler.step()
return last_saved_time
def store_append_h5(endpoints, output_dir):
output_fpath = os.path.join(output_dir, f'{utils.get_rank()}.h5')
os.makedirs(output_dir, exist_ok=True)
with h5py.File(output_fpath, 'a') as fout:
for key, val in endpoints.items():
if key not in fout:
fout.create_dataset(key,
data=val,
compression='gzip',
compression_opts=9,
chunks=True,
maxshape=(None, ) + val.shape[1:])
else:
fout[key].resize((fout[key].shape[0] + val.shape[0], ) +
val.shape[1:])
fout[key][-val.shape[0]:, ...] = val
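# Illustrative usage (hypothetical keys and shapes): append one batch of
# results; a later call with the same keys grows each dataset along axis 0.
# store_append_h5({'logits/verb': np.zeros((8, 97), dtype=np.float32),
#                  'uid': np.array([b'clip_000'] * 8)}, RESULTS_SAVE_DIR)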
def _evaluate_store_logs(logger, metric_logger, acc_keys, store, this_save_dir,
data_key, data_loader, epoch, loss_names):
# gather the stats from all processes
metric_logger.synchronize_between_processes()
# gather all accuracies
final_accuracies = {}
# Using the loop variable name from earlier .. but ok for now to get
# the keys
for acc_key in acc_keys:
final_accuracies[acc_key] = metric_logger.meters[acc_key].global_avg
if store:
dist.barrier() # all the processes have written out the res
# Compute the AR@5: will have to read the stored outputs
final_accuracies.update(
_compute_final_acc_from_stored(this_save_dir, data_loader.dataset))
# store logs in a sane way
for acc_key, acc_val in final_accuracies.items():
_store_scalar_logs(f'eval_per_epoch{data_key}/{acc_key}', acc_val,
int(round(epoch)), 1, metric_logger)
for loss_name in loss_names:
_store_scalar_logs(f'eval_per_epoch{data_key}/loss_{loss_name}',
metric_logger.meters[loss_name].global_avg,
int(round(epoch)), 1, metric_logger)
logger.info('[{data_key}]'.format(data_key=data_key))
for key in metric_logger.meters:
logging.info('%s: %f', key, metric_logger.meters[key].global_avg)
return final_accuracies
def evaluate(
train_eval_op,
data_loaders: dict,
tb_writer,
logger,
epoch: float, # Can be a partial epoch
store=True,
store_endpoint='logits',
only_run_featext=False):
"""
Args:
data_loaders: A dict from key (name) to a data loader. Allows to
multiple dataloaders for testing on.
        only_run_featext (bool): If set, return right after the features are
            extracted, without computing final numbers etc. This avoids any
            process syncing, which can lead to crashes.
"""
all_metric_loggers = {}
final_accuracies = {}
for data_key, data_loader in data_loaders.items():
logger.info('Running evaluation for {0}{1}'.format(
DATASET_EVAL_CFG_KEY, data_key))
header = f'[{data_key}] Test:'
metric_logger = MetricLogger(delimiter=' ',
writer=tb_writer,
stat_set='val' + data_key,
logger=logger)
all_metric_loggers[data_key] = metric_logger
this_save_dir = RESULTS_SAVE_DIR + data_key + '/'
if not only_run_featext:
# Delete the stored output features files, since with H5 they
# might be getting appended and will blow up. Note that if
# feature extraction was the goal and we wanted to append,
# need to set in the config to not delete the old files so it
# can append to what has already been computed
logger.info('Clearing %s/%s/*', os.getcwd(), this_save_dir)
subprocess.call(f'rm -r {this_save_dir}/*', shell=True)
for data in metric_logger.log_every(data_loader, 2, header):
with torch.no_grad():
data, outputs, losses, accuracies = train_eval_op(
data, train_mode=False)
            # Reduce the losses here; they are kept unreduced by default so
            # the per-element outputs can be stored.
losses_reduced = {
key: torch.mean(val)
for key, val in losses.items()
}
loss = torch.sum(torch.stack(list(losses_reduced.values())))
if store:
# allow to store logits and logits_regression if that's in too
all_logits = {
key: outputs[key].detach().cpu().numpy()
for key in outputs if key.startswith(store_endpoint)
}
all_logits.update({'idx': data['idx'].detach().cpu().numpy()})
uid_data = np.array(data['uid'])
# If strings, convert format to work with HDF5
if uid_data.dtype.kind == 'U':
                    # So that it can store up to 64-char strings -- will be
                    # used by the hdf5 too
assert int(uid_data.dtype.str[2:]) < STR_UID_MAXLEN, (
f'Make sure UID data is smaller than '
f'{STR_UID_MAXLEN}, or update that value of '
f'STR_UID_MAXLEN')
uid_data = uid_data.astype(f'S{STR_UID_MAXLEN}')
all_logits.update({'uid': uid_data})
# Storing the actual per batch/elt unreduced losses for
# potential analysis
all_logits.update({
'loss/' + key: val.detach().cpu()
for key, val in losses.items()
})
if not only_run_featext:
# store the targets as well
all_logits.update({
'target/' + key: val.detach().cpu().numpy()
for key, val in data['target'].items()
})
# Do the actual storage into HDF5s that can append to the
# stuff from previous batch. Doing it here rather than
# collecting (as I used to do) so that this can be used
# for feature extraction where storing into a list will
# be too expensive
all_logits.update({'epoch': np.array([epoch])})
store_append_h5(all_logits, this_save_dir)
# FIXME need to take into account that the datasets
# could have been padded in distributed setup
batch_size = data_loader.batch_size
metric_logger.update(loss=loss.item())
for acc_key, acc_val in accuracies.items():
metric_logger.meters[acc_key].update(acc_val.item(),
n=batch_size)
for loss_name, loss_val in losses_reduced.items():
metric_logger.meters[loss_name].update(loss_val.item(),
n=batch_size)
if not only_run_featext:
final_accuracies[data_key] = _evaluate_store_logs(
logger, metric_logger, accuracies.keys(), store, this_save_dir,
data_key, data_loader, epoch, losses_reduced.keys())
if only_run_featext:
# None of the rest is needed
return 0.0
# Return the accuracy on the main evaluation dataset, which must be the
# one which doesn't have any prefix (i.e. in the dataset_eval)
# Returning the accuracy metric that is most relevant to the dataset.
main_dataset_key = ''
main_metric = final_accuracies[main_dataset_key][
data_loaders[main_dataset_key].dataset.primary_metric]
return main_metric
def initial_setup(cfg, logger):
torchvision.set_video_backend(cfg.pytorch.video_backend)
if cfg.data_parallel:
dist_info = {}
dist_info['distributed'] = False
dist_info['world_size'] = torch.cuda.device_count()
        # The config batch sizes are per process (as in DDP), so scale them
        # up by the number of GPUs for DataParallel
        cfg.train.batch_size *= dist_info['world_size']
        cfg.eval.batch_size *= dist_info['world_size']
else:
dist_info = utils.init_distributed_mode(logger,
dist_backend=cfg.dist_backend)
logger.info("Dist info:", dist_info)
logger.info("torch version: %s", torch.__version__)
logger.info("torchvision version: %s", torchvision.__version__)
logger.info("hydra version: %s", hydra.__version__)
device = torch.device('cuda')
torch.backends.cudnn.benchmark = True
writer = setup_tbx('logs/', SummaryWriter)
return dist_info, device, writer
def init_model(model, ckpt_path, modules_to_keep, logger):
"""Initialize model with weights from ckpt_path.
Args:
ckpt_path (str): A string with path to file
modules_to_keep (str): A comma sep string with the module name prefix
that should be loaded from the checkpoint
"""
logger.debug('Initing %s with ckpt path: %s, using modules in it %s',
model, ckpt_path, modules_to_keep)
checkpoint = torch.load(ckpt_path, map_location="cpu")
if 'model' in checkpoint.keys():
state_dict = checkpoint['model']
elif 'state_dict' in checkpoint.keys():
state_dict = checkpoint['state_dict']
elif 'classy_state_dict' in checkpoint.keys():
state_dict = checkpoint['classy_state_dict']
# This is likely coming from a VISSL codebase, so the actual trunk
# params will be as follows. Ideally support this more generally TODO
state_dict = state_dict['base_model']['model']['trunk']
else:
state_dict = checkpoint
if modules_to_keep:
# Keep only the elements of state_dict that match modules to keep.
# Also, remove that prefix from the names
filtered_state_dict = {}
for key, val in state_dict.items():
for mod_name in modules_to_keep.split(','):
if key.startswith(mod_name):
filtered_state_dict[key[len(mod_name):]] = val
                    break  # stop after the first matching prefix
state_dict = filtered_state_dict
# Ignore any parameters/buffers (bn mean/var) where shape does not match
for name, param in itertools.chain(model.named_parameters(),
model.named_buffers()):
if name in state_dict and state_dict[name].shape != param.shape:
logger.warning('Ckpt shape mismatch for %s (%s vs %s). Ignoring.',
name, state_dict[name].shape, param.shape)
del state_dict[name]
missing_keys, unexp_keys = model.load_state_dict(state_dict, strict=False)
logger.warning('Could not init from %s: %s', ckpt_path, missing_keys)
logger.warning('Unused keys in %s: %s', ckpt_path, unexp_keys)
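# Illustrative call (hypothetical checkpoint path and module name): initialize
# only the model backbone, keeping checkpoint entries that start with
# 'backbone.' and stripping that prefix before loading.
# init_model(model.backbone, 'pretrained/video_model.pth', 'backbone.', logger)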
def collate_fn_remove_audio(batch):
"""Remove audio from the batch.
Also remove any None(s) -- those were data points I wasn't able to read.
Not needed, and it doesn't batch properly since it is different length.
"""
batch = list(filter(lambda x: x is not None, batch))
if isinstance(batch[0], tuple):
batch = [(d[0], d[2]) for d in batch]
return default_collate(batch)
def _get_resize_shape(data_cfg):
scale_h = data_cfg.scale_h
scale_w = data_cfg.scale_w
if isinstance(scale_w, int) and scale_w == -1:
resize_shape = scale_h
else:
assert (not isinstance(scale_h, int) or scale_h != -1), (
'If using -1, must be used for scale_w. The smaller side will be '
'scaled by that size.')
resize_shape = (scale_h, scale_w)
return resize_shape
def _get_pixel_mean_std(data_cfg):
return {'mean': tuple(data_cfg.mean), 'std': tuple(data_cfg.std)}
def _set_all_bn_to_not_track_running_mean(model):
"""
Set all batch norm layers to not use running mean.
"""
for module in model.modules():
# This should be able to capture any BatchNorm1d, 2d, 3d etc.
if isinstance(module, nn.modules.batchnorm._BatchNorm):
module.track_running_stats = False
return model
def main(cfg):
logger = logging.getLogger(__name__)
dist_info, device, writer = initial_setup(cfg, logger)
# Data loading code
logger.info("Loading data")
logger.info("\t Loading datasets")
st = time.time()
# separate these into get transforms
# TODO: This is gotten too complex: clean up, make interface better
transform_train = [
T.ToTensorVideo(),
T.Resize(_get_resize_shape(cfg.data_train)),
T.RandomHorizontalFlipVideo(cfg.data_train.flip_p),
T.ColorJitterVideo(brightness=cfg.data_train.color_jitter_brightness,
contrast=cfg.data_train.color_jitter_contrast,
saturation=cfg.data_train.color_jitter_saturation,
hue=cfg.data_train.color_jitter_hue),
torchvision.transforms.Lambda(
lambda x: x * cfg.data_train.scale_pix_val),
torchvision.transforms.Lambda(lambda x: x[[2, 1, 0], ...])
if cfg.data_train.reverse_channels else torchvision.transforms.Compose(
[]),
T.NormalizeVideo(**_get_pixel_mean_std(cfg.data_train)),
]
if cfg.data_train.crop_size is not None:
transform_train.append(
T.RandomCropVideo(
(cfg.data_train.crop_size, cfg.data_train.crop_size)), )
transform_train = torchvision.transforms.Compose(transform_train)
transform_eval = [
T.ToTensorVideo(),
T.Resize(_get_resize_shape(cfg.data_eval)),
torchvision.transforms.Lambda(
lambda x: x * cfg.data_eval.scale_pix_val),
torchvision.transforms.Lambda(lambda x: x[[2, 1, 0], ...]) if
cfg.data_eval.reverse_channels else torchvision.transforms.Compose([]),
T.NormalizeVideo(**_get_pixel_mean_std(cfg.data_eval)),
]
if cfg.data_eval.crop_size is not None:
transform_eval.append(
T.MultiCropVideo(
(cfg.data_eval.crop_size, cfg.data_eval.crop_size),
cfg.data_eval.eval_num_crops, cfg.data_eval.eval_flip_crops))
transform_eval = torchvision.transforms.Compose(transform_eval)
datasets_train = [
get_dataset(getattr(cfg, el), cfg.data_train, transform_train, logger)
for el in cfg.keys() if el.startswith(DATASET_TRAIN_CFG_KEY)
]
if len(datasets_train) > 1:
dataset = torch.utils.data.ConcatDataset(datasets_train)
else:
dataset = datasets_train[0]
# could be multiple test datasets
datasets_test = {
el[len(DATASET_EVAL_CFG_KEY):]:
get_dataset(getattr(cfg, el), cfg.data_eval, transform_eval, logger)
for el in cfg.keys() if el.startswith(DATASET_EVAL_CFG_KEY)
}
logger.info("Took %d", time.time() - st)
logger.info("Creating data loaders")
train_sampler = None
test_samplers = {key: None for key in datasets_test}
if hasattr(dataset, 'video_clips'):
assert cfg.train.shuffle_data, 'TODO'
train_sampler = RandomClipSampler(getattr(dataset, 'video_clips'),
cfg.data_train.train_bs_multiplier)
test_samplers = {
key: UniformClipSampler(val.video_clips,
cfg.data_eval.val_clips_per_video)
for key, val in datasets_test.items()
}
if dist_info['distributed']:
train_sampler = DistributedSampler(train_sampler)
            test_samplers = {
                key: DistributedSampler(val)
                for key, val in test_samplers.items()
            }
elif dist_info['distributed']:
# Distributed, but doesn't have video_clips
if cfg.data_train.use_dist_sampler:
train_sampler = torch.utils.data.distributed.DistributedSampler(
dataset,
num_replicas=dist_info['world_size'],
rank=dist_info['rank'],
shuffle=cfg.train.shuffle_data)
if cfg.data_eval.use_dist_sampler:
test_samplers = {
key: torch.utils.data.distributed.DistributedSampler(
val,
num_replicas=dist_info['world_size'],
rank=dist_info['rank'],
shuffle=False)
for key, val in datasets_test.items()
}
data_loader = torch.utils.data.DataLoader(
dataset,
batch_size=cfg.train.batch_size,
sampler=train_sampler,
num_workers=cfg.data_train.workers,
pin_memory=False, # usually hurts..
shuffle=(train_sampler is None and cfg.train.shuffle_data),
collate_fn=collate_fn_remove_audio,
)
data_loaders_test = {
key: torch.utils.data.DataLoader(
val,
# Since no backprop, so can have a larger batch size
batch_size=cfg.eval.batch_size or cfg.train.batch_size * 4,
sampler=test_samplers[key],
num_workers=cfg.data_eval.workers,
pin_memory=False, # Usually hurts..
shuffle=False,
collate_fn=collate_fn_remove_audio,
)
for key, val in datasets_test.items()
}
num_classes = {key: len(val) for key, val in dataset.classes.items()}
logger.info('Creating model with %s classes', num_classes)
model = base_model.BaseModel(cfg.model,
num_classes=num_classes,
class_mappings=dataset.class_mappings)
logger.debug('Model: %s', model)
if dist_info['distributed'] and cfg.sync_bn:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
if cfg.train.init_from_model:
# This can have structure as follows:
# <module name>:<path to init model>;<module name>:<path>: ...
for module_ckpt in cfg.train.init_from_model:
elts = module_ckpt
if len(elts) == 1:
model_to_init = model
ckpt_modules_to_keep = None
ckpt_path = elts[0]
elif len(elts) == 2:
model_to_init = operator.attrgetter(elts[0])(model)
ckpt_modules_to_keep = None
ckpt_path = elts[1]
elif len(elts) == 3:
model_to_init = operator.attrgetter(elts[0])(model)
ckpt_modules_to_keep = elts[1]
ckpt_path = elts[2]
else:
raise ValueError(f'Incorrect formatting {module_ckpt}')
init_model(model_to_init, ckpt_path, ckpt_modules_to_keep, logger)
model.to(device)
if cfg.opt.classifier_only:
assert len(cfg.opt.lr_wd) == 1
assert cfg.opt.lr_wd[0][0] == 'classifier'
model = _set_all_bn_to_not_track_running_mean(model)
params = []
for this_module_names, this_lr, this_wd in cfg.opt.lr_wd:
if OmegaConf.get_type(this_module_names) != list:
this_module_names = [this_module_names]
this_modules = [
operator.attrgetter(el)(model) if el != '__all__' else model
for el in this_module_names
]
this_params_bias_bn = {}
this_params_rest = {}
for this_module_name, this_module in zip(this_module_names,
this_modules):
for name, param in this_module.named_parameters():
# ignore the param without grads
if not param.requires_grad:
continue
# May not always have a ".bias" if it's the last element, and no
# module name
if name.endswith('bias') or ('.bn' in name):
this_params_bias_bn[this_module_name + '.' + name] = param
else:
this_params_rest[this_module_name + '.' + name] = param
this_scaled_lr = this_lr * dist_info['world_size']
if cfg.opt.scale_lr_by_bs:
this_scaled_lr *= cfg.train.batch_size
params.append({
'params': this_params_rest.values(),
'lr': this_scaled_lr,
'weight_decay': this_wd,
})
logger.info('Using LR %f WD %f for parameters %s', params[-1]['lr'],
params[-1]['weight_decay'], this_params_rest.keys())
params.append({
'params': this_params_bias_bn.values(),
'lr': this_scaled_lr,
'weight_decay': this_wd * cfg.opt.bias_bn_wd_scale,
})
logger.info('Using LR %f WD %f for parameters %s', params[-1]['lr'],
params[-1]['weight_decay'], this_params_bias_bn.keys())
# Remove any parameters for which LR is 0; will save GPU usage
params_final = []
for param_lr in params:
if param_lr['lr'] != 0.0:
params_final.append(param_lr)
else:
for param in param_lr['params']:
param.requires_grad = False
optimizer = hydra.utils.instantiate(cfg.opt.optimizer, params_final)
# convert scheduler to be per iteration,
# not per epoch, for warmup that lasts
# between different epochs
main_scheduler = hydra.utils.instantiate(
cfg.opt.scheduler,
optimizer,
iters_per_epoch=len(data_loader),
world_size=dist_info['world_size'])
lr_scheduler = hydra.utils.instantiate(cfg.opt.warmup,
optimizer,
main_scheduler,
iters_per_epoch=len(data_loader),
world_size=dist_info['world_size'])
last_saved_ckpt = CKPT_FNAME
start_epoch = 0
if os.path.isfile(last_saved_ckpt):
checkpoint = torch.load(last_saved_ckpt, map_location='cpu')
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
start_epoch = checkpoint['epoch']
logger.warning('Loaded model from %s (ep %f)', last_saved_ckpt,
start_epoch)
if dist_info['distributed'] and not cfg.eval.eval_fn.only_run_featext:
# If only feat ext, then each gpu is going to test separately anyway,
# no need for communication between the models
logger.info('Wrapping model into DDP')
model = torch.nn.parallel.DistributedDataParallel(
model,
device_ids=[dist_info['gpu']],
output_device=dist_info['gpu'])
elif cfg.data_parallel:
logger.info('Wrapping model into DP')
device_ids = range(dist_info['world_size'])
model = torch.nn.parallel.DataParallel(model, device_ids=device_ids)
# TODO add an option here to support val mode training
# Passing in the training dataset, since that will be used for computing
# weights for classes etc.
train_eval_op = hydra.utils.instantiate(cfg.train_eval_op,
model,
device,
dataset,
_recursive_=False)
if cfg.test_only:
logger.info("Starting test_only")
hydra.utils.call(cfg.eval.eval_fn, train_eval_op, data_loaders_test,
writer, logger, start_epoch)
return
logger.info("Start training")
start_time = time.time()
# Get training metric logger
stat_loggers = get_default_loggers(writer, start_epoch, logger)
best_acc1 = 0.0
partial_epoch = start_epoch - int(start_epoch)
start_epoch = int(start_epoch)
last_saved_time = datetime.datetime(1, 1, 1, 0, 0)
    epoch = 0  # Used below to write the final checkpoint, so initialize it
for epoch in range(start_epoch, cfg.train.num_epochs):
if dist_info['distributed'] and train_sampler is not None:
train_sampler.set_epoch(epoch)
last_saved_time = hydra.utils.call(cfg.train.train_one_epoch_fn,
train_eval_op, optimizer,
lr_scheduler, data_loader, epoch,
partial_epoch,
stat_loggers["train"], logger,
last_saved_time)
partial_epoch = 0 # Reset, for future epochs
store_checkpoint([CKPT_FNAME], model, optimizer, lr_scheduler,
epoch + 1)
if cfg.train.eval_freq and epoch % cfg.train.eval_freq == 0:
acc1 = hydra.utils.call(cfg.eval.eval_fn, train_eval_op,
data_loaders_test, writer, logger,
epoch + 1)
else:
acc1 = 0
if cfg.train.store_best and acc1 >= best_acc1:
store_checkpoint('checkpoint_best.pth', model, optimizer,
lr_scheduler, epoch + 1)
best_acc1 = acc1
if isinstance(lr_scheduler.base_scheduler,
scheduler.ReduceLROnPlateau):
lr_scheduler.step(acc1)
# reset all meters in the metric logger
for log in stat_loggers:
stat_loggers[log].reset_meters()
# Store the final model to checkpoint
store_checkpoint([CKPT_FNAME], model, optimizer, lr_scheduler, epoch + 1)
total_time = time.time() - start_time
total_time_str = str(datetime.timedelta(seconds=int(total_time)))
logger.info('Training time %s', total_time_str)
|
AVT-main
|
func/train.py
|
from functools import partial
from types import SimpleNamespace
from typing import Optional, List
import numpy as np
import scipy.optimize
import scipy.special
import sklearn.metrics.pairwise as skmetrics
def Phi(
D: np.ndarray,
edge_list: np.ndarray = None,
):
"""
Given an n x d matrix of (example, slices), calculate the potential
matrix.
Includes correlations modeled by the edges in the `edge_list`.
Args:
D (np.ndarray): n x d matrix of (example, slice)
edge_list (np.ndarray): k x 2 matrix of edge correlations to be modeled.
edge_list[i, :] should be indices for a pair of columns of D.
Returns:
Potential matrix. Equals D when edge_list is None, otherwise adds additional
(x_i * x_j) "cross-terms" corresponding to the edges in the `edge_list`.
Examples:
>>> D = np.random.choice([-1, 1], size=(100, 6))
>>> edge_list = np.array([(0, 1), (1, 4)])
>>> Phi(D, edge_list)
"""
if edge_list is not None:
pairwise_terms = (
D[np.arange(len(D)), edge_list[:, 0][:, np.newaxis]].T
* D[np.arange(len(D)), edge_list[:, 1][:, np.newaxis]].T
)
return np.concatenate([D, pairwise_terms], axis=1)
else:
return D
def log_partition_ratio(
x: np.ndarray,
Phi_D_src: np.ndarray,
n_src: int,
):
"""
Calculate the log-partition ratio in the KLIEP problem.
"""
return np.log(n_src) - scipy.special.logsumexp(Phi_D_src.dot(x))
def mandoline(
D_src: np.ndarray,
D_tgt: np.ndarray,
edge_list: np.ndarray,
sigma: float=None,
):
"""
Mandoline solver.
Args:
D_src: (n_src x d) matrix of (example, slices) for the source distribution.
        D_tgt: (n_tgt x d) matrix of (example, slices) for the target distribution.
edge_list: list of edge correlations between slices that should be modeled.
sigma: optional parameter that activates RBF kernel-based KLIEP with scale
`sigma`.
Returns: SimpleNamespace that contains
opt: result of scipy.optimize
Phi_D_src: source potential matrix used in Mandoline
Phi_D_tgt: target potential matrix used in Mandoline
n_src: number of source samples
n_tgt: number of target samples
edge_list: the `edge_list` parameter passed as input
"""
# Copy and binarize the input matrices to -1/1
D_src, D_tgt = np.copy(D_src), np.copy(D_tgt)
if np.min(D_src) == 0:
D_src[D_src == 0] = -1
D_tgt[D_tgt == 0] = -1
    # Edge list encoding dependencies between slices (the g's)
if edge_list is not None:
edge_list = np.array(edge_list)
# Create the potential matrices
Phi_D_tgt, Phi_D_src = Phi(D_tgt, edge_list), Phi(D_src, edge_list)
# Number of examples
n_src, n_tgt = Phi_D_src.shape[0], Phi_D_tgt.shape[0]
    def f(x):
        # KLIEP log-likelihood: log density ratios summed over the target,
        # with the log-partition estimated from the source samples
        obj = Phi_D_tgt.dot(x).sum() - n_tgt * scipy.special.logsumexp(Phi_D_src.dot(x))
        return -obj  # negated since scipy minimizes
# Set the kernel
kernel = partial(skmetrics.rbf_kernel, gamma=sigma)
def llkliep_f(x):
obj = kernel(
Phi_D_tgt, x[:, np.newaxis]
).sum() - n_tgt * scipy.special.logsumexp(kernel(Phi_D_src, x[:, np.newaxis]))
return -obj
# Solve
if not sigma:
opt = scipy.optimize.minimize(
f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
)
else:
opt = scipy.optimize.minimize(
llkliep_f, np.random.randn(Phi_D_tgt.shape[1]), method="BFGS"
)
return SimpleNamespace(
opt=opt,
Phi_D_src=Phi_D_src,
Phi_D_tgt=Phi_D_tgt,
n_src=n_src,
n_tgt=n_tgt,
edge_list=edge_list,
)
def log_density_ratio(D, solved):
"""
Calculate the log density ratio for a solved Mandoline run.
"""
Phi_D = Phi(D, None)
return Phi_D.dot(solved.opt.x) + log_partition_ratio(
solved.opt.x, solved.Phi_D_src, solved.n_src
)
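# Illustrative weight computation (assumed usage): after solving, the
# self-normalized importance weights for the source examples follow from the
# log density ratio, mirroring what `estimate_performance` does below.
# solved = mandoline(D_src, D_tgt, edge_list=None)
# log_ratios = log_density_ratio(solved.Phi_D_src, solved)
# weights = np.exp(log_ratios) / np.exp(log_ratios).sum()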
def get_k_most_unbalanced_gs(D_src, D_tgt, k):
"""
Get the top k slices that shift most between source and target
distributions.
Uses difference in marginals between each slice.
"""
marginal_diff = np.abs(D_src.mean(axis=0) - D_tgt.mean(axis=0))
differences = np.sort(marginal_diff)[-k:]
indices = np.argsort(marginal_diff)[-k:]
return list(indices), list(differences)
def weighted_estimator(weights: Optional[np.ndarray], mat: np.ndarray):
"""
Calculate a weighted empirical mean over a matrix of samples.
Args:
weights (Optional[np.ndarray]):
length n array of weights that sums to 1. Calculates an unweighted
mean if `weights` is None.
mat (np.ndarray):
(n x r) matrix of empirical observations that is being averaged.
Returns:
Length r np.ndarray of weighted means.
"""
    if weights is None:
        return np.mean(mat, axis=0)
    assert np.isclose(np.sum(weights), 1), "`weights` must sum to 1."
    return np.sum(weights[:, np.newaxis] * mat, axis=0)
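# Example: a weighted mean that up-weights the second sample.
# mat = np.array([[0.0], [1.0]])
# weighted_estimator(np.array([0.25, 0.75]), mat)  # -> array([0.75])
# weighted_estimator(None, mat)                    # -> array([0.5])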
def estimate_performance(
D_src: np.ndarray,
D_tgt: np.ndarray,
edge_list: np.ndarray,
empirical_mat_list_src: List[np.ndarray],
):
"""
Estimate performance on a target distribution using slice information from the
source and target data.
This function runs Mandoline to calculate the importance weights to reweight
the source data.
Args:
D_src (np.ndarray): (n_src x d) matrix of (example, slices) for the source
distribution.
D_tgt (np.ndarray): (n_tgt x d) matrix of (example, slices) for the target
distribution.
        edge_list (np.ndarray): list of edge correlations between slices that
            should be modeled.
        empirical_mat_list_src (List[np.ndarray]): list of (n_src x r) matrices
            of empirical observations (e.g. per-example model errors) whose
            means will be reweighted to the target distribution.
Returns:
SimpleNamespace with 3 attributes
- `all_estimates` is a list of SimpleNamespace objects with
2 attributes
- `weighted` is the estimate for the target distribution
- `source` is the estimate for the source distribution
- `solved`: result of scipy.optimize Mandoline solver
- `weights`: self-normalized importance weights used to weight the source data
"""
# Run the solver
solved = mandoline(D_src, D_tgt, edge_list)
# Compute the weights on the source dataset
density_ratios = np.e ** log_density_ratio(solved.Phi_D_src, solved)
# Self-normalized importance weights
weights = density_ratios / np.sum(density_ratios)
all_estimates = []
for mat_src in empirical_mat_list_src:
# Estimates is a 1-D array of estimates for each mat e.g.
# each mat can correspond to a model's (n x 1) error matrix
weighted_estimates = weighted_estimator(weights, mat_src)
source_estimates = weighted_estimator(
np.ones(solved.n_src) / solved.n_src, mat_src
)
all_estimates.append(
SimpleNamespace(
weighted=weighted_estimates,
source=source_estimates,
)
)
return SimpleNamespace(
all_estimates=all_estimates,
solved=solved,
weights=weights,
)
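if __name__ == "__main__":
    # Minimal self-contained demo on synthetic slices (illustrative usage,
    # not part of the original repo). Two binary slices shift between the
    # source and target distributions; we estimate a model's error rate on
    # the target by reweighting per-example errors measured on the source.
    rng = np.random.RandomState(0)
    D_src = rng.choice([0, 1], size=(1000, 2), p=[0.7, 0.3])
    D_tgt = rng.choice([0, 1], size=(500, 2), p=[0.3, 0.7])
    # Synthetic per-example error indicator, correlated with the first slice
    errors = (D_src[:, 0] * rng.choice([0, 1], size=1000, p=[0.6, 0.4]))
    errors = errors[:, np.newaxis].astype(float)
    res = estimate_performance(D_src, D_tgt, None, [errors])
    print("source error estimate:", res.all_estimates[0].source)
    print("weighted (target) error estimate:", res.all_estimates[0].weighted)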
|
mandoline-main
|
mandoline.py
|
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Useful links:
Streamlit cheatsheet:
https://docs.streamlit.io/library/cheatsheet
Also check the components we provide for demos in metastreamlit:
https://github.com/fairinternal/metastreamlit
You can request new components by creating an issue
"""
# Designed to run from controllable_agent with streamlit run demo/main.py
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "" # avoid using CUDA
import sys
import time
import logging
import tempfile
from pathlib import Path
from collections import OrderedDict
import streamlit as st
try:
import url_benchmark
base = Path(url_benchmark.__file__).absolute().parents[1]
except ImportError:
base = Path(__file__).absolute().parents[1]
# we need to add the base repo to be able to import url_benchmark
# we need to add url_benchmark to be able to reload legacy checkpoints
for fp in [base, base / "url_benchmark"]:
assert fp.exists()
if str(fp) not in sys.path:
sys.path.append(str(fp))
print("base", base)
from url_benchmark import pretrain
import numpy as np
import torch
import torch.nn.functional as F
from controllable_agent import runner
from url_benchmark import goals
from url_benchmark import utils
from url_benchmark.video import VideoRecorder
logger = logging.getLogger(__name__)
st.set_page_config(
page_title="Controllable agent - Meta AI",
menu_items={"About": "This demo is powered by the code available at https://github.com/facebookresearch/controllable_agent\nCopyright 2022 Meta Inc. Available under MIT Licence."},
)
# st.title('Controllable agent')
st.sidebar.write('# Controllable Agent Demo')
st.sidebar.write("### Optimize Any Reward Function with a Single Pretrained Agent")
st.sidebar.write("***Ahmed Touati, Jérémy Rapin, Yann Ollivier***")
st.sidebar.write("A controllable agent is a reinforcement learning agent whose _reward function can be set in real time_, without any additional learning or fine-tuning, based on a reward-free pretraining phase.")
st.sidebar.write("""The controllable agent here uses the _forward-backward representation_ from our papers:
* [Does Zero-Shot Reinforcement Learning Exist?](https://arxiv.org/abs/2209.14935)
* [Learning One Representation to Optimize All Rewards](https://arxiv.org/abs/2103.07945) (Neurips 2021)
""")
st.sidebar.write("The [code is open-source](https://github.com/facebookresearch/controllable_agent).")
model_path = Path("/checkpoint/jrapin/ca/models")
if not model_path.exists():
model_path = base / "models"
# having more cases will trigger a dropdown box
CASES = {
    # Update the following path to a checkpoint that exists in your system
"walker - 221020 (rnd init)": model_path / "walker_rnd_init_65697627_11_221020.pt",
}
CASES = {x: y for x, y in CASES.items() if y.exists()}
if len(CASES) > 1:
case = st.selectbox(
'Which model do you want to load?',
list(CASES)
)
else:
case = list(CASES)[0]
assert case is not None
@st.cache(max_entries=1, allow_output_mutation=True)
def load_workspace(case: str):
checkpoint = CASES[case]
hp = runner.HydraEntryPoint(base / "url_benchmark/anytrain.py")
ws = hp.workspace(task="walker_walk", replay_buffer_episodes=2000, goal_space="walker_pos_speed_z", append_goal_to_observation=True)
ws.train_env.reset()
with checkpoint.open("rb") as f:
payload = torch.load(f, map_location=ws.device)
ws.agent = payload["agent"]
ws.agent.cfg.device = ws.cfg.device
replay = payload["replay_loader"]
ws.replay_loader = replay
ws.replay_storage = replay
return ws
# load
ws = load_workspace(case)
recorder = VideoRecorder(base, camera_id=ws.video_recorder.camera_id, use_wandb=False)
recorder.enabled = True
reward = goals.WalkerEquation("x")
reward._precompute_for_demo(ws) # precompute before first run
ws.replay_loader._storage.clear() # clear memory since not used anymore
params = list(reward._extract(reward._env))
params_str = ", ".join(f"`{x}`" for x in params)
st.write("##### Try Your Own Reward Function for Walker")
st.write(f"Enter a Walker reward function to maximize, such as `-vx` or `exp(-(x-8)**2)`\n\n This can be any Python equation using {params_str} (horizontal and vertical position, horizontal and vertical speed, sine of torso angle, angular momentum)")
string = st.text_input("Reward function:", value=st.session_state.get("prefill", ""))
# st.session_state.pop("prefill", None)
col1, col2 = st.columns(2)
early_stopping = True
last_physics = np.ndarray([])
if string and string is not None:
reward = goals.WalkerEquation(string)
reward._precompute_for_demo(ws) # loads from cached workspace if already precomputed
logger.info(f"Running reward: {string}") # for the console
col1.write(f"Running reward `{string}`") # use code formating to avoid italic from **
if not reward._precomputed:
meta = pretrain._init_eval_meta(ws, custom_reward=reward)
else:
print("Inferring from precomputed data")
meta = reward._from_precomputed()
col1.write("Applying the policy for 500 time steps and generating video (this may take 10-15s)")
# play
env = ws._make_env()
time_step = env.reset()
recorder.init(env)
total_reward = 0
k = 0
durations = dict(model=0.0, env=0.0, render=0.0)
t_start = time.time()
while k < 500 and not time_step.last():
k += 1
t0 = time.time()
with torch.no_grad(), utils.eval_mode(ws.agent):
action = ws.agent.act(time_step.observation,
meta,
1000000,
eval_mode=True)
t1 = time.time()
time_step = env.step(action)
t2 = time.time()
recorder.record(env)
t3 = time.time()
durations["model"] += t1 - t0
durations["env"] += t2 - t1
durations["render"] += t3 - t2
total_reward += reward.from_env(env)
distance = np.linalg.norm(time_step.physics - last_physics) / time_step.physics.size
if early_stopping and distance < 5e-6:
print(f"Early stopping at time step {k}")
break
last_physics = time_step.physics
print(f"Total play time {time.time() - t_start:.2f}s with {durations}")
state = reward._extract(env)
state_str = " ".join(f"{x}={y:.2f}" for x, y in state.items())
col1.write(
f"Average reward is {total_reward / k}\n\n"
f'Final state is {state_str}'
)
name = "demo.mp4"
with tempfile.TemporaryDirectory() as tmp:
recorder.save_dir = Path(tmp)
t0 = time.time()
recorder.save(name)
print(f"Saved video to {recorder.save_dir / name} in {time.time() - t0:.2f}s, now serving it.")
col = st.columns([1, 3, 1])[1]
with col:
col2.video(str(recorder.save_dir / name))
st.write("---")
st.write(f"""**Note**: multiplicative rewards are a good way to combine constraints on the agent. For instance, `z**4 * exp(-abs(x-5))` makes the agent try to jump around `x=5`""")
st.write(f"""This agent is far from perfect, and it is still easy to make it fail. For instance, the variable `x` works well only in the range well-covered in the trainset (typically -15 to 15). Rewards like `x**2` or `exp(x)` produce bad results, presumably because they are largest far away from the trainset. On the other hand, `x**2 * (x<20) * (x>-20)` works better, because the reward is restricted to a well-explored zone. Also, the variable `vz` does not seem to do much. """)
with st.expander("How Does This Work?"):
st.write(r"""
The algorithms are directly taken from our papers (see side bar). At pre-training time, two representations $F(s,a,z)$ and $B(s)$ ("forward" and "backward") were learned, as well as a parametric policy $\pi_z(s)$. Here $z$ is a hidden variable in representation space.
When a new reward function $r$ is set, the app computes the hidden variable $z=\mathbb{E}[r(s)B(s)]$ using 5,000 states $s$ from the training set, using the provided function $r$. Then the policy $\pi_z$ with parameter $z$ is deployed.
The dimension of $F$, $B$ and $z$ is 50. The networks are small multilayer perceptrons. The training set was initialized by a standard exploration algorithm, Random Network Distillation. It is made of 2,000 length-1,000 trajectories. Then we learn $F$, $B$ and $\pi_z$ using the method described in our papers, and we update the training set by sampling random $z$ and applying the corresponding policy.
For $B$, we only provide a subset of variables from the full state $s$, namely, the six variables `x,z,vx,vz,up,am` mentioned above, to focus training on those. Our theory guarantees that, if the networks minimize the loss well, all reward functions depending on those variables will be optimized.
###### How do we Learn $F$, $B$ and $\pi$? Causes and Effects
Intuitively, $F(s,a,z)$ represents the "effects" of following $\pi_z$ starting at state-action $(s,a)$, while $B(s')$ represents the possible "causes" leading to state $s'$.
If it's easy to reach $s'$ while starting at $s,a$ and following $\pi_z$ for many steps, then the dot product $F(s,a,z)^TB(s')$ will be large, meaning, we align the representation vectors $F(s,a,z)$ and $B(s')$. The precise equation (below) uses the cumulated long-term transition probabilities between states.
The policy $\pi_z$ is trained to return an action $a$ that maximizes $F(s,a,z)^T z$.
The full set of equations is:""")
st.latex(r'''\begin{cases}
\pi_z(s)=\mathrm{argmax}_a \, F(s,a,z)^T z\\
F(s,a,z)^T B(s') \rho(s') = \sum_t \gamma^t \Pr(s_t=s'|s_0=s,a_0=a,\pi_z)
\end{cases}
''')
st.write("""
Here $\\rho$ is the distribution of states in the training set (we don't need to know $\\rho$, just to sample from it).
Our theory guarantees this provides all optimal policies if training is successful:
**Theorem.** *Assume the equations above hold. Then the optimal policy for any reward function $r$ can be obtained by evaluating* """)
st.latex(r''' z=\mathbb{E}[r(s)B(s)] ''')
st.write(r""" *on states sampled from the training distribution, and applying policy $\pi_z$.*
*Moreover, approximate solutions still provide approximately optimal policies.*
The equation on $F$ and $B$ seems hard to handle, but it can be rewritten as a kind of generalized Bellman equation for $F^T B$, which we use for training. There is no representation collapse ($F=B=0$ does not satisfy the equation). There is no sparse reward problem from $\Pr(s_t=s')$, thanks to our probability-measure-valued treatment of the equation.
Overall, this is somewhat similar to a world model except:
* There is no planning at test time
* We never synthesize states or imaginary trajectories
* We learn long-term transition probabilities for many policies instead of one-step, policy-independent next states
""")
st.write("##### Some Examples")
reward_texts = [
("vx", "run as fast as possible"),
("x < -4", "go to the left until x<-4"),
("1 / z", "be close to the ground"),
("-up", "be upside down"),
("-up * x * (x > 0)", "be to the right and upside down"),
("(1-up) * exp(-abs(x-10))", "be upside down around x=10"),
("exp(-abs(x - 8)) * up / z", "be around x=8, upright, and close to the ground: crouch at x=8"),
("exp(-abs(x - 10)) * up * z**4", "be around x=10, upright, and very high: jump at x=10"),
("vx/z**2", "crawl"),
("exp(-abs(vx - 2)) * up", "move slowly (speed=2) and stay upright"),
("vx * (1 - up) / z", "move as fast as possible, upside down, close to the ground"),
("vx * (1 + up * cos(x / 4))", "run upright or rolling depending on cos(x/4)"),
]
def _prefill(eq: str) -> None:
st.session_state["prefill"] = eq
for reward, text in reward_texts:
cols = st.columns(3)
cols[0].write(f"`{reward}`")
cols[1].write(text)
cols[2].button("Try", key=reward, on_click=_prefill, args=(reward,))
# col[2].write("video TODO")
|
controllable_agent-main
|
demo/main.py
|