python_code | repo_name | file_path
---|---|---|
from setuptools import setup, find_packages
setup(
name = 'kronecker-attention-pytorch',
packages = find_packages(),
version = '0.0.6',
license='MIT',
description = 'Kronecker Attention - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/kronecker-attention-pytorch',
keywords = [
'artificial intelligence',
'attention mechanism'
],
install_requires=[
'torch',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
kronecker-attention-pytorch-master
|
setup.py
|
from kronecker_attention_pytorch.kronecker_attention_pytorch import KroneckerSelfAttention
|
kronecker-attention-pytorch-master
|
kronecker_attention_pytorch/__init__.py
|
import torch
from torch import nn, einsum
from einops import rearrange, repeat
import torch.nn.functional as F
class KroneckerSelfAttention(nn.Module):
def __init__(self, dim, heads, dim_heads = 32):
super().__init__()
hidden_dim = heads * dim_heads
self.heads = heads
self.to_qkv = nn.Conv1d(dim, hidden_dim * 3, 1, bias = False)
self.to_out = nn.Conv1d(hidden_dim, dim, 1)
def forward(self, x):
h = x.shape[-2]
x = torch.cat((x.mean(dim=-1), x.mean(dim=-2)), dim=-1)
qkv = self.to_qkv(x)
q, k, v = rearrange(qkv, 'b (qkv h d) n -> qkv b h d n', h=self.heads, qkv=3)
dots = einsum('bhdi,bhdj->bhij', q, k)
attn = dots.softmax(dim=-1)
out = einsum('bhij,bhdj->bhdi', attn, v)
out = rearrange(out, 'b h d n -> b (h d) n')
out = self.to_out(out)
# outer sum
        out = rearrange(out[..., :h], 'b c n -> b c n 1') + rearrange(out[..., h:], 'b c n -> b c 1 n')
return out
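# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): how this module might be
# exercised on a feature map. forward() expects (batch, dim, height, width) and
# returns the same shape after the outer sum over the two pooled axes. The
# sizes below are illustrative assumptions.
if __name__ == '__main__':
    attn = KroneckerSelfAttention(dim = 32, heads = 8, dim_heads = 64)
    feats = torch.randn(1, 32, 256, 256)
    out = attn(feats)
    print(out.shape)  # torch.Size([1, 32, 256, 256])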
|
kronecker-attention-pytorch-master
|
kronecker_attention_pytorch/kronecker_attention_pytorch.py
|
from setuptools import setup, find_packages
setup(
name = 'contrastive_learner',
packages = find_packages(),
version = '0.1.1',
license='MIT',
description = 'Self-supervised contrastive learning made simple',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/contrastive-learner',
keywords = ['self-supervised learning', 'artificial intelligence'],
install_requires=[
'torch',
'kornia'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
contrastive-learner-master
|
setup.py
|
from contrastive_learner.contrastive_learner import ContrastiveLearner
|
contrastive-learner-master
|
contrastive_learner/__init__.py
|
import copy
import random
from functools import wraps
import torch
from torch import nn
import torch.nn.functional as F
from torchvision.models import resnet50
from kornia import augmentation as augs
from kornia import filters
# helper functions
def identity(x): return x
def default(val, def_val):
return def_val if val is None else val
def flatten(t):
return t.reshape(t.shape[0], -1)
def safe_concat(arr, el, dim=0):
if arr is None:
return el
return torch.cat((arr, el), dim=dim)
def singleton(cache_key):
def inner_fn(fn):
@wraps(fn)
def wrapper(self, *args, **kwargs):
instance = getattr(self, cache_key)
if instance is not None:
return instance
instance = fn(self, *args, **kwargs)
setattr(self, cache_key, instance)
return instance
return wrapper
return inner_fn
# losses
def contrastive_loss(queries, keys, temperature = 0.1):
b, device = queries.shape[0], queries.device
logits = queries @ keys.t()
logits = logits - logits.max(dim=-1, keepdim=True).values
logits /= temperature
return F.cross_entropy(logits, torch.arange(b, device=device))
def nt_xent_loss(queries, keys, temperature = 0.1):
b, device = queries.shape[0], queries.device
n = b * 2
projs = torch.cat((queries, keys))
logits = projs @ projs.t()
mask = torch.eye(n, device=device).bool()
logits = logits[~mask].reshape(n, n - 1)
logits /= temperature
labels = torch.cat(((torch.arange(b, device=device) + b - 1), torch.arange(b, device=device)), dim=0)
loss = F.cross_entropy(logits, labels, reduction='sum')
loss /= n
return loss
# augmentation utils
class RandomApply(nn.Module):
def __init__(self, fn, p):
super().__init__()
self.fn = fn
self.p = p
def forward(self, x):
if random.random() > self.p:
return x
return self.fn(x)
# exponential moving average
class EMA():
def __init__(self, beta):
super().__init__()
self.beta = beta
def update_average(self, old, new):
if old is None:
return new
return old * self.beta + (1 - self.beta) * new
def update_moving_average(ema_updater, ma_model, current_model):
for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):
old_weight, up_weight = ma_params.data, current_params.data
ma_params.data = ema_updater.update_average(old_weight, up_weight)
# hidden layer extractor class
class OutputHiddenLayer(nn.Module):
def __init__(self, net, layer = -2):
super().__init__()
self.net = net
self.layer = layer
self.hidden = None
self._register_hook()
def _find_layer(self):
if type(self.layer) == str:
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif type(self.layer) == int:
children = [*self.net.children()]
return children[self.layer]
return None
def _register_hook(self):
def hook(_, __, output):
self.hidden = output
layer = self._find_layer()
assert layer is not None, f'hidden layer ({self.layer}) not found'
handle = layer.register_forward_hook(hook)
def forward(self, x):
if self.layer == -1:
return self.net(x)
_ = self.net(x)
hidden = self.hidden
self.hidden = None
assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
return hidden
# main class
class ContrastiveLearner(nn.Module):
def __init__(self, net, image_size, hidden_layer = -2, project_hidden = True, project_dim=128, augment_both=True, use_nt_xent_loss=False, augment_fn = None, use_bilinear = False, use_momentum = False, momentum_value = 0.999, key_encoder = None, temperature = 0.1):
super().__init__()
self.net = OutputHiddenLayer(net, layer=hidden_layer)
DEFAULT_AUG = nn.Sequential(
RandomApply(augs.ColorJitter(0.8, 0.8, 0.8, 0.2), p=0.8),
augs.RandomGrayscale(p=0.2),
augs.RandomHorizontalFlip(),
RandomApply(filters.GaussianBlur2d((3, 3), (1.5, 1.5)), p=0.1),
augs.RandomResizedCrop((image_size, image_size))
)
self.augment = default(augment_fn, DEFAULT_AUG)
self.augment_both = augment_both
self.temperature = temperature
self.use_nt_xent_loss = use_nt_xent_loss
self.project_hidden = project_hidden
self.projection = None
self.project_dim = project_dim
self.use_bilinear = use_bilinear
self.bilinear_w = None
self.use_momentum = use_momentum
self.ema_updater = EMA(momentum_value)
self.key_encoder = key_encoder
# for accumulating queries and keys across calls
self.queries = None
self.keys = None
# send a mock image tensor to instantiate parameters
self.forward(torch.randn(1, 3, image_size, image_size))
@singleton('key_encoder')
def _get_key_encoder(self):
key_encoder = copy.deepcopy(self.net)
key_encoder._register_hook()
return key_encoder
@singleton('bilinear_w')
def _get_bilinear(self, hidden):
_, dim = hidden.shape
        return nn.Parameter(torch.eye(dim, device=hidden.device, dtype=hidden.dtype))
@singleton('projection')
def _get_projection_fn(self, hidden):
_, dim = hidden.shape
return nn.Sequential(
nn.Linear(dim, dim, bias = False),
nn.LeakyReLU(inplace=True),
nn.Linear(dim, self.project_dim, bias = False)
).to(hidden)
def reset_moving_average(self):
assert self.use_momentum, 'must be using momentum method for key encoder'
del self.key_encoder
self.key_encoder = None
def update_moving_average(self):
assert self.key_encoder is not None, 'key encoder has not been created yet'
        update_moving_average(self.ema_updater, self.key_encoder, self.net)
def calculate_loss(self):
assert self.queries is not None and self.keys is not None, 'no queries or keys accumulated'
loss_fn = nt_xent_loss if self.use_nt_xent_loss else contrastive_loss
loss = loss_fn(self.queries, self.keys, temperature = self.temperature)
self.queries = self.keys = None
return loss
def forward(self, x, accumulate = False):
b, c, h, w, device = *x.shape, x.device
        transform_fn = self.augment if self.augment_both else identity
query_encoder = self.net
queries = query_encoder(transform_fn(x))
key_encoder = self.net if not self.use_momentum else self._get_key_encoder()
keys = key_encoder(self.augment(x))
if self.use_momentum:
keys = keys.detach()
queries, keys = map(flatten, (queries, keys))
if self.use_bilinear:
W = self._get_bilinear(keys)
keys = (W @ keys.t()).t()
project_fn = self._get_projection_fn(queries) if self.project_hidden else identity
queries, keys = map(project_fn, (queries, keys))
self.queries = safe_concat(self.queries, queries)
self.keys = safe_concat(self.keys, keys)
return self.calculate_loss() if not accumulate else None
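# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): one self-supervised step
# wiring a torchvision ResNet-50 into the learner. The batch size, learning
# rate and the 'avgpool' hidden layer are illustrative assumptions, not values
# prescribed by this module.
if __name__ == '__main__':
    resnet = resnet50()
    learner = ContrastiveLearner(
        resnet,
        image_size = 256,
        hidden_layer = 'avgpool',
        use_nt_xent_loss = True
    )
    opt = torch.optim.Adam(learner.parameters(), lr = 3e-4)
    images = torch.randn(8, 3, 256, 256)  # stand-in for a real dataloader batch
    loss = learner(images)
    opt.zero_grad()
    loss.backward()
    opt.step()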
|
contrastive-learner-master
|
contrastive_learner/contrastive_learner.py
|
from setuptools import setup, find_packages
setup(
name = 'MaMMUT-pytorch',
packages = find_packages(exclude=[]),
version = '0.0.6',
license='MIT',
description = 'MaMMUT - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/MaMMUT-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'multimodal',
'attention mechanism',
'contrastive learning'
],
install_requires=[
'einops>=0.6.1',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
MaMMUT-pytorch-main
|
setup.py
|
import torch
from torch import einsum, nn
import torch.nn.functional as F
import torch.distributed as dist
from torch.autograd import Function
from einops import rearrange, repeat
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(numer, denom):
return (numer % denom) == 0
# distributed
def all_gather_variable_batch(t):
device, rank, world_size = t.device, dist.get_rank(), dist.get_world_size()
size = torch.tensor(t.shape[0], device = device, dtype = torch.long)
sizes = [torch.empty_like(size, device = device, dtype = torch.long) for i in range(world_size)]
dist.all_gather(sizes, size)
sizes = torch.stack(sizes)
max_size = sizes.amax().item()
padded_t = pad_dim_to(t, max_size, dim = 0)
gathered_tensors = [torch.empty_like(padded_t, device = device, dtype = padded_t.dtype) for i in range(world_size)]
dist.all_gather(gathered_tensors, padded_t)
gathered_tensor = torch.cat(gathered_tensors)
seq = torch.arange(max_size, device = device)
mask = rearrange(seq, 'j -> 1 j') < rearrange(sizes, 'i -> i 1')
mask = rearrange(mask, 'i j -> (i j)')
gathered_tensor = gathered_tensor[mask]
sizes = sizes.tolist()
return gathered_tensor, sizes
class AllGather(Function):
@staticmethod
def forward(ctx, x):
assert dist.is_initialized() and dist.get_world_size() > 1
x, batch_sizes = all_gather_variable_batch(x)
ctx.batch_sizes = batch_sizes
return x
@staticmethod
def backward(ctx, grads):
batch_sizes, rank = ctx.batch_sizes, dist.get_rank()
grads_by_rank = grads.split(batch_sizes, dim = 0)
return grads_by_rank[rank]
all_gather = AllGather.apply
# normalization
# they use layernorm without bias, something that pytorch does not offer
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# residual
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, *args, **kwargs):
return self.fn(x, *args, **kwargs) + x
# to latents
class EmbedToLatents(nn.Module):
def __init__(self, dim, dim_latents):
super().__init__()
self.to_latents = nn.Linear(dim, dim_latents, bias=False)
def forward(self, x):
latents = self.to_latents(x)
return F.normalize(latents, dim=-1)
# rotary positional embedding
# https://arxiv.org/abs/2104.09864
class RotaryEmbedding(nn.Module):
def __init__(self, dim):
super().__init__()
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq)
def forward(self, max_seq_len, *, device):
seq = torch.arange(max_seq_len, device=device, dtype=self.inv_freq.dtype)
freqs = einsum("i , j -> i j", seq, self.inv_freq)
return torch.cat((freqs, freqs), dim=-1)
def rotate_half(x):
x = rearrange(x, "... (j d) -> ... j d", j=2)
x1, x2 = x.unbind(dim=-2)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
# classic Noam Shazeer paper, except here they use SwiGLU instead of the more popular GEGLU for gating the feedforward
# https://arxiv.org/abs/2002.05202
class SwiGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim=-1)
return F.silu(gate) * x
# parallel attention and feedforward with residual
# discovered by Wang et al + EleutherAI from GPT-J fame
class ParallelTransformerBlock(nn.Module):
def __init__(self, dim, dim_head=64, heads=8, ff_mult=4):
super().__init__()
self.norm = LayerNorm(dim)
attn_inner_dim = dim_head * heads
ff_inner_dim = dim * ff_mult
self.fused_dims = (attn_inner_dim, dim_head, dim_head, (ff_inner_dim * 2))
self.heads = heads
self.scale = dim_head**-0.5
self.rotary_emb = RotaryEmbedding(dim_head)
self.fused_attn_ff_proj = nn.Linear(dim, sum(self.fused_dims), bias=False)
self.attn_out = nn.Linear(attn_inner_dim, dim, bias=False)
self.ff_out = nn.Sequential(
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
)
# for caching causal mask and rotary embeddings
self.mask = None
self.pos_emb = None
def get_mask(self, n, device):
if self.mask is not None and self.mask.shape[-1] >= n:
return self.mask[:n, :n].to(device)
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.mask = mask
return mask
def get_rotary_embedding(self, n, device):
if self.pos_emb is not None and self.pos_emb.shape[-2] >= n:
return self.pos_emb[:n].to(device)
pos_emb = self.rotary_emb(n, device=device)
self.pos_emb = pos_emb
return pos_emb
def forward(self, x, attn_mask=None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device, h = x.shape[1], x.device, self.heads
# pre layernorm
x = self.norm(x)
# attention queries, keys, values, and feedforward inner
q, k, v, ff = self.fused_attn_ff_proj(x).split(self.fused_dims, dim=-1)
# split heads
# they use multi-query single-key-value attention, yet another Noam Shazeer paper
# they found no performance loss past a certain scale, and more efficient decoding obviously
# https://arxiv.org/abs/1911.02150
q = rearrange(q, "b n (h d) -> b h n d", h=h)
# rotary embeddings
positions = self.get_rotary_embedding(n, device)
q, k = map(lambda t: apply_rotary_pos_emb(positions, t), (q, k))
# scale
q = q * self.scale
# similarity
sim = einsum("b h i d, b j d -> b h i j", q, k)
# causal mask
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# extra attention mask - for masking out attention from text CLS token to padding
if exists(attn_mask):
attn_mask = rearrange(attn_mask, 'b i j -> b 1 i j')
sim = sim.masked_fill(~attn_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum("b h i j, b j d -> b h i d", attn, v)
# merge heads
out = rearrange(out, "b h n d -> b n (h d)")
return self.attn_out(out) + self.ff_out(ff)
# cross attention - using multi-query + one-headed key / values as in PaLM w/ optional parallel feedforward
class CrossAttention(nn.Module):
def __init__(
self,
dim,
*,
context_dim=None,
dim_head=64,
heads=8,
parallel_ff=False,
ff_mult=4,
norm_context=False
):
super().__init__()
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = heads * dim_head
context_dim = default(context_dim, dim)
self.norm = LayerNorm(dim)
self.context_norm = LayerNorm(context_dim) if norm_context else nn.Identity()
self.to_q = nn.Linear(dim, inner_dim, bias=False)
self.to_kv = nn.Linear(context_dim, dim_head * 2, bias=False)
self.to_out = nn.Linear(inner_dim, dim, bias=False)
# whether to have parallel feedforward
ff_inner_dim = ff_mult * dim
self.ff = nn.Sequential(
nn.Linear(dim, ff_inner_dim * 2, bias=False),
SwiGLU(),
nn.Linear(ff_inner_dim, dim, bias=False)
) if parallel_ff else None
def forward(self, x, context):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
# pre-layernorm, for queries and context
x = self.norm(x)
context = self.context_norm(context)
# get queries
q = self.to_q(x)
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
# scale
q = q * self.scale
# get key / values
k, v = self.to_kv(context).chunk(2, dim=-1)
# query / key similarity
sim = einsum('b h i d, b j d -> b h i j', q, k)
# attention
attn = sim.softmax(dim=-1)
# aggregate
out = einsum('b h i j, b j d -> b h i d', attn, v)
# merge and combine heads
out = rearrange(out, 'b h n d -> b n (h d)')
out = self.to_out(out)
# add parallel feedforward (for multimodal layers)
if exists(self.ff):
out = out + self.ff(x)
return out
# transformer
class MaMMUT(nn.Module):
def __init__(
self,
*,
dim,
num_tokens,
depth,
cross_attend_every=1,
cross_attend_layers=None,
dim_latents=None,
image_dim=None,
num_img_queries=256,
dim_head=64,
heads=8,
ff_mult=4,
img_encoder=None,
caption_loss_weight=1.,
contrastive_loss_weight=1.,
pad_id=0
):
super().__init__()
self.dim = dim
self.pad_id = pad_id
self.caption_loss_weight = caption_loss_weight
self.contrastive_loss_weight = contrastive_loss_weight
# token embeddings
self.token_emb = nn.Embedding(num_tokens, dim)
self.text_cls_token = nn.Parameter(torch.randn(dim))
# image encoder
self.img_encoder = img_encoder
# attention pooling for image tokens
self.img_queries = nn.Parameter(torch.randn(num_img_queries + 1, dim)) # num image queries for multimodal, but 1 extra CLS for contrastive learning
self.img_attn_pool = CrossAttention(dim=dim, context_dim=image_dim, dim_head=dim_head, heads=heads, norm_context=True)
self.img_attn_pool_norm = LayerNorm(dim)
self.text_cls_norm = LayerNorm(dim)
# to latents
dim_latents = default(dim_latents, dim)
self.img_to_latents = EmbedToLatents(dim, dim_latents)
self.text_to_latents = EmbedToLatents(dim, dim_latents)
# contrastive learning temperature
self.temperature = nn.Parameter(torch.Tensor([1.]))
# layers
self.layers = nn.ModuleList([])
for ind in range(depth):
layer = ind + 1
has_cross_attn = divisible_by(layer, cross_attend_every)
if exists(cross_attend_layers):
assert isinstance(cross_attend_layers, tuple)
has_cross_attn = layer in cross_attend_layers
self.layers.append(nn.ModuleList([
Residual(ParallelTransformerBlock(dim=dim, dim_head=dim_head, heads=heads, ff_mult=ff_mult)),
Residual(CrossAttention(dim=dim, dim_head=dim_head, heads=heads, parallel_ff=True, ff_mult=ff_mult)) if has_cross_attn else None
]))
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias=False)
)
# they used embedding weight tied projection out to logits, not common, but works
self.to_logits[-1].weight = self.token_emb.weight
nn.init.normal_(self.token_emb.weight, std=0.02)
# is data parallel
self.is_data_parallel = dist.is_initialized() and dist.get_world_size() > 1
def embed_text(self, text):
batch, device = text.shape[0], text.device
seq = text.shape[1]
text_tokens = self.token_emb(text)
# append text cls tokens
text_cls_tokens = repeat(self.text_cls_token, 'd -> b 1 d', b=batch)
text_tokens = torch.cat((text_tokens, text_cls_tokens), dim=-2)
# create specific mask for text cls token at the end
# to prevent it from attending to padding
cls_mask = rearrange(text!=self.pad_id, 'b j -> b 1 j')
attn_mask = F.pad(cls_mask, (0, 1, seq, 0), value=True)
# go through layers, but do not cross attend
for attn_ff, _ in self.layers:
text_tokens = attn_ff(text_tokens, attn_mask=attn_mask)
# get text cls token
text_tokens, text_cls_tokens = text_tokens[:, :-1], text_tokens[:, -1]
text_embeds = self.text_cls_norm(text_cls_tokens)
return text_embeds, text_tokens
def embed_image(self, images=None, image_tokens=None):
# encode images into embeddings
# with the img_encoder passed in at init
# it can also accept precomputed image tokens
assert not (exists(images) and exists(image_tokens))
if exists(images):
assert exists(self.img_encoder), 'img_encoder must be passed in for automatic image encoding'
image_tokens = self.img_encoder(images)
# attention pool image tokens
img_queries = repeat(self.img_queries, 'n d -> b n d', b=image_tokens.shape[0])
img_queries = self.img_attn_pool(img_queries, image_tokens)
img_queries = self.img_attn_pool_norm(img_queries)
return img_queries[:, 0], img_queries[:, 1:]
def forward(
self,
text,
text_mask = None,
images=None,
image_tokens=None,
labels=None,
return_loss=False,
return_embeddings=False
):
batch, device = text.shape[0], text.device
if return_loss and not exists(labels):
text, labels = text[:, :-1], text[:, 1:]
text_embeds, _ = self.embed_text(text)
image_embeds, image_tokens = self.embed_image(images=images, image_tokens=image_tokens)
# return embeddings if that is what the researcher wants
if return_embeddings:
return text_embeds, image_embeds
# go through layers
text_tokens = self.token_emb(text)
for attn_ff, cross_attn in self.layers:
text_tokens = attn_ff(text_tokens)
if exists(cross_attn):
text_tokens = cross_attn(text_tokens, image_tokens)
logits = self.to_logits(text_tokens)
if not return_loss:
return logits
# shorthand
ce = F.cross_entropy
# calculate caption loss (cross entropy loss)
logits = rearrange(logits, 'b n c -> b c n')
caption_loss = ce(logits, labels, ignore_index=self.pad_id)
caption_loss = caption_loss * self.caption_loss_weight
# embedding to latents
text_latents = self.text_to_latents(text_embeds)
image_latents = self.img_to_latents(image_embeds)
# if data parallel, need to gather all latents from all machines
if self.is_data_parallel:
latents = torch.stack((text_latents, image_latents), dim = 1)
latents = all_gather(latents)
text_latents, image_latents = latents.unbind(dim = 1)
# calculate contrastive loss
sim = einsum('i d, j d -> i j', text_latents, image_latents)
sim = sim * self.temperature.exp()
contrastive_labels = torch.arange(batch, device=device)
contrastive_loss = (ce(sim, contrastive_labels) + ce(sim.t(), contrastive_labels)) * 0.5
contrastive_loss = contrastive_loss * self.contrastive_loss_weight
return caption_loss + contrastive_loss
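# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a single training step
# using precomputed image tokens instead of an img_encoder. Vocabulary size,
# sequence length and token shapes below are illustrative assumptions.
if __name__ == '__main__':
    mammut = MaMMUT(
        dim = 512,
        num_tokens = 20000,
        depth = 6,
        cross_attend_every = 2,
        image_dim = 1024
    )
    text = torch.randint(0, 20000, (2, 128))
    image_tokens = torch.randn(2, 49, 1024)  # e.g. patch embeddings from a ViT
    loss = mammut(text, image_tokens = image_tokens, return_loss = True)
    loss.backward()
    # embeddings for retrieval can be obtained by passing return_embeddings = True instead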
|
MaMMUT-pytorch-main
|
mammut_pytorch/mammut_pytorch.py
|
from mammut_pytorch.mammut_pytorch import MaMMUT
|
MaMMUT-pytorch-main
|
mammut_pytorch/__init__.py
|
from setuptools import setup, find_packages
setup(
name = 'slot_attention',
packages = find_packages(),
version = '1.1.2',
license='MIT',
description = 'Implementation of Slot Attention in Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/slot-attention',
keywords = ['attention', 'artificial intelligence'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
slot-attention-master
|
setup.py
|
import torch
from torch import nn
from torch.nn import init
class WeightedAttention(nn.Module):
def __init__(self, dim, eps = 1e-8, softmax_dim = 1, weighted_mean_dim = 2):
super().__init__()
self.norm_input = nn.LayerNorm(dim)
self.norm_context = nn.LayerNorm(dim)
self.to_q = nn.Linear(dim, dim)
self.to_k = nn.Linear(dim, dim)
self.to_v = nn.Linear(dim, dim)
self.eps = eps
self.scale = dim ** -0.5
self.softmax_dim = softmax_dim
self.weighted_mean_dim = weighted_mean_dim
def forward(self, inputs, context):
inputs = self.norm_input(inputs)
context = self.norm_context(context)
q = self.to_q(inputs)
k = self.to_k(context)
v = self.to_v(context)
dots = torch.einsum('bid,bjd->bij', q, k) * self.scale
attn = dots.softmax(dim = self.softmax_dim) + self.eps
attn = attn / attn.sum(dim = self.weighted_mean_dim, keepdim=True)
updates = torch.einsum('bjd,bij->bid', v, attn)
return updates
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x):
return x + self.fn(x)
class GatedResidual(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.gru = nn.GRUCell(dim, dim)
self.fn = fn
def forward(self, *args):
inputs = args[0]
b, _, d = inputs.shape
updates = self.fn(*args)
inputs = self.gru(
updates.reshape(-1, d),
inputs.reshape(-1, d)
)
return inputs.reshape(b, -1, d)
class FeedForward(nn.Module):
def __init__(self, dim, hidden_dim):
super().__init__()
hidden_dim = max(dim, hidden_dim)
self.net = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.ReLU(inplace = True),
nn.Linear(hidden_dim, dim)
)
self.norm = nn.LayerNorm(dim)
def forward(self, x):
x = self.norm(x)
return self.net(x)
class SlotAttentionExperimental(nn.Module):
def __init__(self, num_slots, dim, iters = 3, eps = 1e-8, hidden_dim = 128):
super().__init__()
scale = dim ** -0.5
self.num_slots = num_slots
self.iters = iters
self.norm_inputs = nn.LayerNorm(dim)
self.slots_mu = nn.Parameter(torch.randn(1, 1, dim))
self.slots_logsigma = nn.Parameter(torch.zeros(1, 1, dim))
init.xavier_uniform_(self.slots_logsigma)
self.slots_to_inputs_attn = GatedResidual(dim, WeightedAttention(dim, eps = eps))
self.slots_ff = GatedResidual(dim, FeedForward(dim, hidden_dim))
self.inputs_to_slots_attn = GatedResidual(dim, WeightedAttention(dim, eps = eps, softmax_dim = 2, weighted_mean_dim = 1))
self.inputs_ff = GatedResidual(dim, FeedForward(dim, hidden_dim))
def forward(self, inputs, num_slots = None):
b, n, d, device, dtype = *inputs.shape, inputs.device, inputs.dtype
n_s = num_slots if num_slots is not None else self.num_slots
mu = self.slots_mu.expand(b, n_s, -1)
sigma = self.slots_logsigma.exp().expand(b, n_s, -1)
slots = mu + sigma * torch.randn(mu.shape, device = device, dtype = dtype)
inputs = self.norm_inputs(inputs)
for _ in range(self.iters):
slots = self.slots_to_inputs_attn(slots, inputs)
slots = self.slots_ff(slots)
inputs = self.inputs_to_slots_attn(inputs, slots)
inputs = self.inputs_ff(inputs)
return slots, inputs
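# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): the experimental variant
# propagates updates in both directions and returns updated slots and inputs.
# The shapes below are illustrative assumptions.
if __name__ == '__main__':
    slot_attn = SlotAttentionExperimental(num_slots = 5, dim = 512)
    inputs = torch.randn(2, 1024, 512)
    slots, updated_inputs = slot_attn(inputs)
    print(slots.shape, updated_inputs.shape)  # (2, 5, 512) (2, 1024, 512)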
|
slot-attention-master
|
slot_attention/slot_attention_experimental.py
|
from slot_attention.slot_attention import SlotAttention
from slot_attention.slot_attention_experimental import SlotAttentionExperimental
|
slot-attention-master
|
slot_attention/__init__.py
|
import torch
from torch import nn
from torch.nn import init
class SlotAttention(nn.Module):
def __init__(self, num_slots, dim, iters = 3, eps = 1e-8, hidden_dim = 128):
super().__init__()
self.num_slots = num_slots
self.iters = iters
self.eps = eps
self.scale = dim ** -0.5
self.slots_mu = nn.Parameter(torch.randn(1, 1, dim))
self.slots_logsigma = nn.Parameter(torch.zeros(1, 1, dim))
init.xavier_uniform_(self.slots_logsigma)
self.to_q = nn.Linear(dim, dim)
self.to_k = nn.Linear(dim, dim)
self.to_v = nn.Linear(dim, dim)
self.gru = nn.GRUCell(dim, dim)
hidden_dim = max(dim, hidden_dim)
self.mlp = nn.Sequential(
nn.Linear(dim, hidden_dim),
nn.ReLU(inplace = True),
nn.Linear(hidden_dim, dim)
)
self.norm_input = nn.LayerNorm(dim)
self.norm_slots = nn.LayerNorm(dim)
self.norm_pre_ff = nn.LayerNorm(dim)
def forward(self, inputs, num_slots = None):
b, n, d, device, dtype = *inputs.shape, inputs.device, inputs.dtype
n_s = num_slots if num_slots is not None else self.num_slots
mu = self.slots_mu.expand(b, n_s, -1)
sigma = self.slots_logsigma.exp().expand(b, n_s, -1)
slots = mu + sigma * torch.randn(mu.shape, device = device, dtype = dtype)
inputs = self.norm_input(inputs)
k, v = self.to_k(inputs), self.to_v(inputs)
for _ in range(self.iters):
slots_prev = slots
slots = self.norm_slots(slots)
q = self.to_q(slots)
dots = torch.einsum('bid,bjd->bij', q, k) * self.scale
attn = dots.softmax(dim=1) + self.eps
attn = attn / attn.sum(dim=-1, keepdim=True)
updates = torch.einsum('bjd,bij->bid', v, attn)
slots = self.gru(
updates.reshape(-1, d),
slots_prev.reshape(-1, d)
)
slots = slots.reshape(b, -1, d)
slots = slots + self.mlp(self.norm_pre_ff(slots))
return slots
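# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): a set of feature vectors
# is grouped into num_slots slot vectors of the same dimension. The shapes
# below are illustrative assumptions.
if __name__ == '__main__':
    slot_attn = SlotAttention(num_slots = 5, dim = 512, iters = 3)
    inputs = torch.randn(2, 1024, 512)
    slots = slot_attn(inputs)
    print(slots.shape)  # torch.Size([2, 5, 512])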
|
slot-attention-master
|
slot_attention/slot_attention.py
|
""" Setup
"""
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
def _read_reqs(relpath):
fullpath = path.join(path.dirname(__file__), relpath)
with open(fullpath) as f:
return [s.strip() for s in f.readlines() if (s.strip() and not s.startswith("#"))]
REQUIREMENTS = _read_reqs("requirements.txt")
TRAINING_REQUIREMENTS = _read_reqs("requirements-training.txt")
exec(open('src/open_clip/version.py').read())
setup(
name='open_clip_torch',
version=__version__,
description='OpenCLIP',
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/mlfoundations/open_clip',
author='',
author_email='',
classifiers=[
# How mature is this project? Common values are
# 3 - Alpha
# 4 - Beta
# 5 - Production/Stable
'Development Status :: 3 - Alpha',
'Intended Audience :: Education',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'Topic :: Software Development',
'Topic :: Software Development :: Libraries',
'Topic :: Software Development :: Libraries :: Python Modules',
],
# Note that this is a string of words separated by whitespace, not a list.
keywords='CLIP pretrained',
package_dir={'': 'src'},
packages=find_packages(where='src'),
include_package_data=True,
install_requires=REQUIREMENTS,
extras_require={
"training": TRAINING_REQUIREMENTS,
},
python_requires='>=3.7',
)
|
open_clip-main
|
setup.py
|
import pytest
import torch
from open_clip.hf_model import _POOLERS, HFTextEncoder
from transformers import AutoConfig
from transformers.modeling_outputs import BaseModelOutput
# test poolers
def test_poolers():
bs, sl, d = 2, 10, 5
h = torch.arange(sl).repeat(bs).reshape(bs, sl)[..., None] * torch.linspace(0.2, 1., d)
mask = torch.ones(bs, sl, dtype=torch.long)
mask[:2, 6:] = 0
x = BaseModelOutput(h)
for name, cls in _POOLERS.items():
pooler = cls()
res = pooler(x, mask)
assert res.shape == (bs, d), f"{name} returned wrong shape"
# test HFTextEncoder
@pytest.mark.parametrize("model_id", ["arampacha/roberta-tiny", "roberta-base", "xlm-roberta-base", "google/mt5-base"])
def test_pretrained_text_encoder(model_id):
bs, sl, d = 2, 10, 64
cfg = AutoConfig.from_pretrained(model_id)
model = HFTextEncoder(model_id, d, proj='linear')
x = torch.randint(0, cfg.vocab_size, (bs, sl))
with torch.no_grad():
emb = model(x)
assert emb.shape == (bs, d)
|
open_clip-main
|
tests/test_hf_model.py
|
import os
import pytest
import torch
import open_clip
import util_test
os.environ['CUDA_VISIBLE_DEVICES'] = ''
if hasattr(torch._C, '_jit_set_profiling_executor'):
# legacy executor is too slow to compile large models for unit tests
# no need for the fusion performance here
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(False)
models_to_test = set(open_clip.list_models())
# testing exemptions
models_to_test = models_to_test.difference({
# not available with timm yet
# see https://github.com/mlfoundations/open_clip/issues/219
'convnext_xlarge',
'convnext_xxlarge',
'convnext_xxlarge_320',
'vit_medium_patch16_gap_256',
# exceeds GH runner memory limit
'ViT-bigG-14',
'ViT-e-14',
'mt5-xl-ViT-H-14',
'coca_base',
'coca_ViT-B-32',
'coca_roberta-ViT-B-32'
})
if 'OPEN_CLIP_TEST_REG_MODELS' in os.environ:
external_model_list = os.environ['OPEN_CLIP_TEST_REG_MODELS']
with open(external_model_list, 'r') as f:
models_to_test = set(f.read().splitlines()).intersection(models_to_test)
print(f"Selected models from {external_model_list}: {models_to_test}")
# TODO: add "coca_ViT-B-32" onece https://github.com/pytorch/pytorch/issues/92073 gets fixed
models_to_test = list(models_to_test)
models_to_test.sort()
models_to_test = [(model_name, False) for model_name in models_to_test]
models_to_jit_test = {"ViT-B-32"}
models_to_jit_test = list(models_to_jit_test)
models_to_jit_test = [(model_name, True) for model_name in models_to_jit_test]
models_to_test_fully = models_to_test + models_to_jit_test
@pytest.mark.regression_test
@pytest.mark.parametrize("model_name,jit", models_to_test_fully)
def test_inference_with_data(
model_name,
jit,
pretrained = None,
pretrained_hf = False,
precision = 'fp32',
force_quick_gelu = False,
):
util_test.seed_all()
model, _, preprocess_val = open_clip.create_model_and_transforms(
model_name,
pretrained = pretrained,
precision = precision,
jit = jit,
force_quick_gelu = force_quick_gelu,
pretrained_hf = pretrained_hf
)
model_id = f'{model_name}_{pretrained or pretrained_hf}_{precision}'
input_dir, output_dir = util_test.get_data_dirs()
# text
input_text_path = os.path.join(input_dir, 'random_text.pt')
gt_text_path = os.path.join(output_dir, f'{model_id}_random_text.pt')
if not os.path.isfile(input_text_path):
pytest.skip(reason = f"missing test data, expected at {input_text_path}")
if not os.path.isfile(gt_text_path):
pytest.skip(reason = f"missing test data, expected at {gt_text_path}")
input_text = torch.load(input_text_path)
gt_text = torch.load(gt_text_path)
y_text = util_test.inference_text(model, model_name, input_text)
assert (y_text == gt_text).all(), f"text output differs @ {input_text_path}"
# image
image_size = model.visual.image_size
if not isinstance(image_size, tuple):
image_size = (image_size, image_size)
input_image_path = os.path.join(input_dir, f'random_image_{image_size[0]}_{image_size[1]}.pt')
gt_image_path = os.path.join(output_dir, f'{model_id}_random_image.pt')
if not os.path.isfile(input_image_path):
pytest.skip(reason = f"missing test data, expected at {input_image_path}")
if not os.path.isfile(gt_image_path):
pytest.skip(reason = f"missing test data, expected at {gt_image_path}")
input_image = torch.load(input_image_path)
gt_image = torch.load(gt_image_path)
y_image = util_test.inference_image(model, preprocess_val, input_image)
assert (y_image == gt_image).all(), f"image output differs @ {input_image_path}"
if not jit:
model.eval()
model_out = util_test.forward_model(model, model_name, preprocess_val, input_image, input_text)
if type(model) not in [open_clip.CLIP, open_clip.CustomTextCLIP]:
assert type(model_out) == dict
else:
model.output_dict = True
model_out_dict = util_test.forward_model(model, model_name, preprocess_val, input_image, input_text)
assert (model_out_dict["image_features"] == model_out[0]).all()
assert (model_out_dict["text_features"] == model_out[1]).all()
assert (model_out_dict["logit_scale"] == model_out[2]).all()
model.output_dict = None
else:
model, _, preprocess_val = open_clip.create_model_and_transforms(
model_name,
pretrained = pretrained,
precision = precision,
jit = False,
force_quick_gelu = force_quick_gelu,
pretrained_hf = pretrained_hf
)
test_model = util_test.TestWrapper(model, model_name, output_dict=False)
test_model = torch.jit.script(test_model)
model_out = util_test.forward_model(test_model, model_name, preprocess_val, input_image, input_text)
assert model_out["test_output"].shape[-1] == 2
test_model = util_test.TestWrapper(model, model_name, output_dict=True)
test_model = torch.jit.script(test_model)
model_out = util_test.forward_model(test_model, model_name, preprocess_val, input_image, input_text)
assert model_out["test_output"].shape[-1] == 2
|
open_clip-main
|
tests/test_inference.py
|
import pytest
from training.data import get_dataset_size
@pytest.mark.parametrize(
"shards,expected_size",
[
('/path/to/shard.tar', 1),
('/path/to/shard_{000..000}.tar', 1),
('/path/to/shard_{000..009}.tar', 10),
('/path/to/shard_{000..009}_{000..009}.tar', 100),
('/path/to/shard.tar::/path/to/other_shard_{000..009}.tar', 11),
('/path/to/shard_{000..009}.tar::/path/to/other_shard_{000..009}.tar', 20),
(['/path/to/shard.tar'], 1),
(['/path/to/shard.tar', '/path/to/other_shard.tar'], 2),
]
)
def test_num_shards(shards, expected_size):
_, size = get_dataset_size(shards)
assert size == expected_size, f'Expected {expected_size} for {shards} but found {size} instead.'
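# ---------------------------------------------------------------------------
# Hedged sketch (not part of the original test file): one way the shard specs
# above could be expanded and counted, using the braceexpand package that
# webdataset-style pipelines rely on. Illustrative only; the behaviour under
# test lives in training.data.get_dataset_size.
def _example_count_shards(shards):
    from braceexpand import braceexpand
    if isinstance(shards, str):
        shards = shards.split('::')  # '::' joins independent shard specs
    # '{000..009}' brace ranges expand to ten individual shard names
    return sum(len(list(braceexpand(s))) for s in shards)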
|
open_clip-main
|
tests/test_num_shards.py
|
import os
import random
import numpy as np
from PIL import Image
import torch
if __name__ != '__main__':
import open_clip
os.environ['CUDA_VISIBLE_DEVICES'] = ''
def seed_all(seed = 0):
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
torch.use_deterministic_algorithms(True, warn_only=False)
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
def inference_text(model, model_name, batches):
y = []
tokenizer = open_clip.get_tokenizer(model_name)
with torch.no_grad():
for x in batches:
x = tokenizer(x)
y.append(model.encode_text(x))
return torch.stack(y)
def inference_image(model, preprocess_val, batches):
y = []
with torch.no_grad():
for x in batches:
x = torch.stack([preprocess_val(img) for img in x])
y.append(model.encode_image(x))
return torch.stack(y)
def forward_model(model, model_name, preprocess_val, image_batch, text_batch):
y = []
tokenizer = open_clip.get_tokenizer(model_name)
with torch.no_grad():
for x_im, x_txt in zip(image_batch, text_batch):
x_im = torch.stack([preprocess_val(im) for im in x_im])
x_txt = tokenizer(x_txt)
y.append(model(x_im, x_txt))
if type(y[0]) == dict:
out = {}
for key in y[0].keys():
out[key] = torch.stack([batch_out[key] for batch_out in y])
else:
out = []
for i in range(len(y[0])):
out.append(torch.stack([batch_out[i] for batch_out in y]))
return out
def random_image_batch(batch_size, size):
h, w = size
data = np.random.randint(255, size = (batch_size, h, w, 3), dtype = np.uint8)
return [ Image.fromarray(d) for d in data ]
def random_text_batch(batch_size, min_length = 75, max_length = 75):
t = open_clip.tokenizer.SimpleTokenizer()
# every token decoded as string, exclude SOT and EOT, replace EOW with space
token_words = [
x[1].replace('</w>', ' ')
for x in t.decoder.items()
if x[0] not in t.all_special_ids
]
# strings of randomly chosen tokens
return [
''.join(random.choices(
token_words,
k = random.randint(min_length, max_length)
))
for _ in range(batch_size)
]
def create_random_text_data(
path,
min_length = 75,
max_length = 75,
batches = 1,
batch_size = 1
):
text_batches = [
random_text_batch(batch_size, min_length, max_length)
for _ in range(batches)
]
print(f"{path}")
torch.save(text_batches, path)
def create_random_image_data(path, size, batches = 1, batch_size = 1):
image_batches = [
random_image_batch(batch_size, size)
for _ in range(batches)
]
print(f"{path}")
torch.save(image_batches, path)
def get_data_dirs(make_dir = True):
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'data')
input_dir = os.path.join(data_dir, 'input')
output_dir = os.path.join(data_dir, 'output')
if make_dir:
os.makedirs(input_dir, exist_ok = True)
os.makedirs(output_dir, exist_ok = True)
    assert os.path.isdir(input_dir), f"data directory missing, expected at {input_dir}"
    assert os.path.isdir(output_dir), f"data directory missing, expected at {output_dir}"
return input_dir, output_dir
def create_test_data_for_model(
model_name,
pretrained = None,
precision = 'fp32',
jit = False,
pretrained_hf = False,
force_quick_gelu = False,
create_missing_input_data = True,
batches = 1,
batch_size = 1,
overwrite = False
):
model_id = f'{model_name}_{pretrained or pretrained_hf}_{precision}'
input_dir, output_dir = get_data_dirs()
output_file_text = os.path.join(output_dir, f'{model_id}_random_text.pt')
output_file_image = os.path.join(output_dir, f'{model_id}_random_image.pt')
text_exists = os.path.exists(output_file_text)
image_exists = os.path.exists(output_file_image)
if not overwrite and text_exists and image_exists:
return
seed_all()
model, _, preprocess_val = open_clip.create_model_and_transforms(
model_name,
pretrained = pretrained,
precision = precision,
jit = jit,
force_quick_gelu = force_quick_gelu,
pretrained_hf = pretrained_hf
)
# text
if overwrite or not text_exists:
input_file_text = os.path.join(input_dir, 'random_text.pt')
if create_missing_input_data and not os.path.exists(input_file_text):
create_random_text_data(
input_file_text,
batches = batches,
batch_size = batch_size
)
assert os.path.isfile(input_file_text), f"missing input data, expected at {input_file_text}"
input_data_text = torch.load(input_file_text)
output_data_text = inference_text(model, model_name, input_data_text)
print(f"{output_file_text}")
torch.save(output_data_text, output_file_text)
# image
if overwrite or not image_exists:
size = model.visual.image_size
if not isinstance(size, tuple):
size = (size, size)
input_file_image = os.path.join(input_dir, f'random_image_{size[0]}_{size[1]}.pt')
if create_missing_input_data and not os.path.exists(input_file_image):
create_random_image_data(
input_file_image,
size,
batches = batches,
batch_size = batch_size
)
assert os.path.isfile(input_file_image), f"missing input data, expected at {input_file_image}"
input_data_image = torch.load(input_file_image)
output_data_image = inference_image(model, preprocess_val, input_data_image)
print(f"{output_file_image}")
torch.save(output_data_image, output_file_image)
def create_test_data(
models,
batches = 1,
batch_size = 1,
overwrite = False
):
models = list(set(models).difference({
# not available with timm
# see https://github.com/mlfoundations/open_clip/issues/219
'timm-convnext_xlarge',
'timm-vit_medium_patch16_gap_256'
}).intersection(open_clip.list_models()))
models.sort()
print(f"generating test data for:\n{models}")
for model_name in models:
print(model_name)
create_test_data_for_model(
model_name,
batches = batches,
batch_size = batch_size,
overwrite = overwrite
)
return models
def _sytem_assert(string):
assert os.system(string) == 0
class TestWrapper(torch.nn.Module):
output_dict: torch.jit.Final[bool]
def __init__(self, model, model_name, output_dict=True) -> None:
super().__init__()
self.model = model
self.output_dict = output_dict
if type(model) in [open_clip.CLIP, open_clip.CustomTextCLIP]:
self.model.output_dict = self.output_dict
config = open_clip.get_model_config(model_name)
self.head = torch.nn.Linear(config["embed_dim"], 2)
def forward(self, image, text):
x = self.model(image, text)
if self.output_dict:
out = self.head(x["image_features"])
else:
out = self.head(x[0])
return {"test_output": out}
def main(args):
global open_clip
import importlib
import shutil
import subprocess
import argparse
parser = argparse.ArgumentParser(description = "Populate test data directory")
parser.add_argument(
'-a', '--all',
action = 'store_true',
help = "create test data for all models"
)
parser.add_argument(
'-m', '--model',
type = str,
default = [],
nargs = '+',
help = "model(s) to create test data for"
)
parser.add_argument(
'-f', '--model_list',
type = str,
help = "path to a text file containing a list of model names, one model per line"
)
parser.add_argument(
'-s', '--save_model_list',
type = str,
help = "path to save the list of models that data was generated for"
)
parser.add_argument(
'-g', '--git_revision',
type = str,
help = "git revision to generate test data for"
)
parser.add_argument(
'--overwrite',
action = 'store_true',
help = "overwrite existing output data"
)
parser.add_argument(
'-n', '--num_batches',
default = 1,
type = int,
help = "amount of data batches to create (default: 1)"
)
parser.add_argument(
'-b', '--batch_size',
default = 1,
type = int,
help = "test data batch size (default: 1)"
)
args = parser.parse_args(args)
model_list = []
if args.model_list is not None:
with open(args.model_list, 'r') as f:
model_list = f.read().splitlines()
if not args.all and len(args.model) < 1 and len(model_list) < 1:
print("error: at least one model name is required")
parser.print_help()
parser.exit(1)
if args.git_revision is not None:
stash_output = subprocess.check_output(['git', 'stash']).decode().splitlines()
has_stash = len(stash_output) > 0 and stash_output[0] != 'No local changes to save'
current_branch = subprocess.check_output(['git', 'branch', '--show-current'])
if len(current_branch) < 1:
# not on a branch -> detached head
current_branch = subprocess.check_output(['git', 'rev-parse', 'HEAD'])
current_branch = current_branch.splitlines()[0].decode()
try:
_sytem_assert(f'git checkout {args.git_revision}')
except AssertionError as e:
_sytem_assert(f'git checkout -f {current_branch}')
if has_stash:
os.system(f'git stash pop')
raise e
open_clip = importlib.import_module('open_clip')
models = open_clip.list_models() if args.all else args.model + model_list
try:
models = create_test_data(
models,
batches = args.num_batches,
batch_size = args.batch_size,
overwrite = args.overwrite
)
finally:
if args.git_revision is not None:
test_dir = os.path.join(os.path.dirname(__file__), 'data')
test_dir_ref = os.path.join(os.path.dirname(__file__), 'data_ref')
if os.path.exists(test_dir_ref):
shutil.rmtree(test_dir_ref, ignore_errors = True)
if os.path.exists(test_dir):
os.rename(test_dir, test_dir_ref)
_sytem_assert(f'git checkout {current_branch}')
if has_stash:
os.system(f'git stash pop')
os.rename(test_dir_ref, test_dir)
if args.save_model_list is not None:
print(f"Saving model list as {args.save_model_list}")
with open(args.save_model_list, 'w') as f:
for m in models:
print(m, file=f)
if __name__ == '__main__':
import sys
main(sys.argv[1:])
|
open_clip-main
|
tests/util_test.py
|
import torch
from PIL import Image
from open_clip.factory import get_tokenizer
import pytest
import open_clip
import os
os.environ["CUDA_VISIBLE_DEVICES"] = ""
if hasattr(torch._C, '_jit_set_profiling_executor'):
# legacy executor is too slow to compile large models for unit tests
# no need for the fusion performance here
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(False)
test_simple_models = [
# model, pretrained, jit, force_custom_text
("ViT-B-32", "laion2b_s34b_b79k", False, False),
("ViT-B-32", "laion2b_s34b_b79k", True, False),
("ViT-B-32", "laion2b_s34b_b79k", True, True),
("roberta-ViT-B-32", "laion2b_s12b_b32k", False, False),
]
@pytest.mark.parametrize("model_type,pretrained,jit,force_custom_text", test_simple_models)
def test_inference_simple(
model_type,
pretrained,
jit,
force_custom_text,
):
model, _, preprocess = open_clip.create_model_and_transforms(
model_type,
pretrained=pretrained,
jit=jit,
force_custom_text=force_custom_text,
)
tokenizer = get_tokenizer(model_type)
current_dir = os.path.dirname(os.path.realpath(__file__))
image = preprocess(Image.open(current_dir + "/../docs/CLIP.png")).unsqueeze(0)
text = tokenizer(["a diagram", "a dog", "a cat"])
with torch.no_grad():
image_features = model.encode_image(image)
text_features = model.encode_text(text)
text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
assert text_probs.cpu().numpy()[0].tolist() == [1.0, 0.0, 0.0]
|
open_clip-main
|
tests/test_inference_simple.py
|
import requests
import torch
from PIL import Image
import hashlib
import tempfile
import unittest
from io import BytesIO
from pathlib import Path
from unittest.mock import patch
from urllib3 import HTTPResponse
from urllib3._collections import HTTPHeaderDict
import open_clip
from open_clip.pretrained import download_pretrained_from_url
class DownloadPretrainedTests(unittest.TestCase):
def create_response(self, data, status_code=200, content_type='application/octet-stream'):
fp = BytesIO(data)
headers = HTTPHeaderDict({
'Content-Type': content_type,
'Content-Length': str(len(data))
})
raw = HTTPResponse(fp, preload_content=False, headers=headers, status=status_code)
return raw
@patch('open_clip.pretrained.urllib')
def test_download_pretrained_from_url_from_openaipublic(self, urllib):
file_contents = b'pretrained model weights'
expected_hash = hashlib.sha256(file_contents).hexdigest()
urllib.request.urlopen.return_value = self.create_response(file_contents)
with tempfile.TemporaryDirectory() as root:
url = f'https://openaipublic.azureedge.net/clip/models/{expected_hash}/RN50.pt'
download_pretrained_from_url(url, root)
urllib.request.urlopen.assert_called_once()
@patch('open_clip.pretrained.urllib')
def test_download_pretrained_from_url_from_openaipublic_corrupted(self, urllib):
file_contents = b'pretrained model weights'
expected_hash = hashlib.sha256(file_contents).hexdigest()
urllib.request.urlopen.return_value = self.create_response(b'corrupted pretrained model')
with tempfile.TemporaryDirectory() as root:
url = f'https://openaipublic.azureedge.net/clip/models/{expected_hash}/RN50.pt'
with self.assertRaisesRegex(RuntimeError, r'checksum does not not match'):
download_pretrained_from_url(url, root)
urllib.request.urlopen.assert_called_once()
@patch('open_clip.pretrained.urllib')
def test_download_pretrained_from_url_from_openaipublic_valid_cache(self, urllib):
file_contents = b'pretrained model weights'
expected_hash = hashlib.sha256(file_contents).hexdigest()
urllib.request.urlopen.return_value = self.create_response(file_contents)
with tempfile.TemporaryDirectory() as root:
local_file = Path(root) / 'RN50.pt'
local_file.write_bytes(file_contents)
url = f'https://openaipublic.azureedge.net/clip/models/{expected_hash}/RN50.pt'
download_pretrained_from_url(url, root)
urllib.request.urlopen.assert_not_called()
@patch('open_clip.pretrained.urllib')
def test_download_pretrained_from_url_from_openaipublic_corrupted_cache(self, urllib):
file_contents = b'pretrained model weights'
expected_hash = hashlib.sha256(file_contents).hexdigest()
urllib.request.urlopen.return_value = self.create_response(file_contents)
with tempfile.TemporaryDirectory() as root:
local_file = Path(root) / 'RN50.pt'
local_file.write_bytes(b'corrupted pretrained model')
url = f'https://openaipublic.azureedge.net/clip/models/{expected_hash}/RN50.pt'
download_pretrained_from_url(url, root)
urllib.request.urlopen.assert_called_once()
@patch('open_clip.pretrained.urllib')
def test_download_pretrained_from_url_from_mlfoundations(self, urllib):
file_contents = b'pretrained model weights'
expected_hash = hashlib.sha256(file_contents).hexdigest()[:8]
urllib.request.urlopen.return_value = self.create_response(file_contents)
with tempfile.TemporaryDirectory() as root:
url = f'https://github.com/mlfoundations/download/v0.2-weights/rn50-quickgelu-{expected_hash}.pt'
download_pretrained_from_url(url, root)
urllib.request.urlopen.assert_called_once()
@patch('open_clip.pretrained.urllib')
def test_download_pretrained_from_url_from_mlfoundations_corrupted(self, urllib):
file_contents = b'pretrained model weights'
expected_hash = hashlib.sha256(file_contents).hexdigest()[:8]
urllib.request.urlopen.return_value = self.create_response(b'corrupted pretrained model')
with tempfile.TemporaryDirectory() as root:
url = f'https://github.com/mlfoundations/download/v0.2-weights/rn50-quickgelu-{expected_hash}.pt'
with self.assertRaisesRegex(RuntimeError, r'checksum does not not match'):
download_pretrained_from_url(url, root)
urllib.request.urlopen.assert_called_once()
@patch('open_clip.pretrained.urllib')
def test_download_pretrained_from_hfh(self, urllib):
model, _, preprocess = open_clip.create_model_and_transforms('hf-hub:hf-internal-testing/tiny-open-clip-model')
tokenizer = open_clip.get_tokenizer('hf-hub:hf-internal-testing/tiny-open-clip-model')
img_url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/coco_sample.png"
image = preprocess(Image.open(requests.get(img_url, stream=True).raw)).unsqueeze(0)
text = tokenizer(["a diagram", "a dog", "a cat"])
with torch.no_grad():
image_features = model.encode_image(image)
text_features = model.encode_text(text)
image_features /= image_features.norm(dim=-1, keepdim=True)
text_features /= text_features.norm(dim=-1, keepdim=True)
text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)
self.assertTrue(torch.allclose(text_probs, torch.tensor([[0.0597, 0.6349, 0.3053]]), 1e-3))
|
open_clip-main
|
tests/test_download_pretrained.py
|
import os
import sys
import pytest
from PIL import Image
import torch
from training.main import main
os.environ["CUDA_VISIBLE_DEVICES"] = ""
if hasattr(torch._C, '_jit_set_profiling_executor'):
# legacy executor is too slow to compile large models for unit tests
# no need for the fusion performance here
torch._C._jit_set_profiling_executor(True)
torch._C._jit_set_profiling_mode(False)
@pytest.mark.skipif(sys.platform.startswith('darwin'), reason="macos pickle bug with locals")
def test_training():
main([
'--save-frequency', '1',
'--zeroshot-frequency', '1',
'--dataset-type', "synthetic",
'--train-num-samples', '16',
'--warmup', '1',
'--batch-size', '4',
'--lr', '1e-3',
'--wd', '0.1',
'--epochs', '1',
'--workers', '2',
'--model', 'RN50'
])
@pytest.mark.skipif(sys.platform.startswith('darwin'), reason="macos pickle bug with locals")
def test_training_coca():
main([
'--save-frequency', '1',
'--zeroshot-frequency', '1',
'--dataset-type', "synthetic",
'--train-num-samples', '16',
'--warmup', '1',
'--batch-size', '4',
'--lr', '1e-3',
'--wd', '0.1',
'--epochs', '1',
'--workers', '2',
'--model', 'coca_ViT-B-32'
])
@pytest.mark.skipif(sys.platform.startswith('darwin'), reason="macos pickle bug with locals")
def test_training_mt5():
main([
'--save-frequency', '1',
'--zeroshot-frequency', '1',
'--dataset-type', "synthetic",
'--train-num-samples', '16',
'--warmup', '1',
'--batch-size', '4',
'--lr', '1e-3',
'--wd', '0.1',
'--epochs', '1',
'--workers', '2',
'--model', 'mt5-base-ViT-B-32',
'--lock-text',
'--lock-text-unlocked-layers', '2'
])
@pytest.mark.skipif(sys.platform.startswith('darwin'), reason="macos pickle bug with locals")
def test_training_unfreezing_vit():
main([
'--save-frequency', '1',
'--zeroshot-frequency', '1',
'--dataset-type', "synthetic",
'--train-num-samples', '16',
'--warmup', '1',
'--batch-size', '4',
'--lr', '1e-3',
'--wd', '0.1',
'--epochs', '1',
'--workers', '2',
'--model', 'ViT-B-32',
'--lock-image',
'--lock-image-unlocked-groups', '5'
])
@pytest.mark.skipif(sys.platform.startswith('darwin'), reason="macos pickle bug with locals")
def test_training_clip_with_jit():
main([
'--save-frequency', '1',
'--zeroshot-frequency', '1',
'--dataset-type', "synthetic",
'--train-num-samples', '16',
'--warmup', '1',
'--batch-size', '4',
'--lr', '1e-3',
'--wd', '0.1',
'--epochs', '1',
'--workers', '2',
'--model', 'ViT-B-32',
'--torchscript'
])
|
open_clip-main
|
tests/test_training_simple.py
|
import argparse
import ast
def get_default_params(model_name):
# Params from paper (https://arxiv.org/pdf/2103.00020.pdf)
model_name = model_name.lower()
if "vit" in model_name:
return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.98, "eps": 1.0e-6}
else:
return {"lr": 5.0e-4, "beta1": 0.9, "beta2": 0.999, "eps": 1.0e-8}
class ParseKwargs(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
kw = {}
for value in values:
key, value = value.split('=')
try:
kw[key] = ast.literal_eval(value)
except ValueError:
kw[key] = str(value) # fallback to string (avoid need to escape on command line)
setattr(namespace, self.dest, kw)
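# Hedged usage sketch (not part of the original file): how the ParseKwargs
# action above turns repeated key=value tokens into a dict. The --opt flag name
# is an illustrative assumption.
def _example_parse_kwargs():
    parser = argparse.ArgumentParser()
    parser.add_argument('--opt', nargs='*', action=ParseKwargs, default={})
    args = parser.parse_args(['--opt', 'lr=1e-3', 'name=run1'])
    return args.opt  # {'lr': 0.001, 'name': 'run1'}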
def parse_args(args):
parser = argparse.ArgumentParser()
parser.add_argument(
"--train-data",
type=str,
default=None,
help="Path to file(s) with training data",
)
parser.add_argument(
"--val-data",
type=str,
default=None,
help="Path to file(s) with validation data",
)
parser.add_argument(
"--train-num-samples",
type=int,
default=None,
help="Number of samples in dataset. Required for webdataset if not available in info file.",
)
parser.add_argument(
"--val-num-samples",
type=int,
default=None,
help="Number of samples in dataset. Useful for webdataset if not available in info file.",
)
parser.add_argument(
"--dataset-type",
choices=["webdataset", "csv", "synthetic", "auto"],
default="auto",
help="Which type of dataset to process."
)
parser.add_argument(
"--dataset-resampled",
default=False,
action="store_true",
help="Whether to use sampling with replacement for webdataset shard selection."
)
parser.add_argument(
"--csv-separator",
type=str,
default="\t",
help="For csv-like datasets, which separator to use."
)
parser.add_argument(
"--csv-img-key",
type=str,
default="filepath",
help="For csv-like datasets, the name of the key for the image paths."
)
parser.add_argument(
"--csv-caption-key",
type=str,
default="title",
help="For csv-like datasets, the name of the key for the captions."
)
parser.add_argument(
"--imagenet-val",
type=str,
default=None,
help="Path to imagenet val set for conducting zero shot evaluation.",
)
parser.add_argument(
"--imagenet-v2",
type=str,
default=None,
help="Path to imagenet v2 for conducting zero shot evaluation.",
)
parser.add_argument(
"--logs",
type=str,
default="./logs/",
help="Where to store tensorboard logs. Use None to avoid storing logs.",
)
parser.add_argument(
"--log-local",
action="store_true",
default=False,
help="log files on local master, otherwise global master only.",
)
parser.add_argument(
"--name",
type=str,
default=None,
help="Optional identifier for the experiment when storing logs. Otherwise use current time.",
)
parser.add_argument(
"--workers", type=int, default=1, help="Number of dataloader workers per GPU."
)
parser.add_argument(
"--batch-size", type=int, default=64, help="Batch size per GPU."
)
parser.add_argument(
"--epochs", type=int, default=32, help="Number of epochs to train for."
)
parser.add_argument(
"--epochs-cooldown", type=int, default=None,
help="When scheduler w/ cooldown used, perform cooldown from total_epochs - cooldown_epochs onwards."
)
parser.add_argument("--lr", type=float, default=None, help="Learning rate.")
parser.add_argument("--beta1", type=float, default=None, help="Adam beta 1.")
parser.add_argument("--beta2", type=float, default=None, help="Adam beta 2.")
parser.add_argument("--eps", type=float, default=None, help="Adam epsilon.")
parser.add_argument("--wd", type=float, default=0.2, help="Weight decay.")
parser.add_argument(
"--warmup", type=int, default=10000, help="Number of steps to warmup for."
)
parser.add_argument(
"--use-bn-sync",
default=False,
action="store_true",
help="Whether to use batch norm sync.")
parser.add_argument(
"--skip-scheduler",
action="store_true",
default=False,
help="Use this flag to skip the learning rate decay.",
)
parser.add_argument(
"--lr-scheduler",
type=str,
default='cosine',
help="LR scheduler. One of: 'cosine', 'const' (constant), 'const-cooldown' (constant w/ cooldown). Default: cosine",
)
parser.add_argument(
"--lr-cooldown-end", type=float, default=0.0,
help="End learning rate for cooldown schedule. Default: 0"
)
parser.add_argument(
"--lr-cooldown-power", type=float, default=1.0,
help="Power for polynomial cooldown schedule. Default: 1.0 (linear decay)"
)
parser.add_argument(
"--save-frequency", type=int, default=1, help="How often to save checkpoints."
)
parser.add_argument(
"--save-most-recent",
action="store_true",
default=False,
help="Always save the most recent model trained to epoch_latest.pt.",
)
parser.add_argument(
"--zeroshot-frequency", type=int, default=2, help="How often to run zero shot."
)
parser.add_argument(
"--val-frequency", type=int, default=1, help="How often to run evaluation with val data."
)
parser.add_argument(
"--resume",
default=None,
type=str,
help="path to latest checkpoint (default: none)",
)
parser.add_argument(
"--precision",
choices=["amp", "amp_bf16", "amp_bfloat16", "bf16", "fp16", "fp32"],
default="amp",
help="Floating point precision."
)
parser.add_argument(
"--model",
type=str,
default="RN50",
help="Name of the vision backbone to use.",
)
parser.add_argument(
"--pretrained",
default='',
type=str,
        help="Use pretrained CLIP model weights with the specified tag or file path.",
)
parser.add_argument(
"--pretrained-image",
default=False,
action='store_true',
help="Load imagenet pretrained weights for image tower backbone if available.",
)
parser.add_argument(
"--lock-image",
default=False,
action='store_true',
help="Lock full image tower by disabling gradients.",
)
parser.add_argument(
"--lock-image-unlocked-groups",
type=int,
default=0,
help="Leave last n image tower layer groups unlocked.",
)
parser.add_argument(
"--lock-image-freeze-bn-stats",
default=False,
action='store_true',
help="Freeze BatchNorm running stats in image tower for any locked layers.",
)
parser.add_argument(
'--image-mean', type=float, nargs='+', default=None, metavar='MEAN',
help='Override default image mean value of dataset')
parser.add_argument(
'--image-std', type=float, nargs='+', default=None, metavar='STD',
        help='Override default image std deviation of dataset')
parser.add_argument('--aug-cfg', nargs='*', default={}, action=ParseKwargs)
parser.add_argument(
"--grad-checkpointing",
default=False,
action='store_true',
help="Enable gradient checkpointing.",
)
parser.add_argument(
"--local-loss",
default=False,
action="store_true",
help="calculate loss w/ local features @ global (instead of realizing full global @ global matrix)"
)
parser.add_argument(
"--gather-with-grad",
default=False,
action="store_true",
help="enable full distributed gradient for feature gather"
)
parser.add_argument(
'--force-image-size', type=int, nargs='+', default=None,
help='Override default image size'
)
parser.add_argument(
"--force-quick-gelu",
default=False,
action='store_true',
help="Force use of QuickGELU activation for non-OpenAI transformer models.",
)
parser.add_argument(
"--force-patch-dropout",
default=None,
type=float,
help="Override the patch dropout during training, for fine tuning with no dropout near the end as in the paper",
)
parser.add_argument(
"--force-custom-text",
default=False,
action='store_true',
help="Force use of CustomTextCLIP model (separate text-tower).",
)
parser.add_argument(
"--torchscript",
default=False,
action='store_true',
help="torch.jit.script the model, also uses jit version of OpenAI models if pretrained=='openai'",
)
parser.add_argument(
"--trace",
default=False,
action='store_true',
help="torch.jit.trace the model for inference / eval only",
)
    parser.add_argument(
        "--accum-freq", type=int, default=1, help="Update the model every --accum-freq steps."
)
# arguments for distributed training
parser.add_argument(
"--dist-url",
default="env://",
type=str,
help="url used to set up distributed training",
)
parser.add_argument(
"--dist-backend", default="nccl", type=str, help="distributed backend"
)
parser.add_argument(
"--report-to",
default='',
type=str,
help="Options are ['wandb', 'tensorboard', 'wandb,tensorboard']"
)
parser.add_argument(
"--wandb-notes",
default='',
type=str,
help="Notes if logging with wandb"
)
parser.add_argument(
"--wandb-project-name",
type=str,
default='open-clip',
help="Name of the project if logging with wandb.",
)
parser.add_argument(
"--debug",
default=False,
action="store_true",
help="If true, more information is logged."
)
parser.add_argument(
"--copy-codebase",
default=False,
action="store_true",
        help="If true, we copy the entire codebase to the log directory, and execute from there."
)
parser.add_argument(
"--horovod",
default=False,
action="store_true",
help="Use horovod for distributed training."
)
parser.add_argument(
"--ddp-static-graph",
default=False,
action='store_true',
help="Enable static graph optimization for DDP in PyTorch >= 1.11.",
)
parser.add_argument(
"--no-set-device-rank",
default=False,
action="store_true",
help="Don't set device index from local rank (when CUDA_VISIBLE_DEVICES restricted to one per proc)."
)
parser.add_argument(
"--seed", type=int, default=0, help="Default random seed."
)
parser.add_argument(
"--grad-clip-norm", type=float, default=None, help="Gradient clip."
)
parser.add_argument(
"--lock-text",
default=False,
action='store_true',
help="Lock full text tower by disabling gradients.",
)
parser.add_argument(
"--lock-text-unlocked-layers",
type=int,
default=0,
        help="Leave last n text tower layers unlocked.",
)
parser.add_argument(
"--lock-text-freeze-layer-norm",
default=False,
action='store_true',
        help="Freeze LayerNorm running stats in text tower for any locked layers.",
)
parser.add_argument(
"--log-every-n-steps",
type=int,
default=100,
help="Log every n steps to tensorboard/console/wandb.",
)
parser.add_argument(
"--coca-caption-loss-weight",
type=float,
default=2.0,
help="Weight assigned to caption loss in CoCa."
)
parser.add_argument(
"--coca-contrastive-loss-weight",
type=float,
default=1.0,
help="Weight assigned to contrastive loss when training CoCa."
)
parser.add_argument(
"--remote-sync",
type=str,
default=None,
        help="Optionally sync with a remote path specified by this arg",
)
parser.add_argument(
"--remote-sync-frequency",
type=int,
default=300,
        help="How frequently to sync to a remote directory if --remote-sync is not None.",
)
parser.add_argument(
"--remote-sync-protocol",
choices=["s3", "fsspec"],
default="s3",
help="How to do the remote sync backup if --remote-sync is not None.",
)
parser.add_argument(
"--delete-previous-checkpoint",
default=False,
action="store_true",
help="If true, delete previous checkpoint after storing a new one."
)
args = parser.parse_args(args)
# If some params are not passed, we use the default values based on model name.
default_params = get_default_params(args.model)
for name, val in default_params.items():
if getattr(args, name) is None:
setattr(args, name, val)
return args
|
open_clip-main
|
src/training/params.py
|
import argparse
import torch
import open_clip
import pandas as pd
from fvcore.nn import FlopCountAnalysis, flop_count_str, ActivationCountAnalysis
parser = argparse.ArgumentParser(description='OpenCLIP Profiler')
# benchmark specific args
parser.add_argument('--model', metavar='NAME', default='',
help='model(s) to profile')
parser.add_argument('--results-file', default='', type=str, metavar='FILENAME',
help='Output csv file for results')
def profile_fvcore(
model,
image_input_size=(3, 224, 224),
text_input_size=(77,),
batch_size=1,
detailed=False,
force_cpu=False
):
if force_cpu:
model = model.to('cpu')
device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
example_image_input = torch.ones((batch_size,) + image_input_size, device=device, dtype=dtype)
example_text_input = torch.ones((batch_size,) + text_input_size, device=device, dtype=torch.int64)
fca = FlopCountAnalysis(model, (example_image_input, example_text_input))
aca = ActivationCountAnalysis(model, (example_image_input, example_text_input))
if detailed:
fcs = flop_count_str(fca)
print(fcs)
return fca.total(), aca.total()
def profile_fvcore_text(
model,
text_input_size=(77,),
batch_size=1,
detailed=False,
force_cpu=False
):
if force_cpu:
model = model.to('cpu')
device = next(model.parameters()).device
example_input = torch.ones((batch_size,) + text_input_size, device=device, dtype=torch.int64)
fca = FlopCountAnalysis(model, example_input)
aca = ActivationCountAnalysis(model, example_input)
if detailed:
fcs = flop_count_str(fca)
print(fcs)
return fca.total(), aca.total()
def profile_fvcore_image(
model,
image_input_size=(3, 224, 224),
batch_size=1,
detailed=False,
force_cpu=False
):
if force_cpu:
model = model.to('cpu')
device, dtype = next(model.parameters()).device, next(model.parameters()).dtype
example_input = torch.ones((batch_size,) + image_input_size, device=device, dtype=dtype)
fca = FlopCountAnalysis(model, example_input)
aca = ActivationCountAnalysis(model, example_input)
if detailed:
fcs = flop_count_str(fca)
print(fcs)
return fca.total(), aca.total()
def count_params(model):
return sum([m.numel() for m in model.parameters()])
def profile_model(model_name):
model = open_clip.create_model(model_name, force_custom_text=True, pretrained_hf=False)
model.eval()
if torch.cuda.is_available():
model = model.cuda()
if isinstance(model.visual.image_size, (tuple, list)):
image_input_size = (3,) + tuple(model.visual.image_size[-2:])
else:
image_input_size = (3, model.visual.image_size, model.visual.image_size)
text_input_size = (77,)
results = {}
results['model'] = model_name
results['image_size'] = image_input_size[1]
model_cfg = open_clip.get_model_config(model_name)
if model_cfg:
vision_cfg = open_clip.CLIPVisionCfg(**model_cfg['vision_cfg'])
text_cfg = open_clip.CLIPTextCfg(**model_cfg['text_cfg'])
results['image_width'] = int(vision_cfg.width)
results['text_width'] = int(text_cfg.width)
results['embed_dim'] = int(model_cfg['embed_dim'])
else:
results['image_width'] = 0
results['text_width'] = 0
results['embed_dim'] = 0
retries = 2
while retries:
retries -= 1
try:
macs, acts = profile_fvcore(
model, image_input_size=image_input_size, text_input_size=text_input_size, force_cpu=not retries)
image_macs, image_acts = profile_fvcore_image(
model.visual, image_input_size=image_input_size, force_cpu=not retries)
text_macs, text_acts = profile_fvcore_text(
model.text, text_input_size=text_input_size, force_cpu=not retries)
results['gmacs'] = round(macs / 1e9, 2)
results['macts'] = round(acts / 1e6, 2)
results['mparams'] = round(count_params(model) / 1e6, 2)
results['image_gmacs'] = round(image_macs / 1e9, 2)
results['image_macts'] = round(image_acts / 1e6, 2)
results['image_mparams'] = round(count_params(model.visual) / 1e6, 2)
results['text_gmacs'] = round(text_macs / 1e9, 2)
results['text_macts'] = round(text_acts / 1e6, 2)
results['text_mparams'] = round(count_params(model.text) / 1e6, 2)
except RuntimeError as e:
pass
return results
def main():
args = parser.parse_args()
# FIXME accept a text file name to allow lists of models in txt/csv
if args.model == 'all':
parsed_model = open_clip.list_models()
else:
parsed_model = args.model.split(',')
results = []
for m in parsed_model:
row = profile_model(m)
results.append(row)
df = pd.DataFrame(results, columns=results[0].keys())
df = df.sort_values('gmacs')
print(df)
if args.results_file:
df.to_csv(args.results_file, index=False)
if __name__ == '__main__':
main()
|
open_clip-main
|
src/training/profile.py
|
open_clip-main
|
src/training/__init__.py
|
|
import logging
def setup_logging(log_file, level, include_host=False):
if include_host:
import socket
hostname = socket.gethostname()
formatter = logging.Formatter(
f'%(asctime)s | {hostname} | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
else:
formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s', datefmt='%Y-%m-%d,%H:%M:%S')
logging.root.setLevel(level)
loggers = [logging.getLogger(name) for name in logging.root.manager.loggerDict]
for logger in loggers:
logger.setLevel(level)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
logging.root.addHandler(stream_handler)
if log_file:
file_handler = logging.FileHandler(filename=log_file)
file_handler.setFormatter(formatter)
logging.root.addHandler(file_handler)
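# Example usage (illustrative, not part of the original module):
#   setup_logging(log_file='out.log', level=logging.INFO)
#   logging.info('hello')
# would emit a line like "2023-01-01,12:00:00 | INFO | hello" to the console and to out.log.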
|
open_clip-main
|
src/training/logger.py
|
import torch
from contextlib import suppress
def get_autocast(precision):
if precision == 'amp':
return torch.cuda.amp.autocast
elif precision == 'amp_bfloat16' or precision == 'amp_bf16':
# amp_bfloat16 is more stable than amp float16 for clip training
return lambda: torch.cuda.amp.autocast(dtype=torch.bfloat16)
else:
return suppress
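# Example usage (illustrative, not part of the original module):
#   autocast = get_autocast('amp')
#   with autocast():
#       out = model(images, texts)   # ops run under torch.cuda.amp.autocast
# For any other precision string (e.g. 'fp32', 'fp16', 'bf16') the returned context
# manager is contextlib.suppress, i.e. a no-op here.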
|
open_clip-main
|
src/training/precision.py
|
import os
import torch
import torch.distributed as dist
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def is_global_master(args):
return args.rank == 0
def is_local_master(args):
return args.local_rank == 0
def is_master(args, local=False):
return is_local_master(args) if local else is_global_master(args)
def is_using_horovod():
# NOTE w/ horovod run, OMPI vars should be set, but w/ SLURM PMI vars will be set
# Differentiating between horovod and DDP use via SLURM may not be possible, so horovod arg still required...
ompi_vars = ["OMPI_COMM_WORLD_RANK", "OMPI_COMM_WORLD_SIZE"]
pmi_vars = ["PMI_RANK", "PMI_SIZE"]
if all([var in os.environ for var in ompi_vars]) or all([var in os.environ for var in pmi_vars]):
return True
else:
return False
def is_using_distributed():
if 'WORLD_SIZE' in os.environ:
return int(os.environ['WORLD_SIZE']) > 1
if 'SLURM_NTASKS' in os.environ:
return int(os.environ['SLURM_NTASKS']) > 1
return False
def world_info_from_env():
local_rank = 0
for v in ('LOCAL_RANK', 'MPI_LOCALRANKID', 'SLURM_LOCALID', 'OMPI_COMM_WORLD_LOCAL_RANK'):
if v in os.environ:
local_rank = int(os.environ[v])
break
global_rank = 0
for v in ('RANK', 'PMI_RANK', 'SLURM_PROCID', 'OMPI_COMM_WORLD_RANK'):
if v in os.environ:
global_rank = int(os.environ[v])
break
world_size = 1
for v in ('WORLD_SIZE', 'PMI_SIZE', 'SLURM_NTASKS', 'OMPI_COMM_WORLD_SIZE'):
if v in os.environ:
world_size = int(os.environ[v])
break
return local_rank, global_rank, world_size
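# Illustrative example (not part of the original file): under torchrun with two processes
# on a single node, LOCAL_RANK / RANK / WORLD_SIZE are set in the environment, so
#   world_info_from_env()  # -> (0, 0, 2) on the first process, (1, 1, 2) on the second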
def init_distributed_device(args):
# Distributed training = training on more than one GPU.
# Works in both single and multi-node scenarios.
args.distributed = False
args.world_size = 1
args.rank = 0 # global rank
args.local_rank = 0
if args.horovod:
assert hvd is not None, "Horovod is not installed"
hvd.init()
args.local_rank = int(hvd.local_rank())
args.rank = hvd.rank()
args.world_size = hvd.size()
args.distributed = True
os.environ['LOCAL_RANK'] = str(args.local_rank)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
elif is_using_distributed():
if 'SLURM_PROCID' in os.environ:
# DDP via SLURM
args.local_rank, args.rank, args.world_size = world_info_from_env()
# SLURM var -> torch.distributed vars in case needed
os.environ['LOCAL_RANK'] = str(args.local_rank)
os.environ['RANK'] = str(args.rank)
os.environ['WORLD_SIZE'] = str(args.world_size)
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url,
world_size=args.world_size,
rank=args.rank,
)
else:
# DDP via torchrun, torch.distributed.launch
args.local_rank, _, _ = world_info_from_env()
torch.distributed.init_process_group(
backend=args.dist_backend,
init_method=args.dist_url)
args.world_size = torch.distributed.get_world_size()
args.rank = torch.distributed.get_rank()
args.distributed = True
if torch.cuda.is_available():
if args.distributed and not args.no_set_device_rank:
device = 'cuda:%d' % args.local_rank
else:
device = 'cuda:0'
torch.cuda.set_device(device)
else:
device = 'cpu'
args.device = device
device = torch.device(device)
return device
def broadcast_object(args, obj, src=0):
# broadcast a pickle-able python object from rank-0 to all ranks
if args.horovod:
return hvd.broadcast_object(obj, root_rank=src)
else:
if args.rank == src:
objects = [obj]
else:
objects = [None]
dist.broadcast_object_list(objects, src=src)
return objects[0]
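# Illustrative usage (not part of the original file): rank 0 computes a value and shares it,
#   date_str = compute_date_str() if args.rank == 0 else None   # compute_date_str is hypothetical
#   date_str = broadcast_object(args, date_str)                  # now identical on every rank
# main.py uses this pattern to sync the experiment name and resume path across workers.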
def all_gather_object(args, obj, dst=0):
# gather a pickle-able python object across all ranks
if args.horovod:
return hvd.allgather_object(obj)
else:
objects = [None for _ in range(args.world_size)]
dist.all_gather_object(objects, obj)
return objects
|
open_clip-main
|
src/training/distributed.py
|
import json
import logging
import math
import os
import time
import numpy as np
import torch
import torch.nn.functional as F
from torch.nn.parallel.distributed import DistributedDataParallel
try:
import wandb
except ImportError:
wandb = None
from open_clip import get_cast_dtype, CLIP, CustomTextCLIP
from .distributed import is_master
from .zero_shot import zero_shot_eval
from .precision import get_autocast
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
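# Example usage (illustrative, not part of the original module):
#   meter = AverageMeter()
#   meter.update(0.5, n=4)     # e.g. a batch of 4 samples with loss 0.5
#   meter.update(1.0, n=4)
#   meter.val, meter.avg       # -> (1.0, 0.75)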
def postprocess_clip_output(model_out):
return {
"image_features": model_out[0],
"text_features": model_out[1],
"logit_scale": model_out[2]
}
def unwrap_model(model):
if hasattr(model, 'module'):
return model.module
else:
return model
def backward(total_loss, scaler):
if scaler is not None:
scaler.scale(total_loss).backward()
else:
total_loss.backward()
def train_one_epoch(model, data, loss, epoch, optimizer, scaler, scheduler, args, tb_writer=None):
device = torch.device(args.device)
autocast = get_autocast(args.precision)
cast_dtype = get_cast_dtype(args.precision)
model.train()
data['train'].set_epoch(epoch) # set epoch in process safe manner via sampler or shared_epoch
dataloader = data['train'].dataloader
num_batches_per_epoch = dataloader.num_batches // args.accum_freq
sample_digits = math.ceil(math.log(dataloader.num_samples + 1, 10))
if args.accum_freq > 1:
accum_images, accum_texts, accum_features = [], [], {}
losses_m = {}
batch_time_m = AverageMeter()
data_time_m = AverageMeter()
end = time.time()
for i, batch in enumerate(dataloader):
i_accum = i // args.accum_freq
step = num_batches_per_epoch * epoch + i_accum
if not args.skip_scheduler:
scheduler(step)
images, texts = batch
images = images.to(device=device, dtype=cast_dtype, non_blocking=True)
texts = texts.to(device=device, non_blocking=True)
data_time_m.update(time.time() - end)
optimizer.zero_grad()
if args.accum_freq == 1:
with autocast():
model_out = model(images, texts)
logit_scale = model_out["logit_scale"]
losses = loss(**model_out, output_dict=True)
total_loss = sum(losses.values())
losses["loss"] = total_loss
backward(total_loss, scaler)
else:
# First, cache the features without any gradient tracking.
with torch.no_grad():
with autocast():
model_out = model(images, texts)
model_out.pop("logit_scale")
for key, val in model_out.items():
if key in accum_features:
accum_features[key].append(val)
else:
accum_features[key] = [val]
accum_images.append(images)
accum_texts.append(texts)
# If (i + 1) % accum_freq is not zero, move on to the next batch.
if ((i + 1) % args.accum_freq) > 0:
# FIXME this makes data time logging unreliable when accumulating
continue
# Now, ready to take gradients for the last accum_freq batches.
# Re-do the forward pass for those batches, and use the cached features from the other batches as negatives.
# Call backwards each time, but only step optimizer at the end.
optimizer.zero_grad()
for j in range(args.accum_freq):
images = accum_images[j]
texts = accum_texts[j]
with autocast():
model_out = model(images, texts, output_dict=True)
logit_scale = model_out.pop("logit_scale")
                inputs = {}
                for key, val in accum_features.items():
                    accumulated = accum_features[key]
                    # swap this micro-batch's freshly computed features into the cached set
                    inputs[key] = torch.cat(accumulated[:j] + [model_out[key]] + accumulated[j + 1:])
                losses = loss(**inputs, logit_scale=logit_scale, output_dict=True)
total_loss = sum(losses.values())
losses["loss"] = total_loss
backward(total_loss, scaler)
if scaler is not None:
if args.horovod:
optimizer.synchronize()
scaler.unscale_(optimizer)
if args.grad_clip_norm is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)
with optimizer.skip_synchronize():
scaler.step(optimizer)
else:
if args.grad_clip_norm is not None:
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)
scaler.step(optimizer)
scaler.update()
else:
if args.grad_clip_norm is not None:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.grad_clip_norm, norm_type=2.0)
optimizer.step()
# reset gradient accum, if enabled
if args.accum_freq > 1:
accum_images, accum_texts, accum_features = [], [], {}
# Note: we clamp to 4.6052 = ln(100), as in the original paper.
with torch.no_grad():
unwrap_model(model).logit_scale.clamp_(0, math.log(100))
batch_time_m.update(time.time() - end)
end = time.time()
batch_count = i_accum + 1
if is_master(args) and (i_accum % args.log_every_n_steps == 0 or batch_count == num_batches_per_epoch):
batch_size = len(images)
num_samples = batch_count * batch_size * args.accum_freq * args.world_size
samples_per_epoch = dataloader.num_samples
percent_complete = 100.0 * batch_count / num_batches_per_epoch
# NOTE loss is coarsely sampled, just master node and per log update
for key, val in losses.items():
if key not in losses_m:
losses_m[key] = AverageMeter()
losses_m[key].update(val.item(), batch_size)
logit_scale_scalar = logit_scale.item()
loss_log = " ".join(
[
f"{loss_name.capitalize()}: {loss_m.val:#.5g} ({loss_m.avg:#.5g})"
for loss_name, loss_m in losses_m.items()
]
)
logging.info(
f"Train Epoch: {epoch} [{num_samples:>{sample_digits}}/{samples_per_epoch} ({percent_complete:.0f}%)] "
f"Data (t): {data_time_m.avg:.3f} "
f"Batch (t): {batch_time_m.avg:.3f}, {args.accum_freq * args.batch_size * args.world_size / batch_time_m.val:#g}/s "
f"LR: {optimizer.param_groups[0]['lr']:5f} "
f"Logit Scale: {logit_scale_scalar:.3f} " + loss_log
)
# Save train loss / etc. Using non avg meter values as loggers have their own smoothing
log_data = {
"data_time": data_time_m.val,
"batch_time": batch_time_m.val,
"samples_per_second": args.accum_freq * args.batch_size * args.world_size / batch_time_m.val,
"scale": logit_scale_scalar,
"lr": optimizer.param_groups[0]["lr"]
}
log_data.update({name:val.val for name,val in losses_m.items()})
for name, val in log_data.items():
name = "train/" + name
if tb_writer is not None:
tb_writer.add_scalar(name, val, step)
if args.wandb:
assert wandb is not None, 'Please install wandb.'
wandb.log({name: val, 'step': step})
# resetting batch / data time meters per log window
batch_time_m.reset()
data_time_m.reset()
# end for
def evaluate(model, data, epoch, args, tb_writer=None):
metrics = {}
if not is_master(args):
return metrics
device = torch.device(args.device)
model.eval()
zero_shot_metrics = zero_shot_eval(model, data, epoch, args)
metrics.update(zero_shot_metrics)
autocast = get_autocast(args.precision)
cast_dtype = get_cast_dtype(args.precision)
if 'val' in data and (args.val_frequency and ((epoch % args.val_frequency) == 0 or epoch == args.epochs)):
dataloader = data['val'].dataloader
num_samples = 0
samples_per_val = dataloader.num_samples
# FIXME this does not scale past small eval datasets
# all_image_features @ all_text_features will blow up memory and compute very quickly
cumulative_loss = 0.0
cumulative_gen_loss = 0.0
all_image_features, all_text_features = [], []
with torch.no_grad():
for i, batch in enumerate(dataloader):
images, texts = batch
images = images.to(device=device, dtype=cast_dtype, non_blocking=True)
texts = texts.to(device=device, non_blocking=True)
with autocast():
model_out = model(images, texts, output_dict=True)
image_features = model_out["image_features"]
text_features = model_out["text_features"]
logit_scale = model_out["logit_scale"]
# features are accumulated in CPU tensors, otherwise GPU memory exhausted quickly
# however, system RAM is easily exceeded and compute time becomes problematic
all_image_features.append(image_features.cpu())
all_text_features.append(text_features.cpu())
logit_scale = logit_scale.mean()
logits_per_image = logit_scale * image_features @ text_features.t()
logits_per_text = logits_per_image.t()
batch_size = images.shape[0]
labels = torch.arange(batch_size, device=device).long()
total_loss = (
F.cross_entropy(logits_per_image, labels) +
F.cross_entropy(logits_per_text, labels)
) / 2
gen_loss = maybe_compute_generative_loss(model_out)
cumulative_loss += total_loss * batch_size
num_samples += batch_size
if is_master(args) and (i % 100) == 0:
logging.info(
f"Eval Epoch: {epoch} [{num_samples} / {samples_per_val}]\t"
f"Clip Loss: {cumulative_loss / num_samples:.6f}\t")
if gen_loss is not None:
cumulative_gen_loss += gen_loss * batch_size
logging.info(
f"Generative Loss: {cumulative_gen_loss / num_samples:.6f}\t")
val_metrics = get_clip_metrics(
image_features=torch.cat(all_image_features),
text_features=torch.cat(all_text_features),
logit_scale=logit_scale.cpu(),
)
loss = cumulative_loss / num_samples
metrics.update(
{**val_metrics, "clip_val_loss": loss.item(), "epoch": epoch, "num_samples": num_samples}
)
if gen_loss is not None:
gen_loss = cumulative_gen_loss / num_samples
metrics.update({"val_generative_loss": gen_loss.item()})
if not metrics:
return metrics
logging.info(
f"Eval Epoch: {epoch} "
+ "\t".join([f"{k}: {round(v, 4):.4f}" for k, v in metrics.items()])
)
if args.save_logs:
for name, val in metrics.items():
if tb_writer is not None:
tb_writer.add_scalar(f"val/{name}", val, epoch)
with open(os.path.join(args.checkpoint_path, "results.jsonl"), "a+") as f:
f.write(json.dumps(metrics))
f.write("\n")
if args.wandb:
assert wandb is not None, 'Please install wandb.'
for name, val in metrics.items():
wandb.log({f"val/{name}": val, 'epoch': epoch})
return metrics
def get_clip_metrics(image_features, text_features, logit_scale):
metrics = {}
logits_per_image = (logit_scale * image_features @ text_features.t()).detach().cpu()
logits_per_text = logits_per_image.t().detach().cpu()
logits = {"image_to_text": logits_per_image, "text_to_image": logits_per_text}
ground_truth = torch.arange(len(text_features)).view(-1, 1)
for name, logit in logits.items():
ranking = torch.argsort(logit, descending=True)
preds = torch.where(ranking == ground_truth)[1]
preds = preds.detach().cpu().numpy()
metrics[f"{name}_mean_rank"] = preds.mean() + 1
metrics[f"{name}_median_rank"] = np.floor(np.median(preds)) + 1
for k in [1, 5, 10]:
metrics[f"{name}_R@{k}"] = np.mean(preds < k)
return metrics
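# Illustrative note (not part of the original file): for N aligned image/text pairs this
# produces, for both "image_to_text" and "text_to_image", the mean and median rank of the
# matching pair plus recall at k, i.e. keys such as
#   image_to_text_mean_rank, image_to_text_median_rank, image_to_text_R@1, ..._R@5, ..._R@10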
def maybe_compute_generative_loss(model_out):
if "logits" in model_out and "labels" in model_out:
token_logits = model_out["logits"]
token_labels = model_out["labels"]
return F.cross_entropy(token_logits.permute(0, 2, 1), token_labels)
|
open_clip-main
|
src/training/train.py
|
import logging
import torch
import torch.nn.functional as F
from tqdm import tqdm
from open_clip import get_cast_dtype, get_tokenizer
from .precision import get_autocast
from .imagenet_zeroshot_data import imagenet_classnames, openai_imagenet_template
def zero_shot_classifier(model, classnames, templates, args):
tokenizer = get_tokenizer(args.model)
with torch.no_grad():
zeroshot_weights = []
for classname in tqdm(classnames):
texts = [template(classname) for template in templates] # format with class
texts = tokenizer(texts).to(args.device) # tokenize
if args.distributed and not args.horovod:
class_embeddings = model.module.encode_text(texts)
else:
class_embeddings = model.encode_text(texts)
class_embedding = F.normalize(class_embeddings, dim=-1).mean(dim=0)
class_embedding /= class_embedding.norm()
zeroshot_weights.append(class_embedding)
zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(args.device)
return zeroshot_weights
def accuracy(output, target, topk=(1,)):
pred = output.topk(max(topk), 1, True, True)[1].t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
return [float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) for k in topk]
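# Illustrative note (not part of the original file): for logits of shape [batch, num_classes],
#   acc1, acc5 = accuracy(logits, target, topk=(1, 5))
# returns the *counts* of correct top-1 / top-5 predictions in the batch; run() below sums
# them over batches and divides by n to obtain fractions.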
def run(model, classifier, dataloader, args):
autocast = get_autocast(args.precision)
cast_dtype = get_cast_dtype(args.precision)
with torch.no_grad():
top1, top5, n = 0., 0., 0.
for images, target in tqdm(dataloader, unit_scale=args.batch_size):
images = images.to(args.device)
if cast_dtype is not None:
images = images.to(dtype=cast_dtype)
target = target.to(args.device)
with autocast():
# predict
if args.distributed and not args.horovod:
image_features = model.module.encode_image(images)
else:
image_features = model.encode_image(images)
image_features = F.normalize(image_features, dim=-1)
logits = 100. * image_features @ classifier
# measure accuracy
acc1, acc5 = accuracy(logits, target, topk=(1, 5))
top1 += acc1
top5 += acc5
n += images.size(0)
top1 = (top1 / n)
top5 = (top5 / n)
return top1, top5
def zero_shot_eval(model, data, epoch, args):
if 'imagenet-val' not in data and 'imagenet-v2' not in data:
return {}
if args.zeroshot_frequency == 0:
return {}
if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs:
return {}
logging.info('Starting zero-shot imagenet.')
logging.info('Building zero-shot classifier')
classifier = zero_shot_classifier(model, imagenet_classnames, openai_imagenet_template, args)
logging.info('Using classifier')
results = {}
if 'imagenet-val' in data:
top1, top5 = run(model, classifier, data['imagenet-val'].dataloader, args)
results['imagenet-zeroshot-val-top1'] = top1
results['imagenet-zeroshot-val-top5'] = top5
if 'imagenet-v2' in data:
top1, top5 = run(model, classifier, data['imagenet-v2'].dataloader, args)
results['imagenetv2-zeroshot-val-top1'] = top1
results['imagenetv2-zeroshot-val-top5'] = top5
logging.info('Finished zero-shot imagenet.')
return results
|
open_clip-main
|
src/training/zero_shot.py
|
import logging
import os
import multiprocessing
import subprocess
import time
import fsspec
import torch
from tqdm import tqdm
def remote_sync_s3(local_dir, remote_dir):
# skip epoch_latest which can change during sync.
result = subprocess.run(["aws", "s3", "sync", local_dir, remote_dir, '--exclude', '*epoch_latest.pt'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if result.returncode != 0:
logging.error(f"Error: Failed to sync with S3 bucket {result.stderr.decode('utf-8')}")
return False
logging.info(f"Successfully synced with S3 bucket")
return True
def remote_sync_fsspec(local_dir, remote_dir):
# FIXME currently this is slow and not recommended. Look into speeding up.
a = fsspec.get_mapper(local_dir)
b = fsspec.get_mapper(remote_dir)
for k in a:
# skip epoch_latest which can change during sync.
if 'epoch_latest.pt' in k:
continue
logging.info(f'Attempting to sync {k}')
if k in b and len(a[k]) == len(b[k]):
logging.debug(f'Skipping remote sync for {k}.')
continue
try:
            b[k] = a[k]
            logging.info(f'Successful sync for {k}.')
except Exception as e:
logging.info(f'Error during remote sync for {k}: {e}')
return False
return True
def remote_sync(local_dir, remote_dir, protocol):
logging.info('Starting remote sync.')
if protocol == 's3':
return remote_sync_s3(local_dir, remote_dir)
elif protocol == 'fsspec':
return remote_sync_fsspec(local_dir, remote_dir)
else:
logging.error('Remote protocol not known')
return False
def keep_running_remote_sync(sync_every, local_dir, remote_dir, protocol):
while True:
time.sleep(sync_every)
remote_sync(local_dir, remote_dir, protocol)
def start_sync_process(sync_every, local_dir, remote_dir, protocol):
p = multiprocessing.Process(target=keep_running_remote_sync, args=(sync_every, local_dir, remote_dir, protocol))
return p
# Note: we are not currently using this save function.
def pt_save(pt_obj, file_path):
of = fsspec.open(file_path, "wb")
with of as f:
        torch.save(pt_obj, f)
def pt_load(file_path, map_location=None):
if not file_path.startswith('/'):
logging.info('Loading remote checkpoint, which may take a bit.')
of = fsspec.open(file_path, "rb")
with of as f:
out = torch.load(f, map_location=map_location)
return out
def check_exists(file_path):
try:
with fsspec.open(file_path):
pass
except FileNotFoundError:
return False
return True
|
open_clip-main
|
src/training/file_utils.py
|
import numpy as np
def assign_learning_rate(optimizer, new_lr):
for param_group in optimizer.param_groups:
param_group["lr"] = new_lr
def _warmup_lr(base_lr, warmup_length, step):
return base_lr * (step + 1) / warmup_length
def const_lr(optimizer, base_lr, warmup_length, steps):
def _lr_adjuster(step):
if step < warmup_length:
lr = _warmup_lr(base_lr, warmup_length, step)
else:
lr = base_lr
assign_learning_rate(optimizer, lr)
return lr
return _lr_adjuster
def const_lr_cooldown(optimizer, base_lr, warmup_length, steps, cooldown_steps, cooldown_power=1.0, cooldown_end_lr=0.):
def _lr_adjuster(step):
start_cooldown_step = steps - cooldown_steps
if step < warmup_length:
lr = _warmup_lr(base_lr, warmup_length, step)
else:
if step < start_cooldown_step:
lr = base_lr
else:
e = step - start_cooldown_step
es = steps - start_cooldown_step
# linear decay if power == 1; polynomial decay otherwise;
decay = (1 - (e/es)) ** cooldown_power
lr = decay * (base_lr - cooldown_end_lr) + cooldown_end_lr
assign_learning_rate(optimizer, lr)
return lr
return _lr_adjuster
def cosine_lr(optimizer, base_lr, warmup_length, steps):
def _lr_adjuster(step):
if step < warmup_length:
lr = _warmup_lr(base_lr, warmup_length, step)
else:
e = step - warmup_length
es = steps - warmup_length
lr = 0.5 * (1 + np.cos(np.pi * e / es)) * base_lr
assign_learning_rate(optimizer, lr)
return lr
return _lr_adjuster
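# Example usage (illustrative, not part of the original module):
#   scheduler = cosine_lr(optimizer, base_lr=5e-4, warmup_length=10000, steps=total_steps)
#   for step in range(total_steps):
#       lr = scheduler(step)   # linear warmup for 10k steps, then half-cosine decay towards 0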
|
open_clip-main
|
src/training/scheduler.py
|
import glob
import logging
import os
import re
import subprocess
import sys
import random
from datetime import datetime
import numpy as np
import torch
from torch import optim
from torch.cuda.amp import GradScaler
try:
import wandb
except ImportError:
wandb = None
try:
import torch.utils.tensorboard as tensorboard
except ImportError:
tensorboard = None
try:
import horovod.torch as hvd
except ImportError:
hvd = None
from open_clip import create_model_and_transforms, trace_model, get_tokenizer, create_loss
from training.data import get_data
from training.distributed import is_master, init_distributed_device, broadcast_object
from training.logger import setup_logging
from training.params import parse_args
from training.scheduler import cosine_lr, const_lr, const_lr_cooldown
from training.train import train_one_epoch, evaluate
from training.file_utils import pt_load, check_exists, start_sync_process, remote_sync
LATEST_CHECKPOINT_NAME = "epoch_latest.pt"
def random_seed(seed=42, rank=0):
torch.manual_seed(seed + rank)
np.random.seed(seed + rank)
random.seed(seed + rank)
def natural_key(string_):
"""See http://www.codinghorror.com/blog/archives/001018.html"""
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
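# Illustrative example (not part of the original file): natural_key makes numeric chunks sort numerically,
#   sorted(['epoch_10.pt', 'epoch_2.pt'], key=natural_key)  # -> ['epoch_2.pt', 'epoch_10.pt']
# which is why get_latest_checkpoint below can simply take the last element of the sorted list.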
def get_latest_checkpoint(path: str, remote : bool):
    # as written, this glob recurses, so can pick up checkpoints across multiple sub-folders
if remote:
result = subprocess.run(["aws", "s3", "ls", path + "/"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
print(result)
if result.returncode == 1:
return None
checkpoints = [os.path.join(path, x.split(' ')[-1]) for x in result.stdout.decode().split('\n')[:-1]]
else:
checkpoints = glob.glob(path + '**/*.pt', recursive=True)
if checkpoints:
checkpoints = sorted(checkpoints, key=natural_key)
return checkpoints[-1]
return None
def main(args):
args = parse_args(args)
if torch.cuda.is_available():
# This enables tf32 on Ampere GPUs which is only 8% slower than
# float16 and almost as accurate as float32
# This was a default in pytorch until 1.12
torch.backends.cuda.matmul.allow_tf32 = True
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = False
# fully initialize distributed device environment
device = init_distributed_device(args)
# get the name of the experiments
if args.name is None:
# sanitize model name for filesystem / uri use, easier if we don't use / in name as a rule?
model_name_safe = args.model.replace('/', '-')
date_str = datetime.now().strftime("%Y_%m_%d-%H_%M_%S")
if args.distributed:
# sync date_str from master to all ranks
date_str = broadcast_object(args, date_str)
args.name = '-'.join([
date_str,
f"model_{model_name_safe}",
f"lr_{args.lr}",
f"b_{args.batch_size}",
f"j_{args.workers}",
f"p_{args.precision}",
])
resume_latest = args.resume == 'latest'
log_base_path = os.path.join(args.logs, args.name)
args.log_path = None
if is_master(args, local=args.log_local):
os.makedirs(log_base_path, exist_ok=True)
log_filename = f'out-{args.rank}' if args.log_local else 'out.log'
args.log_path = os.path.join(log_base_path, log_filename)
if os.path.exists(args.log_path) and not resume_latest:
            print(
                "Error. Experiment already exists. Use --name to specify a new experiment."
            )
return -1
# Setup text logger
args.log_level = logging.DEBUG if args.debug else logging.INFO
setup_logging(args.log_path, args.log_level)
# Setup wandb, tensorboard, checkpoint logging
args.wandb = 'wandb' in args.report_to or 'all' in args.report_to
args.tensorboard = 'tensorboard' in args.report_to or 'all' in args.report_to
args.checkpoint_path = os.path.join(log_base_path, "checkpoints")
if is_master(args):
args.tensorboard_path = os.path.join(log_base_path, "tensorboard") if args.tensorboard else ''
for dirname in [args.tensorboard_path, args.checkpoint_path]:
if dirname:
os.makedirs(dirname, exist_ok=True)
else:
args.tensorboard_path = ''
if resume_latest:
resume_from = None
checkpoint_path = args.checkpoint_path
# If using remote_sync, need to check the remote instead of the local checkpoints folder.
if args.remote_sync is not None:
checkpoint_path = os.path.join(args.remote_sync, args.name, "checkpoints")
if args.save_most_recent:
print('Error. Cannot use save-most-recent with remote_sync and resume latest.')
return -1
if args.remote_sync_protocol != 's3':
print('Error. Sync protocol not supported when using resume latest.')
return -1
if is_master(args):
# Checking for existing checkpoint via master rank only. It is possible for
# different rank processes to see different files if a shared file-system is under
# stress, however it's very difficult to fully work around such situations.
if args.save_most_recent:
# if --save-most-recent flag is set, look for latest at a fixed filename
resume_from = os.path.join(checkpoint_path, LATEST_CHECKPOINT_NAME)
if not os.path.exists(resume_from):
# If no latest checkpoint has been saved yet, don't try to resume
resume_from = None
else:
# otherwise, list checkpoint dir contents and pick the newest checkpoint
resume_from = get_latest_checkpoint(checkpoint_path, remote=args.remote_sync is not None)
if resume_from:
logging.info(f'Found latest resume checkpoint at {resume_from}.')
else:
logging.info(f'No latest resume checkpoint found in {checkpoint_path}.')
if args.distributed:
# sync found checkpoint path to all ranks
resume_from = broadcast_object(args, resume_from)
args.resume = resume_from
if args.copy_codebase:
copy_codebase(args)
    # start the sync process if remote-sync is not None
remote_sync_process = None
if is_master(args) and args.remote_sync is not None:
# first make sure it works
result = remote_sync(
os.path.join(args.logs, args.name),
os.path.join(args.remote_sync, args.name),
args.remote_sync_protocol
)
if result:
logging.info('remote sync successful.')
else:
logging.info('Error: remote sync failed. Exiting.')
return -1
# if all looks good, start a process to do this every args.remote_sync_frequency seconds
remote_sync_process = start_sync_process(
args.remote_sync_frequency,
os.path.join(args.logs, args.name),
os.path.join(args.remote_sync, args.name),
args.remote_sync_protocol
)
remote_sync_process.start()
if args.precision == 'fp16':
logging.warning(
'It is recommended to use AMP mixed-precision instead of FP16. '
'FP16 support needs further verification and tuning, especially for train.')
if args.horovod:
logging.info(
f'Running in horovod mode with multiple processes / nodes. Device: {args.device}.'
f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.')
elif args.distributed:
logging.info(
f'Running in distributed mode with multiple processes. Device: {args.device}.'
f'Process (global: {args.rank}, local {args.local_rank}), total {args.world_size}.')
else:
logging.info(f'Running with a single process. Device {args.device}.')
if isinstance(args.force_image_size, (tuple, list)) and len(args.force_image_size) == 1:
# arg is nargs, single (square) image size list -> int
args.force_image_size = args.force_image_size[0]
random_seed(args.seed, 0)
model, preprocess_train, preprocess_val = create_model_and_transforms(
args.model,
args.pretrained,
precision=args.precision,
device=device,
jit=args.torchscript,
force_quick_gelu=args.force_quick_gelu,
force_custom_text=args.force_custom_text,
force_patch_dropout=args.force_patch_dropout,
force_image_size=args.force_image_size,
pretrained_image=args.pretrained_image,
image_mean=args.image_mean,
image_std=args.image_std,
aug_cfg=args.aug_cfg,
output_dict=True,
)
random_seed(args.seed, args.rank)
if args.trace:
model = trace_model(model, batch_size=args.batch_size, device=device)
if args.lock_image:
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
model.lock_image_tower(
unlocked_groups=args.lock_image_unlocked_groups,
freeze_bn_stats=args.lock_image_freeze_bn_stats)
if args.lock_text:
model.lock_text_tower(
unlocked_layers=args.lock_text_unlocked_layers,
freeze_layer_norm=args.lock_text_freeze_layer_norm)
if args.grad_checkpointing:
model.set_grad_checkpointing()
if is_master(args):
logging.info("Model:")
logging.info(f"{str(model)}")
logging.info("Params:")
params_file = os.path.join(args.logs, args.name, "params.txt")
with open(params_file, "w") as f:
for name in sorted(vars(args)):
val = getattr(args, name)
logging.info(f" {name}: {val}")
f.write(f"{name}: {val}\n")
if args.distributed and not args.horovod:
if args.use_bn_sync:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
ddp_args = {}
if args.ddp_static_graph:
# this doesn't exist in older PyTorch, arg only added if enabled
ddp_args['static_graph'] = True
model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[device], **ddp_args)
# create optimizer and scaler
optimizer = None
scaler = None
if args.train_data or args.dataset_type == "synthetic":
assert not args.trace, 'Cannot train with traced model'
exclude = lambda n, p: p.ndim < 2 or "bn" in n or "ln" in n or "bias" in n or 'logit_scale' in n
include = lambda n, p: not exclude(n, p)
named_parameters = list(model.named_parameters())
gain_or_bias_params = [p for n, p in named_parameters if exclude(n, p) and p.requires_grad]
rest_params = [p for n, p in named_parameters if include(n, p) and p.requires_grad]
optimizer = optim.AdamW(
[
{"params": gain_or_bias_params, "weight_decay": 0.},
{"params": rest_params, "weight_decay": args.wd},
],
lr=args.lr,
betas=(args.beta1, args.beta2),
eps=args.eps,
)
if args.horovod:
optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
scaler = GradScaler() if args.precision == "amp" else None
# optionally resume from a checkpoint
start_epoch = 0
if args.resume is not None:
checkpoint = pt_load(args.resume, map_location='cpu')
if 'epoch' in checkpoint:
# resuming a train checkpoint w/ epoch and optimizer state
start_epoch = checkpoint["epoch"]
sd = checkpoint["state_dict"]
if not args.distributed and next(iter(sd.items()))[0].startswith('module'):
sd = {k[len('module.'):]: v for k, v in sd.items()}
model.load_state_dict(sd)
if optimizer is not None:
optimizer.load_state_dict(checkpoint["optimizer"])
if scaler is not None and 'scaler' in checkpoint:
scaler.load_state_dict(checkpoint['scaler'])
logging.info(f"=> resuming checkpoint '{args.resume}' (epoch {start_epoch})")
else:
# loading a bare (model only) checkpoint for fine-tune or evaluation
model.load_state_dict(checkpoint)
logging.info(f"=> loaded checkpoint '{args.resume}' (epoch {start_epoch})")
# initialize datasets
data = get_data(args, (preprocess_train, preprocess_val), epoch=start_epoch, tokenizer=get_tokenizer(args.model))
assert len(data), 'At least one train or eval dataset must be specified.'
# create scheduler if train
scheduler = None
if 'train' in data and optimizer is not None:
total_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs
if args.lr_scheduler == "cosine":
scheduler = cosine_lr(optimizer, args.lr, args.warmup, total_steps)
elif args.lr_scheduler == "const":
scheduler = const_lr(optimizer, args.lr, args.warmup, total_steps)
elif args.lr_scheduler == "const-cooldown":
assert args.epochs_cooldown is not None,\
"Please specify the number of cooldown epochs for this lr schedule."
cooldown_steps = (data["train"].dataloader.num_batches // args.accum_freq) * args.epochs_cooldown
scheduler = const_lr_cooldown(
optimizer, args.lr, args.warmup, total_steps,
cooldown_steps, args.lr_cooldown_power, args.lr_cooldown_end)
else:
logging.error(
f'Unknown scheduler, {args.lr_scheduler}. Available options are: cosine, const, const-cooldown.')
exit(1)
# determine if this worker should save logs and checkpoints. only do so if it is rank == 0
args.save_logs = args.logs and args.logs.lower() != 'none' and is_master(args)
writer = None
if args.save_logs and args.tensorboard:
assert tensorboard is not None, "Please install tensorboard."
writer = tensorboard.SummaryWriter(args.tensorboard_path)
if args.wandb and is_master(args):
assert wandb is not None, 'Please install wandb.'
logging.debug('Starting wandb.')
args.train_sz = data["train"].dataloader.num_samples
if args.val_data is not None:
args.val_sz = data["val"].dataloader.num_samples
# you will have to configure this for your project!
wandb.init(
project=args.wandb_project_name,
name=args.name,
id=args.name,
notes=args.wandb_notes,
tags=[],
resume='auto' if args.resume == "latest" else None,
config=vars(args),
)
if args.debug:
wandb.watch(model, log='all')
wandb.save(params_file)
logging.debug('Finished loading wandb.')
if 'train' not in data:
evaluate(model, data, start_epoch, args, writer)
return
loss = create_loss(args)
for epoch in range(start_epoch, args.epochs):
if is_master(args):
logging.info(f'Start epoch {epoch}')
train_one_epoch(model, data, loss, epoch, optimizer, scaler, scheduler, args, tb_writer=writer)
completed_epoch = epoch + 1
if any(v in data for v in ('val', 'imagenet-val', 'imagenet-v2')):
evaluate(model, data, completed_epoch, args, writer)
# Saving checkpoints.
if args.save_logs:
checkpoint_dict = {
"epoch": completed_epoch,
"name": args.name,
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
}
if scaler is not None:
checkpoint_dict["scaler"] = scaler.state_dict()
if completed_epoch == args.epochs or (
args.save_frequency > 0 and (completed_epoch % args.save_frequency) == 0
):
torch.save(
checkpoint_dict,
os.path.join(args.checkpoint_path, f"epoch_{completed_epoch}.pt"),
)
if args.delete_previous_checkpoint:
previous_checkpoint = os.path.join(args.checkpoint_path, f"epoch_{completed_epoch - 1}.pt")
if os.path.exists(previous_checkpoint):
os.remove(previous_checkpoint)
if args.save_most_recent:
# try not to corrupt the latest checkpoint if save fails
tmp_save_path = os.path.join(args.checkpoint_path, "tmp.pt")
latest_save_path = os.path.join(args.checkpoint_path, LATEST_CHECKPOINT_NAME)
torch.save(checkpoint_dict, tmp_save_path)
os.replace(tmp_save_path, latest_save_path)
if args.wandb and is_master(args):
wandb.finish()
# run a final sync.
if remote_sync_process is not None:
logging.info('Final remote sync.')
remote_sync_process.terminate()
result = remote_sync(
os.path.join(args.logs, args.name),
os.path.join(args.remote_sync, args.name),
args.remote_sync_protocol
)
if result:
logging.info('Final remote sync successful.')
else:
logging.info('Final remote sync failed.')
def copy_codebase(args):
from shutil import copytree, ignore_patterns
new_code_path = os.path.join(args.logs, args.name, "code")
if os.path.exists(new_code_path):
print(
f"Error. Experiment already exists at {new_code_path}. Use --name to specify a new experiment."
)
return -1
print(f"Copying codebase to {new_code_path}")
current_code_path = os.path.realpath(__file__)
for _ in range(3):
current_code_path = os.path.dirname(current_code_path)
copytree(current_code_path, new_code_path, ignore=ignore_patterns('log', 'logs', 'wandb'))
print("Done copying code.")
return 1
if __name__ == "__main__":
main(sys.argv[1:])
|
open_clip-main
|
src/training/main.py
|
imagenet_classnames = ["tench", "goldfish", "great white shark", "tiger shark", "hammerhead shark", "electric ray",
"stingray", "rooster", "hen", "ostrich", "brambling", "goldfinch", "house finch", "junco",
"indigo bunting", "American robin", "bulbul", "jay", "magpie", "chickadee", "American dipper",
"kite (bird of prey)", "bald eagle", "vulture", "great grey owl", "fire salamander",
"smooth newt", "newt", "spotted salamander", "axolotl", "American bullfrog", "tree frog",
"tailed frog", "loggerhead sea turtle", "leatherback sea turtle", "mud turtle", "terrapin",
"box turtle", "banded gecko", "green iguana", "Carolina anole",
"desert grassland whiptail lizard", "agama", "frilled-necked lizard", "alligator lizard",
"Gila monster", "European green lizard", "chameleon", "Komodo dragon", "Nile crocodile",
"American alligator", "triceratops", "worm snake", "ring-necked snake",
"eastern hog-nosed snake", "smooth green snake", "kingsnake", "garter snake", "water snake",
"vine snake", "night snake", "boa constrictor", "African rock python", "Indian cobra",
"green mamba", "sea snake", "Saharan horned viper", "eastern diamondback rattlesnake",
"sidewinder rattlesnake", "trilobite", "harvestman", "scorpion", "yellow garden spider",
"barn spider", "European garden spider", "southern black widow", "tarantula", "wolf spider",
"tick", "centipede", "black grouse", "ptarmigan", "ruffed grouse", "prairie grouse", "peafowl",
"quail", "partridge", "african grey parrot", "macaw", "sulphur-crested cockatoo", "lorikeet",
"coucal", "bee eater", "hornbill", "hummingbird", "jacamar", "toucan", "duck",
"red-breasted merganser", "goose", "black swan", "tusker", "echidna", "platypus", "wallaby",
"koala", "wombat", "jellyfish", "sea anemone", "brain coral", "flatworm", "nematode", "conch",
"snail", "slug", "sea slug", "chiton", "chambered nautilus", "Dungeness crab", "rock crab",
"fiddler crab", "red king crab", "American lobster", "spiny lobster", "crayfish", "hermit crab",
"isopod", "white stork", "black stork", "spoonbill", "flamingo", "little blue heron",
"great egret", "bittern bird", "crane bird", "limpkin", "common gallinule", "American coot",
"bustard", "ruddy turnstone", "dunlin", "common redshank", "dowitcher", "oystercatcher",
"pelican", "king penguin", "albatross", "grey whale", "killer whale", "dugong", "sea lion",
"Chihuahua", "Japanese Chin", "Maltese", "Pekingese", "Shih Tzu", "King Charles Spaniel",
"Papillon", "toy terrier", "Rhodesian Ridgeback", "Afghan Hound", "Basset Hound", "Beagle",
"Bloodhound", "Bluetick Coonhound", "Black and Tan Coonhound", "Treeing Walker Coonhound",
"English foxhound", "Redbone Coonhound", "borzoi", "Irish Wolfhound", "Italian Greyhound",
"Whippet", "Ibizan Hound", "Norwegian Elkhound", "Otterhound", "Saluki", "Scottish Deerhound",
"Weimaraner", "Staffordshire Bull Terrier", "American Staffordshire Terrier",
"Bedlington Terrier", "Border Terrier", "Kerry Blue Terrier", "Irish Terrier",
"Norfolk Terrier", "Norwich Terrier", "Yorkshire Terrier", "Wire Fox Terrier",
"Lakeland Terrier", "Sealyham Terrier", "Airedale Terrier", "Cairn Terrier",
"Australian Terrier", "Dandie Dinmont Terrier", "Boston Terrier", "Miniature Schnauzer",
"Giant Schnauzer", "Standard Schnauzer", "Scottish Terrier", "Tibetan Terrier",
"Australian Silky Terrier", "Soft-coated Wheaten Terrier", "West Highland White Terrier",
"Lhasa Apso", "Flat-Coated Retriever", "Curly-coated Retriever", "Golden Retriever",
"Labrador Retriever", "Chesapeake Bay Retriever", "German Shorthaired Pointer", "Vizsla",
"English Setter", "Irish Setter", "Gordon Setter", "Brittany dog", "Clumber Spaniel",
"English Springer Spaniel", "Welsh Springer Spaniel", "Cocker Spaniel", "Sussex Spaniel",
"Irish Water Spaniel", "Kuvasz", "Schipperke", "Groenendael dog", "Malinois", "Briard",
"Australian Kelpie", "Komondor", "Old English Sheepdog", "Shetland Sheepdog", "collie",
"Border Collie", "Bouvier des Flandres dog", "Rottweiler", "German Shepherd Dog", "Dobermann",
"Miniature Pinscher", "Greater Swiss Mountain Dog", "Bernese Mountain Dog",
"Appenzeller Sennenhund", "Entlebucher Sennenhund", "Boxer", "Bullmastiff", "Tibetan Mastiff",
"French Bulldog", "Great Dane", "St. Bernard", "husky", "Alaskan Malamute", "Siberian Husky",
"Dalmatian", "Affenpinscher", "Basenji", "pug", "Leonberger", "Newfoundland dog",
"Great Pyrenees dog", "Samoyed", "Pomeranian", "Chow Chow", "Keeshond", "brussels griffon",
"Pembroke Welsh Corgi", "Cardigan Welsh Corgi", "Toy Poodle", "Miniature Poodle",
"Standard Poodle", "Mexican hairless dog (xoloitzcuintli)", "grey wolf", "Alaskan tundra wolf",
"red wolf or maned wolf", "coyote", "dingo", "dhole", "African wild dog", "hyena", "red fox",
"kit fox", "Arctic fox", "grey fox", "tabby cat", "tiger cat", "Persian cat", "Siamese cat",
"Egyptian Mau", "cougar", "lynx", "leopard", "snow leopard", "jaguar", "lion", "tiger",
"cheetah", "brown bear", "American black bear", "polar bear", "sloth bear", "mongoose",
"meerkat", "tiger beetle", "ladybug", "ground beetle", "longhorn beetle", "leaf beetle",
"dung beetle", "rhinoceros beetle", "weevil", "fly", "bee", "ant", "grasshopper",
"cricket insect", "stick insect", "cockroach", "praying mantis", "cicada", "leafhopper",
"lacewing", "dragonfly", "damselfly", "red admiral butterfly", "ringlet butterfly",
"monarch butterfly", "small white butterfly", "sulphur butterfly", "gossamer-winged butterfly",
"starfish", "sea urchin", "sea cucumber", "cottontail rabbit", "hare", "Angora rabbit",
"hamster", "porcupine", "fox squirrel", "marmot", "beaver", "guinea pig", "common sorrel horse",
"zebra", "pig", "wild boar", "warthog", "hippopotamus", "ox", "water buffalo", "bison",
"ram (adult male sheep)", "bighorn sheep", "Alpine ibex", "hartebeest", "impala (antelope)",
"gazelle", "arabian camel", "llama", "weasel", "mink", "European polecat",
"black-footed ferret", "otter", "skunk", "badger", "armadillo", "three-toed sloth", "orangutan",
"gorilla", "chimpanzee", "gibbon", "siamang", "guenon", "patas monkey", "baboon", "macaque",
"langur", "black-and-white colobus", "proboscis monkey", "marmoset", "white-headed capuchin",
"howler monkey", "titi monkey", "Geoffroy's spider monkey", "common squirrel monkey",
"ring-tailed lemur", "indri", "Asian elephant", "African bush elephant", "red panda",
"giant panda", "snoek fish", "eel", "silver salmon", "rock beauty fish", "clownfish",
"sturgeon", "gar fish", "lionfish", "pufferfish", "abacus", "abaya", "academic gown",
"accordion", "acoustic guitar", "aircraft carrier", "airliner", "airship", "altar", "ambulance",
"amphibious vehicle", "analog clock", "apiary", "apron", "trash can", "assault rifle",
"backpack", "bakery", "balance beam", "balloon", "ballpoint pen", "Band-Aid", "banjo",
"baluster / handrail", "barbell", "barber chair", "barbershop", "barn", "barometer", "barrel",
"wheelbarrow", "baseball", "basketball", "bassinet", "bassoon", "swimming cap", "bath towel",
"bathtub", "station wagon", "lighthouse", "beaker", "military hat (bearskin or shako)",
"beer bottle", "beer glass", "bell tower", "baby bib", "tandem bicycle", "bikini",
"ring binder", "binoculars", "birdhouse", "boathouse", "bobsleigh", "bolo tie", "poke bonnet",
"bookcase", "bookstore", "bottle cap", "hunting bow", "bow tie", "brass memorial plaque", "bra",
"breakwater", "breastplate", "broom", "bucket", "buckle", "bulletproof vest",
"high-speed train", "butcher shop", "taxicab", "cauldron", "candle", "cannon", "canoe",
"can opener", "cardigan", "car mirror", "carousel", "tool kit", "cardboard box / carton",
"car wheel", "automated teller machine", "cassette", "cassette player", "castle", "catamaran",
"CD player", "cello", "mobile phone", "chain", "chain-link fence", "chain mail", "chainsaw",
"storage chest", "chiffonier", "bell or wind chime", "china cabinet", "Christmas stocking",
"church", "movie theater", "cleaver", "cliff dwelling", "cloak", "clogs", "cocktail shaker",
"coffee mug", "coffeemaker", "spiral or coil", "combination lock", "computer keyboard",
"candy store", "container ship", "convertible", "corkscrew", "cornet", "cowboy boot",
"cowboy hat", "cradle", "construction crane", "crash helmet", "crate", "infant bed",
"Crock Pot", "croquet ball", "crutch", "cuirass", "dam", "desk", "desktop computer",
"rotary dial telephone", "diaper", "digital clock", "digital watch", "dining table",
"dishcloth", "dishwasher", "disc brake", "dock", "dog sled", "dome", "doormat", "drilling rig",
"drum", "drumstick", "dumbbell", "Dutch oven", "electric fan", "electric guitar",
"electric locomotive", "entertainment center", "envelope", "espresso machine", "face powder",
"feather boa", "filing cabinet", "fireboat", "fire truck", "fire screen", "flagpole", "flute",
"folding chair", "football helmet", "forklift", "fountain", "fountain pen", "four-poster bed",
"freight car", "French horn", "frying pan", "fur coat", "garbage truck",
"gas mask or respirator", "gas pump", "goblet", "go-kart", "golf ball", "golf cart", "gondola",
"gong", "gown", "grand piano", "greenhouse", "radiator grille", "grocery store", "guillotine",
"hair clip", "hair spray", "half-track", "hammer", "hamper", "hair dryer", "hand-held computer",
"handkerchief", "hard disk drive", "harmonica", "harp", "combine harvester", "hatchet",
"holster", "home theater", "honeycomb", "hook", "hoop skirt", "gymnastic horizontal bar",
"horse-drawn vehicle", "hourglass", "iPod", "clothes iron", "carved pumpkin", "jeans", "jeep",
"T-shirt", "jigsaw puzzle", "rickshaw", "joystick", "kimono", "knee pad", "knot", "lab coat",
"ladle", "lampshade", "laptop computer", "lawn mower", "lens cap", "letter opener", "library",
"lifeboat", "lighter", "limousine", "ocean liner", "lipstick", "slip-on shoe", "lotion",
"music speaker", "loupe magnifying glass", "sawmill", "magnetic compass", "messenger bag",
"mailbox", "tights", "one-piece bathing suit", "manhole cover", "maraca", "marimba", "mask",
"matchstick", "maypole", "maze", "measuring cup", "medicine cabinet", "megalith", "microphone",
"microwave oven", "military uniform", "milk can", "minibus", "miniskirt", "minivan", "missile",
"mitten", "mixing bowl", "mobile home", "ford model t", "modem", "monastery", "monitor",
"moped", "mortar and pestle", "graduation cap", "mosque", "mosquito net", "vespa",
"mountain bike", "tent", "computer mouse", "mousetrap", "moving van", "muzzle", "metal nail",
"neck brace", "necklace", "baby pacifier", "notebook computer", "obelisk", "oboe", "ocarina",
"odometer", "oil filter", "pipe organ", "oscilloscope", "overskirt", "bullock cart",
"oxygen mask", "product packet / packaging", "paddle", "paddle wheel", "padlock", "paintbrush",
"pajamas", "palace", "pan flute", "paper towel", "parachute", "parallel bars", "park bench",
"parking meter", "railroad car", "patio", "payphone", "pedestal", "pencil case",
"pencil sharpener", "perfume", "Petri dish", "photocopier", "plectrum", "Pickelhaube",
"picket fence", "pickup truck", "pier", "piggy bank", "pill bottle", "pillow", "ping-pong ball",
"pinwheel", "pirate ship", "drink pitcher", "block plane", "planetarium", "plastic bag",
"plate rack", "farm plow", "plunger", "Polaroid camera", "pole", "police van", "poncho",
"pool table", "soda bottle", "plant pot", "potter's wheel", "power drill", "prayer rug",
"printer", "prison", "missile", "projector", "hockey puck", "punching bag", "purse", "quill",
"quilt", "race car", "racket", "radiator", "radio", "radio telescope", "rain barrel",
"recreational vehicle", "fishing casting reel", "reflex camera", "refrigerator",
"remote control", "restaurant", "revolver", "rifle", "rocking chair", "rotisserie", "eraser",
"rugby ball", "ruler measuring stick", "sneaker", "safe", "safety pin", "salt shaker", "sandal",
"sarong", "saxophone", "scabbard", "weighing scale", "school bus", "schooner", "scoreboard",
"CRT monitor", "screw", "screwdriver", "seat belt", "sewing machine", "shield", "shoe store",
"shoji screen / room divider", "shopping basket", "shopping cart", "shovel", "shower cap",
"shower curtain", "ski", "balaclava ski mask", "sleeping bag", "slide rule", "sliding door",
"slot machine", "snorkel", "snowmobile", "snowplow", "soap dispenser", "soccer ball", "sock",
"solar thermal collector", "sombrero", "soup bowl", "keyboard space bar", "space heater",
"space shuttle", "spatula", "motorboat", "spider web", "spindle", "sports car", "spotlight",
"stage", "steam locomotive", "through arch bridge", "steel drum", "stethoscope", "scarf",
"stone wall", "stopwatch", "stove", "strainer", "tram", "stretcher", "couch", "stupa",
"submarine", "suit", "sundial", "sunglasses", "sunglasses", "sunscreen", "suspension bridge",
"mop", "sweatshirt", "swim trunks / shorts", "swing", "electrical switch", "syringe",
"table lamp", "tank", "tape player", "teapot", "teddy bear", "television", "tennis ball",
"thatched roof", "front curtain", "thimble", "threshing machine", "throne", "tile roof",
"toaster", "tobacco shop", "toilet seat", "torch", "totem pole", "tow truck", "toy store",
"tractor", "semi-trailer truck", "tray", "trench coat", "tricycle", "trimaran", "tripod",
"triumphal arch", "trolleybus", "trombone", "hot tub", "turnstile", "typewriter keyboard",
"umbrella", "unicycle", "upright piano", "vacuum cleaner", "vase", "vaulted or arched ceiling",
"velvet fabric", "vending machine", "vestment", "viaduct", "violin", "volleyball",
"waffle iron", "wall clock", "wallet", "wardrobe", "military aircraft", "sink",
"washing machine", "water bottle", "water jug", "water tower", "whiskey jug", "whistle",
"hair wig", "window screen", "window shade", "Windsor tie", "wine bottle", "airplane wing",
"wok", "wooden spoon", "wool", "split-rail fence", "shipwreck", "sailboat", "yurt", "website",
"comic book", "crossword", "traffic or street sign", "traffic light", "dust jacket", "menu",
"plate", "guacamole", "consomme", "hot pot", "trifle", "ice cream", "popsicle", "baguette",
"bagel", "pretzel", "cheeseburger", "hot dog", "mashed potatoes", "cabbage", "broccoli",
"cauliflower", "zucchini", "spaghetti squash", "acorn squash", "butternut squash", "cucumber",
"artichoke", "bell pepper", "cardoon", "mushroom", "Granny Smith apple", "strawberry", "orange",
"lemon", "fig", "pineapple", "banana", "jackfruit", "cherimoya (custard apple)", "pomegranate",
"hay", "carbonara", "chocolate syrup", "dough", "meatloaf", "pizza", "pot pie", "burrito",
"red wine", "espresso", "tea cup", "eggnog", "mountain", "bubble", "cliff", "coral reef",
"geyser", "lakeshore", "promontory", "sandbar", "beach", "valley", "volcano", "baseball player",
"bridegroom", "scuba diver", "rapeseed", "daisy", "yellow lady's slipper", "corn", "acorn",
"rose hip", "horse chestnut seed", "coral fungus", "agaric", "gyromitra", "stinkhorn mushroom",
"earth star fungus", "hen of the woods mushroom", "bolete", "corn cob", "toilet paper"]
openai_imagenet_template = [
lambda c: f'a bad photo of a {c}.',
lambda c: f'a photo of many {c}.',
lambda c: f'a sculpture of a {c}.',
lambda c: f'a photo of the hard to see {c}.',
lambda c: f'a low resolution photo of the {c}.',
lambda c: f'a rendering of a {c}.',
lambda c: f'graffiti of a {c}.',
lambda c: f'a bad photo of the {c}.',
lambda c: f'a cropped photo of the {c}.',
lambda c: f'a tattoo of a {c}.',
lambda c: f'the embroidered {c}.',
lambda c: f'a photo of a hard to see {c}.',
lambda c: f'a bright photo of a {c}.',
lambda c: f'a photo of a clean {c}.',
lambda c: f'a photo of a dirty {c}.',
lambda c: f'a dark photo of the {c}.',
lambda c: f'a drawing of a {c}.',
lambda c: f'a photo of my {c}.',
lambda c: f'the plastic {c}.',
lambda c: f'a photo of the cool {c}.',
lambda c: f'a close-up photo of a {c}.',
lambda c: f'a black and white photo of the {c}.',
lambda c: f'a painting of the {c}.',
lambda c: f'a painting of a {c}.',
lambda c: f'a pixelated photo of the {c}.',
lambda c: f'a sculpture of the {c}.',
lambda c: f'a bright photo of the {c}.',
lambda c: f'a cropped photo of a {c}.',
lambda c: f'a plastic {c}.',
lambda c: f'a photo of the dirty {c}.',
lambda c: f'a jpeg corrupted photo of a {c}.',
lambda c: f'a blurry photo of the {c}.',
lambda c: f'a photo of the {c}.',
lambda c: f'a good photo of the {c}.',
lambda c: f'a rendering of the {c}.',
lambda c: f'a {c} in a video game.',
lambda c: f'a photo of one {c}.',
lambda c: f'a doodle of a {c}.',
lambda c: f'a close-up photo of the {c}.',
lambda c: f'a photo of a {c}.',
lambda c: f'the origami {c}.',
lambda c: f'the {c} in a video game.',
lambda c: f'a sketch of a {c}.',
lambda c: f'a doodle of the {c}.',
lambda c: f'a origami {c}.',
lambda c: f'a low resolution photo of a {c}.',
lambda c: f'the toy {c}.',
lambda c: f'a rendition of the {c}.',
lambda c: f'a photo of the clean {c}.',
lambda c: f'a photo of a large {c}.',
lambda c: f'a rendition of a {c}.',
lambda c: f'a photo of a nice {c}.',
lambda c: f'a photo of a weird {c}.',
lambda c: f'a blurry photo of a {c}.',
lambda c: f'a cartoon {c}.',
lambda c: f'art of a {c}.',
lambda c: f'a sketch of the {c}.',
lambda c: f'a embroidered {c}.',
lambda c: f'a pixelated photo of a {c}.',
lambda c: f'itap of the {c}.',
lambda c: f'a jpeg corrupted photo of the {c}.',
lambda c: f'a good photo of a {c}.',
lambda c: f'a plushie {c}.',
lambda c: f'a photo of the nice {c}.',
lambda c: f'a photo of the small {c}.',
lambda c: f'a photo of the weird {c}.',
lambda c: f'the cartoon {c}.',
lambda c: f'art of the {c}.',
lambda c: f'a drawing of the {c}.',
lambda c: f'a photo of the large {c}.',
lambda c: f'a black and white photo of a {c}.',
lambda c: f'the plushie {c}.',
lambda c: f'a dark photo of a {c}.',
lambda c: f'itap of a {c}.',
lambda c: f'graffiti of the {c}.',
lambda c: f'a toy {c}.',
lambda c: f'itap of my {c}.',
lambda c: f'a photo of a cool {c}.',
lambda c: f'a photo of a small {c}.',
lambda c: f'a tattoo of the {c}.',
]
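# --- Editor-added hedged sketch (not part of the original file) ---
# The class names and prompt templates above are typically combined into a zero-shot
# classifier: each template is formatted with each class name, the prompts are tokenized
# and encoded by the text tower, and the per-class embeddings are averaged and normalized.
# `model` and `tokenizer` below are assumed to come from open_clip's factory helpers.
#
#     import torch
#     import torch.nn.functional as F
#
#     def build_zeroshot_classifier(model, tokenizer, classnames, templates, device='cpu'):
#         with torch.no_grad():
#             weights = []
#             for classname in classnames:
#                 texts = tokenizer([template(classname) for template in templates]).to(device)
#                 class_embeddings = F.normalize(model.encode_text(texts), dim=-1)
#                 weights.append(F.normalize(class_embeddings.mean(dim=0), dim=-1))
#         return torch.stack(weights, dim=1)  # (embed_dim, num_classes)
#
#     classifier = build_zeroshot_classifier(
#         model, tokenizer, imagenet_classnames, openai_imagenet_template)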
|
open_clip-main
|
src/training/imagenet_zeroshot_data.py
|
import ast
import json
import logging
import math
import os
import random
import sys
import time
from dataclasses import dataclass
from multiprocessing import Value
import numpy as np
import pandas as pd
import torch
import torchvision.datasets as datasets
import webdataset as wds
from PIL import Image
from torch.utils.data import Dataset, DataLoader, SubsetRandomSampler, IterableDataset, get_worker_info
from torch.utils.data.distributed import DistributedSampler
from webdataset.filters import _shuffle
from webdataset.tariterators import base_plus_ext, url_opener, tar_file_expander, valid_sample
try:
import horovod.torch as hvd
except ImportError:
hvd = None
class CsvDataset(Dataset):
def __init__(self, input_filename, transforms, img_key, caption_key, sep="\t", tokenizer=None):
logging.debug(f'Loading csv data from {input_filename}.')
df = pd.read_csv(input_filename, sep=sep)
self.images = df[img_key].tolist()
self.captions = df[caption_key].tolist()
self.transforms = transforms
logging.debug('Done loading data.')
self.tokenize = tokenizer
def __len__(self):
return len(self.captions)
def __getitem__(self, idx):
images = self.transforms(Image.open(str(self.images[idx])))
texts = self.tokenize([str(self.captions[idx])])[0]
return images, texts
class SharedEpoch:
def __init__(self, epoch: int = 0):
self.shared_epoch = Value('i', epoch)
def set_value(self, epoch):
self.shared_epoch.value = epoch
def get_value(self):
return self.shared_epoch.value
@dataclass
class DataInfo:
dataloader: DataLoader
sampler: DistributedSampler = None
shared_epoch: SharedEpoch = None
def set_epoch(self, epoch):
if self.shared_epoch is not None:
self.shared_epoch.set_value(epoch)
if self.sampler is not None and isinstance(self.sampler, DistributedSampler):
self.sampler.set_epoch(epoch)
def get_dataset_size(shards):
shards_list = wds.shardlists.expand_urls(shards)
dir_path = os.path.dirname(shards_list[0])
sizes_filename = os.path.join(dir_path, 'sizes.json')
len_filename = os.path.join(dir_path, '__len__')
if os.path.exists(sizes_filename):
sizes = json.load(open(sizes_filename, 'r'))
total_size = sum([int(sizes[os.path.basename(shard)]) for shard in shards_list])
elif os.path.exists(len_filename):
# FIXME this used to be eval(open(...)) but that seemed rather unsafe
total_size = ast.literal_eval(open(len_filename, 'r').read())
else:
total_size = None # num samples undefined
# some common dataset sizes (at time of authors last download)
# CC3M (train): 2905954
# CC12M: 10968539
# LAION-400M: 407332084
# LAION-2B (english): 2170337258
num_shards = len(shards_list)
return total_size, num_shards
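# Editor-added note (hedged illustration): get_dataset_size() expects either a sizes.json
# file mapping shard basenames to sample counts, or a __len__ file containing a single
# integer, located next to the shards. The filenames below are hypothetical examples:
#
#     sizes.json: {"shard-000000.tar": 10000, "shard-000001.tar": 9875}
#     __len__:    19875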
def get_imagenet(args, preprocess_fns, split):
assert split in ["train", "val", "v2"]
is_train = split == "train"
preprocess_train, preprocess_val = preprocess_fns
if split == "v2":
from imagenetv2_pytorch import ImageNetV2Dataset
dataset = ImageNetV2Dataset(location=args.imagenet_v2, transform=preprocess_val)
else:
if is_train:
data_path = args.imagenet_train
preprocess_fn = preprocess_train
else:
data_path = args.imagenet_val
preprocess_fn = preprocess_val
assert data_path
dataset = datasets.ImageFolder(data_path, transform=preprocess_fn)
if is_train:
idxs = np.zeros(len(dataset.targets))
target_array = np.array(dataset.targets)
k = 50
for c in range(1000):
m = target_array == c
n = len(idxs[m])
arr = np.zeros(n)
arr[:k] = 1
np.random.shuffle(arr)
idxs[m] = arr
idxs = idxs.astype('int')
sampler = SubsetRandomSampler(np.where(idxs)[0])
else:
sampler = None
dataloader = torch.utils.data.DataLoader(
dataset,
batch_size=args.batch_size,
num_workers=args.workers,
sampler=sampler,
)
return DataInfo(dataloader=dataloader, sampler=sampler)
def count_samples(dataloader):
os.environ["WDS_EPOCH"] = "0"
n_elements, n_batches = 0, 0
for images, texts in dataloader:
n_batches += 1
n_elements += len(images)
assert len(images) == len(texts)
return n_elements, n_batches
def filter_no_caption_or_no_image(sample):
has_caption = ('txt' in sample)
has_image = ('png' in sample or 'jpg' in sample or 'jpeg' in sample or 'webp' in sample)
return has_caption and has_image
def log_and_continue(exn):
"""Call in an exception handler to ignore any exception, issue a warning, and continue."""
logging.warning(f'Handling webdataset error ({repr(exn)}). Ignoring.')
return True
def group_by_keys_nothrow(data, keys=base_plus_ext, lcase=True, suffixes=None, handler=None):
"""Return function over iterator that groups key, value pairs into samples.
:param keys: function that splits the key into key and extension (base_plus_ext)
:param lcase: convert suffixes to lower case (Default value = True)
"""
current_sample = None
for filesample in data:
assert isinstance(filesample, dict)
fname, value = filesample["fname"], filesample["data"]
prefix, suffix = keys(fname)
if prefix is None:
continue
if lcase:
suffix = suffix.lower()
        # FIXME webdataset version throws if suffix in current_sample, but we have a potential for
        # this happening in the current LAION400m dataset if a tar ends with the same prefix as the next
        # one begins with; rare, but possible since prefixes aren't unique across tar files in that dataset
if current_sample is None or prefix != current_sample["__key__"] or suffix in current_sample:
if valid_sample(current_sample):
yield current_sample
current_sample = dict(__key__=prefix, __url__=filesample["__url__"])
if suffixes is None or suffix in suffixes:
current_sample[suffix] = value
if valid_sample(current_sample):
yield current_sample
def tarfile_to_samples_nothrow(src, handler=log_and_continue):
# NOTE this is a re-impl of the webdataset impl with group_by_keys that doesn't throw
streams = url_opener(src, handler=handler)
files = tar_file_expander(streams, handler=handler)
samples = group_by_keys_nothrow(files, handler=handler)
return samples
def pytorch_worker_seed(increment=0):
"""get dataloader worker seed from pytorch"""
worker_info = get_worker_info()
if worker_info is not None:
# favour using the seed already created for pytorch dataloader workers if it exists
seed = worker_info.seed
if increment:
# space out seed increments so they can't overlap across workers in different iterations
seed += increment * max(1, worker_info.num_workers)
return seed
# fallback to wds rank based seed
return wds.utils.pytorch_worker_seed()
_SHARD_SHUFFLE_SIZE = 2000
_SHARD_SHUFFLE_INITIAL = 500
_SAMPLE_SHUFFLE_SIZE = 5000
_SAMPLE_SHUFFLE_INITIAL = 1000
class detshuffle2(wds.PipelineStage):
def __init__(
self,
bufsize=1000,
initial=100,
seed=0,
epoch=-1,
):
self.bufsize = bufsize
self.initial = initial
self.seed = seed
self.epoch = epoch
def run(self, src):
if isinstance(self.epoch, SharedEpoch):
epoch = self.epoch.get_value()
else:
            # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
            # situation, as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
rng = random.Random()
if self.seed < 0:
# If seed is negative, we use the worker's seed, this will be different across all nodes/workers
seed = pytorch_worker_seed(epoch)
else:
            # This seed is deterministic AND the same across all nodes/workers in each epoch
seed = self.seed + epoch
rng.seed(seed)
return _shuffle(src, self.bufsize, self.initial, rng)
class ResampledShards2(IterableDataset):
"""An iterable dataset yielding a list of urls."""
def __init__(
self,
urls,
nshards=sys.maxsize,
worker_seed=None,
deterministic=False,
epoch=-1,
):
"""Sample shards from the shard list with replacement.
:param urls: a list of URLs as a Python list or brace notation string
"""
super().__init__()
urls = wds.shardlists.expand_urls(urls)
self.urls = urls
assert isinstance(self.urls[0], str)
self.nshards = nshards
self.rng = random.Random()
self.worker_seed = worker_seed
self.deterministic = deterministic
self.epoch = epoch
def __iter__(self):
"""Return an iterator over the shards."""
if isinstance(self.epoch, SharedEpoch):
epoch = self.epoch.get_value()
else:
            # NOTE: this epoch tracking is problematic in a multiprocess (dataloader workers or train)
            # situation, as different workers may wrap at different times (or not at all).
self.epoch += 1
epoch = self.epoch
if self.deterministic:
# reset seed w/ epoch if deterministic
if self.worker_seed is None:
# pytorch worker seed should be deterministic due to being init by arg.seed + rank + worker id
seed = pytorch_worker_seed(epoch)
else:
seed = self.worker_seed() + epoch
self.rng.seed(seed)
for _ in range(self.nshards):
yield dict(url=self.rng.choice(self.urls))
def get_wds_dataset(args, preprocess_img, is_train, epoch=0, floor=False, tokenizer=None):
input_shards = args.train_data if is_train else args.val_data
assert input_shards is not None
resampled = getattr(args, 'dataset_resampled', False) and is_train
num_samples, num_shards = get_dataset_size(input_shards)
if not num_samples:
if is_train:
num_samples = args.train_num_samples
if not num_samples:
raise RuntimeError(
'Currently, number of dataset samples must be specified for training dataset. '
'Please specify via `--train-num-samples` if no dataset length info present.')
else:
num_samples = args.val_num_samples or 0 # eval will just exhaust the iterator if not specified
shared_epoch = SharedEpoch(epoch=epoch) # create a shared epoch store to sync epoch to dataloader worker proc
if resampled:
pipeline = [ResampledShards2(input_shards, deterministic=True, epoch=shared_epoch)]
else:
pipeline = [wds.SimpleShardList(input_shards)]
# at this point we have an iterator over all the shards
if is_train:
if not resampled:
pipeline.extend([
detshuffle2(
bufsize=_SHARD_SHUFFLE_SIZE,
initial=_SHARD_SHUFFLE_INITIAL,
seed=args.seed,
epoch=shared_epoch,
),
wds.split_by_node,
wds.split_by_worker,
])
pipeline.extend([
# at this point, we have an iterator over the shards assigned to each worker at each node
tarfile_to_samples_nothrow, # wds.tarfile_to_samples(handler=log_and_continue),
wds.shuffle(
bufsize=_SAMPLE_SHUFFLE_SIZE,
initial=_SAMPLE_SHUFFLE_INITIAL,
),
])
else:
pipeline.extend([
wds.split_by_worker,
# at this point, we have an iterator over the shards assigned to each worker
wds.tarfile_to_samples(handler=log_and_continue),
])
pipeline.extend([
wds.select(filter_no_caption_or_no_image),
wds.decode("pilrgb", handler=log_and_continue),
wds.rename(image="jpg;png;jpeg;webp", text="txt"),
wds.map_dict(image=preprocess_img, text=lambda text: tokenizer(text)[0]),
wds.to_tuple("image", "text"),
wds.batched(args.batch_size, partial=not is_train),
])
dataset = wds.DataPipeline(*pipeline)
if is_train:
if not resampled:
assert num_shards >= args.workers * args.world_size, 'number of shards must be >= total workers'
# roll over and repeat a few samples to get same number of full batches on each node
round_fn = math.floor if floor else math.ceil
global_batch_size = args.batch_size * args.world_size
num_batches = round_fn(num_samples / global_batch_size)
num_workers = max(1, args.workers)
num_worker_batches = round_fn(num_batches / num_workers) # per dataloader worker
num_batches = num_worker_batches * num_workers
num_samples = num_batches * global_batch_size
dataset = dataset.with_epoch(num_worker_batches) # each worker is iterating over this
else:
# last batches are partial, eval is done on single (master) node
num_batches = math.ceil(num_samples / args.batch_size)
dataloader = wds.WebLoader(
dataset,
batch_size=None,
shuffle=False,
num_workers=args.workers,
persistent_workers=True,
)
# FIXME not clear which approach is better, with_epoch before vs after dataloader?
# hoping to resolve via https://github.com/webdataset/webdataset/issues/169
# if is_train:
# # roll over and repeat a few samples to get same number of full batches on each node
# global_batch_size = args.batch_size * args.world_size
# num_batches = math.ceil(num_samples / global_batch_size)
# num_workers = max(1, args.workers)
# num_batches = math.ceil(num_batches / num_workers) * num_workers
# num_samples = num_batches * global_batch_size
# dataloader = dataloader.with_epoch(num_batches)
# else:
# # last batches are partial, eval is done on single (master) node
# num_batches = math.ceil(num_samples / args.batch_size)
# add meta-data to dataloader instance for convenience
dataloader.num_batches = num_batches
dataloader.num_samples = num_samples
return DataInfo(dataloader=dataloader, shared_epoch=shared_epoch)
def get_csv_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None):
input_filename = args.train_data if is_train else args.val_data
assert input_filename
dataset = CsvDataset(
input_filename,
preprocess_fn,
img_key=args.csv_img_key,
caption_key=args.csv_caption_key,
sep=args.csv_separator,
tokenizer=tokenizer
)
num_samples = len(dataset)
sampler = DistributedSampler(dataset) if args.distributed and is_train else None
shuffle = is_train and sampler is None
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=shuffle,
num_workers=args.workers,
pin_memory=True,
sampler=sampler,
drop_last=is_train,
)
dataloader.num_samples = num_samples
dataloader.num_batches = len(dataloader)
return DataInfo(dataloader, sampler)
class SyntheticDataset(Dataset):
def __init__(self, transform=None, image_size=(224, 224), caption="Dummy caption", dataset_size=100, tokenizer=None):
self.transform = transform
self.image_size = image_size
self.caption = caption
self.image = Image.new('RGB', image_size)
self.dataset_size = dataset_size
self.preprocess_txt = lambda text: tokenizer(text)[0]
def __len__(self):
return self.dataset_size
    def __getitem__(self, idx):
        # fall back to the raw PIL image so `image` is always defined when no transform is given
        image = self.transform(self.image) if self.transform is not None else self.image
        return image, self.preprocess_txt(self.caption)
def get_synthetic_dataset(args, preprocess_fn, is_train, epoch=0, tokenizer=None):
image_size = preprocess_fn.transforms[0].size
dataset = SyntheticDataset(
transform=preprocess_fn, image_size=image_size, dataset_size=args.train_num_samples, tokenizer=tokenizer)
num_samples = len(dataset)
sampler = DistributedSampler(dataset) if args.distributed and is_train else None
shuffle = is_train and sampler is None
dataloader = DataLoader(
dataset,
batch_size=args.batch_size,
shuffle=shuffle,
num_workers=args.workers,
pin_memory=True,
sampler=sampler,
drop_last=is_train,
)
dataloader.num_samples = num_samples
dataloader.num_batches = len(dataloader)
return DataInfo(dataloader, sampler)
def get_dataset_fn(data_path, dataset_type):
if dataset_type == "webdataset":
return get_wds_dataset
elif dataset_type == "csv":
return get_csv_dataset
elif dataset_type == "synthetic":
return get_synthetic_dataset
elif dataset_type == "auto":
ext = data_path.split('.')[-1]
if ext in ['csv', 'tsv']:
return get_csv_dataset
elif ext in ['tar']:
return get_wds_dataset
else:
raise ValueError(
f"Tried to figure out dataset type, but failed for extension {ext}.")
else:
raise ValueError(f"Unsupported dataset type: {dataset_type}")
def get_data(args, preprocess_fns, epoch=0, tokenizer=None):
preprocess_train, preprocess_val = preprocess_fns
data = {}
if args.train_data or args.dataset_type == "synthetic":
data["train"] = get_dataset_fn(args.train_data, args.dataset_type)(
args, preprocess_train, is_train=True, epoch=epoch, tokenizer=tokenizer)
if args.val_data:
data["val"] = get_dataset_fn(args.val_data, args.dataset_type)(
args, preprocess_val, is_train=False, tokenizer=tokenizer)
if args.imagenet_val is not None:
data["imagenet-val"] = get_imagenet(args, preprocess_fns, "val")
if args.imagenet_v2 is not None:
data["imagenet-v2"] = get_imagenet(args, preprocess_fns, "v2")
return data
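# --- Editor-added hedged usage sketch (not part of the original file) ---
# get_data() only needs an argparse-style namespace; the attribute names below are the
# ones read in this file, while the concrete values are purely illustrative.
#
#     from argparse import Namespace
#
#     args = Namespace(
#         train_data='/data/shards/{00000..00099}.tar', val_data=None,
#         dataset_type='webdataset', dataset_resampled=False,
#         train_num_samples=1_000_000, val_num_samples=0,
#         batch_size=64, workers=4, world_size=1, distributed=False, seed=0,
#         imagenet_val=None, imagenet_v2=None,
#         csv_img_key='filepath', csv_caption_key='title', csv_separator='\t',
#     )
#     data = get_data(args, (preprocess_train, preprocess_val), epoch=0, tokenizer=tokenizer)
#     data['train'].set_epoch(0)
#     train_loader = data['train'].dataloader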
|
open_clip-main
|
src/training/data.py
|
from typing import Optional
import torch
from torch import nn
from torch.nn import functional as F
import numpy as np
from dataclasses import dataclass
from .transformer import (
LayerNormFp32,
LayerNorm,
QuickGELU,
MultimodalTransformer,
)
from .model import CLIPTextCfg, CLIPVisionCfg, _build_vision_tower, _build_text_tower
from .generation_utils import top_a, top_k, top_p, prepare_inputs_for_generation
from transformers import BeamSearchScorer, LogitsProcessorList, MinLengthLogitsProcessor, StoppingCriteriaList, MaxLengthCriteria
GENERATION_TYPES = {
"top_k": top_k,
"top_p": top_p,
"top_a": top_a,
"beam_search": "beam_search"
}
@dataclass
class MultimodalCfg(CLIPTextCfg):
mlp_ratio: int = 4
dim_head: int = 64
heads: int = 8
n_queries: int = 256
attn_pooler_heads: int = 8
def _build_text_decoder_tower(
embed_dim,
multimodal_cfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
):
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
act_layer = QuickGELU if quick_gelu else nn.GELU
norm_layer = (
LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
)
decoder = MultimodalTransformer(
context_length=multimodal_cfg.context_length,
width=multimodal_cfg.width,
heads=multimodal_cfg.heads,
layers=multimodal_cfg.layers,
ls_init_value=multimodal_cfg.ls_init_value,
output_dim=embed_dim,
act_layer=act_layer,
norm_layer=norm_layer,
)
return decoder
class CoCa(nn.Module):
def __init__(
self,
embed_dim,
multimodal_cfg: MultimodalCfg,
text_cfg: CLIPTextCfg,
vision_cfg: CLIPVisionCfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
pad_id: int = 0,
):
super().__init__()
multimodal_cfg = MultimodalCfg(**multimodal_cfg) if isinstance(multimodal_cfg, dict) else multimodal_cfg
text_cfg = CLIPTextCfg(**text_cfg) if isinstance(text_cfg, dict) else text_cfg
vision_cfg = CLIPVisionCfg(**vision_cfg) if isinstance(vision_cfg, dict) else vision_cfg
self.text = _build_text_tower(
embed_dim=embed_dim,
text_cfg=text_cfg,
quick_gelu=quick_gelu,
cast_dtype=cast_dtype,
)
vocab_size = (
text_cfg.vocab_size # for hf models
if hasattr(text_cfg, "hf_model_name") and text_cfg.hf_model_name is not None
else text_cfg.vocab_size
)
self.visual = _build_vision_tower(
embed_dim=embed_dim,
vision_cfg=vision_cfg,
quick_gelu=quick_gelu,
cast_dtype=cast_dtype,
)
self.text_decoder = _build_text_decoder_tower(
vocab_size,
multimodal_cfg=multimodal_cfg,
quick_gelu=quick_gelu,
cast_dtype=cast_dtype,
)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
self.pad_id = pad_id
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.visual.set_grad_checkpointing(enable)
self.text.set_grad_checkpointing(enable)
self.text_decoder.set_grad_checkpointing(enable)
def _encode_image(self, images, normalize=True):
image_latent, tokens_embs = self.visual(images)
image_latent = F.normalize(image_latent, dim=-1) if normalize else image_latent
return image_latent, tokens_embs
def _encode_text(self, text, normalize=True, embed_cls=True):
text = text[:, :-1] if embed_cls else text # make space for CLS token
text_latent, token_emb = self.text(text)
text_latent = F.normalize(text_latent, dim=-1) if normalize else text_latent
return text_latent, token_emb
def encode_image(self, images, normalize=True):
image_latent, _ = self._encode_image(images, normalize=normalize)
return image_latent
def encode_text(self, text, normalize=True, embed_cls=True):
text_latent, _ = self._encode_text(text, normalize=normalize, embed_cls=embed_cls)
return text_latent
def forward(self, image, text, embed_cls=True, image_latent=None, image_embs=None):
text_latent, token_embs = self._encode_text(text, embed_cls=embed_cls)
if image_latent is None or image_embs is None:
image_latent, image_embs = self._encode_image(image)
# TODO: add assertion to avoid bugs?
labels = text[:, -token_embs.shape[1]:]
logits = self.text_decoder(image_embs, token_embs)
return {
"image_features": image_latent,
"text_features": text_latent,
"logits": logits,
"labels": labels,
"logit_scale": self.logit_scale.exp()
}
def generate(
self,
image,
text=None,
seq_len=77,
max_seq_len=77,
mask_prob=0.0,
temperature=1.,
generation_type="beam_search",
filter_thres=0.9,
min_p_pow=2.0,
min_p_ratio=0.02,
pad_token_id=None,
eos_token_id=None,
sot_token_id=None,
num_beams=6,
num_beam_groups=3,
min_seq_len=5,
stopping_criteria=None,
):
assert generation_type in GENERATION_TYPES, \
f"generation_type has to be one of {'| ' + ' | '.join(list(GENERATION_TYPES.keys())) + ' |'}."
filter_logits_fn = GENERATION_TYPES[generation_type]
if generation_type == "beam_search":
return self.generate_beamsearch(
image_inputs = image,
max_length = seq_len,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
sot_token_id=sot_token_id,
num_beams=num_beams,
num_beam_groups=num_beam_groups,
min_seq_len=min_seq_len,
stopping_criteria=stopping_criteria,
)
assert mask_prob < 1, "mask_prob must be smaller than 1."
device = image.device
sot_token_id = 49406 if sot_token_id is None else sot_token_id
if text is None:
text = torch.ones((image.shape[0], 1), device=device, dtype=torch.long) * sot_token_id
was_training = self.training
num_dims = len(text.shape)
if num_dims == 1:
text = text[None, :]
_, t = text.shape
self.eval()
out = text
for _ in range(seq_len):
x = out[:, -max_seq_len:]
logits = self(image, x, embed_cls=False)["logits"][:, -1]
if filter_logits_fn in {top_k, top_p}:
filtered_logits = filter_logits_fn(logits, thres=filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
elif filter_logits_fn is top_a:
filtered_logits = filter_logits_fn(
logits, min_p_pow=min_p_pow, min_p_ratio=min_p_ratio
)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
if num_dims == 1:
out = out.squeeze(0)
self.train(was_training)
return out
def generate_beamsearch(
self,
image_inputs,
max_length,
pad_token_id=None,
eos_token_id=None,
sot_token_id=None,
num_beams=6,
num_beam_groups=3,
min_seq_len=5,
stopping_criteria=None,
):
sot_token_id = 49406 if sot_token_id is None else sot_token_id
eos_token_id = 49407 if eos_token_id is None else eos_token_id
pad_token_id = self.pad_id if pad_token_id is None else pad_token_id
device = image_inputs.device
batch_size = image_inputs.shape[0]
image_inputs = torch.repeat_interleave(image_inputs, num_beams, dim=0)
image_latent, image_embs = self._encode_image(image_inputs)
input_ids = torch.ones((batch_size * num_beams, 1), device=device, dtype=torch.long)
input_ids = input_ids * sot_token_id
beam_scorer = BeamSearchScorer(
batch_size=batch_size,
num_beams=num_beams,
device=device,
num_beam_groups=num_beam_groups,
)
# instantiate logits processors
target_logits_processor_list = [
MinLengthLogitsProcessor(min_seq_len, eos_token_id=eos_token_id)
]
logits_processor = LogitsProcessorList(
target_logits_processor_list
)
if stopping_criteria is None:
stopping_criteria = [MaxLengthCriteria(max_length=max_length)]
stopping_criteria = StoppingCriteriaList(
stopping_criteria
)
batch_size = len(beam_scorer._beam_hyps)
num_beams = beam_scorer.num_beams
num_beam_groups = beam_scorer.num_beam_groups
num_sub_beams = num_beams // num_beam_groups
batch_beam_size, cur_len = input_ids.shape
beam_indices = None
if num_beams * batch_size != batch_beam_size:
raise ValueError(
f"Batch dimension of `input_ids` should be {num_beams * batch_size}, but is {batch_beam_size}."
)
beam_scores = torch.full((batch_size, num_beams), -1e9, dtype=torch.float, device=device)
        # initialise score of first beam of each group with 0 and the rest with -1e9. This ensures that the beams in
        # the same group don't produce the same tokens every time.
beam_scores[:, ::num_sub_beams] = 0
beam_scores = beam_scores.view((batch_size * num_beams,))
while True:
# predicted tokens in cur_len step
current_tokens = torch.zeros(batch_size * num_beams, dtype=input_ids.dtype, device=device)
# indices which will form the beams in the next time step
reordering_indices = torch.zeros(batch_size * num_beams, dtype=torch.long, device=device)
# do one decoder step on all beams of all sentences in batch
model_inputs = prepare_inputs_for_generation(input_ids=input_ids, image_inputs=image_inputs)
outputs = self(
model_inputs['images'],
model_inputs['text'],
embed_cls=False,
image_latent=image_latent,
image_embs=image_embs
)
for beam_group_idx in range(num_beam_groups):
group_start_idx = beam_group_idx * num_sub_beams
group_end_idx = min(group_start_idx + num_sub_beams, num_beams)
group_size = group_end_idx - group_start_idx
# indices of beams of current group among all sentences in batch
batch_group_indices = []
for batch_idx in range(batch_size):
batch_group_indices.extend(
[batch_idx * num_beams + idx for idx in range(group_start_idx, group_end_idx)]
)
group_input_ids = input_ids[batch_group_indices]
                # select outputs of beams of current group only
next_token_logits = outputs['logits'][batch_group_indices, -1, :]
vocab_size = next_token_logits.shape[-1]
next_token_scores_processed = logits_processor(
group_input_ids, next_token_logits, current_tokens=current_tokens, beam_group_idx=beam_group_idx
)
next_token_scores = next_token_scores_processed + beam_scores[batch_group_indices].unsqueeze(-1)
next_token_scores = next_token_scores.expand_as(next_token_scores_processed)
# reshape for beam search
next_token_scores = next_token_scores.view(batch_size, group_size * vocab_size)
next_token_scores, next_tokens = torch.topk(
next_token_scores, 2 * group_size, dim=1, largest=True, sorted=True
)
next_indices = torch.div(next_tokens, vocab_size, rounding_mode="floor")
next_tokens = next_tokens % vocab_size
# stateless
process_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
beam_outputs = beam_scorer.process(
group_input_ids,
next_token_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
beam_indices=process_beam_indices,
)
beam_scores[batch_group_indices] = beam_outputs["next_beam_scores"]
beam_next_tokens = beam_outputs["next_beam_tokens"]
beam_idx = beam_outputs["next_beam_indices"]
input_ids[batch_group_indices] = group_input_ids[beam_idx]
group_input_ids = torch.cat([group_input_ids[beam_idx, :], beam_next_tokens.unsqueeze(-1)], dim=-1)
current_tokens[batch_group_indices] = group_input_ids[:, -1]
# (beam_idx // group_size) -> batch_idx
# (beam_idx % group_size) -> offset of idx inside the group
reordering_indices[batch_group_indices] = (
num_beams * torch.div(beam_idx, group_size, rounding_mode="floor") + group_start_idx + (beam_idx % group_size)
)
input_ids = torch.cat([input_ids, current_tokens.unsqueeze(-1)], dim=-1)
# increase cur_len
cur_len = cur_len + 1
if beam_scorer.is_done or stopping_criteria(input_ids, None):
break
final_beam_indices = sum(beam_indices, ()) if beam_indices is not None else None
sequence_outputs = beam_scorer.finalize(
input_ids,
beam_scores,
next_tokens,
next_indices,
pad_token_id=pad_token_id,
eos_token_id=eos_token_id,
max_length=stopping_criteria.max_length,
beam_indices=final_beam_indices,
)
return sequence_outputs['sequences']
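# --- Editor-added hedged usage sketch (not part of the original file) ---
# Caption generation with a CoCa model. The model name and pretrained tag come from the
# pretrained configs elsewhere in this repo; the image path is illustrative.
#
#     import torch
#     from PIL import Image
#     import open_clip
#
#     model, _, transform = open_clip.create_model_and_transforms(
#         'coca_ViT-L-14', pretrained='laion2b_s13b_b90k')
#     image = transform(Image.open('example.jpg').convert('RGB')).unsqueeze(0)
#     with torch.no_grad():
#         tokens = model.generate(image, generation_type='beam_search', num_beams=6)
#     caption = open_clip.decode(tokens[0])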
|
open_clip-main
|
src/open_clip/coca_model.py
|
import hashlib
import os
import urllib
import warnings
from functools import partial
from typing import Dict, Union
from tqdm import tqdm
from .version import __version__
try:
from huggingface_hub import hf_hub_download
hf_hub_download = partial(hf_hub_download, library_name="open_clip", library_version=__version__)
_has_hf_hub = True
except ImportError:
hf_hub_download = None
_has_hf_hub = False
def _pcfg(url='', hf_hub='', mean=None, std=None):
return dict(
url=url,
hf_hub=hf_hub,
mean=mean,
std=std,
)
_RN50 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt"),
yfcc15m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt"),
cc12m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"),
)
_RN50_quickgelu = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/afeb0e10f9e5a86da6080e35cf09123aca3b358a0c3e3b6c78a7b63bc04b6762/RN50.pt"),
yfcc15m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-yfcc15m-455df137.pt"),
cc12m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn50-quickgelu-cc12m-f000538c.pt"),
)
_RN101 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt"),
yfcc15m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"),
)
_RN101_quickgelu = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/8fa8567bab74a42d41c5915025a8e4538c3bdbe8804a470a72f30b0d94fab599/RN101.pt"),
yfcc15m=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/rn101-quickgelu-yfcc15m-3e04b30e.pt"),
)
_RN50x4 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/7e526bd135e493cef0776de27d5f42653e6b4c8bf9e0f653bb11773263205fdd/RN50x4.pt"),
)
_RN50x16 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/52378b407f34354e150460fe41077663dd5b39c54cd0bfd2b27167a4a06ec9aa/RN50x16.pt"),
)
_RN50x64 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/be1cfb55d75a9666199fb2206c106743da0f6468c9d327f3e0d0a543a9919d9c/RN50x64.pt"),
)
_VITB32 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
laion2b_e16=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-laion2b_e16-af8dbd0c.pth"),
laion2b_s34b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-laion2B-s34B-b79K/')
)
_VITB32_quickgelu = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/40d365715913c9da98579312b702a82c18be219cc2a73407c4526f58eba950af/ViT-B-32.pt"),
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e31-d867053b.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_32-quickgelu-laion400m_e32-46683a32.pt"),
)
_VITB16 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/5806e77cd80f8b59890b7e101eabd078d9fb84e6937f9e85e4ecb61988df416f/ViT-B-16.pt"),
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e31-00efa78f.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16-laion400m_e32-55e67d44.pt"),
# laion400m_32k=_pcfg(
# url="",
# mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
# laion400m_64k=_pcfg(
# url="",
# mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
laion2b_s34b_b88k=_pcfg(hf_hub='laion/CLIP-ViT-B-16-laion2B-s34B-b88K/'),
)
_VITB16_PLUS_240 = dict(
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e31-8fb26589.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_b_16_plus_240-laion400m_e32-699c4b84.pt"),
)
_VITL14 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/b8cca3fd41ae0c99ba7e8951adf17d267cdb84cd88be6f7c2e0eca1737a03836/ViT-L-14.pt"),
laion400m_e31=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e31-69988bb6.pt"),
laion400m_e32=_pcfg(
"https://github.com/mlfoundations/open_clip/releases/download/v0.2-weights/vit_l_14-laion400m_e32-3d133497.pt"),
laion2b_s32b_b82k=_pcfg(
hf_hub='laion/CLIP-ViT-L-14-laion2B-s32B-b82K/',
mean=(0.5, 0.5, 0.5), std=(0.5, 0.5, 0.5)),
)
_VITL14_336 = dict(
openai=_pcfg(
"https://openaipublic.azureedge.net/clip/models/3035c92b350959924f9f00213499208652fc7ea050643e8b385c2dac08641f02/ViT-L-14-336px.pt"),
)
_VITH14 = dict(
laion2b_s32b_b79k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-laion2B-s32B-b79K/'),
)
_VITg14 = dict(
laion2b_s12b_b42k=_pcfg(hf_hub='laion/CLIP-ViT-g-14-laion2B-s12B-b42K/'),
)
_VITbigG14 = dict(
laion2b_s39b_b160k=_pcfg(hf_hub='laion/CLIP-ViT-bigG-14-laion2B-39B-b160k/'),
)
_robertaViTB32 = dict(
laion2b_s12b_b32k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-roberta-base-laion2B-s12B-b32k/'),
)
_xlmRobertaBaseViTB32 = dict(
laion5b_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-B-32-xlm-roberta-base-laion5B-s13B-b90k/'),
)
_xlmRobertaLargeFrozenViTH14 = dict(
frozen_laion5b_s13b_b90k=_pcfg(hf_hub='laion/CLIP-ViT-H-14-frozen-xlm-roberta-large-laion5B-s13B-b90k/'),
)
_convnext_base = dict(
laion400m_s13b_b51k=_pcfg(hf_hub='laion/CLIP-convnext_base-laion400M-s13B-b51K/'),
)
_convnext_base_w = dict(
laion2b_s13b_b82k=_pcfg(hf_hub='laion/CLIP-convnext_base_w-laion2B-s13B-b82K/'),
laion2b_s13b_b82k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_base_w-laion2B-s13B-b82K-augreg/'),
laion_aesthetic_s13b_b82k=_pcfg(hf_hub='laion/CLIP-convnext_base_w-laion_aesthetic-s13B-b82K/'),
)
_convnext_base_w_320 = dict(
laion_aesthetic_s13b_b82k=_pcfg(hf_hub='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K/'),
laion_aesthetic_s13b_b82k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_base_w_320-laion_aesthetic-s13B-b82K-augreg/'),
)
_convnext_large_d = dict(
laion2b_s26b_b102k_augreg=_pcfg(hf_hub='laion/CLIP-convnext_large_d.laion2B-s26B-b102K-augreg/'),
)
_coca_VITB32 = dict(
laion2b_s13b_b90k=_pcfg(hf_hub='laion/CoCa-ViT-B-32-laion2B-s13B-b90k/')
)
_coca_VITL14 = dict(
laion2b_s13b_b90k=_pcfg(hf_hub='laion/CoCa-ViT-L-14-laion2B-s13B-b90k/')
)
_PRETRAINED = {
"RN50": _RN50,
"RN50-quickgelu": _RN50_quickgelu,
"RN101": _RN101,
"RN101-quickgelu": _RN101_quickgelu,
"RN50x4": _RN50x4,
"RN50x16": _RN50x16,
"RN50x64": _RN50x64,
"ViT-B-32": _VITB32,
"ViT-B-32-quickgelu": _VITB32_quickgelu,
"ViT-B-16": _VITB16,
"ViT-B-16-plus-240": _VITB16_PLUS_240,
"ViT-L-14": _VITL14,
"ViT-L-14-336": _VITL14_336,
"ViT-H-14": _VITH14,
"ViT-g-14": _VITg14,
"ViT-bigG-14": _VITbigG14,
"roberta-ViT-B-32": _robertaViTB32,
"xlm-roberta-base-ViT-B-32": _xlmRobertaBaseViTB32,
"xlm-roberta-large-ViT-H-14": _xlmRobertaLargeFrozenViTH14,
"convnext_base": _convnext_base,
"convnext_base_w": _convnext_base_w,
"convnext_base_w_320": _convnext_base_w_320,
"convnext_large_d": _convnext_large_d,
"coca_ViT-B-32": _coca_VITB32,
"coca_ViT-L-14": _coca_VITL14,
}
def _clean_tag(tag: str):
# normalize pretrained tags
return tag.lower().replace('-', '_')
def list_pretrained(as_str: bool = False):
""" returns list of pretrained models
Returns a tuple (model_name, pretrain_tag) by default or 'name:tag' if as_str == True
"""
return [':'.join([k, t]) if as_str else (k, t) for k in _PRETRAINED.keys() for t in _PRETRAINED[k].keys()]
def list_pretrained_models_by_tag(tag: str):
""" return all models having the specified pretrain tag """
models = []
tag = _clean_tag(tag)
for k in _PRETRAINED.keys():
if tag in _PRETRAINED[k]:
models.append(k)
return models
def list_pretrained_tags_by_model(model: str):
""" return all pretrain tags for the specified model architecture """
tags = []
if model in _PRETRAINED:
tags.extend(_PRETRAINED[model].keys())
return tags
def is_pretrained_cfg(model: str, tag: str):
if model not in _PRETRAINED:
return False
return _clean_tag(tag) in _PRETRAINED[model]
def get_pretrained_cfg(model: str, tag: str):
if model not in _PRETRAINED:
return {}
model_pretrained = _PRETRAINED[model]
return model_pretrained.get(_clean_tag(tag), {})
def get_pretrained_url(model: str, tag: str):
cfg = get_pretrained_cfg(model, _clean_tag(tag))
return cfg.get('url', '')
def download_pretrained_from_url(
url: str,
cache_dir: Union[str, None] = None,
):
if not cache_dir:
cache_dir = os.path.expanduser("~/.cache/clip")
os.makedirs(cache_dir, exist_ok=True)
filename = os.path.basename(url)
if 'openaipublic' in url:
expected_sha256 = url.split("/")[-2]
elif 'mlfoundations' in url:
expected_sha256 = os.path.splitext(filename)[0].split("-")[-1]
else:
expected_sha256 = ''
download_target = os.path.join(cache_dir, filename)
if os.path.exists(download_target) and not os.path.isfile(download_target):
raise RuntimeError(f"{download_target} exists and is not a regular file")
if os.path.isfile(download_target):
if expected_sha256:
if hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
return download_target
else:
warnings.warn(f"{download_target} exists, but the SHA256 checksum does not match; re-downloading the file")
else:
return download_target
with urllib.request.urlopen(url) as source, open(download_target, "wb") as output:
with tqdm(total=int(source.headers.get("Content-Length")), ncols=80, unit='iB', unit_scale=True) as loop:
while True:
buffer = source.read(8192)
if not buffer:
break
output.write(buffer)
loop.update(len(buffer))
if expected_sha256 and not hashlib.sha256(open(download_target, "rb").read()).hexdigest().startswith(expected_sha256):
raise RuntimeError(f"Model has been downloaded but the SHA256 checksum does not not match")
return download_target
def has_hf_hub(necessary=False):
if not _has_hf_hub and necessary:
# if no HF Hub module installed, and it is necessary to continue, raise error
raise RuntimeError(
'Hugging Face hub model specified but package not installed. Run `pip install huggingface_hub`.')
return _has_hf_hub
def download_pretrained_from_hf(
model_id: str,
filename: str = 'open_clip_pytorch_model.bin',
revision=None,
cache_dir: Union[str, None] = None,
):
has_hf_hub(True)
cached_file = hf_hub_download(model_id, filename, revision=revision, cache_dir=cache_dir)
return cached_file
def download_pretrained(
cfg: Dict,
force_hf_hub: bool = False,
cache_dir: Union[str, None] = None,
):
target = ''
if not cfg:
return target
download_url = cfg.get('url', '')
download_hf_hub = cfg.get('hf_hub', '')
if download_hf_hub and force_hf_hub:
# use HF hub even if url exists
download_url = ''
if download_url:
target = download_pretrained_from_url(download_url, cache_dir=cache_dir)
elif download_hf_hub:
has_hf_hub(True)
# we assume the hf_hub entries in pretrained config combine model_id + filename in
# 'org/model_name/filename.pt' form. To specify just the model id w/o filename and
# use 'open_clip_pytorch_model.bin' default, there must be a trailing slash 'org/model_name/'.
model_id, filename = os.path.split(download_hf_hub)
if filename:
target = download_pretrained_from_hf(model_id, filename=filename, cache_dir=cache_dir)
else:
target = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
return target
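# --- Editor-added hedged usage sketch (not part of the original file) ---
# Resolving a checkpoint with the helpers above; the model/tag pair is taken from the
# _PRETRAINED table in this file.
#
#     for name, tag in list_pretrained():
#         print(f'{name}:{tag}')
#
#     cfg = get_pretrained_cfg('ViT-B-32', 'laion2b_s34b_b79k')
#     checkpoint_path = download_pretrained(cfg)   # downloads via URL or the HF hub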
|
open_clip-main
|
src/open_clip/pretrained.py
|
__version__ = '2.11.0'
|
open_clip-main
|
src/open_clip/version.py
|
""" huggingface model adapter
Wraps HuggingFace transformers (https://github.com/huggingface/transformers) models for use as text towers in a CLIP model.
"""
import re
import torch
import torch.nn as nn
from torch import TensorType
try:
import transformers
from transformers import AutoModel, AutoTokenizer, AutoConfig, PretrainedConfig
from transformers.modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, \
BaseModelOutputWithPoolingAndCrossAttentions
except ImportError as e:
transformers = None
class BaseModelOutput:
pass
class PretrainedConfig:
pass
from .hf_configs import arch_dict
# utils
def _camel2snake(s):
return re.sub(r'(?<!^)(?=[A-Z])', '_', s).lower()
# TODO: ?last - for gpt-like models
_POOLERS = {}
def register_pooler(cls):
"""Decorator registering pooler class"""
_POOLERS[_camel2snake(cls.__name__)] = cls
return cls
@register_pooler
class MeanPooler(nn.Module):
"""Mean pooling"""
def forward(self, x: BaseModelOutput, attention_mask: TensorType):
masked_output = x.last_hidden_state * attention_mask.unsqueeze(-1)
return masked_output.sum(dim=1) / attention_mask.sum(-1, keepdim=True)
@register_pooler
class MaxPooler(nn.Module):
"""Max pooling"""
def forward(self, x: BaseModelOutput, attention_mask: TensorType):
        # mask out padded positions (attention_mask == 0) before taking the max
        masked_output = x.last_hidden_state.masked_fill(attention_mask.unsqueeze(-1) == 0, -torch.inf)
return masked_output.max(1).values
@register_pooler
class ClsPooler(nn.Module):
"""CLS token pooling"""
def __init__(self, use_pooler_output=True):
super().__init__()
self.cls_token_position = 0
self.use_pooler_output = use_pooler_output
def forward(self, x: BaseModelOutput, attention_mask: TensorType):
if (self.use_pooler_output and
isinstance(x, (BaseModelOutputWithPooling, BaseModelOutputWithPoolingAndCrossAttentions)) and
(x.pooler_output is not None)
):
return x.pooler_output
return x.last_hidden_state[:, self.cls_token_position, :]
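# Editor-added hedged illustration (not part of the original file): register_pooler keys
# each pooler by its snake_case class name, so a custom pooler (e.g. for GPT-like models,
# per the TODO above) could be added and then selected via pooler_type='last_token_pooler':
#
#     @register_pooler
#     class LastTokenPooler(nn.Module):
#         """Pool by taking the hidden state of the last non-padded token."""
#         def forward(self, x: BaseModelOutput, attention_mask: TensorType):
#             last_idx = attention_mask.sum(dim=1) - 1
#             batch_idx = torch.arange(x.last_hidden_state.shape[0], device=last_idx.device)
#             return x.last_hidden_state[batch_idx, last_idx]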
class HFTextEncoder(nn.Module):
"""HuggingFace model adapter"""
output_tokens: torch.jit.Final[bool]
def __init__(
self,
model_name_or_path: str,
output_dim: int,
config: PretrainedConfig = None,
pooler_type: str = None,
proj: str = None,
pretrained: bool = True,
output_tokens: bool = False,
):
super().__init__()
self.output_tokens = output_tokens
self.output_dim = output_dim
# TODO: find better way to get this information
uses_transformer_pooler = (pooler_type == "cls_pooler")
if transformers is None:
raise RuntimeError("Please `pip install transformers` to use pre-trained HuggingFace models")
if config is None:
self.config = AutoConfig.from_pretrained(model_name_or_path)
create_func, model_args = (AutoModel.from_pretrained, model_name_or_path) if pretrained else (
AutoModel.from_config, self.config)
            # TODO: do all model configs have this attribute? PretrainedConfig does, so presumably yes.
if hasattr(self.config, "is_encoder_decoder") and self.config.is_encoder_decoder:
self.transformer = create_func(model_args)
self.transformer = self.transformer.encoder
else:
self.transformer = create_func(model_args, add_pooling_layer=uses_transformer_pooler)
else:
self.config = config
self.transformer = AutoModel.from_config(config)
if pooler_type is None: # get default arch pooler
pooler_type = (arch_dict[self.config.model_type]["pooler"])
self.pooler = _POOLERS[pooler_type]()
d_model = getattr(self.config, arch_dict[self.config.model_type]["config_names"]["width"])
if (d_model == output_dim) and (proj is None): # do we always need a proj?
self.proj = nn.Identity()
elif proj == 'linear':
self.proj = nn.Linear(d_model, output_dim, bias=False)
elif proj == 'mlp':
hidden_size = (d_model + output_dim) // 2
self.proj = nn.Sequential(
nn.Linear(d_model, hidden_size, bias=False),
nn.GELU(),
nn.Linear(hidden_size, output_dim, bias=False),
)
def forward(self, x: TensorType):
attn_mask = (x != self.config.pad_token_id).long()
out = self.transformer(input_ids=x, attention_mask=attn_mask)
pooled_out = self.pooler(out, attn_mask)
projected = self.proj(pooled_out)
seq_len = out.last_hidden_state.shape[1]
tokens = (
out.last_hidden_state[:, torch.arange(seq_len) != self.pooler.cls_token_position, :]
if type(self.pooler) == ClsPooler
else out.last_hidden_state
)
if self.output_tokens:
return projected, tokens
return projected
def lock(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):
if not unlocked_layers: # full freezing
for n, p in self.transformer.named_parameters():
p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
return
encoder = self.transformer.encoder if hasattr(self.transformer, 'encoder') else self.transformer
layer_list = getattr(encoder, arch_dict[self.config.model_type]["config_names"]["layer_attr"])
print(f"Unlocking {unlocked_layers}/{len(layer_list) + 1} layers of hf model")
embeddings = getattr(
self.transformer, arch_dict[self.config.model_type]["config_names"]["token_embeddings_attr"])
modules = [embeddings, *layer_list][:-unlocked_layers]
# freeze layers
for module in modules:
for n, p in module.named_parameters():
p.requires_grad = (not freeze_layer_norm) if "LayerNorm" in n.split(".") else False
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.transformer.gradient_checkpointing_enable()
def init_parameters(self):
pass
|
open_clip-main
|
src/open_clip/hf_model.py
|
OPENAI_DATASET_MEAN = (0.48145466, 0.4578275, 0.40821073)
OPENAI_DATASET_STD = (0.26862954, 0.26130258, 0.27577711)
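# Editor-added hedged note: these are the channel-wise statistics used to normalize images
# for OpenAI-pretrained CLIP weights, e.g. with torchvision:
#
#     from torchvision import transforms
#     normalize = transforms.Normalize(mean=OPENAI_DATASET_MEAN, std=OPENAI_DATASET_STD)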
|
open_clip-main
|
src/open_clip/constants.py
|
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .factory import create_model, create_model_and_transforms, create_model_from_pretrained, get_tokenizer, create_loss
from .factory import list_models, add_model_config, get_model_config, load_checkpoint
from .loss import ClipLoss, CoCaLoss
from .model import CLIP, CustomTextCLIP, CLIPTextCfg, CLIPVisionCfg,\
convert_weights_to_lp, convert_weights_to_fp16, trace_model, get_cast_dtype
from .coca_model import CoCa
from .openai import load_openai_model, list_openai_models
from .pretrained import list_pretrained, list_pretrained_models_by_tag, list_pretrained_tags_by_model,\
get_pretrained_url, download_pretrained_from_url, is_pretrained_cfg, get_pretrained_cfg, download_pretrained
from .tokenizer import SimpleTokenizer, tokenize, decode
from .transform import image_transform, AugmentationCfg
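# --- Editor's illustrative sketch (not part of the original __init__.py) ---
# How a downstream script would typically use the API re-exported above. The model name
# 'ViT-B-32', the pretrained tag 'laion2b_s34b_b79k', and 'cat.jpg' are assumed example
# values; any pair returned by open_clip.list_pretrained() would do.
import torch
from PIL import Image
import open_clip

model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-32', pretrained='laion2b_s34b_b79k')
tokenizer = open_clip.get_tokenizer('ViT-B-32')

image = preprocess(Image.open('cat.jpg')).unsqueeze(0)
text = tokenizer(['a diagram', 'a dog', 'a cat'])

with torch.no_grad():
    image_features = model.encode_image(image, normalize=True)
    text_features = model.encode_text(text, normalize=True)
    text_probs = (100.0 * image_features @ text_features.T).softmax(dim=-1)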
|
open_clip-main
|
src/open_clip/__init__.py
|
# HF architecture dict:
arch_dict = {
# https://huggingface.co/docs/transformers/model_doc/roberta#roberta
"roberta": {
"config_names": {
"context_length": "max_position_embeddings",
"vocab_size": "vocab_size",
"width": "hidden_size",
"heads": "num_attention_heads",
"layers": "num_hidden_layers",
"layer_attr": "layer",
"token_embeddings_attr": "embeddings"
},
"pooler": "mean_pooler",
},
# https://huggingface.co/docs/transformers/model_doc/xlm-roberta#transformers.XLMRobertaConfig
"xlm-roberta": {
"config_names": {
"context_length": "max_position_embeddings",
"vocab_size": "vocab_size",
"width": "hidden_size",
"heads": "num_attention_heads",
"layers": "num_hidden_layers",
"layer_attr": "layer",
"token_embeddings_attr": "embeddings"
},
"pooler": "mean_pooler",
},
# https://huggingface.co/docs/transformers/model_doc/mt5#mt5
"mt5": {
"config_names": {
# unlimited seqlen
# https://github.com/google-research/text-to-text-transfer-transformer/issues/273
# https://github.com/huggingface/transformers/blob/v4.24.0/src/transformers/models/t5/modeling_t5.py#L374
"context_length": "",
"vocab_size": "vocab_size",
"width": "d_model",
"heads": "num_heads",
"layers": "num_layers",
"layer_attr": "block",
"token_embeddings_attr": "embed_tokens"
},
"pooler": "mean_pooler",
},
}
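# --- Editor's illustrative sketch (not part of the original hf_configs.py) ---
# The dict above stores attribute *names*; consumers (e.g. hf_model.py) look them up per
# model_type and read the actual value off a transformers config. "roberta-base" is an
# assumed example checkpoint.
from transformers import AutoConfig

hf_config = AutoConfig.from_pretrained("roberta-base")
width_attr = arch_dict[hf_config.model_type]["config_names"]["width"]   # -> "hidden_size"
d_model = getattr(hf_config, width_attr)                                # -> 768 for roberta-base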
|
open_clip-main
|
src/open_clip/hf_configs.py
|
from collections import OrderedDict
import torch
from torch import nn
from torch.nn import functional as F
from open_clip.utils import freeze_batch_norm_2d
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1):
super().__init__()
# all conv layers have stride 1. an avgpool is performed after the second convolution when stride > 1
self.conv1 = nn.Conv2d(inplanes, planes, 1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(planes, planes, 3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.act2 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(stride) if stride > 1 else nn.Identity()
self.conv3 = nn.Conv2d(planes, planes * self.expansion, 1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * self.expansion)
self.act3 = nn.ReLU(inplace=True)
self.downsample = None
self.stride = stride
if stride > 1 or inplanes != planes * Bottleneck.expansion:
# downsampling layer is prepended with an avgpool, and the subsequent convolution has stride 1
self.downsample = nn.Sequential(OrderedDict([
("-1", nn.AvgPool2d(stride)),
("0", nn.Conv2d(inplanes, planes * self.expansion, 1, stride=1, bias=False)),
("1", nn.BatchNorm2d(planes * self.expansion))
]))
def forward(self, x: torch.Tensor):
identity = x
out = self.act1(self.bn1(self.conv1(x)))
out = self.act2(self.bn2(self.conv2(out)))
out = self.avgpool(out)
out = self.bn3(self.conv3(out))
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.act3(out)
return out
class AttentionPool2d(nn.Module):
def __init__(self, spacial_dim: int, embed_dim: int, num_heads: int, output_dim: int = None):
super().__init__()
self.positional_embedding = nn.Parameter(torch.randn(spacial_dim ** 2 + 1, embed_dim) / embed_dim ** 0.5)
self.k_proj = nn.Linear(embed_dim, embed_dim)
self.q_proj = nn.Linear(embed_dim, embed_dim)
self.v_proj = nn.Linear(embed_dim, embed_dim)
self.c_proj = nn.Linear(embed_dim, output_dim or embed_dim)
self.num_heads = num_heads
def forward(self, x):
x = x.reshape(x.shape[0], x.shape[1], x.shape[2] * x.shape[3]).permute(2, 0, 1) # NCHW -> (HW)NC
x = torch.cat([x.mean(dim=0, keepdim=True), x], dim=0) # (HW+1)NC
x = x + self.positional_embedding[:, None, :].to(x.dtype) # (HW+1)NC
x, _ = F.multi_head_attention_forward(
query=x, key=x, value=x,
embed_dim_to_check=x.shape[-1],
num_heads=self.num_heads,
q_proj_weight=self.q_proj.weight,
k_proj_weight=self.k_proj.weight,
v_proj_weight=self.v_proj.weight,
in_proj_weight=None,
in_proj_bias=torch.cat([self.q_proj.bias, self.k_proj.bias, self.v_proj.bias]),
bias_k=None,
bias_v=None,
add_zero_attn=False,
dropout_p=0.,
out_proj_weight=self.c_proj.weight,
out_proj_bias=self.c_proj.bias,
use_separate_proj_weight=True,
training=self.training,
need_weights=False
)
return x[0]
class ModifiedResNet(nn.Module):
"""
A ResNet class that is similar to torchvision's but contains the following changes:
- There are now 3 "stem" convolutions as opposed to 1, with an average pool instead of a max pool.
- Performs anti-aliasing strided convolutions, where an avgpool is prepended to convolutions with stride > 1
- The final pooling layer is a QKV attention instead of an average pool
"""
def __init__(self, layers, output_dim, heads, image_size=224, width=64):
super().__init__()
self.output_dim = output_dim
self.image_size = image_size
# the 3-layer stem
self.conv1 = nn.Conv2d(3, width // 2, kernel_size=3, stride=2, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(width // 2)
self.act1 = nn.ReLU(inplace=True)
self.conv2 = nn.Conv2d(width // 2, width // 2, kernel_size=3, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(width // 2)
self.act2 = nn.ReLU(inplace=True)
self.conv3 = nn.Conv2d(width // 2, width, kernel_size=3, padding=1, bias=False)
self.bn3 = nn.BatchNorm2d(width)
self.act3 = nn.ReLU(inplace=True)
self.avgpool = nn.AvgPool2d(2)
# residual layers
self._inplanes = width # this is a *mutable* variable used during construction
self.layer1 = self._make_layer(width, layers[0])
self.layer2 = self._make_layer(width * 2, layers[1], stride=2)
self.layer3 = self._make_layer(width * 4, layers[2], stride=2)
self.layer4 = self._make_layer(width * 8, layers[3], stride=2)
embed_dim = width * 32 # the ResNet feature dimension
self.attnpool = AttentionPool2d(image_size // 32, embed_dim, heads, output_dim)
self.init_parameters()
def _make_layer(self, planes, blocks, stride=1):
layers = [Bottleneck(self._inplanes, planes, stride)]
self._inplanes = planes * Bottleneck.expansion
for _ in range(1, blocks):
layers.append(Bottleneck(self._inplanes, planes))
return nn.Sequential(*layers)
def init_parameters(self):
if self.attnpool is not None:
std = self.attnpool.c_proj.in_features ** -0.5
nn.init.normal_(self.attnpool.q_proj.weight, std=std)
nn.init.normal_(self.attnpool.k_proj.weight, std=std)
nn.init.normal_(self.attnpool.v_proj.weight, std=std)
nn.init.normal_(self.attnpool.c_proj.weight, std=std)
for resnet_block in [self.layer1, self.layer2, self.layer3, self.layer4]:
for name, param in resnet_block.named_parameters():
if name.endswith("bn3.weight"):
nn.init.zeros_(param)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
assert unlocked_groups == 0, 'partial locking not currently supported for this model'
for param in self.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
# FIXME support for non-transformer
pass
def stem(self, x):
x = self.act1(self.bn1(self.conv1(x)))
x = self.act2(self.bn2(self.conv2(x)))
x = self.act3(self.bn3(self.conv3(x)))
x = self.avgpool(x)
return x
def forward(self, x):
x = self.stem(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.attnpool(x)
return x
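# --- Editor's illustrative sketch (not part of the original modified_resnet.py) ---
# An RN50-style instantiation of the tower above. The (3, 4, 6, 3) layer counts, 32 heads
# and 1024-dim output follow the usual CLIP RN50 layout and are assumptions here.
import torch

rn50_visual = ModifiedResNet(layers=(3, 4, 6, 3), output_dim=1024, heads=32, image_size=224, width=64)
with torch.no_grad():
    feats = rn50_visual(torch.randn(2, 3, 224, 224))   # -> [2, 1024]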
|
open_clip-main
|
src/open_clip/modified_resnet.py
|
import json
import logging
import os
import pathlib
import re
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Tuple, Union
import torch
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
from .model import CLIP, CustomTextCLIP, convert_weights_to_lp, convert_to_custom_text_state_dict,\
resize_pos_embed, get_cast_dtype
from .coca_model import CoCa
from .loss import ClipLoss, CoCaLoss
from .openai import load_openai_model
from .pretrained import is_pretrained_cfg, get_pretrained_cfg, download_pretrained, list_pretrained_tags_by_model, download_pretrained_from_hf
from .transform import image_transform, AugmentationCfg
from .tokenizer import HFTokenizer, tokenize
HF_HUB_PREFIX = 'hf-hub:'
_MODEL_CONFIG_PATHS = [Path(__file__).parent / f"model_configs/"]
_MODEL_CONFIGS = {}  # dictionary (model_name: config) of model architecture configs
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def _rescan_model_configs():
global _MODEL_CONFIGS
config_ext = ('.json',)
config_files = []
for config_path in _MODEL_CONFIG_PATHS:
if config_path.is_file() and config_path.suffix in config_ext:
config_files.append(config_path)
elif config_path.is_dir():
for ext in config_ext:
config_files.extend(config_path.glob(f'*{ext}'))
for cf in config_files:
with open(cf, 'r') as f:
model_cfg = json.load(f)
if all(a in model_cfg for a in ('embed_dim', 'vision_cfg', 'text_cfg')):
_MODEL_CONFIGS[cf.stem] = model_cfg
_MODEL_CONFIGS = {k: v for k, v in sorted(_MODEL_CONFIGS.items(), key=lambda x: _natural_key(x[0]))}
_rescan_model_configs() # initial populate of model config registry
def list_models():
""" enumerate available model architectures based on config files """
return list(_MODEL_CONFIGS.keys())
def add_model_config(path):
""" add model config path or file and update registry """
if not isinstance(path, Path):
path = Path(path)
_MODEL_CONFIG_PATHS.append(path)
_rescan_model_configs()
def get_model_config(model_name):
if model_name in _MODEL_CONFIGS:
return deepcopy(_MODEL_CONFIGS[model_name])
else:
return None
def get_tokenizer(model_name):
if model_name.startswith(HF_HUB_PREFIX):
tokenizer = HFTokenizer(model_name[len(HF_HUB_PREFIX):])
else:
config = get_model_config(model_name)
tokenizer = HFTokenizer(config['text_cfg']['hf_tokenizer_name']) if 'hf_tokenizer_name' in config['text_cfg'] else tokenize
return tokenizer
def load_state_dict(checkpoint_path: str, map_location='cpu'):
checkpoint = torch.load(checkpoint_path, map_location=map_location)
if isinstance(checkpoint, dict) and 'state_dict' in checkpoint:
state_dict = checkpoint['state_dict']
else:
state_dict = checkpoint
if next(iter(state_dict.items()))[0].startswith('module'):
state_dict = {k[7:]: v for k, v in state_dict.items()}
return state_dict
def load_checkpoint(model, checkpoint_path, strict=True):
state_dict = load_state_dict(checkpoint_path)
# detect old format and make compatible with new format
if 'positional_embedding' in state_dict and not hasattr(model, 'positional_embedding'):
state_dict = convert_to_custom_text_state_dict(state_dict)
resize_pos_embed(state_dict, model)
incompatible_keys = model.load_state_dict(state_dict, strict=strict)
return incompatible_keys
def create_model(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
):
has_hf_hub_prefix = model_name.startswith(HF_HUB_PREFIX)
if has_hf_hub_prefix:
model_id = model_name[len(HF_HUB_PREFIX):]
checkpoint_path = download_pretrained_from_hf(model_id, cache_dir=cache_dir)
config_path = download_pretrained_from_hf(model_id, filename='open_clip_config.json', cache_dir=cache_dir)
with open(config_path, 'r', encoding='utf-8') as f:
config = json.load(f)
pretrained_cfg = config['preprocess_cfg']
model_cfg = config['model_cfg']
else:
model_name = model_name.replace('/', '-') # for callers using old naming with / in ViT names
checkpoint_path = None
pretrained_cfg = {}
model_cfg = None
if isinstance(device, str):
device = torch.device(device)
if pretrained and pretrained.lower() == 'openai':
logging.info(f'Loading pretrained {model_name} from OpenAI.')
model = load_openai_model(
model_name,
precision=precision,
device=device,
jit=jit,
cache_dir=cache_dir,
)
else:
model_cfg = model_cfg or get_model_config(model_name)
if model_cfg is not None:
logging.info(f'Loaded {model_name} model config.')
else:
logging.error(f'Model config for {model_name} not found; available models {list_models()}.')
raise RuntimeError(f'Model config for {model_name} not found.')
if force_quick_gelu:
# override for use of QuickGELU on non-OpenAI transformer models
model_cfg["quick_gelu"] = True
if force_patch_dropout is not None:
# override the default patch dropout value
model_cfg["vision_cfg"]["patch_dropout"] = force_patch_dropout
if force_image_size is not None:
# override model config's image size
model_cfg["vision_cfg"]["image_size"] = force_image_size
if pretrained_image:
if 'timm_model_name' in model_cfg.get('vision_cfg', {}):
# pretrained weight loading for timm models set via vision_cfg
model_cfg['vision_cfg']['timm_model_pretrained'] = True
else:
assert False, 'pretrained image towers currently only supported for timm models'
cast_dtype = get_cast_dtype(precision)
is_hf_model = 'hf_model_name' in model_cfg.get('text_cfg', {})
custom_text = model_cfg.pop('custom_text', False) or force_custom_text or is_hf_model
if custom_text:
if is_hf_model:
model_cfg['text_cfg']['hf_model_pretrained'] = pretrained_hf
if "coca" in model_name:
model = CoCa(**model_cfg, cast_dtype=cast_dtype)
else:
model = CustomTextCLIP(**model_cfg, cast_dtype=cast_dtype)
else:
model = CLIP(**model_cfg, cast_dtype=cast_dtype)
if pretrained:
checkpoint_path = ''
pretrained_cfg = get_pretrained_cfg(model_name, pretrained)
if pretrained_cfg:
checkpoint_path = download_pretrained(pretrained_cfg, cache_dir=cache_dir)
elif os.path.exists(pretrained):
checkpoint_path = pretrained
if checkpoint_path:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
else:
error_str = (
f'Pretrained weights ({pretrained}) not found for model {model_name}. '
f'Available pretrained tags: {list_pretrained_tags_by_model(model_name)}.')
logging.warning(error_str)
raise RuntimeError(error_str)
elif has_hf_hub_prefix:
logging.info(f'Loading pretrained {model_name} weights ({pretrained}).')
load_checkpoint(model, checkpoint_path)
model.to(device=device)
if precision in ("fp16", "bf16"):
convert_weights_to_lp(model, dtype=torch.bfloat16 if precision == 'bf16' else torch.float16)
# set image mean / std metadata from pretrained_cfg if available, or use defaults
model.visual.image_mean = pretrained_cfg.get('mean', None) or OPENAI_DATASET_MEAN
model.visual.image_std = pretrained_cfg.get('std', None) or OPENAI_DATASET_STD
# to always output dict even if it is clip
if output_dict and hasattr(model, "output_dict"):
model.output_dict = True
if jit:
model = torch.jit.script(model)
return model
def create_loss(args):
if "coca" in args.model.lower():
return CoCaLoss(
caption_loss_weight=args.coca_caption_loss_weight,
clip_loss_weight=args.coca_contrastive_loss_weight,
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
return ClipLoss(
local_loss=args.local_loss,
gather_with_grad=args.gather_with_grad,
cache_labels=True,
rank=args.rank,
world_size=args.world_size,
use_horovod=args.horovod,
)
def create_model_and_transforms(
model_name: str,
pretrained: Optional[str] = None,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_patch_dropout: Optional[float] = None,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
pretrained_image: bool = False,
pretrained_hf: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
cache_dir: Optional[str] = None,
output_dict: Optional[bool] = None,
):
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_patch_dropout=force_patch_dropout,
force_image_size=force_image_size,
pretrained_image=pretrained_image,
pretrained_hf=pretrained_hf,
cache_dir=cache_dir,
output_dict=output_dict,
)
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess_train = image_transform(
model.visual.image_size,
is_train=True,
mean=image_mean,
std=image_std,
aug_cfg=aug_cfg,
)
preprocess_val = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess_train, preprocess_val
def create_model_from_pretrained(
model_name: str,
pretrained: str,
precision: str = 'fp32',
device: Union[str, torch.device] = 'cpu',
jit: bool = False,
force_quick_gelu: bool = False,
force_custom_text: bool = False,
force_image_size: Optional[Union[int, Tuple[int, int]]] = None,
return_transform: bool = True,
image_mean: Optional[Tuple[float, ...]] = None,
image_std: Optional[Tuple[float, ...]] = None,
cache_dir: Optional[str] = None,
):
if not is_pretrained_cfg(model_name, pretrained) and not os.path.exists(pretrained):
raise RuntimeError(
f'{pretrained} is not a valid pretrained cfg or checkpoint for {model_name}.'
f' Use open_clip.list_pretrained() to find one.')
model = create_model(
model_name,
pretrained,
precision=precision,
device=device,
jit=jit,
force_quick_gelu=force_quick_gelu,
force_custom_text=force_custom_text,
force_image_size=force_image_size,
cache_dir=cache_dir,
)
if not return_transform:
return model
image_mean = image_mean or getattr(model.visual, 'image_mean', None)
image_std = image_std or getattr(model.visual, 'image_std', None)
preprocess = image_transform(
model.visual.image_size,
is_train=False,
mean=image_mean,
std=image_std,
)
return model, preprocess
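# --- Editor's illustrative sketch (not part of the original factory.py) ---
# The model-config registry defined above can be extended at runtime. "my_configs/" is a
# placeholder directory of *.json files with the same embed_dim / vision_cfg / text_cfg
# structure as src/open_clip/model_configs/.
add_model_config("my_configs/")        # adds the path and rescans into _MODEL_CONFIGS
print(list_models())                   # all registered architecture names
cfg = get_model_config("ViT-B-32")     # deep-copied config dict, or None if unknown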
|
open_clip-main
|
src/open_clip/factory.py
|
""" CLIP Model
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
from dataclasses import dataclass
import logging
import math
from typing import Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from torch import nn
from torch.utils.checkpoint import checkpoint
from .hf_model import HFTextEncoder
from .modified_resnet import ModifiedResNet
from .timm_model import TimmModel
from .transformer import LayerNormFp32, LayerNorm, QuickGELU, Attention, VisionTransformer, TextTransformer
from .utils import to_2tuple
@dataclass
class CLIPVisionCfg:
layers: Union[Tuple[int, int, int, int], int] = 12
width: int = 768
head_width: int = 64
mlp_ratio: float = 4.0
patch_size: int = 16
image_size: Union[Tuple[int, int], int] = 224
ls_init_value: Optional[float] = None # layer scale initial value
patch_dropout: float = 0. # what fraction of patches to dropout during training (0 would mean disabled and no patches dropped) - 0.5 to 0.75 recommended in the paper for optimal results
global_average_pool: bool = False # whether to global average pool the last embedding layer, instead of using CLS token (https://arxiv.org/abs/2205.01580)
attentional_pool: bool = False # whether to use attentional pooler in the last embedding layer
n_queries: int = 256 # n_queries for attentional pooler
attn_pooler_heads: int = 8 # n heads for attentional_pooling
timm_model_name: str = None # a valid model name overrides layers, width, patch_size
timm_model_pretrained: bool = False # use (imagenet) pretrained weights for named model
timm_pool: str = 'avg' # feature pooling for timm model ('abs_attn', 'rot_attn', 'avg', '')
timm_proj: str = 'linear' # linear projection for timm model output ('linear', 'mlp', '')
timm_proj_bias: bool = False # enable bias final projection
timm_drop: float = 0. # head dropout
timm_drop_path: Optional[float] = None # backbone stochastic depth
output_tokens: bool = False
@dataclass
class CLIPTextCfg:
context_length: int = 77
vocab_size: int = 49408
width: int = 512
heads: int = 8
layers: int = 12
ls_init_value: Optional[float] = None # layer scale initial value
hf_model_name: str = None
hf_tokenizer_name: str = None
hf_model_pretrained: bool = True
proj: str = 'mlp'
pooler_type: str = 'mean_pooler'
embed_cls: bool = False
pad_id: int = 0
output_tokens: bool = False
def get_cast_dtype(precision: str):
cast_dtype = None
if precision == 'bf16':
cast_dtype = torch.bfloat16
elif precision == 'fp16':
cast_dtype = torch.float16
return cast_dtype
def _build_vision_tower(
embed_dim: int,
vision_cfg: CLIPVisionCfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None
):
if isinstance(vision_cfg, dict):
vision_cfg = CLIPVisionCfg(**vision_cfg)
# OpenAI models are pretrained w/ QuickGELU but native nn.GELU is both faster and more
# memory efficient in recent PyTorch releases (>= 1.10).
# NOTE: timm models always use native GELU regardless of quick_gelu flag.
act_layer = QuickGELU if quick_gelu else nn.GELU
if vision_cfg.timm_model_name:
visual = TimmModel(
vision_cfg.timm_model_name,
pretrained=vision_cfg.timm_model_pretrained,
pool=vision_cfg.timm_pool,
proj=vision_cfg.timm_proj,
proj_bias=vision_cfg.timm_proj_bias,
drop=vision_cfg.timm_drop,
drop_path=vision_cfg.timm_drop_path,
embed_dim=embed_dim,
image_size=vision_cfg.image_size,
)
act_layer = nn.GELU # so that text transformer doesn't use QuickGELU w/ timm models
elif isinstance(vision_cfg.layers, (tuple, list)):
vision_heads = vision_cfg.width * 32 // vision_cfg.head_width
visual = ModifiedResNet(
layers=vision_cfg.layers,
output_dim=embed_dim,
heads=vision_heads,
image_size=vision_cfg.image_size,
width=vision_cfg.width,
)
else:
vision_heads = vision_cfg.width // vision_cfg.head_width
norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
visual = VisionTransformer(
image_size=vision_cfg.image_size,
patch_size=vision_cfg.patch_size,
width=vision_cfg.width,
layers=vision_cfg.layers,
heads=vision_heads,
mlp_ratio=vision_cfg.mlp_ratio,
ls_init_value=vision_cfg.ls_init_value,
patch_dropout=vision_cfg.patch_dropout,
global_average_pool=vision_cfg.global_average_pool,
attentional_pool=vision_cfg.attentional_pool,
n_queries=vision_cfg.n_queries,
attn_pooler_heads=vision_cfg.attn_pooler_heads,
output_tokens=vision_cfg.output_tokens,
output_dim=embed_dim,
act_layer=act_layer,
norm_layer=norm_layer,
)
return visual
def _build_text_tower(
embed_dim: int,
text_cfg: CLIPTextCfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
):
if isinstance(text_cfg, dict):
text_cfg = CLIPTextCfg(**text_cfg)
if text_cfg.hf_model_name:
text = HFTextEncoder(
text_cfg.hf_model_name,
output_dim=embed_dim,
proj=text_cfg.proj,
pooler_type=text_cfg.pooler_type,
pretrained=text_cfg.hf_model_pretrained,
output_tokens=text_cfg.output_tokens,
)
else:
act_layer = QuickGELU if quick_gelu else nn.GELU
norm_layer = LayerNormFp32 if cast_dtype in (torch.float16, torch.bfloat16) else LayerNorm
text = TextTransformer(
context_length=text_cfg.context_length,
vocab_size=text_cfg.vocab_size,
width=text_cfg.width,
heads=text_cfg.heads,
layers=text_cfg.layers,
ls_init_value=text_cfg.ls_init_value,
output_dim=embed_dim,
embed_cls=text_cfg.embed_cls,
output_tokens=text_cfg.output_tokens,
pad_id=text_cfg.pad_id,
act_layer=act_layer,
norm_layer=norm_layer,
)
return text
class CLIP(nn.Module):
output_dict: torch.jit.Final[bool]
def __init__(
self,
embed_dim: int,
vision_cfg: CLIPVisionCfg,
text_cfg: CLIPTextCfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
output_dict: bool = False,
):
super().__init__()
self.output_dict = output_dict
self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
self.transformer = text.transformer
self.vocab_size = text.vocab_size
self.token_embedding = text.token_embedding
self.positional_embedding = text.positional_embedding
self.ln_final = text.ln_final
self.text_projection = text.text_projection
self.register_buffer('attn_mask', text.attn_mask, persistent=False)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.visual.set_grad_checkpointing(enable)
self.transformer.grad_checkpointing = enable
def encode_image(self, image, normalize: bool = False):
features = self.visual(image)
return F.normalize(features, dim=-1) if normalize else features
def encode_text(self, text, normalize: bool = False):
cast_dtype = self.transformer.get_cast_dtype()
x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
x = x + self.positional_embedding.to(cast_dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, attn_mask=self.attn_mask)
x = x.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x) # [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
x = x[torch.arange(x.shape[0]), text.argmax(dim=-1)] @ self.text_projection
return F.normalize(x, dim=-1) if normalize else x
def forward(self, image, text):
image_features = self.encode_image(image, normalize=True)
text_features = self.encode_text(text, normalize=True)
if self.output_dict:
return {
"image_features": image_features,
"text_features": text_features,
"logit_scale": self.logit_scale.exp()
}
return image_features, text_features, self.logit_scale.exp()
class CustomTextCLIP(nn.Module):
output_dict: torch.jit.Final[bool]
def __init__(
self,
embed_dim: int,
vision_cfg: CLIPVisionCfg,
text_cfg: CLIPTextCfg,
quick_gelu: bool = False,
cast_dtype: Optional[torch.dtype] = None,
output_dict: bool = False,
):
super().__init__()
self.output_dict = output_dict
self.visual = _build_vision_tower(embed_dim, vision_cfg, quick_gelu, cast_dtype)
self.text = _build_text_tower(embed_dim, text_cfg, quick_gelu, cast_dtype)
self.logit_scale = nn.Parameter(torch.ones([]) * np.log(1 / 0.07))
def lock_image_tower(self, unlocked_groups=0, freeze_bn_stats=False):
# lock image tower as per LiT - https://arxiv.org/abs/2111.07991
self.visual.lock(unlocked_groups=unlocked_groups, freeze_bn_stats=freeze_bn_stats)
def lock_text_tower(self, unlocked_layers: int = 0, freeze_layer_norm: bool = True):
self.text.lock(unlocked_layers, freeze_layer_norm)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.visual.set_grad_checkpointing(enable)
self.text.set_grad_checkpointing(enable)
def encode_image(self, image, normalize: bool = False):
features = self.visual(image)
return F.normalize(features, dim=-1) if normalize else features
def encode_text(self, text, normalize: bool = False):
features = self.text(text)
return F.normalize(features, dim=-1) if normalize else features
def forward(self, image, text):
image_features = self.encode_image(image, normalize=True)
text_features = self.encode_text(text, normalize=True)
if self.output_dict:
return {
"image_features": image_features,
"text_features": text_features,
"logit_scale": self.logit_scale.exp()
}
return image_features, text_features, self.logit_scale.exp()
def convert_weights_to_lp(model: nn.Module, dtype=torch.float16):
"""Convert applicable model parameters to low-precision (bf16 or fp16)"""
def _convert_weights(l):
if isinstance(l, (nn.Conv1d, nn.Conv2d, nn.Linear)):
l.weight.data = l.weight.data.to(dtype)
if l.bias is not None:
l.bias.data = l.bias.data.to(dtype)
if isinstance(l, (nn.MultiheadAttention, Attention)):
for attr in [*[f"{s}_proj_weight" for s in ["in", "q", "k", "v"]], "in_proj_bias", "bias_k", "bias_v"]:
tensor = getattr(l, attr)
if tensor is not None:
tensor.data = tensor.data.to(dtype)
for name in ["text_projection", "proj"]:
if hasattr(l, name):
attr = getattr(l, name)
if attr is not None:
attr.data = attr.data.to(dtype)
model.apply(_convert_weights)
convert_weights_to_fp16 = convert_weights_to_lp # backwards compat
# used to maintain checkpoint compatibility
def convert_to_custom_text_state_dict(state_dict: dict):
if 'text_projection' in state_dict:
# old format state_dict, move text tower -> .text
new_state_dict = {}
for k, v in state_dict.items():
if any(k.startswith(p) for p in (
'text_projection',
'positional_embedding',
'token_embedding',
'transformer',
'ln_final',
)):
k = 'text.' + k
new_state_dict[k] = v
return new_state_dict
return state_dict
def build_model_from_openai_state_dict(
state_dict: dict,
quick_gelu=True,
cast_dtype=torch.float16,
):
vit = "visual.proj" in state_dict
if vit:
vision_width = state_dict["visual.conv1.weight"].shape[0]
vision_layers = len(
[k for k in state_dict.keys() if k.startswith("visual.") and k.endswith(".attn.in_proj_weight")])
vision_patch_size = state_dict["visual.conv1.weight"].shape[-1]
grid_size = round((state_dict["visual.positional_embedding"].shape[0] - 1) ** 0.5)
image_size = vision_patch_size * grid_size
else:
counts: list = [
len(set(k.split(".")[2] for k in state_dict if k.startswith(f"visual.layer{b}"))) for b in [1, 2, 3, 4]]
vision_layers = tuple(counts)
vision_width = state_dict["visual.layer1.0.conv1.weight"].shape[0]
output_width = round((state_dict["visual.attnpool.positional_embedding"].shape[0] - 1) ** 0.5)
vision_patch_size = None
assert output_width ** 2 + 1 == state_dict["visual.attnpool.positional_embedding"].shape[0]
image_size = output_width * 32
embed_dim = state_dict["text_projection"].shape[1]
context_length = state_dict["positional_embedding"].shape[0]
vocab_size = state_dict["token_embedding.weight"].shape[0]
transformer_width = state_dict["ln_final.weight"].shape[0]
transformer_heads = transformer_width // 64
transformer_layers = len(set(k.split(".")[2] for k in state_dict if k.startswith(f"transformer.resblocks")))
vision_cfg = CLIPVisionCfg(
layers=vision_layers,
width=vision_width,
patch_size=vision_patch_size,
image_size=image_size,
)
text_cfg = CLIPTextCfg(
context_length=context_length,
vocab_size=vocab_size,
width=transformer_width,
heads=transformer_heads,
layers=transformer_layers,
)
model = CLIP(
embed_dim,
vision_cfg=vision_cfg,
text_cfg=text_cfg,
quick_gelu=quick_gelu, # OpenAI models were trained with QuickGELU
cast_dtype=cast_dtype,
)
for key in ["input_resolution", "context_length", "vocab_size"]:
state_dict.pop(key, None)
convert_weights_to_fp16(model) # OpenAI state dicts are partially converted to float16
model.load_state_dict(state_dict)
return model.eval()
def trace_model(model, batch_size=256, device=torch.device('cpu')):
model.eval()
image_size = model.visual.image_size
example_images = torch.ones((batch_size, 3, image_size, image_size), device=device)
example_text = torch.zeros((batch_size, model.context_length), dtype=torch.int, device=device)
model = torch.jit.trace_module(
model,
inputs=dict(
forward=(example_images, example_text),
encode_text=(example_text,),
encode_image=(example_images,)
))
model.visual.image_size = image_size
return model
def resize_pos_embed(state_dict, model, interpolation: str = 'bicubic', antialias: bool = True):
# Rescale the grid of position embeddings when loading from state_dict
old_pos_embed = state_dict.get('visual.positional_embedding', None)
if old_pos_embed is None or not hasattr(model.visual, 'grid_size'):
return
grid_size = to_2tuple(model.visual.grid_size)
extra_tokens = 1 # FIXME detect different token configs (ie no class token, or more)
new_seq_len = grid_size[0] * grid_size[1] + extra_tokens
if new_seq_len == old_pos_embed.shape[0]:
return
if extra_tokens:
pos_emb_tok, pos_emb_img = old_pos_embed[:extra_tokens], old_pos_embed[extra_tokens:]
else:
pos_emb_tok, pos_emb_img = None, old_pos_embed
old_grid_size = to_2tuple(int(math.sqrt(len(pos_emb_img))))
logging.info('Resizing position embedding grid-size from %s to %s', old_grid_size, grid_size)
pos_emb_img = pos_emb_img.reshape(1, old_grid_size[0], old_grid_size[1], -1).permute(0, 3, 1, 2)
pos_emb_img = F.interpolate(
pos_emb_img,
size=grid_size,
mode=interpolation,
antialias=antialias,
align_corners=False,
)
pos_emb_img = pos_emb_img.permute(0, 2, 3, 1).reshape(1, grid_size[0] * grid_size[1], -1)[0]
if pos_emb_tok is not None:
new_pos_embed = torch.cat([pos_emb_tok, pos_emb_img], dim=0)
else:
new_pos_embed = pos_emb_img
state_dict['visual.positional_embedding'] = new_pos_embed
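# --- Editor's illustrative sketch (not part of the original model.py) ---
# Building a small CLIP directly from the config dataclasses above, bypassing the factory.
# All sizes are arbitrary illustration values, not a released configuration.
import torch

tiny_clip = CLIP(
    embed_dim=256,
    vision_cfg=CLIPVisionCfg(layers=6, width=256, head_width=64, patch_size=32, image_size=224),
    text_cfg=CLIPTextCfg(context_length=77, vocab_size=49408, width=256, heads=4, layers=6),
)
with torch.no_grad():
    img_f, txt_f, scale = tiny_clip(
        torch.randn(2, 3, 224, 224),
        torch.randint(0, 49408, (2, 77)),
    )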
|
open_clip-main
|
src/open_clip/model.py
|
from math import ceil
import torch
import torch.nn.functional as F
def exists(val):
return val is not None
def top_p(logits, thres=0.9):
# nucleus
sorted_logits, sorted_indices = torch.sort(logits, descending=True)
cum_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
sorted_indices_to_remove = cum_probs > (1 - thres)
sorted_indices_to_remove[:, 1:] = sorted_indices_to_remove[:, :-1].clone()
sorted_indices_to_remove[:, 0] = 0
sorted_logits[sorted_indices_to_remove] = float('-inf')
return sorted_logits.scatter(1, sorted_indices, sorted_logits)
def top_k(logits, thres=0.9):
k = ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
def top_a(logits, min_p_pow=2.0, min_p_ratio=0.02):
probs = F.softmax(logits, dim=-1)
limit = torch.pow(torch.max(probs), min_p_pow) * min_p_ratio
logits[probs < limit] = float('-inf')
logits[probs >= limit] = 1
return logits
def prepare_inputs_for_generation(input_ids, image_inputs, past=None, **kwargs):
if past:
input_ids = input_ids[:, -1].unsqueeze(-1)
attention_mask = kwargs.get("attention_mask", None)
position_ids = kwargs.get("position_ids", None)
if attention_mask is not None and position_ids is None:
# create position_ids on the fly for batch generation
position_ids = attention_mask.long().cumsum(-1) - 1
position_ids.masked_fill_(attention_mask == 0, 1)
else:
position_ids = None
return {
"text": input_ids,
"images": image_inputs,
"past_key_values": past,
"position_ids": position_ids,
"attention_mask": attention_mask,
}
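# --- Editor's illustrative sketch (not part of the original generation_utils.py) ---
# Typical use of the filtering helpers above when sampling the next token from a logits
# row; the 0.7 temperature is an arbitrary assumption.
import torch
import torch.nn.functional as F

logits = torch.randn(1, 50000)               # [batch, vocab]
filtered = top_k(logits, thres=0.9)          # keep only the top ~10% of the vocab
probs = F.softmax(filtered / 0.7, dim=-1)
next_token = torch.multinomial(probs, num_samples=1)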
|
open_clip-main
|
src/open_clip/generation_utils.py
|
""" CLIP tokenizer
Copied from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import gzip
import html
import os
from functools import lru_cache
from typing import Union, List
import ftfy
import regex as re
import torch
# https://stackoverflow.com/q/62691279
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"
@lru_cache()
def default_bpe():
return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz")
@lru_cache()
def bytes_to_unicode():
"""
Returns a mapping between utf-8 bytes and corresponding unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a significant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings,
and we avoid mapping to whitespace/control characters that the bpe code barfs on.
"""
bs = list(range(ord("!"), ord("~")+1))+list(range(ord("¡"), ord("¬")+1))+list(range(ord("®"), ord("ÿ")+1))
cs = bs[:]
n = 0
for b in range(2**8):
if b not in bs:
bs.append(b)
cs.append(2**8+n)
n += 1
cs = [chr(n) for n in cs]
return dict(zip(bs, cs))
def get_pairs(word):
"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""
pairs = set()
prev_char = word[0]
for char in word[1:]:
pairs.add((prev_char, char))
prev_char = char
return pairs
def basic_clean(text):
text = ftfy.fix_text(text)
text = html.unescape(html.unescape(text))
return text.strip()
def whitespace_clean(text):
text = re.sub(r'\s+', ' ', text)
text = text.strip()
return text
class SimpleTokenizer(object):
def __init__(self, bpe_path: str = default_bpe(), special_tokens=None):
self.byte_encoder = bytes_to_unicode()
self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
merges = gzip.open(bpe_path).read().decode("utf-8").split('\n')
merges = merges[1:49152-256-2+1]
merges = [tuple(merge.split()) for merge in merges]
vocab = list(bytes_to_unicode().values())
vocab = vocab + [v+'</w>' for v in vocab]
for merge in merges:
vocab.append(''.join(merge))
if not special_tokens:
special_tokens = ['<start_of_text>', '<end_of_text>']
else:
special_tokens = ['<start_of_text>', '<end_of_text>'] + special_tokens
vocab.extend(special_tokens)
self.encoder = dict(zip(vocab, range(len(vocab))))
self.decoder = {v: k for k, v in self.encoder.items()}
self.bpe_ranks = dict(zip(merges, range(len(merges))))
self.cache = {t:t for t in special_tokens}
special = "|".join(special_tokens)
self.pat = re.compile(special + r"""|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", re.IGNORECASE)
self.vocab_size = len(self.encoder)
self.all_special_ids = [self.encoder[t] for t in special_tokens]
def bpe(self, token):
if token in self.cache:
return self.cache[token]
word = tuple(token[:-1]) + ( token[-1] + '</w>',)
pairs = get_pairs(word)
if not pairs:
return token+'</w>'
while True:
bigram = min(pairs, key = lambda pair: self.bpe_ranks.get(pair, float('inf')))
if bigram not in self.bpe_ranks:
break
first, second = bigram
new_word = []
i = 0
while i < len(word):
try:
j = word.index(first, i)
new_word.extend(word[i:j])
i = j
except ValueError:
new_word.extend(word[i:])
break
if word[i] == first and i < len(word)-1 and word[i+1] == second:
new_word.append(first+second)
i += 2
else:
new_word.append(word[i])
i += 1
new_word = tuple(new_word)
word = new_word
if len(word) == 1:
break
else:
pairs = get_pairs(word)
word = ' '.join(word)
self.cache[token] = word
return word
def encode(self, text):
bpe_tokens = []
text = whitespace_clean(basic_clean(text)).lower()
for token in re.findall(self.pat, text):
token = ''.join(self.byte_encoder[b] for b in token.encode('utf-8'))
bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(' '))
return bpe_tokens
def decode(self, tokens):
text = ''.join([self.decoder[token] for token in tokens])
text = bytearray([self.byte_decoder[c] for c in text]).decode('utf-8', errors="replace").replace('</w>', ' ')
return text
_tokenizer = SimpleTokenizer()
def decode(output_ids: torch.Tensor):
output_ids = output_ids.cpu().numpy()
return _tokenizer.decode(output_ids)
def tokenize(texts: Union[str, List[str]], context_length: int = 77) -> torch.LongTensor:
"""
Returns the tokenized representation of given input string(s)
Parameters
----------
texts : Union[str, List[str]]
An input string or a list of input strings to tokenize
context_length : int
The context length to use; all CLIP models use 77 as the context length
Returns
-------
A two-dimensional tensor containing the resulting tokens, shape = [number of input strings, context_length]
"""
if isinstance(texts, str):
texts = [texts]
sot_token = _tokenizer.encoder["<start_of_text>"]
eot_token = _tokenizer.encoder["<end_of_text>"]
all_tokens = [[sot_token] + _tokenizer.encode(text) + [eot_token] for text in texts]
result = torch.zeros(len(all_tokens), context_length, dtype=torch.long)
for i, tokens in enumerate(all_tokens):
if len(tokens) > context_length:
tokens = tokens[:context_length] # Truncate
tokens[-1] = eot_token
result[i, :len(tokens)] = torch.tensor(tokens)
return result
class HFTokenizer:
"""HuggingFace tokenizer wrapper"""
def __init__(self, tokenizer_name: str):
from transformers import AutoTokenizer
self.tokenizer = AutoTokenizer.from_pretrained(tokenizer_name)
def __call__(self, texts: Union[str, List[str]], context_length: int = 77) -> torch.Tensor:
# apply the same cleaning as the default tokenizer, minus lowercasing
# adding .lower() (for case-sensitive tokenizers) would make results more robust but less sensitive to nuance
if isinstance(texts, str):
texts = [texts]
texts = [whitespace_clean(basic_clean(text)) for text in texts]
input_ids = self.tokenizer(
texts,
return_tensors='pt',
max_length=context_length,
padding='max_length',
truncation=True,
).input_ids
return input_ids
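# --- Editor's illustrative sketch (not part of the original tokenizer.py) ---
# Round-tripping text through the default tokenizer defined above. The eot token has the
# highest id in each row, so argmax locates the end of the real tokens.
ids = tokenize(["a photo of a cat", "a photo of a dog"])   # LongTensor of shape [2, 77]
eot_pos = int(ids[0].argmax())
print(decode(ids[0][: eot_pos + 1]))   # the text wrapped in start/end-of-text markers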
|
open_clip-main
|
src/open_clip/tokenizer.py
|
import torch
import torch.nn as nn
from torch.nn import functional as F
try:
import torch.distributed.nn
from torch import distributed as dist
has_distributed = True
except ImportError:
has_distributed = False
try:
import horovod.torch as hvd
except ImportError:
hvd = None
def gather_features(
image_features,
text_features,
local_loss=False,
gather_with_grad=False,
rank=0,
world_size=1,
use_horovod=False
):
assert has_distributed, 'torch.distributed did not import correctly, please use a PyTorch version with support.'
if use_horovod:
assert hvd is not None, 'Please install horovod'
if gather_with_grad:
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
else:
with torch.no_grad():
all_image_features = hvd.allgather(image_features)
all_text_features = hvd.allgather(text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features = list(all_image_features.chunk(world_size, dim=0))
gathered_text_features = list(all_text_features.chunk(world_size, dim=0))
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
else:
# We gather tensors from all gpus
if gather_with_grad:
all_image_features = torch.cat(torch.distributed.nn.all_gather(image_features), dim=0)
all_text_features = torch.cat(torch.distributed.nn.all_gather(text_features), dim=0)
else:
gathered_image_features = [torch.zeros_like(image_features) for _ in range(world_size)]
gathered_text_features = [torch.zeros_like(text_features) for _ in range(world_size)]
dist.all_gather(gathered_image_features, image_features)
dist.all_gather(gathered_text_features, text_features)
if not local_loss:
# ensure grads for local rank when all_* features don't have a gradient
gathered_image_features[rank] = image_features
gathered_text_features[rank] = text_features
all_image_features = torch.cat(gathered_image_features, dim=0)
all_text_features = torch.cat(gathered_text_features, dim=0)
return all_image_features, all_text_features
class ClipLoss(nn.Module):
def __init__(
self,
local_loss=False,
gather_with_grad=False,
cache_labels=False,
rank=0,
world_size=1,
use_horovod=False,
):
super().__init__()
self.local_loss = local_loss
self.gather_with_grad = gather_with_grad
self.cache_labels = cache_labels
self.rank = rank
self.world_size = world_size
self.use_horovod = use_horovod
# cache state
self.prev_num_logits = 0
self.labels = {}
def forward(self, image_features, text_features, logit_scale, output_dict=False):
device = image_features.device
if self.world_size > 1:
all_image_features, all_text_features = gather_features(
image_features, text_features,
self.local_loss, self.gather_with_grad, self.rank, self.world_size, self.use_horovod)
if self.local_loss:
logits_per_image = logit_scale * image_features @ all_text_features.T
logits_per_text = logit_scale * text_features @ all_image_features.T
else:
logits_per_image = logit_scale * all_image_features @ all_text_features.T
logits_per_text = logits_per_image.T
else:
logits_per_image = logit_scale * image_features @ text_features.T
logits_per_text = logit_scale * text_features @ image_features.T
# calculate ground-truth labels and cache them if enabled
num_logits = logits_per_image.shape[0]
if self.prev_num_logits != num_logits or device not in self.labels:
labels = torch.arange(num_logits, device=device, dtype=torch.long)
if self.world_size > 1 and self.local_loss:
labels = labels + num_logits * self.rank
if self.cache_labels:
self.labels[device] = labels
self.prev_num_logits = num_logits
else:
labels = self.labels[device]
total_loss = (
F.cross_entropy(logits_per_image, labels) +
F.cross_entropy(logits_per_text, labels)
) / 2
return {"contrastive_loss": total_loss} if output_dict else total_loss
class CoCaLoss(ClipLoss):
def __init__(
self,
caption_loss_weight,
clip_loss_weight,
pad_id=0, # pad_token for open_clip custom tokenizer
local_loss=False,
gather_with_grad=False,
cache_labels=False,
rank=0,
world_size=1,
use_horovod=False,
):
super().__init__(
local_loss=local_loss,
gather_with_grad=gather_with_grad,
cache_labels=cache_labels,
rank=rank,
world_size=world_size,
use_horovod=use_horovod
)
self.clip_loss_weight = clip_loss_weight
self.caption_loss_weight = caption_loss_weight
self.caption_loss = nn.CrossEntropyLoss(ignore_index=pad_id)
def forward(self, image_features, text_features, logits, labels, logit_scale, output_dict=False):
clip_loss = super().forward(image_features, text_features, logit_scale)
clip_loss = self.clip_loss_weight * clip_loss
caption_loss = self.caption_loss(
logits.permute(0, 2, 1),
labels,
)
caption_loss = caption_loss * self.caption_loss_weight
if output_dict:
return {"contrastive_loss": clip_loss, "caption_loss": caption_loss}
return clip_loss, caption_loss
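# --- Editor's illustrative sketch (not part of the original loss.py) ---
# Single-process use of ClipLoss with dummy, already-normalized embeddings. Batch size,
# width and the 1/0.07 logit scale are arbitrary illustration values.
import torch
import torch.nn.functional as F

loss_fn = ClipLoss()
image_features = F.normalize(torch.randn(8, 512), dim=-1)
text_features = F.normalize(torch.randn(8, 512), dim=-1)
loss = loss_fn(image_features, text_features, logit_scale=torch.tensor(1 / 0.07))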
|
open_clip-main
|
src/open_clip/loss.py
|
""" OpenAI pretrained model functions
Adapted from https://github.com/openai/CLIP. Originally MIT License, Copyright (c) 2021 OpenAI.
"""
import os
import warnings
from typing import List, Optional, Union
import torch
from .model import build_model_from_openai_state_dict, convert_weights_to_lp, get_cast_dtype
from .pretrained import get_pretrained_url, list_pretrained_models_by_tag, download_pretrained_from_url
__all__ = ["list_openai_models", "load_openai_model"]
def list_openai_models() -> List[str]:
"""Returns the names of available CLIP models"""
return list_pretrained_models_by_tag('openai')
def load_openai_model(
name: str,
precision: Optional[str] = None,
device: Optional[Union[str, torch.device]] = None,
jit: bool = True,
cache_dir: Optional[str] = None,
):
"""Load a CLIP model
Parameters
----------
name : str
A model name listed by `clip.available_models()`, or the path to a model checkpoint containing the state_dict
precision: str
Model precision, if None defaults to 'fp32' if device == 'cpu' else 'fp16'.
device : Union[str, torch.device]
The device to put the loaded model
jit : bool
Whether to load the optimized JIT model (default) or more hackable non-JIT model.
cache_dir : Optional[str]
The directory to cache the downloaded model weights
Returns
-------
model : torch.nn.Module
The CLIP model
preprocess : Callable[[PIL.Image], torch.Tensor]
A torchvision transform that converts a PIL image into a tensor that the returned model can take as its input
"""
if device is None:
device = "cuda" if torch.cuda.is_available() else "cpu"
if precision is None:
precision = 'fp32' if device == 'cpu' else 'fp16'
if get_pretrained_url(name, 'openai'):
model_path = download_pretrained_from_url(get_pretrained_url(name, 'openai'), cache_dir=cache_dir)
elif os.path.isfile(name):
model_path = name
else:
raise RuntimeError(f"Model {name} not found; available models = {list_openai_models()}")
try:
# loading JIT archive
model = torch.jit.load(model_path, map_location=device if jit else "cpu").eval()
state_dict = None
except RuntimeError:
# loading saved state dict
if jit:
warnings.warn(f"File {model_path} is not a JIT archive. Loading as a state dict instead")
jit = False
state_dict = torch.load(model_path, map_location="cpu")
if not jit:
# Build a non-jit model from the OpenAI jitted model state dict
cast_dtype = get_cast_dtype(precision)
try:
model = build_model_from_openai_state_dict(state_dict or model.state_dict(), cast_dtype=cast_dtype)
except KeyError:
sd = {k[7:]: v for k, v in state_dict["state_dict"].items()}
model = build_model_from_openai_state_dict(sd, cast_dtype=cast_dtype)
# model from OpenAI state dict is in manually cast fp16 mode, must be converted for AMP/fp32/bf16 use
model = model.to(device)
if precision.startswith('amp') or precision == 'fp32':
model.float()
elif precision == 'bf16':
convert_weights_to_lp(model, dtype=torch.bfloat16)
return model
# patch the device names
device_holder = torch.jit.trace(lambda: torch.ones([]).to(torch.device(device)), example_inputs=[])
device_node = [n for n in device_holder.graph.findAllNodes("prim::Constant") if "Device" in repr(n)][-1]
def patch_device(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("prim::Constant"):
if "value" in node.attributeNames() and str(node["value"]).startswith("cuda"):
node.copyAttributes(device_node)
model.apply(patch_device)
patch_device(model.encode_image)
patch_device(model.encode_text)
# patch dtype to float32 (typically for CPU)
if precision == 'fp32':
float_holder = torch.jit.trace(lambda: torch.ones([]).float(), example_inputs=[])
float_input = list(float_holder.graph.findNode("aten::to").inputs())[1]
float_node = float_input.node()
def patch_float(module):
try:
graphs = [module.graph] if hasattr(module, "graph") else []
except RuntimeError:
graphs = []
if hasattr(module, "forward1"):
graphs.append(module.forward1.graph)
for graph in graphs:
for node in graph.findAllNodes("aten::to"):
inputs = list(node.inputs())
for i in [1, 2]: # dtype can be the second or third argument to aten::to()
if inputs[i].node()["value"] == 5:  # 5 is the JIT enum value for torch.float16
inputs[i].node().copyAttributes(float_node)
model.apply(patch_float)
patch_float(model.encode_image)
patch_float(model.encode_text)
model.float()
# ensure image_size attr available at consistent location for both jit and non-jit
model.visual.image_size = model.input_resolution.item()
return model
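# --- Editor's illustrative sketch (not part of the original openai.py) ---
# Loading one of the original OpenAI checkpoints through the helper above. 'RN50' is one
# of the names returned by list_openai_models(); fp32 on CPU is an illustrative choice.
print(list_openai_models())
model = load_openai_model('RN50', precision='fp32', device='cpu', jit=False)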
|
open_clip-main
|
src/open_clip/openai.py
|
from itertools import repeat
import collections.abc
from torch import nn as nn
from torchvision.ops.misc import FrozenBatchNorm2d
def freeze_batch_norm_2d(module, module_match={}, name=''):
"""
Converts all `BatchNorm2d` and `SyncBatchNorm` layers of provided module into `FrozenBatchNorm2d`. If `module` is
itself an instance of either `BatchNorm2d` or `SyncBatchNorm`, it is converted into `FrozenBatchNorm2d` and
returned. Otherwise, the module is walked recursively and submodules are converted in place.
Args:
module (torch.nn.Module): Any PyTorch module.
module_match (dict): Dictionary of full module names to freeze (all if empty)
name (str): Full module name (prefix)
Returns:
torch.nn.Module: Resulting module
Inspired by https://github.com/pytorch/pytorch/blob/a5895f85be0f10212791145bfedc0261d364f103/torch/nn/modules/batchnorm.py#L762
"""
res = module
is_match = True
if module_match:
is_match = name in module_match
if is_match and isinstance(module, (nn.modules.batchnorm.BatchNorm2d, nn.modules.batchnorm.SyncBatchNorm)):
res = FrozenBatchNorm2d(module.num_features)
res.num_features = module.num_features
res.affine = module.affine
if module.affine:
res.weight.data = module.weight.data.clone().detach()
res.bias.data = module.bias.data.clone().detach()
res.running_mean.data = module.running_mean.data
res.running_var.data = module.running_var.data
res.eps = module.eps
else:
for child_name, child in module.named_children():
full_child_name = '.'.join([name, child_name]) if name else child_name
new_child = freeze_batch_norm_2d(child, module_match, full_child_name)
if new_child is not child:
res.add_module(child_name, new_child)
return res
# From PyTorch internals
def _ntuple(n):
def parse(x):
if isinstance(x, collections.abc.Iterable):
return x
return tuple(repeat(x, n))
return parse
to_1tuple = _ntuple(1)
to_2tuple = _ntuple(2)
to_3tuple = _ntuple(3)
to_4tuple = _ntuple(4)
to_ntuple = lambda n, x: _ntuple(n)(x)
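# --- Editor's illustrative sketch (not part of the original utils.py) ---
# The two helpers above in isolation; the tiny Sequential is a made-up stand-in just to
# show freeze_batch_norm_2d replacing a BatchNorm2d child in place.
from torch import nn

print(to_2tuple(224))          # (224, 224)
print(to_2tuple((224, 196)))   # iterables pass through unchanged

block = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
frozen = freeze_batch_norm_2d(block)   # BatchNorm2d -> FrozenBatchNorm2d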
|
open_clip-main
|
src/open_clip/utils.py
|
from collections import OrderedDict
import math
from typing import Callable, Optional, Sequence, Tuple
import torch
from torch import nn
from torch.nn import functional as F
from torch.utils.checkpoint import checkpoint
from .utils import to_2tuple
class LayerNormFp32(nn.LayerNorm):
"""Subclass torch's LayerNorm to handle fp16 (by casting to float32 and back)."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
x = F.layer_norm(x.to(torch.float32), self.normalized_shape, self.weight, self.bias, self.eps)
return x.to(orig_type)
class LayerNorm(nn.LayerNorm):
"""Subclass torch's LayerNorm (with cast back to input dtype)."""
def forward(self, x: torch.Tensor):
orig_type = x.dtype
x = F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)
return x.to(orig_type)
class QuickGELU(nn.Module):
# NOTE This is slower than nn.GELU or nn.SiLU and uses more GPU memory
def forward(self, x: torch.Tensor):
return x * torch.sigmoid(1.702 * x)
class LayerScale(nn.Module):
def __init__(self, dim, init_values=1e-5, inplace=False):
super().__init__()
self.inplace = inplace
self.gamma = nn.Parameter(init_values * torch.ones(dim))
def forward(self, x):
return x.mul_(self.gamma) if self.inplace else x * self.gamma
class PatchDropout(nn.Module):
"""
https://arxiv.org/abs/2212.00794
"""
def __init__(self, prob, exclude_first_token=True):
super().__init__()
assert 0 <= prob < 1.
self.prob = prob
self.exclude_first_token = exclude_first_token # exclude CLS token
def forward(self, x):
if not self.training or self.prob == 0.:
return x
if self.exclude_first_token:
cls_tokens, x = x[:, :1], x[:, 1:]
else:
cls_tokens = torch.jit.annotate(torch.Tensor, x[:, :1])
batch = x.size()[0]
num_tokens = x.size()[1]
batch_indices = torch.arange(batch)
batch_indices = batch_indices[..., None]
keep_prob = 1 - self.prob
num_patches_keep = max(1, int(num_tokens * keep_prob))
rand = torch.randn(batch, num_tokens)
patch_indices_keep = rand.topk(num_patches_keep, dim=-1).indices
x = x[batch_indices, patch_indices_keep]
if self.exclude_first_token:
x = torch.cat((cls_tokens, x), dim=1)
return x
class Attention(nn.Module):
def __init__(
self,
dim,
num_heads=8,
qkv_bias=True,
scaled_cosine=False,
scale_heads=False,
logit_scale_max=math.log(1. / 0.01),
attn_drop=0.,
proj_drop=0.
):
super().__init__()
self.scaled_cosine = scaled_cosine
self.scale_heads = scale_heads
assert dim % num_heads == 0, 'dim should be divisible by num_heads'
self.num_heads = num_heads
self.head_dim = dim // num_heads
self.scale = self.head_dim ** -0.5
self.logit_scale_max = logit_scale_max
# keeping in_proj in this form (instead of nn.Linear) to match weight scheme of original
self.in_proj_weight = nn.Parameter(torch.randn((dim * 3, dim)) * self.scale)
if qkv_bias:
self.in_proj_bias = nn.Parameter(torch.zeros(dim * 3))
else:
self.in_proj_bias = None
if self.scaled_cosine:
self.logit_scale = nn.Parameter(torch.log(10 * torch.ones((num_heads, 1, 1))))
else:
self.logit_scale = None
self.attn_drop = nn.Dropout(attn_drop)
if self.scale_heads:
self.head_scale = nn.Parameter(torch.ones((num_heads, 1, 1)))
else:
self.head_scale = None
self.out_proj = nn.Linear(dim, dim)
self.out_drop = nn.Dropout(proj_drop)
def forward(self, x, attn_mask: Optional[torch.Tensor] = None):
L, N, C = x.shape
q, k, v = F.linear(x, self.in_proj_weight, self.in_proj_bias).chunk(3, dim=-1)
q = q.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
k = k.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
v = v.contiguous().view(L, N * self.num_heads, -1).transpose(0, 1)
if self.logit_scale is not None:
attn = torch.bmm(F.normalize(q, dim=-1), F.normalize(k, dim=-1).transpose(-1, -2))
logit_scale = torch.clamp(self.logit_scale, max=self.logit_scale_max).exp()
attn = attn.view(N, self.num_heads, L, L) * logit_scale
attn = attn.view(-1, L, L)
else:
q = q * self.scale
attn = torch.bmm(q, k.transpose(-1, -2))
if attn_mask is not None:
if attn_mask.dtype == torch.bool:
new_attn_mask = torch.zeros_like(attn_mask, dtype=q.dtype)
new_attn_mask.masked_fill_(attn_mask, float("-inf"))
attn_mask = new_attn_mask
attn += attn_mask
attn = attn.softmax(dim=-1)
attn = self.attn_drop(attn)
x = torch.bmm(attn, v)
if self.head_scale is not None:
x = x.view(N, self.num_heads, L, C) * self.head_scale
x = x.view(-1, L, C)
x = x.transpose(0, 1).reshape(L, N, C)
x = self.out_proj(x)
x = self.out_drop(x)
return x
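# --- Editor's illustrative sketch (not part of the original transformer.py) ---
# The custom Attention block above expects sequence-first (L, N, C) input, matching
# nn.MultiheadAttention's default layout. The sizes below are arbitrary.
import torch

example_attn = Attention(dim=64, num_heads=8, scaled_cosine=True)
x_example = torch.randn(16, 2, 64)     # (seq_len, batch, width)
y_example = example_attn(x_example)    # same shape as the input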
class AttentionalPooler(nn.Module):
def __init__(
self,
d_model: int,
context_dim: int,
n_head: int = 8,
n_queries: int = 256,
norm_layer: Callable = LayerNorm
):
super().__init__()
self.query = nn.Parameter(torch.randn(n_queries, d_model))
self.attn = nn.MultiheadAttention(d_model, n_head, kdim=context_dim, vdim=context_dim)
self.ln_q = norm_layer(d_model)
self.ln_k = norm_layer(context_dim)
def forward(self, x: torch.Tensor):
x = self.ln_k(x).permute(1, 0, 2) # NLD -> LND
N = x.shape[1]
q = self.ln_q(self.query)
out = self.attn(self._repeat(q, N), x, x, need_weights=False)[0]
return out.permute(1, 0, 2) # LND -> NLD
def _repeat(self, query, N: int):
return query.unsqueeze(1).repeat(1, N, 1)
class ResidualAttentionBlock(nn.Module):
def __init__(
self,
d_model: int,
n_head: int,
mlp_ratio: float = 4.0,
ls_init_value: float = None,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
is_cross_attention: bool = False,
):
super().__init__()
self.ln_1 = norm_layer(d_model)
self.attn = nn.MultiheadAttention(d_model, n_head)
self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
if is_cross_attention:
self.ln_1_kv = norm_layer(d_model)
self.ln_2 = norm_layer(d_model)
mlp_width = int(d_model * mlp_ratio)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, mlp_width)),
("gelu", act_layer()),
("c_proj", nn.Linear(mlp_width, d_model))
]))
self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
def attention(
self,
q_x: torch.Tensor,
k_x: Optional[torch.Tensor] = None,
v_x: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
):
k_x = k_x if k_x is not None else q_x
v_x = v_x if v_x is not None else q_x
attn_mask = attn_mask.to(q_x.dtype) if attn_mask is not None else None
return self.attn(
q_x, k_x, v_x, need_weights=False, attn_mask=attn_mask
)[0]
def forward(
self,
q_x: torch.Tensor,
k_x: Optional[torch.Tensor] = None,
v_x: Optional[torch.Tensor] = None,
attn_mask: Optional[torch.Tensor] = None,
):
k_x = self.ln_1_kv(k_x) if hasattr(self, "ln_1_kv") and k_x is not None else None
v_x = self.ln_1_kv(v_x) if hasattr(self, "ln_1_kv") and v_x is not None else None
x = q_x + self.ls_1(self.attention(q_x=self.ln_1(q_x), k_x=k_x, v_x=v_x, attn_mask=attn_mask))
x = x + self.ls_2(self.mlp(self.ln_2(x)))
return x
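# Illustrative sketch (not part of the original file): a single pre-norm residual block on
# LND inputs with an additive causal mask; sizes are assumptions for demonstration.
def _residual_attention_block_usage_sketch():
    block = ResidualAttentionBlock(d_model=512, n_head=8)
    x = torch.randn(77, 2, 512)                      # (L, N, C)
    causal_mask = torch.empty(77, 77).fill_(float("-inf")).triu_(1)
    return block(x, attn_mask=causal_mask).shape     # torch.Size([77, 2, 512])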
class CustomResidualAttentionBlock(nn.Module):
def __init__(
self,
d_model: int,
n_head: int,
mlp_ratio: float = 4.0,
ls_init_value: float = None,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
scale_cosine_attn: bool = False,
scale_heads: bool = False,
scale_attn: bool = False,
scale_fc: bool = False,
):
super().__init__()
self.ln_1 = norm_layer(d_model)
self.attn = Attention(
d_model, n_head,
scaled_cosine=scale_cosine_attn,
scale_heads=scale_heads,
)
self.ln_attn = norm_layer(d_model) if scale_attn else nn.Identity()
self.ls_1 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
self.ln_2 = norm_layer(d_model)
mlp_width = int(d_model * mlp_ratio)
self.mlp = nn.Sequential(OrderedDict([
("c_fc", nn.Linear(d_model, mlp_width)),
('ln', norm_layer(mlp_width) if scale_fc else nn.Identity()),
("gelu", act_layer()),
("c_proj", nn.Linear(mlp_width, d_model))
]))
self.ls_2 = LayerScale(d_model, ls_init_value) if ls_init_value is not None else nn.Identity()
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
x = x + self.ls_1(self.ln_attn(self.attn(self.ln_1(x), attn_mask=attn_mask)))
x = x + self.ls_2(self.mlp(self.ln_2(x)))
return x
class Transformer(nn.Module):
def __init__(
self,
width: int,
layers: int,
heads: int,
mlp_ratio: float = 4.0,
ls_init_value: float = None,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
):
super().__init__()
self.width = width
self.layers = layers
self.grad_checkpointing = False
self.resblocks = nn.ModuleList([
ResidualAttentionBlock(
width, heads, mlp_ratio, ls_init_value=ls_init_value, act_layer=act_layer, norm_layer=norm_layer)
for _ in range(layers)
])
def get_cast_dtype(self) -> torch.dtype:
return self.resblocks[0].mlp.c_fc.weight.dtype
def forward(self, x: torch.Tensor, attn_mask: Optional[torch.Tensor] = None):
for r in self.resblocks:
if self.grad_checkpointing and not torch.jit.is_scripting():
# TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372
x = checkpoint(r, x, None, None, attn_mask)
else:
x = r(x, attn_mask=attn_mask)
return x
class VisionTransformer(nn.Module):
output_tokens: torch.jit.Final[bool]
def __init__(
self,
image_size: int,
patch_size: int,
width: int,
layers: int,
heads: int,
mlp_ratio: float,
ls_init_value: float = None,
global_average_pool: bool = False,
attentional_pool: bool = False,
n_queries: int = 256,
attn_pooler_heads: int = 8,
output_dim: int = 512,
patch_dropout: float = 0.,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
output_tokens: bool = False
):
super().__init__()
self.output_tokens = output_tokens
self.image_size = to_2tuple(image_size)
self.patch_size = to_2tuple(patch_size)
self.grid_size = (self.image_size[0] // self.patch_size[0], self.image_size[1] // self.patch_size[1])
self.output_dim = output_dim
self.conv1 = nn.Conv2d(in_channels=3, out_channels=width, kernel_size=patch_size, stride=patch_size, bias=False)
scale = width ** -0.5
self.class_embedding = nn.Parameter(scale * torch.randn(width))
self.positional_embedding = nn.Parameter(scale * torch.randn(self.grid_size[0] * self.grid_size[1] + 1, width))
# setting a patch_dropout of 0. would mean it is disabled and this function would be the identity fn
self.patch_dropout = PatchDropout(patch_dropout) if patch_dropout > 0. else nn.Identity()
self.ln_pre = norm_layer(width)
self.transformer = Transformer(
width,
layers,
heads,
mlp_ratio,
ls_init_value=ls_init_value,
act_layer=act_layer,
norm_layer=norm_layer,
)
self.global_average_pool = global_average_pool
if attentional_pool:
self.attn_pool = AttentionalPooler(output_dim, width, n_head=attn_pooler_heads, n_queries=n_queries)
self.ln_post = norm_layer(output_dim)
self.proj = nn.Parameter(scale * torch.randn(output_dim, output_dim))
else:
self.attn_pool = None
self.ln_post = norm_layer(width)
self.proj = nn.Parameter(scale * torch.randn(width, output_dim))
self.init_parameters()
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
for param in self.parameters():
param.requires_grad = False
if unlocked_groups != 0:
groups = [
[
self.conv1,
self.class_embedding,
self.positional_embedding,
self.ln_pre,
],
*self.transformer.resblocks[:-1],
[
self.transformer.resblocks[-1],
self.ln_post,
],
self.proj,
]
def _unlock(x):
if isinstance(x, Sequence):
for g in x:
_unlock(g)
else:
if isinstance(x, torch.nn.Parameter):
x.requires_grad = True
else:
for p in x.parameters():
p.requires_grad = True
_unlock(groups[-unlocked_groups:])
def init_parameters(self):
# FIXME OpenAI CLIP did not define an init for the VisualTransformer
# TODO experiment if default PyTorch init, below, or alternate init is best.
# nn.init.normal_(self.class_embedding, std=self.scale)
# nn.init.normal_(self.positional_embedding, std=self.scale)
#
# proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
# attn_std = self.transformer.width ** -0.5
# fc_std = (2 * self.transformer.width) ** -0.5
# for block in self.transformer.resblocks:
# nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
# nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
# nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
# nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
#
# if self.text_projection is not None:
# nn.init.normal_(self.text_projection, std=self.scale)
pass
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.transformer.grad_checkpointing = enable
def _global_pool(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
if self.global_average_pool:
return x.mean(dim=1), x
else:
return x[:, 0], x[:, 1:]
def forward(self, x: torch.Tensor):
x = self.conv1(x) # shape = [*, width, grid, grid]
x = x.reshape(x.shape[0], x.shape[1], -1) # shape = [*, width, grid ** 2]
x = x.permute(0, 2, 1) # shape = [*, grid ** 2, width]
x = torch.cat(
[self.class_embedding.to(x.dtype) + torch.zeros(x.shape[0], 1, x.shape[-1], dtype=x.dtype, device=x.device),
x], dim=1) # shape = [*, grid ** 2 + 1, width]
x = x + self.positional_embedding.to(x.dtype)
# a patch_dropout of 0. would mean it is disabled and this function would do nothing but return what was passed in
x = self.patch_dropout(x)
x = self.ln_pre(x)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x)
x = x.permute(1, 0, 2) # LND -> NLD
if self.attn_pool is not None:
x = self.attn_pool(x)
x = self.ln_post(x)
pooled, tokens = self._global_pool(x)
else:
pooled, tokens = self._global_pool(x)
pooled = self.ln_post(pooled)
if self.proj is not None:
pooled = pooled @ self.proj
if self.output_tokens:
return pooled, tokens
return pooled
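# Illustrative sketch (not part of the original file): a small VisionTransformer config; the
# hyperparameters here are arbitrary assumptions, not an official OpenCLIP configuration.
def _vision_transformer_usage_sketch():
    vit = VisionTransformer(
        image_size=224, patch_size=32, width=384, layers=2, heads=6, mlp_ratio=4.0)
    images = torch.randn(2, 3, 224, 224)
    return vit(images).shape    # torch.Size([2, 512]) with the default output_dim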
class TextTransformer(nn.Module):
output_tokens: torch.jit.Final[bool]
def __init__(
self,
context_length: int = 77,
vocab_size: int = 49408,
width: int = 512,
heads: int = 8,
layers: int = 12,
ls_init_value: float = None,
output_dim: int = 512,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
embed_cls: bool = False,
pad_id: int = 0,
output_tokens: bool = False,
):
super().__init__()
self.output_tokens = output_tokens
self.num_pos = self.context_length = context_length
self.vocab_size = vocab_size
self.width = width
self.output_dim = output_dim
self.heads = heads
self.pad_id = pad_id
self.text_projection = nn.Parameter(torch.empty(width, output_dim))
if embed_cls:
self.cls_emb = nn.Parameter(torch.empty(width))
self.num_pos += 1
else:
self.cls_emb = None
self.token_embedding = nn.Embedding(vocab_size, width)
self.positional_embedding = nn.Parameter(torch.empty(self.num_pos, width))
self.transformer = Transformer(
width=width,
layers=layers,
heads=heads,
ls_init_value=ls_init_value,
act_layer=act_layer,
norm_layer=norm_layer,
)
self.ln_final = norm_layer(width)
self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
self.init_parameters()
def init_parameters(self):
nn.init.normal_(self.token_embedding.weight, std=0.02)
nn.init.normal_(self.positional_embedding, std=0.01)
if self.cls_emb is not None:
nn.init.normal_(self.cls_emb, std=0.01)
proj_std = (self.transformer.width ** -0.5) * ((2 * self.transformer.layers) ** -0.5)
attn_std = self.transformer.width ** -0.5
fc_std = (2 * self.transformer.width) ** -0.5
for block in self.transformer.resblocks:
nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
if self.text_projection is not None:
nn.init.normal_(self.text_projection, std=self.transformer.width ** -0.5)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.transformer.grad_checkpointing = enable
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.num_pos, self.num_pos)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def build_cls_mask(self, text, cast_dtype: torch.dtype):
cls_mask = (text != self.pad_id).unsqueeze(1)
cls_mask = F.pad(cls_mask, (1, 0, cls_mask.shape[2], 0), value=1.0)
additive_mask = torch.empty(cls_mask.shape, dtype=cast_dtype, device=cls_mask.device)
additive_mask.fill_(0)
additive_mask.masked_fill_(~cls_mask, float("-inf"))
additive_mask = torch.repeat_interleave(additive_mask, self.heads, 0)
return additive_mask
def _repeat(self, t, N: int):
return t.reshape(1, 1, -1).repeat(N, 1, 1)
def forward(self, text):
cast_dtype = self.transformer.get_cast_dtype()
seq_len = text.shape[1]
x = self.token_embedding(text).to(cast_dtype) # [batch_size, n_ctx, d_model]
attn_mask = self.attn_mask
if self.cls_emb is not None:
seq_len += 1
x = torch.cat([x, self._repeat(self.cls_emb, x.shape[0])], dim=1)
cls_mask = self.build_cls_mask(text, cast_dtype)
attn_mask = attn_mask[None, :seq_len, :seq_len] + cls_mask[:, :seq_len, :seq_len]
x = x + self.positional_embedding[:seq_len].to(cast_dtype)
x = x.permute(1, 0, 2) # NLD -> LND
x = self.transformer(x, attn_mask=attn_mask)
x = x.permute(1, 0, 2) # LND -> NLD
# x.shape = [batch_size, n_ctx, transformer.width]
# take features from the eot embedding (eot_token is the highest number in each sequence)
if self.cls_emb is not None:
pooled, tokens = x[:, -1], x[:, :-1]
pooled = self.ln_final(pooled)
else:
x = self.ln_final(x)
pooled, tokens = x[torch.arange(x.shape[0]), text.argmax(dim=-1)], x
if self.text_projection is not None:
pooled = pooled @ self.text_projection
if self.output_tokens:
return pooled, tokens
return pooled
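# Illustrative sketch (not part of the original file): the text tower pools at the position
# of the highest token id (normally the EOT token); random ids are used here purely to
# demonstrate shapes, so the pooled position is arbitrary.
def _text_transformer_usage_sketch():
    text_encoder = TextTransformer(context_length=77, vocab_size=49408, width=512, heads=8, layers=2)
    token_ids = torch.randint(0, 49408, (2, 77))
    return text_encoder(token_ids).shape    # torch.Size([2, 512])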
class MultimodalTransformer(Transformer):
def __init__(
self,
width: int,
layers: int,
heads: int,
context_length: int = 77,
mlp_ratio: float = 4.0,
ls_init_value: float = None,
act_layer: Callable = nn.GELU,
norm_layer: Callable = LayerNorm,
output_dim: int = 512,
):
super().__init__(
width=width,
layers=layers,
heads=heads,
mlp_ratio=mlp_ratio,
ls_init_value=ls_init_value,
act_layer=act_layer,
norm_layer=norm_layer,
)
self.context_length = context_length
self.cross_attn = nn.ModuleList([
ResidualAttentionBlock(
width,
heads,
mlp_ratio,
ls_init_value=ls_init_value,
act_layer=act_layer,
norm_layer=norm_layer,
is_cross_attention=True,
)
for _ in range(layers)
])
self.register_buffer('attn_mask', self.build_attention_mask(), persistent=False)
self.ln_final = norm_layer(width)
self.text_projection = nn.Parameter(torch.empty(width, output_dim))
def init_parameters(self):
        # this module *is* the transformer, so width/layers/resblocks live on self
        proj_std = (self.width ** -0.5) * ((2 * self.layers) ** -0.5)
        attn_std = self.width ** -0.5
        fc_std = (2 * self.width) ** -0.5
        for block in self.resblocks:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        for block in self.cross_attn:
            nn.init.normal_(block.attn.in_proj_weight, std=attn_std)
            nn.init.normal_(block.attn.out_proj.weight, std=proj_std)
            nn.init.normal_(block.mlp.c_fc.weight, std=fc_std)
            nn.init.normal_(block.mlp.c_proj.weight, std=proj_std)
        if self.text_projection is not None:
            nn.init.normal_(self.text_projection, std=self.width ** -0.5)
def build_attention_mask(self):
# lazily create causal attention mask, with full attention between the tokens
# pytorch uses additive attention mask; fill with -inf
mask = torch.empty(self.context_length, self.context_length)
mask.fill_(float("-inf"))
mask.triu_(1) # zero out the lower diagonal
return mask
def forward(self, image_embs, text_embs):
        text_embs = text_embs.permute(1, 0, 2)  # NLD -> LND
image_embs = image_embs.permute(1, 0, 2) # NLD -> LND
seq_len = text_embs.shape[0]
for resblock, cross_attn in zip(self.resblocks, self.cross_attn):
if self.grad_checkpointing and not torch.jit.is_scripting():
# TODO: handle kwargs https://github.com/pytorch/pytorch/issues/79887#issuecomment-1161758372
text_embs = checkpoint(resblock, text_embs, None, None, self.attn_mask[:seq_len, :seq_len])
text_embs = checkpoint(cross_attn, text_embs, image_embs, image_embs, None)
else:
text_embs = resblock(text_embs, attn_mask=self.attn_mask[:seq_len, :seq_len])
text_embs = cross_attn(text_embs, k_x=image_embs, v_x=image_embs)
x = text_embs.permute(1, 0, 2) # LND -> NLD
x = self.ln_final(x)
if self.text_projection is not None:
x = x @ self.text_projection
return x
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
self.grad_checkpointing = enable
|
open_clip-main
|
src/open_clip/transformer.py
|
import warnings
from dataclasses import dataclass, asdict
from typing import Any, Dict, Optional, Sequence, Tuple, Union
import torch
import torch.nn as nn
import torchvision.transforms.functional as F
from torchvision.transforms import Normalize, Compose, RandomResizedCrop, InterpolationMode, ToTensor, Resize, \
CenterCrop
from .constants import OPENAI_DATASET_MEAN, OPENAI_DATASET_STD
@dataclass
class AugmentationCfg:
scale: Tuple[float, float] = (0.9, 1.0)
ratio: Optional[Tuple[float, float]] = None
color_jitter: Optional[Union[float, Tuple[float, float, float]]] = None
interpolation: Optional[str] = None
re_prob: Optional[float] = None
re_count: Optional[int] = None
use_timm: bool = False
class ResizeMaxSize(nn.Module):
def __init__(self, max_size, interpolation=InterpolationMode.BICUBIC, fn='max', fill=0):
super().__init__()
if not isinstance(max_size, int):
raise TypeError(f"Size should be int. Got {type(max_size)}")
self.max_size = max_size
self.interpolation = interpolation
        self.fn = min if fn == 'min' else max
self.fill = fill
def forward(self, img):
if isinstance(img, torch.Tensor):
            height, width = img.shape[-2:]
else:
width, height = img.size
scale = self.max_size / float(max(height, width))
if scale != 1.0:
new_size = tuple(round(dim * scale) for dim in (height, width))
img = F.resize(img, new_size, self.interpolation)
pad_h = self.max_size - new_size[0]
pad_w = self.max_size - new_size[1]
img = F.pad(img, padding=[pad_w//2, pad_h//2, pad_w - pad_w//2, pad_h - pad_h//2], fill=self.fill)
return img
def _convert_to_rgb(image):
return image.convert('RGB')
def image_transform(
image_size: int,
is_train: bool,
mean: Optional[Tuple[float, ...]] = None,
std: Optional[Tuple[float, ...]] = None,
resize_longest_max: bool = False,
fill_color: int = 0,
aug_cfg: Optional[Union[Dict[str, Any], AugmentationCfg]] = None,
):
mean = mean or OPENAI_DATASET_MEAN
if not isinstance(mean, (list, tuple)):
mean = (mean,) * 3
std = std or OPENAI_DATASET_STD
if not isinstance(std, (list, tuple)):
std = (std,) * 3
if isinstance(image_size, (list, tuple)) and image_size[0] == image_size[1]:
# for square size, pass size as int so that Resize() uses aspect preserving shortest edge
image_size = image_size[0]
if isinstance(aug_cfg, dict):
aug_cfg = AugmentationCfg(**aug_cfg)
else:
aug_cfg = aug_cfg or AugmentationCfg()
normalize = Normalize(mean=mean, std=std)
if is_train:
aug_cfg_dict = {k: v for k, v in asdict(aug_cfg).items() if v is not None}
use_timm = aug_cfg_dict.pop('use_timm', False)
if use_timm:
from timm.data import create_transform # timm can still be optional
            if isinstance(image_size, (tuple, list)):
                assert len(image_size) >= 2
                input_size = (3,) + tuple(image_size[-2:])
            else:
                input_size = (3, image_size, image_size)
# by default, timm aug randomly alternates bicubic & bilinear for better robustness at inference time
aug_cfg_dict.setdefault('interpolation', 'random')
aug_cfg_dict.setdefault('color_jitter', None) # disable by default
train_transform = create_transform(
input_size=input_size,
is_training=True,
hflip=0.,
                mean=mean,
                std=std,
re_mode='pixel',
**aug_cfg_dict,
)
else:
train_transform = Compose([
RandomResizedCrop(
image_size,
scale=aug_cfg_dict.pop('scale'),
interpolation=InterpolationMode.BICUBIC,
),
_convert_to_rgb,
ToTensor(),
normalize,
])
if aug_cfg_dict:
warnings.warn(f'Unused augmentation cfg items, specify `use_timm` to use ({list(aug_cfg_dict.keys())}).')
return train_transform
else:
if resize_longest_max:
transforms = [
ResizeMaxSize(image_size, fill=fill_color)
]
else:
transforms = [
Resize(image_size, interpolation=InterpolationMode.BICUBIC),
CenterCrop(image_size),
]
transforms.extend([
_convert_to_rgb,
ToTensor(),
normalize,
])
return Compose(transforms)
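# Illustrative sketch (not part of the original file): building train/val preprocessing with
# the OpenAI mean/std defaults; the image size is an assumption for demonstration.
def _image_transform_usage_sketch():
    preprocess_train = image_transform(224, is_train=True)     # RandomResizedCrop + normalize
    preprocess_val = image_transform(224, is_train=False)      # Resize + CenterCrop + normalize
    return preprocess_train, preprocess_val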
|
open_clip-main
|
src/open_clip/transform.py
|
""" timm model adapter
Wraps timm (https://github.com/rwightman/pytorch-image-models) models for use as a vision tower in CLIP model.
"""
import logging
from collections import OrderedDict
import torch
import torch.nn as nn
try:
import timm
from timm.models.layers import Mlp, to_2tuple
try:
# old timm imports < 0.8.1
from timm.models.layers.attention_pool2d import RotAttentionPool2d
from timm.models.layers.attention_pool2d import AttentionPool2d as AbsAttentionPool2d
except ImportError:
# new timm imports >= 0.8.1
from timm.layers import RotAttentionPool2d
from timm.layers import AttentionPool2d as AbsAttentionPool2d
except ImportError:
timm = None
from .utils import freeze_batch_norm_2d
class TimmModel(nn.Module):
""" timm model adapter
# FIXME this adapter is a work in progress, may change in ways that break weight compat
"""
def __init__(
self,
model_name,
embed_dim,
image_size=224,
pool='avg',
proj='linear',
proj_bias=False,
drop=0.,
drop_path=None,
pretrained=False,
):
super().__init__()
if timm is None:
raise RuntimeError("Please `pip install timm` to use timm models.")
self.image_size = to_2tuple(image_size)
timm_kwargs = {}
if drop_path is not None:
timm_kwargs['drop_path_rate'] = drop_path
self.trunk = timm.create_model(model_name, pretrained=pretrained, **timm_kwargs)
feat_size = self.trunk.default_cfg.get('pool_size', None)
feature_ndim = 1 if not feat_size else 2
if pool in ('abs_attn', 'rot_attn'):
assert feature_ndim == 2
# if attn pooling used, remove both classifier and default pool
self.trunk.reset_classifier(0, global_pool='')
else:
# reset global pool if pool config set, otherwise leave as network default
reset_kwargs = dict(global_pool=pool) if pool else {}
self.trunk.reset_classifier(0, **reset_kwargs)
prev_chs = self.trunk.num_features
head_layers = OrderedDict()
if pool == 'abs_attn':
head_layers['pool'] = AbsAttentionPool2d(prev_chs, feat_size=feat_size, out_features=embed_dim)
prev_chs = embed_dim
elif pool == 'rot_attn':
head_layers['pool'] = RotAttentionPool2d(prev_chs, out_features=embed_dim)
prev_chs = embed_dim
else:
assert proj, 'projection layer needed if non-attention pooling is used.'
# NOTE attention pool ends with a projection layer, so proj should usually be set to '' if such pooling is used
if proj == 'linear':
head_layers['drop'] = nn.Dropout(drop)
head_layers['proj'] = nn.Linear(prev_chs, embed_dim, bias=proj_bias)
elif proj == 'mlp':
head_layers['mlp'] = Mlp(prev_chs, 2 * embed_dim, embed_dim, drop=(drop, 0), bias=(True, proj_bias))
self.head = nn.Sequential(head_layers)
def lock(self, unlocked_groups=0, freeze_bn_stats=False):
""" lock modules
Args:
unlocked_groups (int): leave last n layer groups unlocked (default: 0)
"""
if not unlocked_groups:
# lock full model
for param in self.trunk.parameters():
param.requires_grad = False
if freeze_bn_stats:
freeze_batch_norm_2d(self.trunk)
else:
# NOTE: partial freeze requires latest timm (master) branch and is subject to change
try:
# FIXME import here until API stable and in an official release
from timm.models.helpers import group_parameters, group_modules
except ImportError:
raise RuntimeError(
'Please install latest timm `pip install git+https://github.com/rwightman/pytorch-image-models`')
matcher = self.trunk.group_matcher()
gparams = group_parameters(self.trunk, matcher)
max_layer_id = max(gparams.keys())
max_layer_id = max_layer_id - unlocked_groups
for group_idx in range(max_layer_id + 1):
group = gparams[group_idx]
for param in group:
self.trunk.get_parameter(param).requires_grad = False
if freeze_bn_stats:
gmodules = group_modules(self.trunk, matcher, reverse=True)
gmodules = {k for k, v in gmodules.items() if v <= max_layer_id}
freeze_batch_norm_2d(self.trunk, gmodules)
@torch.jit.ignore
def set_grad_checkpointing(self, enable=True):
try:
self.trunk.set_grad_checkpointing(enable)
except Exception as e:
logging.warning('grad checkpointing not supported for this timm image tower, continuing without...')
def forward(self, x):
x = self.trunk(x)
x = self.head(x)
return x
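# Illustrative sketch (not part of the original file): wrapping a timm backbone as a CLIP
# vision tower; assumes `timm` is installed and that 'resnet50' can be created locally.
def _timm_model_usage_sketch():
    tower = TimmModel('resnet50', embed_dim=512, image_size=224, pool='avg', proj='linear')
    images = torch.randn(2, 3, 224, 224)
    return tower(images).shape    # torch.Size([2, 512])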
|
open_clip-main
|
src/open_clip/timm_model.py
|
import tensorflow as tf
import numpy as np
import pandas as pd
from pyfaidx import Fasta
from functools import partial
from random import randrange
# efficient way for one hot encoding DNA sequence from string
# modified from https://gist.github.com/hannes-brt/54ca5d4094b3d96237fa2e820c0945dd
embed = np.zeros([128, 4], np.float32) # indexed by ASCII code; 128 rows so lowercase a/c/g/t (97-116) stay in range
embed[ord('A')] = np.array([1, 0, 0, 0])
embed[ord('C')] = np.array([0, 1, 0, 0])
embed[ord('G')] = np.array([0, 0, 1, 0])
embed[ord('T')] = np.array([0, 0, 0, 1])
embed[ord('a')] = np.array([1, 0, 0, 0])
embed[ord('c')] = np.array([0, 1, 0, 0])
embed[ord('g')] = np.array([0, 0, 1, 0])
embed[ord('t')] = np.array([0, 0, 0, 1])
embed[ord('.')] = np.array([.25, .25, .25, .25])
embedding_table = tf.convert_to_tensor(embed)
def one_hot_encode_seq(dna_input, embed, name = "encode_seq"):
with tf.name_scope(name):
b = bytearray()
b.extend(map(ord, str(dna_input)))
t = tf.convert_to_tensor(b)
t = tf.cast(t, tf.int32)
encoded_dna = tf.nn.embedding_lookup(embedding_table, t)
return encoded_dna
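# Worked example (sketch, not part of the original file): 'ACGT.' encodes to a (5, 4)
# tensor where '.' maps to the uniform [.25, .25, .25, .25] row used for padding.
def _one_hot_encode_seq_example():
    encoded = one_hot_encode_seq('ACGT.', embed)
    return encoded.shape    # TensorShape([5, 4])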
# fetching longer context based on fasta file and pyfaidx
def get_datum(
ind,
fasta_ref,
bed_df,
context_length = None,
rand_shift_range = None
):
    chrname, start, end, t = bed_df.iloc[ind].tolist()
interval_length = end - start
chromosome = fasta_ref[chrname]
chromosome_length = len(chromosome)
if rand_shift_range is not None:
min_shift, max_shift = rand_shift_range
adj_min_shift = max(start + min_shift, 0) - start
adj_max_shift = min(end + max_shift, chromosome_length) - end
left_padding = adj_min_shift - min_shift
right_padding = max_shift - adj_max_shift
start += adj_min_shift
end += adj_max_shift
if context_length is None or context_length <= interval_length:
seq = chromosome[start:end]
return one_hot_encode_seq(seq, embed)
left_padding = right_padding = 0
extra_seq = context_length - interval_length
extra_left_seq = extra_seq // 2
extra_right_seq = extra_seq - extra_left_seq
start -= extra_left_seq
end += extra_right_seq
if start < 0:
left_padding = -start
start = 0
if end > chromosome_length:
right_padding = end - chromosome_length
end = chromosome_length
seq = ('.' * left_padding) + str(chromosome[start:end]) + ('.' * right_padding)
return one_hot_encode_seq(seq, embed)
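# Worked example (sketch, not part of the original file): for a typical 131,072 bp Basenji2
# interval extended to a 196,608 bp context, the extra 65,536 bp is split evenly,
# 32,768 bp on each side (padded with '.' wherever the chromosome boundary is hit).
def _context_padding_example(interval_length = 131_072, context_length = 196_608):
    extra_seq = context_length - interval_length
    extra_left_seq = extra_seq // 2
    extra_right_seq = extra_seq - extra_left_seq
    return extra_left_seq, extra_right_seq    # (32768, 32768)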
def get_dna_sample(
bed_file,
fasta_file,
filter_type = None,
context_length = None,
rand_shift_range = (-2, 2)
):
df = pd.read_csv(bed_file, sep = '\t', header = None)
if filter_type is not None:
df = df[df[3] == filter_type]
fasta = Fasta(fasta_file, sequence_always_upper = True)
yield_data_fn = partial(get_datum, fasta_ref = fasta, bed_df = df, context_length = context_length, rand_shift_range = rand_shift_range)
def inner():
for ind in range(len(df)):
yield yield_data_fn(ind)
return inner
# main function
if __name__ == '__main__':
generator_fn = get_dna_sample(
bed_file = './human-sequences.bed',
fasta_file = './hg38.ml.fa',
filter_type = 'valid',
context_length = 196_608
)
dataset = tf.data.Dataset.from_generator(generator_fn, tf.float32)
print(next(iter(dataset)).shape)
|
enformer-tensorflow-sonnet-training-script-main
|
sequence.py
|
import os
import json
import functools
from itertools import islice
from functools import partial
import tensorflow as tf
from sequence import get_dna_sample # get_dna_sample is defined in sequence.py at the repo root
# old get_dataset functions, but only returning labels to zip with the new longer sequences
def organism_path(organism):
return os.path.join(f'gs://basenji_barnyard/data', organism)
def get_dataset(organism, subset, num_threads=8):
metadata = get_metadata(organism)
files = tfrecord_files(organism, subset)
dataset = tf.data.TFRecordDataset(files,
compression_type='ZLIB',
num_parallel_reads=None)
dataset = dataset.map(functools.partial(deserialize, metadata=metadata),
num_parallel_calls=num_threads)
return dataset
def get_metadata(organism):
path = os.path.join(organism_path(organism), 'statistics.json')
with tf.io.gfile.GFile(path, 'r') as f:
return json.load(f)
def tfrecord_files(organism, subset):
return sorted(tf.io.gfile.glob(os.path.join(
organism_path(organism), 'tfrecords', f'{subset}-*.tfr'
)), key=lambda x: int(x.split('-')[-1].split('.')[0]))
def deserialize(serialized_example, metadata):
feature_map = {
'sequence': tf.io.FixedLenFeature([], tf.string),
'target': tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_example(serialized_example, feature_map)
target = tf.io.decode_raw(example['target'], tf.float16)
target = tf.reshape(target, (metadata['target_length'], metadata['num_targets']))
target = tf.cast(target, tf.float32)
return target
# tfrecord functions
def chunk(it, size):
it = iter(it)
return iter(lambda: tuple(islice(it, size)), ())
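# Worked example (sketch, not part of the original file): chunk(range(10), 4) yields
# (0, 1, 2, 3), (4, 5, 6, 7), (8, 9) - the final chunk may be smaller than `size`.
def _chunk_example():
    return list(chunk(range(10), 4))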
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def parse_single_example(seq, target):
seq = seq.numpy()
target = target.numpy()
data = {
'seq' : _float_feature(seq.flatten()),
'target' : _float_feature(target.flatten()),
}
out = tf.train.Example(features=tf.train.Features(feature=data))
return out
NUM_TRACKS_CONFIG = dict(human = 5313, mouse = 1643)
def map_seq_target(
element,
seq_len,
species, # 'human' or 'mouse'
shifts = None
):
assert species in NUM_TRACKS_CONFIG, f'{species} not found in config'
num_tracks = NUM_TRACKS_CONFIG[species]
num_shifts = 0 if shifts is None else len(list(range(shifts[0], shifts[1] + 1)))
data = {
'seq':tf.io.FixedLenFeature([(seq_len + num_shifts) * 4], tf.float32),
'target':tf.io.FixedLenFeature([896 * num_tracks], tf.float32),
}
content = tf.io.parse_single_example(element, data)
return content
def create_tfrecords(ds, path = './', chunk_size = 256):
for ind, batch in enumerate(chunk(iter(ds), chunk_size)):
writer = tf.io.TFRecordWriter(f'{path}{ind}.tfrecord', 'ZLIB')
for seq, target in batch:
features = parse_single_example(seq, target)
writer.write(features.SerializeToString())
writer.close()
if __name__ == '__main__':
# writing example
generator_fn = get_dna_sample(
bed_file = './human-sequences.bed',
fasta_file = './hg38.ml.fa',
filter_type = 'train',
context_length = 196_608
)
seq_ds = tf.data.Dataset.from_generator(generator_fn, tf.float32)
label_ds = get_dataset('human', 'train')
zipped_ds = tf.data.Dataset.zip((seq_ds, label_ds))
create_tfrecords(zipped_ds, 'gs://enformer-new-data-path/')
# reading
dataset = tf.data.TFRecordDataset(['./0.tfrecord', './1.tfrecord'], compression_type = 'ZLIB')
map_element_fn = partial(map_seq_target, seq_len = 196608, species = 'human', shifts = (-2, 2))
dataset = dataset.map(map_element_fn)
|
enformer-tensorflow-sonnet-training-script-main
|
create_tfrecords.py
|
# Copyright 2021 Calico LLC
# Copyright 2021 DeepMind Technologies Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import os
import glob
import json
import functools
import inspect
from pathlib import Path
import tensorflow as tf
from tqdm import tqdm
import numpy as np
import pandas as pd
from typing import Any, Callable, Dict, Optional, Text, Union, Iterable, List, Sequence
import sonnet as snt
from sonnet.src import base, once, types, utils
from sonnet.src.optimizers import optimizer_utils
import tensorflow as tf
import wandb
# attribute
# Enformer tensorflow code was directly taken and modified for distributed training
# https://github.com/deepmind/deepmind-research/tree/master/enformer
# Genetic augmentation code was taken from
# https://github.com/calico/basenji/blob/84c681a4b02f592a3de90799cee7f17d96f81ef8/basenji/archive/augmentation.py
# constants
NUM_CORES_ENFORCE = 64 # using v3-64
SEQUENCE_LENGTH = 196_608
TARGET_LENGTH = 896
BIN_SIZE = 128
# assert TPUs
tpu = tf.distribute.cluster_resolver.TPUClusterResolver(tpu = 'enformer')
tf.config.experimental_connect_to_cluster(tpu)
tf.tpu.experimental.initialize_tpu_system(tpu)
tpu_strategy = snt.distribute.TpuReplicator(tpu)
num_cores = tpu_strategy.num_replicas_in_sync
assert num_cores == NUM_CORES_ENFORCE, f'must be training on {num_cores} cores'
# optimizer
def adam_update(g, alpha, beta_1, beta_2, epsilon, t, m, v):
"""Implements 'Algorithm 1' from :cite:`kingma2014adam`."""
m = beta_1 * m + (1. - beta_1) * g # Biased first moment estimate.
v = beta_2 * v + (1. - beta_2) * g * g # Biased second raw moment estimate.
m_hat = m / (1. - tf.pow(beta_1, t)) # Bias corrected 1st moment estimate.
v_hat = v / (1. - tf.pow(beta_2, t)) # Bias corrected 2nd moment estimate.
update = alpha * m_hat / (tf.sqrt(v_hat) + epsilon)
return update, m, v
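# Worked example (sketch, not part of the original script): on the first step with a unit
# gradient, the bias-corrected moments give m_hat / sqrt(v_hat) ~= 1, so the parameter
# moves by roughly the learning rate alpha.
def _adam_update_example():
  update, m, v = adam_update(
      g=tf.constant(1.0), alpha=1e-3, beta_1=0.9, beta_2=0.999,
      epsilon=1e-8, t=tf.constant(1.0), m=tf.constant(0.0), v=tf.constant(0.0))
  return update  # ~0.001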
# https://github.com/deepmind/sonnet/blob/v2/sonnet/src/optimizers/adam.py
# modified for Adam with decoupled weight decay
class Adam(base.Optimizer):
def __init__(self,
learning_rate: Union[types.FloatLike, tf.Variable] = 0.001,
beta1: Union[types.FloatLike, tf.Variable] = 0.9,
beta2: Union[types.FloatLike, tf.Variable] = 0.999,
epsilon: Union[types.FloatLike, tf.Variable] = 1e-8,
weight_decay: Union[types.FloatLike, tf.Variable] = 1e-4,
name: Optional[str] = None):
super().__init__(name=name)
self.learning_rate = learning_rate
self.beta1 = beta1
self.beta2 = beta2
self.epsilon = epsilon
self.weight_decay = weight_decay
# TODO(petebu): Consider allowing the user to pass in a step.
self.step = tf.Variable(0, trainable=False, name="t", dtype=tf.int64)
self.m = []
self.v = []
@once.once
def _initialize(self, parameters: Sequence[tf.Variable]):
"""First and second order moments are initialized to zero."""
zero_var = lambda p: utils.variable_like(p, trainable=False)
with tf.name_scope("m"):
self.m.extend(zero_var(p) for p in parameters)
with tf.name_scope("v"):
self.v.extend(zero_var(p) for p in parameters)
def apply(self, updates: Sequence[types.ParameterUpdate],
parameters: Sequence[tf.Variable]):
optimizer_utils.check_distribution_strategy()
optimizer_utils.check_updates_parameters(updates, parameters)
self._initialize(parameters)
self.step.assign_add(1)
for update, param, m_var, v_var in zip(updates, parameters, self.m, self.v):
if update is None:
continue
optimizer_utils.check_same_dtype(update, param)
learning_rate = tf.cast(self.learning_rate, update.dtype)
beta_1 = tf.cast(self.beta1, update.dtype)
beta_2 = tf.cast(self.beta2, update.dtype)
epsilon = tf.cast(self.epsilon, update.dtype)
step = tf.cast(self.step, update.dtype)
update, m, v = adam_update(
g=update, alpha=learning_rate, beta_1=beta_1, beta_2=beta_2,
epsilon=epsilon, t=step, m=m_var, v=v_var)
# decoupled weight decay
# hack for now to exclude biases
weight_decay_update = (param * self.weight_decay * learning_rate) if 'w:0' in param.name else tf.zeros_like(param)
param.assign_sub(update)
param.assign_sub(weight_decay_update)
m_var.assign(m)
v_var.assign(v)
# classes
class MultiheadAttention(snt.Module):
"""Multi-head attention."""
def __init__(self,
value_size: int,
key_size: int,
num_heads: int,
scaling: bool = True,
attention_dropout_rate: float = 0.1,
relative_positions: bool = False,
relative_position_symmetric: bool = False,
relative_position_functions: Optional[List[str]] = None,
num_relative_position_features: Optional[int] = None,
positional_dropout_rate: float = 0.1,
zero_initialize: bool = True,
initializer: Optional[snt.initializers.Initializer] = None,
name: str = None):
"""Creates a MultiheadAttention module.
    Args:
value_size: The size of each value embedding per head.
key_size: The size of each key and query embedding per head.
num_heads: The number of independent queries per timestep.
scaling: Whether to scale the attention logits.
attention_dropout_rate: Dropout rate for attention logits.
relative_positions: Whether to use TransformerXL style relative attention.
relative_position_symmetric: If True, the symmetric version of basis
        functions will be used. If False, both the symmetric and asymmetric
        versions will be used.
relative_position_functions: List of function names used for relative
positional biases.
num_relative_position_features: Number of relative positional features
to compute. If None, `value_size * num_heads` is used.
positional_dropout_rate: Dropout rate for the positional encodings if
relative positions are used.
zero_initialize: if True, the final linear layer will be 0 initialized.
initializer: Initializer for the projection layers. If unspecified,
VarianceScaling is used with scale = 2.0.
name: Name of module.
"""
super().__init__(name=name)
self._value_size = value_size
self._key_size = key_size
self._num_heads = num_heads
self._attention_dropout_rate = attention_dropout_rate
self._scaling = scaling
self._relative_positions = relative_positions
self._relative_position_symmetric = relative_position_symmetric
self._relative_position_functions = relative_position_functions
if num_relative_position_features is None:
# num_relative_position_features needs to be divisible by the number of
# relative positional functions *2 (for symmetric & asymmetric version).
divisible_by = 2 * len(self._relative_position_functions)
self._num_relative_position_features = (
(self._value_size // divisible_by) * divisible_by)
else:
self._num_relative_position_features = num_relative_position_features
self._positional_dropout_rate = positional_dropout_rate
self._initializer = initializer
if self._initializer is None:
self._initializer = snt.initializers.VarianceScaling(scale=2.0)
key_proj_size = self._key_size * self._num_heads
embedding_size = self._value_size * self._num_heads
self._q_layer = snt.Linear(
key_proj_size,
name='q_layer',
with_bias=False,
w_init=self._initializer)
self._k_layer = snt.Linear(
key_proj_size,
name='k_layer',
with_bias=False,
w_init=self._initializer)
self._v_layer = snt.Linear(
embedding_size,
name='v_layer',
with_bias=False,
w_init=self._initializer)
w_init = snt.initializers.Constant(1e-8) if zero_initialize else self._initializer
self._embedding_layer = snt.Linear(
embedding_size,
name='embedding_layer',
w_init=w_init,
        b_init=snt.initializers.Constant(1e-8))
# Create additional layers if using relative positions.
if self._relative_positions:
self._r_k_layer = snt.Linear(
key_proj_size,
name='r_k_layer',
with_bias=False,
w_init=self._initializer)
self._r_w_bias = tf.Variable(
self._initializer([1, self._num_heads, 1, self._key_size],
dtype=tf.float32),
name='r_w_bias')
self._r_r_bias = tf.Variable(
self._initializer([1, self._num_heads, 1, self._key_size],
dtype=tf.float32),
name='r_r_bias')
def _multihead_output(self, linear, inputs):
"""Applies a standard linear to inputs and returns multihead output."""
output = snt.BatchApply(linear)(inputs) # [B, T, H * KV]
num_kv_channels = output.shape[-1] // self._num_heads
# Split H * Channels into separate axes.
output = snt.reshape(output,
output_shape=[-1, self._num_heads, num_kv_channels])
# [B, T, H, KV] -> [B, H, T, KV]
return tf.transpose(output, [0, 2, 1, 3])
def __call__(self,
inputs,
is_training=False):
# Initialise the projection layers.
embedding_size = self._value_size * self._num_heads
seq_len = inputs.shape[1]
# Compute q, k and v as multi-headed projections of the inputs.
q = self._multihead_output(self._q_layer, inputs) # [B, H, T, K]
k = self._multihead_output(self._k_layer, inputs) # [B, H, T, K]
v = self._multihead_output(self._v_layer, inputs) # [B, H, T, V]
# Scale the query by the square-root of key size.
if self._scaling:
q *= self._key_size**-0.5
if self._relative_positions:
# For relative positions, we project positions to form relative keys.
distances = tf.range(-seq_len + 1, seq_len, dtype=tf.float32)[tf.newaxis]
positional_encodings = positional_features_all(
positions=distances,
feature_size=self._num_relative_position_features,
seq_length=seq_len,
feature_functions=self._relative_position_functions,
symmetric=self._relative_position_symmetric)
# [1, 2T-1, Cr]
if is_training:
positional_encodings = tf.nn.dropout(
positional_encodings, rate=self._positional_dropout_rate)
# [1, H, 2T-1, K]
r_k = self._multihead_output(self._r_k_layer, positional_encodings)
# Add shifted relative logits to content logits.
# [B, H, T', T]
content_logits = tf.matmul(q + self._r_w_bias, k, transpose_b=True)
# [B, H, T', 2T-1]
relative_logits = tf.matmul(
q + self._r_r_bias, r_k, transpose_b=True)
# [B, H, T', T]
relative_logits = relative_shift(relative_logits)
logits = content_logits + relative_logits
else:
# [B, H, T', T]
logits = tf.matmul(q, k, transpose_b=True)
weights = tf.nn.softmax(logits)
# Dropout on the attention weights.
if is_training:
weights = tf.nn.dropout(weights, rate=self._attention_dropout_rate)
# Transpose and reshape the output.
output = tf.matmul(weights, v) # [B, H, T', V]
output_transpose = tf.transpose(output, [0, 2, 1, 3]) # [B, T', H, V]
# Final linear layer.
attended_inputs = snt.reshape(
output_transpose, output_shape=[embedding_size], preserve_dims=2)
output = self._embedding_layer(attended_inputs)
return output
def relative_shift(x):
"""Shift the relative logits like in TransformerXL."""
# We prepend zeros on the final timescale dimension.
to_pad = tf.zeros_like(x[..., :1])
x = tf.concat([to_pad, x], -1)
_, num_heads, t1, t2 = x.shape
x = tf.reshape(x, [-1, num_heads, t2, t1])
x = tf.slice(x, [0, 0, 1, 0], [-1, -1, -1, -1])
x = tf.reshape(x, [-1, num_heads, t1, t2 - 1])
x = tf.slice(x, [0, 0, 0, 0], [-1, -1, -1, (t2 + 1) // 2])
return x
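# Illustrative sketch (not part of the original script): relative_shift converts
# [B, H, T, 2T-1] relative-position logits into [B, H, T, T] logits aligned per query
# position, as in TransformerXL. Shapes below are assumptions for demonstration.
def _relative_shift_example():
  x = tf.random.normal([1, 2, 4, 7])   # T = 4, so 2T - 1 = 7
  return relative_shift(x).shape       # TensorShape([1, 2, 4, 4])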
# Available feature functions:
def get_positional_feature_function(name):
"""Returns positional feature functions."""
available = {
'positional_features_exponential': positional_features_exponential,
'positional_features_central_mask': positional_features_central_mask,
'positional_features_gamma': positional_features_gamma
}
if name not in available:
raise ValueError(f'Function {name} not available in {available.keys()}')
return available[name]
def positional_features_all(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None,
feature_functions: Optional[List[str]] = None,
symmetric=False):
"""Compute relative positional encodings/features.
Each positional feature function will compute/provide the same fraction of
features, making up the total of feature_size.
Args:
positions: Tensor of relative positions of arbitrary shape.
feature_size: Total number of basis functions.
seq_length: Sequence length denoting the characteristic length that
the individual positional features can use. This is required since the
parametrization of the input features should be independent of `positions`
while it could still require to use the total number of features.
bin_size: Bin sized used to partition the sequence. This can be used to
compute features on the absolute scale relative to the genome.
feature_functions: List of different feature functions to use. Each function
will take as argument: positions, sequence length and number of features
to compute.
symmetric: If True, the resulting features will be symmetric across the
relative position of 0 (i.e. only absolute value of positions will
matter). If false, then both the symmetric and asymmetric version
(symmetric multiplied by sign(positions)) of the features will be used.
Returns:
Tensor of shape: `positions.shape + (feature_size,)`.
"""
if feature_functions is None:
feature_functions = ['positional_features_exponential',
'positional_features_central_mask',
'positional_features_gamma']
num_components = len(feature_functions) # 1 per each basis function
if not symmetric:
num_components = 2 * num_components
# For now, we do not allow odd sized embeddings.
if feature_size % num_components != 0:
raise ValueError(
f'feature_size has to be divisible by {num_components}')
feature_functions = [get_positional_feature_function(f)
for f in feature_functions]
num_basis_per_class = feature_size // num_components
embeddings = tf.concat([f(tf.abs(positions), num_basis_per_class,
seq_length, bin_size)
for f in feature_functions],
axis=-1)
if not symmetric:
embeddings = tf.concat([embeddings,
tf.sign(positions)[..., tf.newaxis] * embeddings],
axis=-1)
tf.TensorShape(embeddings.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return embeddings
def _prepend_dims(x, num_dims):
return tf.reshape(x, shape=[1] * num_dims + x.shape)
def positional_features_exponential(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None,
min_half_life: Optional[float] = 3.0):
"""Create exponentially decaying positional weights.
Args:
positions: Position tensor (arbitrary shape).
feature_size: Number of basis functions to use.
seq_length: Sequence length.
bin_size: (unused). See `positional_features_all`.
min_half_life: Smallest exponential half life in the grid of half lives.
Returns:
A Tensor with shape [2 * seq_length - 1, feature_size].
"""
del bin_size # Unused.
if seq_length is None:
seq_length = tf.reduce_max(tf.abs(positions)) + 1
# Grid of half lifes from [3, seq_length / 2] with feature_size
# distributed on the log scale.
seq_length = tf.cast(seq_length, dtype=tf.float32)
max_range = tf.math.log(seq_length) / tf.math.log(2.0)
half_life = tf.pow(2.0, tf.linspace(min_half_life, max_range, feature_size))
half_life = _prepend_dims(half_life, positions.shape.rank)
positions = tf.abs(positions)
outputs = tf.exp(-tf.math.log(2.0) / half_life * positions[..., tf.newaxis])
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
def positional_features_central_mask(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None):
"""Positional features using a central mask (allow only central features)."""
del seq_length # Unused.
del bin_size # Unused.
center_widths = tf.pow(2.0, tf.range(1, feature_size + 1, dtype=tf.float32))
center_widths = center_widths - 1
center_widths = _prepend_dims(center_widths, positions.shape.rank)
outputs = tf.cast(center_widths > tf.abs(positions)[..., tf.newaxis],
tf.float32)
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
def gamma_pdf(x, concentration, rate):
"""Gamma probability distribution function: p(x|concentration, rate)."""
log_unnormalized_prob = tf.math.xlogy(concentration - 1., x) - rate * x
log_normalization = (tf.math.lgamma(concentration) -
concentration * tf.math.log(rate))
return tf.exp(log_unnormalized_prob - log_normalization)
def positional_features_gamma(positions: tf.Tensor,
feature_size: int,
seq_length: Optional[int] = None,
bin_size: Optional[int] = None,
stddev=None,
start_mean=None):
"""Positional features computed using the gamma distributions."""
del bin_size # Unused.
if seq_length is None:
seq_length = tf.reduce_max(tf.abs(positions)) + 1
if stddev is None:
stddev = seq_length / (2 * feature_size)
if start_mean is None:
start_mean = seq_length / feature_size
mean = tf.linspace(start_mean, seq_length, num=feature_size)
mean = _prepend_dims(mean, positions.shape.rank)
concentration = (mean / stddev)**2
rate = mean / stddev**2
probabilities = gamma_pdf(
tf.abs(tf.cast(positions, dtype=tf.float32))[..., tf.newaxis],
concentration, rate)
probabilities += 1e-8 # To ensure numerical stability.
outputs = probabilities / tf.reduce_max(probabilities)
tf.TensorShape(outputs.shape).assert_is_compatible_with(
positions.shape + [feature_size])
return outputs
class Enformer(snt.Module):
"""Main model."""
def __init__(self,
channels: int = 1536,
num_transformer_layers: int = 11,
num_heads: int = 8,
pooling_type: str = 'attention',
use_convnext: bool = False,
name: str = 'enformer'):
"""Enformer model.
Args:
channels: Number of convolutional filters and the overall 'width' of the
model.
num_transformer_layers: Number of transformer layers.
num_heads: Number of attention heads.
      pooling_type: Which pooling function to use. Options: 'attention' or 'max'.
name: Name of sonnet module.
"""
super().__init__(name=name)
# pylint: disable=g-complex-comprehension,g-long-lambda,cell-var-from-loop
heads_channels = {'human': 5313, 'mouse': 1643}
dropout_rate = 0.4
assert channels % num_heads == 0, ('channels needs to be divisible '
f'by {num_heads}')
whole_attention_kwargs = {
'attention_dropout_rate': 0.05,
'initializer': None,
'key_size': 64,
'num_heads': num_heads,
'num_relative_position_features': channels // num_heads,
'positional_dropout_rate': 0.01,
'relative_position_functions': [
'positional_features_exponential',
'positional_features_central_mask',
'positional_features_gamma'
],
'relative_positions': True,
'scaling': True,
'value_size': channels // num_heads,
'zero_initialize': True
}
trunk_name_scope = tf.name_scope('trunk')
trunk_name_scope.__enter__()
from sonnet.src import moving_averages
# lambda is used in Sequential to construct the module under tf.name_scope.
def conv_block(filters, width=1, w_init=None, name='conv_block', **kwargs):
with tf.name_scope(name or "batch_norm"):
moving_mean = moving_averages.ExponentialMovingAverage(
0.9, name="moving_mean")
moving_variance = moving_averages.ExponentialMovingAverage(
0.9, name="moving_variance")
return Sequential(lambda: [
snt.distribute.CrossReplicaBatchNorm(create_scale=True,
create_offset=True,
moving_mean = moving_mean,
moving_variance = moving_variance,
scale_init=snt.initializers.Ones()),
gelu,
snt.Conv1D(filters, width, w_init=w_init, **kwargs)
], name=name)
def convnext_block(filters, width=1, mult = 4, ds_conv_kernel_size = 7, w_init=None, name='convnext_block', **kwargs):
return Sequential(lambda: [
ExpandDims(2),
snt.DepthwiseConv2D((ds_conv_kernel_size, 1), name ='convnext_ds_conv'),
Squeeze(2),
snt.LayerNorm(axis=-1, create_scale=True, create_offset=True),
snt.Linear(filters * mult, name='convnext_project_in'),
tf.nn.relu,
snt.Linear(filters, name='convnext_project_out')
], name=name)
conv_block_fn = convnext_block if use_convnext else conv_block
stem = Sequential(lambda: [
snt.Conv1D(channels // 2, 15),
Residual(conv_block(channels // 2, 1, name='pointwise_conv_block')),
pooling_module(pooling_type, pool_size=2),
], name='stem')
filter_list = exponential_linspace_int(start=channels // 2, end=channels,
num=6, divisible_by=128)
conv_tower = Sequential(lambda: [
Sequential(lambda: [
conv_block(num_filters, 5),
Residual(conv_block(num_filters, 1, name='pointwise_conv_block')),
pooling_module(pooling_type, pool_size=2),
],
name=f'conv_tower_block_{i}')
for i, num_filters in enumerate(filter_list)], name='conv_tower')
# Transformer.
def transformer_mlp():
return Sequential(lambda: [
snt.LayerNorm(axis=-1, create_scale=True, create_offset=True),
snt.Linear(channels * 2, name = 'project_in'),
snt.Dropout(dropout_rate),
tf.nn.relu,
snt.Linear(channels, name = 'project_out'),
snt.Dropout(dropout_rate)], name='mlp')
transformer = Sequential(lambda: [
Sequential(lambda: [
Residual(Sequential(lambda: [
snt.LayerNorm(axis=-1,
create_scale=True, create_offset=True,
scale_init=snt.initializers.Ones()),
MultiheadAttention(**whole_attention_kwargs,
name=f'attention_{i}'),
snt.Dropout(dropout_rate),
], name='mha')),
Residual(transformer_mlp())], name=f'transformer_block_{i}')
for i in range(num_transformer_layers)], name='transformer')
crop_final = TargetLengthCrop1D(TARGET_LENGTH, name='target_input')
final_pointwise = Sequential(lambda: [
conv_block(channels * 2, 1),
snt.Dropout(dropout_rate / 8),
gelu], name='final_pointwise')
self._trunk = Sequential([stem,
conv_tower,
transformer,
crop_final,
final_pointwise],
name='trunk')
trunk_name_scope.__exit__(None, None, None)
with tf.name_scope('heads'):
self._heads = {
head: Sequential(
lambda: [snt.Linear(num_channels), tf.nn.softplus],
name=f'head_{head}')
for head, num_channels in heads_channels.items()
}
# pylint: enable=g-complex-comprehension,g-long-lambda,cell-var-from-loop
@property
def trunk(self):
return self._trunk
@property
def heads(self):
return self._heads
def __call__(self, inputs: tf.Tensor,
is_training: bool) -> Dict[str, tf.Tensor]:
trunk_embedding = self.trunk(inputs, is_training=is_training)
return {
head: head_module(trunk_embedding, is_training=is_training)
for head, head_module in self.heads.items()
}
@tf.function(input_signature=[
tf.TensorSpec([None, SEQUENCE_LENGTH, 4], tf.float32)])
def predict_on_batch(self, x):
"""Method for SavedModel."""
return self(x, is_training=False)
class TargetLengthCrop1D(snt.Module):
"""Crop sequence to match the desired target length."""
def __init__(self, target_length: int, name='target_length_crop'):
super().__init__(name=name)
self._target_length = target_length
def __call__(self, inputs):
trim = (inputs.shape[-2] - self._target_length) // 2
    if trim < 0:
      raise ValueError('inputs shorter than target length')
    if trim == 0:
      return inputs
    return inputs[..., trim:-trim, :]
class ExpandDims(snt.Module):
def __init__(self, dim: int, name='expand_dims'):
super().__init__(name=name)
self._dim = dim
def __call__(self, inputs):
return tf.expand_dims(inputs, self._dim)
class Squeeze(snt.Module):
def __init__(self, dim: int, name='squeeze'):
super().__init__(name=name)
self._dim = dim
def __call__(self, inputs):
return tf.squeeze(inputs, self._dim)
class Sequential(snt.Module):
"""snt.Sequential automatically passing is_training where it exists."""
def __init__(self,
layers: Optional[Union[Callable[[], Iterable[snt.Module]],
Iterable[Callable[..., Any]]]] = None,
name: Optional[Text] = None):
super().__init__(name=name)
if layers is None:
self._layers = []
else:
# layers wrapped in a lambda function to have a common namespace.
if hasattr(layers, '__call__'):
with tf.name_scope(name):
layers = layers()
self._layers = [layer for layer in layers if layer is not None]
def __call__(self, inputs: tf.Tensor, is_training: bool, **kwargs):
outputs = inputs
for _, mod in enumerate(self._layers):
if accepts_is_training(mod):
outputs = mod(outputs, is_training=is_training, **kwargs)
else:
outputs = mod(outputs, **kwargs)
return outputs
def pooling_module(kind, pool_size):
"""Pooling module wrapper."""
if kind == 'attention':
return SoftmaxPooling1D(pool_size=pool_size, per_channel=True,
w_init_scale=2.0)
elif kind == 'max':
return tf.keras.layers.MaxPool1D(pool_size=pool_size, padding='same')
else:
raise ValueError(f'Invalid pooling kind: {kind}.')
class SoftmaxPooling1D(snt.Module):
"""Pooling operation with optional weights."""
def __init__(self,
pool_size: int = 2,
per_channel: bool = False,
w_init_scale: float = 0.0,
name: str = 'softmax_pooling'):
"""Softmax pooling.
Args:
pool_size: Pooling size, same as in Max/AvgPooling.
per_channel: If True, the logits/softmax weights will be computed for
each channel separately. If False, same weights will be used across all
channels.
      w_init_scale: When 0.0, this is equivalent to avg pooling; when ~2.0 and
        `per_channel=False`, it is equivalent to max pooling.
name: Module name.
"""
super().__init__(name=name)
self._pool_size = pool_size
self._per_channel = per_channel
self._w_init_scale = w_init_scale
self._logit_linear = None
@snt.once
def _initialize(self, num_features):
self._logit_linear = snt.Linear(
output_size=num_features if self._per_channel else 1,
with_bias=False, # Softmax is agnostic to shifts.
w_init=snt.initializers.Identity(self._w_init_scale))
def __call__(self, inputs):
_, length, num_features = inputs.shape
self._initialize(num_features)
inputs = tf.reshape(
inputs,
(-1, length // self._pool_size, self._pool_size, num_features))
return tf.reduce_sum(
inputs * tf.nn.softmax(self._logit_linear(inputs), axis=-2),
axis=-2)
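# Illustrative sketch (not part of the original script): with w_init_scale=0.0 the softmax
# weights are uniform, so this reduces to average pooling over each window of `pool_size`.
def _softmax_pooling_example():
  pool = SoftmaxPooling1D(pool_size=2, per_channel=True, w_init_scale=0.0)
  x = tf.random.normal([1, 8, 4])
  return pool(x).shape   # TensorShape([1, 4, 4])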
class Residual(snt.Module):
"""Residual block."""
def __init__(self, module: snt.Module, name='residual'):
super().__init__(name=name)
self._module = module
def __call__(self, inputs: tf.Tensor, is_training: bool, *args,
**kwargs) -> tf.Tensor:
return inputs + self._module(inputs, is_training, *args, **kwargs)
def gelu(x: tf.Tensor) -> tf.Tensor:
"""Applies the Gaussian error linear unit (GELU) activation function.
  Using the approximation in section 2 of the original paper:
https://arxiv.org/abs/1606.08415
Args:
x: Input tensor to apply gelu activation.
Returns:
Tensor with gelu activation applied to it.
"""
return tf.nn.sigmoid(1.702 * x) * x
def one_hot_encode(sequence: str,
alphabet: str = 'ACGT',
neutral_alphabet: str = 'N',
neutral_value: Any = 0,
dtype=np.float32) -> np.ndarray:
"""One-hot encode sequence."""
def to_uint8(string):
return np.frombuffer(string.encode('ascii'), dtype=np.uint8)
hash_table = np.zeros((np.iinfo(np.uint8).max, len(alphabet)), dtype=dtype)
hash_table[to_uint8(alphabet)] = np.eye(len(alphabet), dtype=dtype)
hash_table[to_uint8(neutral_alphabet)] = neutral_value
hash_table = hash_table.astype(dtype)
return hash_table[to_uint8(sequence)]
def exponential_linspace_int(start, end, num, divisible_by=1):
"""Exponentially increasing values of integers."""
def _round(x):
return int(np.round(x / divisible_by) * divisible_by)
base = np.exp(np.log(end / start) / (num - 1))
return [_round(start * base**i) for i in range(num)]
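# Worked example (sketch, not part of the original script): the conv tower above calls this
# with start=channels // 2, end=channels, num=6, divisible_by=128; for channels=1536 it
# yields [768, 896, 1024, 1152, 1280, 1536].
def _exponential_linspace_int_example():
  return exponential_linspace_int(start=768, end=1536, num=6, divisible_by=128)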
def accepts_is_training(module):
return 'is_training' in list(inspect.signature(module.__call__).parameters)
# data related functions
# @title `get_targets(organism)`
def get_targets(organism):
targets_txt = f'https://raw.githubusercontent.com/calico/basenji/master/manuscripts/cross2020/targets_{organism}.txt'
return pd.read_csv(targets_txt, sep='\t')
# @title `get_dataset(organism, subset, num_threads=8)`
def reverse_complement_transform(seq):
"""Reverse complement of batched onehot seq and corresponding label and na."""
# reverse complement sequence
seq_rc = tf.gather(seq, [3, 2, 1, 0], axis=-1)
seq_rc = tf.reverse(seq_rc, axis=[0])
return seq_rc
def shift_sequence(seq, shift_amount, pad_value=0.25):
"""Shift a sequence left or right by shift_amount.
Args:
    seq: a [sequence_length, sequence_depth] sequence to shift
shift_amount: the signed amount to shift (tf.int32 or int)
pad_value: value to fill the padding (primitive or scalar tf.Tensor)
"""
input_shape = seq.shape
pad = pad_value * tf.ones_like(seq[0:tf.abs(shift_amount), :])
def _shift_right(_seq):
sliced_seq = _seq[:-shift_amount, :]
return tf.concat([pad, sliced_seq], axis=0)
def _shift_left(_seq):
sliced_seq = _seq[-shift_amount:, :]
return tf.concat([sliced_seq, pad], axis=0)
output = tf.cond(
tf.greater(shift_amount, 0), lambda: _shift_right(seq),
lambda: _shift_left(seq))
output.set_shape(input_shape)
return output
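# Illustrative sketch (not part of the original script; the underscore-prefixed helper is
# hypothetical): as used by the augmentation below, the function is mapped per example, so
# seq has shape (length, depth). A positive shift pads the front with pad_value and drops the
# tail, keeping the length fixed.
def _demo_shift_sequence():
    seq = tf.ones((8, 4))
    shifted = shift_sequence(seq, tf.constant(2))
    return shifted.shape  # (8, 4); the first two rows are filled with 0.25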
def augment_stochastic_shifts(seq, augment_shifts):
"""Apply a stochastic shift augmentation.
Args:
seq: input sequence of size [batch_size, length, depth]
augment_shifts: list of int offsets to sample from
Returns:
shifted and padded sequence of size [batch_size, length, depth]
"""
shift_index = tf.random.uniform(shape=[], minval=0,
maxval=len(augment_shifts), dtype=tf.int64)
shift_value = tf.gather(tf.constant(augment_shifts), shift_index)
seq = tf.cond(tf.not_equal(shift_value, 0),
lambda: shift_sequence(seq, shift_value),
lambda: seq)
return seq
def augment_stochastic_shifts_map_fn(datum):
augment_shifts = [-2, -1, 0, 1, 2]
return dict(
sequence = augment_stochastic_shifts(datum['sequence'], augment_shifts),
target = datum['target']
)
def augment_stochastic_rc_map_fn(datum):
sequence, target = (datum['sequence'], datum['target'])
augment = tf.random.uniform(shape=[]) > 0.5
sequence, target = tf.cond(augment, lambda: (sequence[::-1, ::-1], target[::-1, :]),
lambda: (sequence, target))
return dict(sequence = sequence, target = target)
def organism_path(organism):
return os.path.join(f'gs://basenji_barnyard/data', organism)
def get_dataset(organism, subset, num_threads=8, shuffle=True, rotate = 0, augment = False):
metadata = get_metadata(organism)
files = tfrecord_files(organism, subset)
files = files[rotate:] + files[:rotate]
dataset = tf.data.TFRecordDataset(files,
compression_type='ZLIB',
num_parallel_reads=num_threads)
if shuffle:
dataset = dataset.repeat()
dataset = dataset.shuffle(5000, seed = 42)
dataset = dataset.map(functools.partial(deserialize, metadata=metadata),
num_parallel_calls=num_threads)
if augment:
dataset = dataset.map(augment_stochastic_shifts_map_fn, num_parallel_calls=num_threads)
dataset = dataset.map(augment_stochastic_rc_map_fn, num_parallel_calls=num_threads)
return dataset
def get_metadata(organism):
# Keys:
# num_targets, train_seqs, valid_seqs, test_seqs, seq_length,
# pool_width, crop_bp, target_length
path = os.path.join(organism_path(organism), 'statistics.json')
with tf.io.gfile.GFile(path, 'r') as f:
return json.load(f)
def tfrecord_files(organism, subset):
# Sort the values by int(*).
return sorted(tf.io.gfile.glob(os.path.join(
organism_path(organism), 'tfrecords', f'{subset}-*.tfr'
)), key=lambda x: int(x.split('-')[-1].split('.')[0]))
def deserialize(serialized_example, metadata):
"""Deserialize bytes stored in TFRecordFile."""
feature_map = {
'sequence': tf.io.FixedLenFeature([], tf.string),
'target': tf.io.FixedLenFeature([], tf.string),
}
example = tf.io.parse_example(serialized_example, feature_map)
sequence = tf.io.decode_raw(example['sequence'], tf.bool)
sequence = tf.reshape(sequence, (metadata['seq_length'], 4))
sequence = tf.cast(sequence, tf.float32)
target = tf.io.decode_raw(example['target'], tf.float16)
target = tf.reshape(target,
(metadata['target_length'], metadata['num_targets']))
target = tf.cast(target, tf.float32)
return {'sequence': sequence,
'target': target}
# new get_dataset, for sequences that are actually 196_608
NEW_TFRECORD_LOCATIONS = dict(
human = dict(
train = 'gs://enformer-human-train/',
valid = 'gs://enformer-human-valid/'
),
mouse = dict(
train = 'gs://enformer-mouse-train/',
valid = 'gs://enformer-mouse-valid/'
)
)
NUM_TRACKS_CONFIG = dict(human = 5313, mouse = 1643)
def new_dataset_map_seq_target(
element,
seq_len,
species, # 'human' or 'mouse'
target_length = 896,
shifts = None,
augment_rc = False
):
assert species in NUM_TRACKS_CONFIG, f'{species} not found in config'
num_tracks = NUM_TRACKS_CONFIG[species]
num_shifts = 0 if shifts is None else len(list(range(shifts[0], shifts[1] + 1)))
data = {
'seq': tf.io.FixedLenFeature([(seq_len + num_shifts) * 4], tf.float32),
'target': tf.io.FixedLenFeature([target_length * num_tracks], tf.float32),
}
content = tf.io.parse_single_example(element, data)
content['sequence'] = content.pop('seq')
content['sequence'] = tf.reshape(content['sequence'], (-1, 4))
content['target'] = tf.reshape(content['target'], (target_length, -1))
# take care of shift augmentation
shifts = tf.pad(tf.random.uniform(shape = [1], minval = 0, maxval = num_shifts, dtype = tf.int64), [[0, 1]])
content['sequence'] = tf.slice(content['sequence'], shifts, (seq_len, -1))
if augment_rc:
content = augment_stochastic_rc_map_fn(content)
content['sequence'].set_shape(tf.TensorShape([seq_len, 4]))
content['target'].set_shape(tf.TensorShape([target_length, num_tracks]))
return content
def get_dataset_new(
organism,
datatype,
shifts = (-2, 2),
augment_rc = False,
num_threads = 8
):
gcs_path = NEW_TFRECORD_LOCATIONS[organism][datatype]
files = sorted(tf.io.gfile.glob(f'{gcs_path}*.tfrecord'))
dataset = tf.data.TFRecordDataset(files, compression_type = 'ZLIB', num_parallel_reads = num_threads)
map_element_fn = partial(new_dataset_map_seq_target, seq_len = SEQUENCE_LENGTH, species = organism, shifts = shifts, augment_rc = augment_rc)
dataset = dataset.map(map_element_fn)
return dataset
# training related functions
def corr_coef(x, y, eps = 0):
x2 = tf.math.square(x)
y2 = tf.math.square(y)
xy = x * y
ex = tf.reduce_mean(x, axis = 1)
ey = tf.reduce_mean(y, axis = 1)
exy = tf.reduce_mean(xy, axis = 1)
ex2 = tf.reduce_mean(x2, axis = 1)
ey2 = tf.reduce_mean(y2, axis = 1)
r = (exy - ex * ey) / ((tf.math.sqrt(ex2 - tf.math.square(ex) + eps) * tf.math.sqrt(ey2 - tf.math.square(ey) + eps)) + eps)
return tf.reduce_mean(r, axis = -1)
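# Illustrative sketch (not part of the original script; the underscore-prefixed helper is
# hypothetical): corr_coef computes a Pearson correlation along the bin axis for every track,
# then averages across tracks, yielding one value per example in the batch.
def _demo_corr_coef():
    x = tf.random.normal((2, 896, 3))
    y = x + 0.1 * tf.random.normal((2, 896, 3))
    return corr_coef(x, y, eps = 1e-8)  # shape (2,), values close to 1 for well-correlated tracks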
def create_eval_step(model, head):
@tf.function
def predict(seq, target):
pred = model(seq, is_training=False)[head]
return corr_coef(pred, target)
return predict
def create_step_function(model, optimizer, head, clip_grad_norm = 1.0, weight_decay = 0.0001):
@tf.function
def train_step(batch_seq, batch_target):
with tf.GradientTape() as tape:
with snt.mixed_precision.scope(tf.float16):
outputs = model(batch_seq, is_training=True)[head]
corr_coef_loss = 1 - corr_coef(outputs, batch_target, eps = 1e-8) # computed for reference; the loss used below is the Poisson loss only
poisson = tf.reduce_mean(
tf.keras.losses.poisson(batch_target, outputs))
loss = poisson
gradients = tape.gradient(loss, model.trainable_variables, unconnected_gradients=tf.UnconnectedGradients.ZERO)
gradients = [tf.clip_by_norm(grad, clip_grad_norm) for grad in gradients]
ctx = tf.distribute.get_replica_context()
gradients = ctx.all_reduce("mean", gradients)
optimizer.apply(gradients, model.trainable_variables)
return loss
return train_step
# instantiate model and training / eval functions
with tpu_strategy.scope():
model = Enformer(channels=1536,
num_heads=8,
num_transformer_layers=11)
learning_rate = tf.Variable(0., trainable=False, name='learning_rate')
optimizer = snt.optimizers.Adam(learning_rate=learning_rate)
train_step_human = create_step_function(model, optimizer, 'human')
train_step_mouse = create_step_function(model, optimizer, 'mouse')
eval_step_human = create_eval_step(model, 'human')
eval_step_mouse = create_eval_step(model, 'mouse')
# experiment tracker
wandb.init(project='enformer')
wandb.run.save()
# Train the model
num_steps = int(2e6)
num_warmup_steps = 5000
target_learning_rate = 5e-4
checkpoint_every = 2500
max_eval_steps = 25
eval_every = 500
# Step variables
global_step = tf.Variable(0, name='global_step', trainable=False)
# checkpointing
checkpoint_root = "gs://enformer/"
checkpoint_name = "enformer"
save_prefix = os.path.join(checkpoint_root, checkpoint_name)
checkpoint = tf.train.Checkpoint(module = model, step = global_step, optimizer = optimizer)
# load latest checkpoint if possible
latest = tf.train.latest_checkpoint(checkpoint_root)
if latest is not None:
checkpoint.restore(latest)
@tf.function
def step():
global_step.assign(global_step + 1)
batch_human, batch_mouse = next(data_it)
loss_human = tpu_strategy.run(train_step_human, args = (batch_human['sequence'], batch_human['target']))
loss_mouse = tpu_strategy.run(train_step_mouse, args = (batch_mouse['sequence'], batch_mouse['target']))
loss_human = tpu_strategy.reduce('mean', loss_human, axis = None)
loss_mouse = tpu_strategy.reduce('mean', loss_mouse, axis = None)
learning_rate_frac = tf.math.minimum(1.0, tf.cast(global_step, tf.float32) / tf.math.maximum(1.0, float(num_warmup_steps)))
learning_rate.assign(target_learning_rate * learning_rate_frac)
return loss_human, loss_mouse
@tf.function
def eval_step():
batch_human = next(valid_human_data_it)
batch_mouse = next(valid_mouse_data_it)
human_r = tpu_strategy.run(eval_step_human, args = (batch_human['sequence'], batch_human['target']))
mouse_r = tpu_strategy.run(eval_step_mouse, args = (batch_mouse['sequence'], batch_mouse['target']))
human_r = tpu_strategy.reduce('mean', human_r, axis = 0)
mouse_r = tpu_strategy.reduce('mean', mouse_r, axis = 0)
return human_r, mouse_r
i = global_step.numpy()
total_mice = 114 * 256 + 111
total_human = 132 * 256 + 229
bucket_size = 256
num_seen = i * num_cores
human_file_skip = (num_seen % total_human) // bucket_size
mouse_file_skip = (num_seen % total_mice) // bucket_size
human_dataset = get_dataset('human', 'train', rotate = human_file_skip).batch(num_cores, drop_remainder = True)
mouse_dataset = get_dataset('mouse', 'train', rotate = mouse_file_skip).batch(num_cores, drop_remainder = True)
human_mouse_dataset = tf.data.Dataset.zip((human_dataset, mouse_dataset)).prefetch(2)
human_valid_dataset = get_dataset('human', 'valid', shuffle = False).repeat().batch(num_cores)
mouse_valid_dataset = get_dataset('mouse', 'valid', shuffle = False).repeat().batch(num_cores)
data_it = iter(tpu_strategy.experimental_distribute_dataset(human_mouse_dataset))
valid_human_data_it = iter(tpu_strategy.experimental_distribute_dataset(human_valid_dataset))
valid_mouse_data_it = iter(tpu_strategy.experimental_distribute_dataset(mouse_valid_dataset))
print(f'starting from {i}')
while i < num_steps:
print(f'processing step {i}')
loss_human, loss_mouse = step()
loss_human = loss_human.numpy()
loss_mouse = loss_mouse.numpy()
learning_rate_numpy = learning_rate.numpy()
print(f'completed step {i}')
log = {
'loss_human': loss_human,
'loss_mouse': loss_mouse,
'learning_rate': learning_rate_numpy
}
if i and not i % eval_every:
print('evaluating')
human_pearson_r, mouse_pearson_r = eval_step()
human_pearson_r = human_pearson_r.numpy()
mouse_pearson_r = mouse_pearson_r.numpy()
log = {
**log,
'human_pearson_r': human_pearson_r,
'mouse_pearson_r': mouse_pearson_r
}
wandb.log(log, step = i)
if not i % checkpoint_every:
print('checkpointing')
checkpoint.save(save_prefix)
i += 1
|
enformer-tensorflow-sonnet-training-script-main
|
train.py
|
from setuptools import setup, find_packages
setup(
name = 'recurrent-memory-transformer-pytorch',
packages = find_packages(exclude=[]),
version = '0.5.5',
license='MIT',
description = 'Recurrent Memory Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/recurrent-memory-transformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'recurrence',
'memory',
'long-context'
],
install_requires=[
'einops>=0.6.1',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
recurrent-memory-transformer-pytorch-main
|
setup.py
|
import gzip
import random
import tqdm
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from recurrent_memory_transformer_pytorch import RecurrentMemoryTransformer, RecurrentMemoryTransformerWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 250
GENERATE_LENGTH = 2048
SEQ_LEN = 2048
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# instantiate palm
model = RecurrentMemoryTransformer(
num_tokens = 256,
dim = 512,
depth = 6,
dim_head = 64,
heads = 8,
seq_len = 512,
use_flash_attn = True,
num_memory_tokens = 128,
use_xl_memories = True,
xl_mem_len = 256
)
model = RecurrentMemoryTransformerWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
total_loss = 0.
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(
next(train_loader),
memory_replay_backprop = True,
mrbp_loss_weight = 1. / GRADIENT_ACCUMULATE_EVERY
)
total_loss += loss
print(f"training loss: {total_loss.item()}")
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss, _ = model(next(val_loader), return_loss = True)
print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
print(f"%s \n\n %s", (prime, "*" * 100))
sample = model.generate(inp[None, :], length = GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
print(output_str, "\n")
|
recurrent-memory-transformer-pytorch-main
|
train.py
|
from recurrent_memory_transformer_pytorch.recurrent_memory_transformer import RecurrentMemoryTransformer, RecurrentMemoryTransformerWrapper
|
recurrent-memory-transformer-pytorch-main
|
recurrent_memory_transformer_pytorch/__init__.py
|
from collections import namedtuple
from functools import wraps
from packaging import version
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# helpers
def exists(val):
return val is not None
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
# main class
class Attend(nn.Module):
def __init__(
self,
dropout = 0.,
causal = False,
use_flash = False
):
super().__init__()
self.dropout = dropout
self.attn_dropout = nn.Dropout(dropout)
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash = use_flash
assert not (use_flash and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
if exists(mask):
if mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask.expand(-1, heads, q_len, -1)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = mask,
dropout_p = self.dropout if self.training else 0.,
is_causal = self.causal
)
return out
def forward(self, q, k, v, mask = None):
"""
einstein notation
b - batch
h - heads
n, i, j - sequence length (base sequence length, source, target)
d - feature dimension
"""
n, device = q.shape[-2], q.device
scale = q.shape[-1] ** -0.5
if self.use_flash:
return self.flash_attn(q, k, v, mask = mask)
# similarity
sim = einsum("b h i d, b h j d -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
if mask.ndim != 4:
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
causal_mask = self.get_mask(n, device)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
attn = self.attn_dropout(attn)
# aggregate values
out = einsum("b h i j, b h j d -> b h i d", attn, v)
return out
|
recurrent-memory-transformer-pytorch-main
|
recurrent_memory_transformer_pytorch/attend.py
|
import math
from functools import partial
from itertools import zip_longest
from contextlib import nullcontext
from typing import Optional, List, Tuple
import torch
import torch.nn.functional as F
from torch import nn, einsum, Tensor
from einops import rearrange, repeat, pack, unpack
from recurrent_memory_transformer_pytorch.attend import Attend
# constants
Linear = partial(nn.Linear, bias = False)
# helpers
def exists(val):
return val is not None
def identity(t, *args, **kwargs):
return t
def default(*vals):
for val in vals:
if exists(val):
return val
return None
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
def divisible_by(numer, denom):
return (numer % denom) == 0
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
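# Illustrative sketch (not part of the original file; the underscore-prefixed helper is
# hypothetical): top_k keeps roughly the top (1 - thres) fraction of logits and sets the rest
# to -inf, so sampling only draws from the highest scoring tokens.
def _demo_top_k():
    logits = torch.randn(1, 256)
    filtered = top_k(logits, thres = 0.9)
    return torch.isfinite(filtered).sum()  # 26 of the 256 logits remain finite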
def token_shift_fn(t, ps):
read_mem, t, write_mem = unpack(t, ps, 'b * d')
t, t_shift = t.chunk(2, dim = -1)
t_shift = F.pad(t_shift, (0, 0, 1, -1), value = 0.)
t = torch.cat((t, t_shift), dim = -1)
return torch.cat((read_mem, t, write_mem), dim = -2)
def frac_gradient(t, frac = 1.):
if frac == 1.:
return t
return t * frac + t.detach() * (1. - frac)
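# Illustrative sketch (not part of the original file; the underscore-prefixed helper is
# hypothetical): frac_gradient leaves the forward value unchanged but scales the gradient
# flowing back by `frac` - with frac = 0.1 only 10% of the gradient reaches the token
# embeddings (the CogView stability trick used in the transformer below).
def _demo_frac_gradient():
    t = torch.randn(2, 4, requires_grad = True)
    frac_gradient(t, 0.1).sum().backward()
    return t.grad  # every entry equals 0.1, i.e. the gradient of sum() scaled by frac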
# rotary embedding
class RotaryEmbedding(nn.Module):
def __init__(self, dim, theta = 32768):
super().__init__()
inv_freq = 1. / (theta ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer('inv_freq', inv_freq)
def forward(self, positions):
freqs = torch.einsum('i , j -> i j', positions, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
return freqs
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(pos, t):
return (t * pos.cos()) + (rotate_half(t) * pos.sin())
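# Illustrative sketch (not part of the original file; the underscore-prefixed helper is
# hypothetical): rotary embeddings rotate pairs of feature dimensions by position-dependent
# angles; applying the same rotation to queries and keys makes their dot products depend on
# relative position only.
def _demo_rotary():
    rotary = RotaryEmbedding(dim = 64)
    freqs = rotary(torch.arange(10).float())          # (10, 64) angles
    q = apply_rotary_pos_emb(freqs, torch.randn(1, 8, 10, 64))
    return q.shape  # torch.Size([1, 8, 10, 64]), unchanged by the rotation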
# norms
class RMSNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.scale * self.gamma
# feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return x * F.gelu(gate)
def FeedForward(dim, mult = 4):
dim_inner = int(dim * mult * 2 / 3)
return nn.Sequential(
Linear(dim, dim_inner * 2, bias = False),
GEGLU(),
RMSNorm(dim_inner),
Linear(dim_inner, dim, bias = False)
)
# attention
class Attention(nn.Module):
def __init__(
self,
*,
dim,
causal = False,
dim_head = 64,
heads = 8,
dropout = 0.,
use_flash_attn = False,
use_custom_causal_attn_mask = False
):
super().__init__()
dim_inner = dim_head * heads
self.heads = heads
self.attend = Attend(
causal = causal and not use_custom_causal_attn_mask,
dropout = dropout,
use_flash = use_flash_attn
)
self.null_kv = nn.Parameter(torch.randn(2, heads, dim_head))
self.to_q = Linear(dim, dim_inner)
self.to_kv = Linear(dim, dim_inner * 2)
self.to_out = Linear(dim_inner, dim)
def forward(
self,
x,
rotary_emb: Optional[Tuple[Tensor, Tensor]] = None,
mask = None,
xl_memories = None
):
h = self.heads
q = self.to_q(x)
k, v = self.to_kv(x).chunk(2, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
# add a null key / value
# to protect against an entirely masked out sequence
# as well as giving attention ability to attend to nothing
nk, nv = map(lambda t: repeat(t, 'h d -> b h 1 d', b = x.shape[0]), self.null_kv)
k = torch.cat((nk, k), dim = -2)
v = torch.cat((nv, v), dim = -2)
if exists(mask):
mask = F.pad(mask, (1, 0), value = True)
# manage memories
next_xl_memories = torch.stack((k, v))
if exists(xl_memories):
kx, vx = xl_memories
k = torch.cat((kx, k), dim = -2)
v = torch.cat((vx, v), dim = -2)
if exists(mask):
mask = F.pad(mask, (xl_memories.shape[-2], 0), value = True)
if exists(rotary_emb):
q_rotary_emb, k_rotary_emb = rotary_emb
q = apply_rotary_pos_emb(q_rotary_emb, q)
k = apply_rotary_pos_emb(k_rotary_emb, k)
out = self.attend(q, k, v, mask = mask)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out), next_xl_memories
# transformer
class RecurrentMemoryTransformer(nn.Module):
def __init__(
self,
dim,
*,
num_tokens,
depth,
num_memory_tokens,
seq_len,
causal = True,
dim_head = 64,
heads = 8,
ff_mult = 4,
use_flash_attn = False,
ignore_index = -1,
abs_pos_emb = True,
rotary_pos_emb = False,
token_shift = True,
use_xl_memories = True,
xl_mem_len = None,
enhanced_xl_recurrence = False, # add simple method for enhancing receptive field of xl memories, from ernie-doc paper
emb_gradient_frac = 0.1, # trick from cogview paper that leads to a bit more stability
memory_not_causal = True, # flash attention behaves a bit more optimally if causal mask is not explicitly passed in - but if the memories perform better without a causal mask, it is necessary to have this turned on
add_write_to_next_write_mem = False, # add the write memories of previous step to the next write step - thanks to @IcarusWizard for pointing out this discrepancy
next_write_mem_stop_grad = True, # whether to stop gradient of previous read memory -> next write memory
always_have_read_memories = True, # whether to always have read memories, even on the first step, so to make the model onnx-able
resi_dual_scale = 1., # in the case of overflows in fp16 on the prenorm branch, set this to a value less than 1.
):
super().__init__()
self.causal = causal
self.seq_len = seq_len
self.emb_gradient_frac = emb_gradient_frac
assert 0 < resi_dual_scale <= 1., 'resiDual scale must be between 0 and 1'
self.resi_dual_scale = resi_dual_scale
assert num_memory_tokens > 0
self.token_emb = nn.Embedding(num_tokens, dim)
# positions
assert any([abs_pos_emb, rotary_pos_emb, token_shift])
self.pos_emb = nn.Embedding(seq_len, dim) if abs_pos_emb else None
self.rotary_pos_emb = RotaryEmbedding(dim_head) if rotary_pos_emb else None
self.maybe_token_shift = token_shift_fn if token_shift else identity
# memory related
self.num_memory_tokens = num_memory_tokens
self.read_memory_emb = nn.Parameter(torch.zeros(num_memory_tokens, dim))
nn.init.normal_(self.read_memory_emb, std = 0.02)
self.memory_tokens = nn.Parameter(torch.randn(num_memory_tokens, dim))
nn.init.normal_(self.memory_tokens, std = 0.02)
# xl memories
xl_mem_len = default(xl_mem_len, seq_len)
assert xl_mem_len <= seq_len
self.xl_mem_len = xl_mem_len
self.use_xl_memories = use_xl_memories
self.enhanced_xl_recurrence = enhanced_xl_recurrence
# layers
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
Attention(
dim = dim,
dim_head = dim_head,
causal = causal,
heads = heads,
use_flash_attn = use_flash_attn,
use_custom_causal_attn_mask = memory_not_causal
),
RMSNorm(dim),
FeedForward(dim = dim, mult = ff_mult),
RMSNorm(dim)
]))
self.norm = RMSNorm(dim)
self.to_logits = nn.Linear(dim, num_tokens)
self.ignore_index = ignore_index
# whether to use custom attention mask if causal and memory should not be causal
self.use_custom_causal_attn_mask = causal and memory_not_causal
# in the paper, they actually also use the previous write memories for the next write memories
self.add_write_to_next_write_mem = add_write_to_next_write_mem
self.next_write_mem_stop_grad = next_write_mem_stop_grad
# allow for attending to raw read memory positional embeddings on first step
# hack to make it onnx-able and should not hurt
self.always_have_read_memories = always_have_read_memories
def init_memory(self, batch):
return repeat(self.memory_tokens, 'm d -> b m d', b = batch)
def forward(
self,
x,
read_memories = None,
*,
mask = None,
labels = None,
xl_memories: Optional[List[Tensor]] = None,
mask_out_read_memories = False # in the case one is passing in 0s for read memories, for onnx-able model
):
has_xl_memories = exists(xl_memories) and len(xl_memories) > 0
b, n, device, mem_length, return_loss = *x.shape, x.device, self.num_memory_tokens, exists(labels)
assert n <= self.seq_len
pos = torch.arange(n, device = device)
x = self.token_emb(x)
# maybe absolute positional embedding
if exists(self.pos_emb):
x = x + self.pos_emb(pos)
# trick from cogview paper
x = frac_gradient(x, self.emb_gradient_frac)
# prepare write memories, as in paper
write_memories = self.init_memory(b)
if exists(read_memories) and self.add_write_to_next_write_mem:
maybe_detach = torch.detach if self.next_write_mem_stop_grad else identity
write_memories = write_memories + maybe_detach(read_memories)
# prepare read memories
if exists(read_memories):
if read_memories.ndim == 2:
read_memories = repeat(read_memories, 'n d -> b n d', b = b)
read_mem_length = mem_length
read_memories = read_memories + self.read_memory_emb
elif self.always_have_read_memories:
read_mem_length = mem_length
read_memories = repeat(self.read_memory_emb, 'n d -> b n d', b = b)
else:
read_mem_length = 0
read_memories = x[:, 0:0]
# concat to main sequence using einop's pack
x, ps = pack([read_memories, x, write_memories], 'b * d')
# take care of mask
if exists(mask):
mask = F.pad(mask, (read_mem_length, mem_length), value = True)
# custom causal mask, if needed
if self.use_custom_causal_attn_mask:
causal_mask = torch.ones((n, n), device = device, dtype = torch.bool).tril()
causal_mask = F.pad(causal_mask, (0, mem_length, read_mem_length, 0), value = False)
causal_mask = F.pad(causal_mask, (read_mem_length, 0, 0, mem_length), value = True)
causal_mask = rearrange(causal_mask, 'i j -> 1 1 i j')
if exists(mask):
mask = rearrange(mask, 'b j -> b 1 1 j')
mask = mask & causal_mask
else:
mask = causal_mask
# masking out read memories, either for passing in 0s for read memories on first step, or if you are doing some regularization game on the memories
if read_mem_length > 0 and mask_out_read_memories:
read_mem_mask = torch.arange(x.shape[-2], device = device) < read_mem_length
if exists(mask):
mask = mask & ~read_mem_mask
else:
mask = read_mem_mask
# rotary embedding - offset main positions by 10000, and keep all memories at position 0
rotary_emb = None
if exists(self.rotary_pos_emb):
mem_rel_dist = 10000
q_pos = pos + mem_rel_dist
if has_xl_memories:
xl_mem_length = xl_memories[0].shape[-2]
q_pos += xl_mem_length
q_pos = F.pad(q_pos, (read_mem_length, mem_length), value = 0)
q_rotary_emb = self.rotary_pos_emb(q_pos)
# kind of confusing at the moment
# but the order of the keys is - [xl memories] [read memories] [main sequence] [write memories]
# so the positions are (say xl memory length of 256) - [10001, 10002, 10003 ...] [0, 0, ...] [10256, 10257, ...] [0, 0, ...]
if has_xl_memories:
k_pos = torch.arange(xl_mem_length, device = device) + mem_rel_dist
k_pos = torch.cat((k_pos, q_pos), dim = -1)
else:
k_pos = q_pos
# account for null key / value
k_pos = F.pad(k_pos, (1, 0), value = mem_rel_dist - 1) # give a null memory token, to allow for attending to nothing
k_rotary_emb = self.rotary_pos_emb(k_pos)
rotary_emb = (q_rotary_emb, k_rotary_emb)
# maybe token shift function
shift_fn = partial(self.maybe_token_shift, ps = ps)
# prepare xl memories
xl_memories = default(xl_memories, [])
xl_memories_iter = iter(xl_memories)
new_xl_memories = []
if has_xl_memories and self.enhanced_xl_recurrence and len(xl_memories) > 1: # simply shift all the xl memories down by one, so lower layer gets access to representations from layer above
xl_memories = [*xl_memories[1:], xl_memories[0]]
# attention and feedforward
residual = x * self.resi_dual_scale
for attn, attn_post_norm, ff, ff_post_norm in self.layers:
attn_out, xl_memories = attn(shift_fn(x), mask = mask, xl_memories = next(xl_memories_iter, None), rotary_emb = rotary_emb)
new_xl_memories.append(xl_memories)
x = attn_post_norm(x + attn_out)
residual = residual + attn_out * self.resi_dual_scale
ff_out = ff(shift_fn(x))
x = ff_post_norm(x + ff_out)
residual = residual + ff_out * self.resi_dual_scale
# whether to return xl memories
next_xl_memories = None
if self.use_xl_memories:
next_xl_memories = list(map(lambda t: torch.detach(t[..., -self.xl_mem_len:, :]), new_xl_memories))
# add final norm of residual, as in resiDual paper
x = x + self.norm(residual)
# split out memories using unpack
read_memories, x, write_memories = unpack(x, ps, 'b * d')
# to logits
logits = self.to_logits(x)
if not return_loss:
return logits, write_memories, next_xl_memories
loss = F.cross_entropy(
rearrange(logits, 'b n c -> b c n'),
labels,
ignore_index = self.ignore_index
)
return loss, write_memories, next_xl_memories
# wrapper to manage many segments
class RecurrentMemoryTransformerWrapper(nn.Module):
def __init__(
self,
transformer: RecurrentMemoryTransformer,
truncate_at_step = None # number of steps before detaching memories (truncated bptt). with memory replay checkpointing, there should be no memory issues, but in case of instability, as reported in initial paper
):
super().__init__()
self.transformer = transformer
self.seq_len = transformer.seq_len
self.truncate_at_step = truncate_at_step
@torch.no_grad()
@eval_decorator
def generate(
self,
prime,
*,
length,
memories = None,
xl_memories: Optional[List[Tensor]] = None,
temperature = 1.,
filter_thres = 0.9
):
assert self.transformer.causal, 'only autoregressive transformers can generate'
start_len, seq_len = prime.shape[-1], self.seq_len
assert length >= start_len
*past_segments, curr_segment = prime.split(seq_len, dim = -1)
# catch memories up to the current segment
for past_segment in past_segments:
_, memories, xl_memories = self.transformer(past_segment, memories, xl_memories = xl_memories)
# sample for the remaining length
for ind in range(length - start_len):
logits, next_memories, next_xl_memories = self.transformer(curr_segment, memories, xl_memories = xl_memories)
logits = logits[:, -1]
filtered_logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature)
sampled = rearrange(sampled, 'b -> b 1')
curr_segment = torch.cat((curr_segment, sampled), dim = -1)
if divisible_by(curr_segment.shape[-1] - 1, seq_len):
memories = next_memories
xl_memories = next_xl_memories
past_segment, curr_segment = curr_segment[..., :seq_len], curr_segment[..., -1:]
past_segments.append(past_segment)
# add current segment to all segments
past_segments.append(curr_segment)
# reconcat all segments
output = torch.cat(past_segments, dim = -1)
output = output[:, start_len:]
return output
def forward(
self,
x,
memories = None,
*,
mask = None,
xl_memories: Optional[List[Tensor]] = None,
return_loss = False,
labels = None,
truncate_at_step = None, # if set, this would override the truncate_at_step at init
memory_replay_backprop = False, # whether to have the class do the backwards pass memory efficiently
mrbp_loss_weight = 1. # if using memory replay backprop with gradient accumulation, scale loss by this factor ex. (1. / <num grad accum steps>)
):
seq_len, truncate_at_step = self.seq_len, default(truncate_at_step, self.truncate_at_step)
if (return_loss or memory_replay_backprop) and not exists(labels):
x, labels = x[:, :-1], x[:, 1:]
# segment input
segments = x.split(seq_len, dim = -1)
total_length = x.shape[-1]
num_segments = len(segments)
segment_length_frac = tuple(map(lambda t: t.shape[-1] / total_length, segments))
# default values
label_segments = mask_segments = (None,)
# take care of labels
if exists(labels):
label_segments = labels.split(seq_len, dim = -1)
# take care of the mask
if exists(mask):
mask_segments = mask.split(seq_len, dim = -1)
# keep replay buffer
replay_buffer = [memories]
# replay buffer for xl memories
xl_segments = [xl_memories]
# decide context of forward depending on whether doing memory-replay-backprop
forward_context = nullcontext if not memory_replay_backprop else torch.no_grad
# forward and get all outputs (can be either loss or logits)
logits = []
losses = []
for step, (segment, mask_segment, label_segment, loss_weight) in enumerate(zip_longest(segments, mask_segments, label_segments, segment_length_frac)):
with forward_context():
output, memories, xl_memories = self.transformer(segment, memories, mask = mask_segment, labels = label_segment)
if exists(truncate_at_step) and divisible_by(step + 1, truncate_at_step):
memories = memories.detach()
replay_buffer.append(memories)
xl_segments.append(xl_memories)
if return_loss:
losses.append(output * loss_weight)
else:
logits.append(output)
# whether to do memory replay backpropagation
# https://arxiv.org/abs/2010.06891
# algorithm 1
if memory_replay_backprop:
memories_grad = torch.zeros_like(replay_buffer[-1])
reversed_inputs = zip_longest(*map(reversed, [
range(num_segments),
segments,
replay_buffer[:-1],
xl_segments[:-1],
mask_segments,
label_segments,
segment_length_frac,
]))
total_loss = 0.
for step, segment, segment_memories, segment_xl_memories, mask_segment, label_segment, loss_weight in reversed_inputs:
is_first = step == 0
if exists(segment_memories):
segment_memories.requires_grad_()
loss, next_segment_memories, _ = self.transformer(segment, segment_memories, mask = mask_segment, xl_memories = segment_xl_memories, labels = label_segment)
weighted_loss = loss * loss_weight * mrbp_loss_weight
weighted_loss.backward(retain_graph = True)
next_segment_memories.backward(memories_grad)
total_loss += weighted_loss
if is_first:
continue
if exists(truncate_at_step) and divisible_by(step, truncate_at_step):
memories_grad.zero_()
else:
memories_grad.copy_(segment_memories.grad.data)
return total_loss
# return logits if needed
if not return_loss:
logits = torch.cat(logits, dim = -2)
return logits, memories
# otherwise return losses
return sum(losses), memories
|
recurrent-memory-transformer-pytorch-main
|
recurrent_memory_transformer_pytorch/recurrent_memory_transformer.py
|
from setuptools import setup, find_packages
setup(
name = 'panoptic-transformer',
packages = find_packages(exclude=[]),
version = '0.0.1',
license='MIT',
description = 'Panoptic Transformer',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/panoptic-transformer',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention-mechanism',
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
panoptic-transformer-main
|
setup.py
|
import torch
from torch import nn, einsum
from einops import rearrange
import torch.nn.functional as F
class Attention(nn.Module):
def __init__(
self,
dim,
*,
dim_head = 64,
heads = 8
):
super().__init__()
inner_dim = heads * dim_head
self.scale = dim_head ** -0.5
self.heads = heads
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim, bias = False)
def forward(self, x):
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
q = rearrange(q, 'b n (h d) -> b h n d', h = self.heads)
q = q * self.scale
sim = einsum('b h i d, b j d -> b h i j', q, k)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
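# Illustrative sketch (not part of the original file; the underscore-prefixed helper is
# hypothetical): keys and values are projected once and shared across all query heads
# (multi-query style), which is why to_kv outputs dim_head * 2 rather than heads * dim_head * 2.
def _demo_attention_shapes():
    attn = Attention(256, dim_head = 64, heads = 8)
    x = torch.randn(1, 10, 256)
    return attn(x).shape  # torch.Size([1, 10, 256])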
class PanopticTransformer(nn.Module):
def __init__(
self,
dim,
dim_head = 64,
heads = 8
):
super().__init__()
def forward(self, x):
return x
|
panoptic-transformer-main
|
panoptic_transformer/panoptic_transformer.py
|
from panoptic_transformer.panoptic_transformer import PanopticTransformer
|
panoptic-transformer-main
|
panoptic_transformer/__init__.py
|
from pathlib import Path
from random import choice
from PIL import Image
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.utils.data import random_split
from torchvision import transforms as T
# helper functions
def cycle(dl):
while True:
for el in dl:
yield el
# pathfinder dataset
class PathfinderXDataset(Dataset):
def __init__(
self,
folder,
augment = False
):
super().__init__()
metadata_files = [*Path(folder).glob(f'**/*.npy')]
assert len(metadata_files) > 0, 'no metadata (.npy) file found in folder'
metadata_file = metadata_files[0]
metadata = np.load(str(metadata_file))
root_path = metadata_file.parents[1]
self.augment = augment
self.data = [(str(root_path / m[0] / m[1]), int(m[3])) for m in metadata]
def __len__(self):
return len(self.data)
def __getitem__(self, ind):
path, label = self.data[ind]
img = Image.open(path)
img = T.Compose([
T.RandomHorizontalFlip() if self.augment else nn.Identity(),
T.RandomVerticalFlip() if self.augment else nn.Identity(),
T.PILToTensor()
])(img)
label = torch.tensor(label, dtype = torch.float32)
if self.augment:
rand_rotate = [0, 90, 180, 270]
img = T.functional.rotate(img, choice(rand_rotate))
rand_padding = [(0, 0, 0, 0), (1, -1, 0, 0), (-1, 1, 0, 0), (0, 0, 1, -1), (0, 0, -1, 1)]
img = F.pad(img, choice(rand_padding))
return img.float(), label
# get training and validation dataloader functions
def get_dataloaders(
data_path,
*,
augment = True,
frac_valids = 0.05,
batch_size
):
ds = PathfinderXDataset(data_path, augment = augment)
total_samples = len(ds)
num_valid = int(frac_valids * total_samples)
num_train = total_samples - num_valid
print(f'training with {num_train} samples and validating with {num_valid} samples')
train_ds, valid_ds = random_split(ds, [num_train, num_valid])
train_dl = DataLoader(train_ds, batch_size = batch_size, shuffle = True)
valid_dl = DataLoader(valid_ds, batch_size = batch_size, shuffle = True)
return cycle(train_dl), cycle(valid_dl)
|
panoptic-transformer-main
|
panoptic_transformer/data.py
|
# taken from https://github.com/drewlinsley/pathfinder/blob/master/snakes2_wrapper.py
# but modified with path-x specific settings
import time
import sys
import numpy as np
import os
import snakes2
class Args:
def __init__(self,
contour_path = './contour', batch_id=0, n_images = 200000,
window_size=[256,256], padding=22, antialias_scale = 4,
LABEL =1, seed_distance= 27, marker_radius = 3,
contour_length=15, distractor_length=5, num_distractor_snakes=6, snake_contrast_list=[1.], use_single_paddles=True,
max_target_contour_retrial = 4, max_distractor_contour_retrial = 4, max_paddle_retrial=2,
continuity = 1.4, paddle_length=5, paddle_thickness=1.5, paddle_margin_list=[4], paddle_contrast_list=[1.],
pause_display=False, save_images=True, save_metadata=True):
self.contour_path = contour_path
self.batch_id = batch_id
self.n_images = n_images
self.window_size = window_size
self.padding = padding
self.antialias_scale = antialias_scale
self.LABEL = LABEL
self.seed_distance = seed_distance
self.marker_radius = marker_radius
self.contour_length = contour_length
self.distractor_length = distractor_length
self.num_distractor_snakes = num_distractor_snakes
self.snake_contrast_list = snake_contrast_list
self.use_single_paddles = use_single_paddles
self.max_target_contour_retrial = max_target_contour_retrial
self.max_distractor_contour_retrial = max_distractor_contour_retrial
self.max_paddle_retrial = max_paddle_retrial
self.continuity = continuity
self.paddle_length = paddle_length
self.paddle_thickness = paddle_thickness
self.paddle_margin_list = paddle_margin_list # if multiple elements in a list, a number will be sampled in each IMAGE
self.paddle_contrast_list = paddle_contrast_list # if multiple elements in a list, a number will be sampled in each PADDLE
self.pause_display = pause_display
self.save_images = save_images
self.save_metadata = save_metadata
t = time.time()
args = Args()
num_machines = int(sys.argv[1])
current_id = int(sys.argv[2])
args.batch_id = current_id
total_images = int(sys.argv[3])
args.n_images = total_images // num_machines # integer division, so each machine generates a whole number of images
dataset_root = './pathx-data' #'/media/data_cifs/pathfinder_seg/'
if len(sys.argv)==4:
print('Using default path...')
elif len(sys.argv)==5:
print('Using custom save path...')
dataset_root = str(sys.argv[4])
args.padding = 1
args.antialias_scale = 4
args.paddle_margin_list = [2,3]
args.seed_distance = 20
args.window_size = [128,128]
args.marker_radius = 3
args.contour_length = 14
args.paddle_thickness = 1.5
args.antialias_scale = 2
args.continuity = 1.8 # from 1.8 to 0.8, with steps of 66%
args.distractor_length = args.contour_length // 3
args.num_distractor_snakes = 35 // args.distractor_length
args.snake_contrast_list = [0.9]
args.use_single_paddles = False
args.segmentation_task = False # False
args.segmentation_task_double_circle = False
dataset_subpath = 'curv_baseline'
args.contour_path = os.path.join(dataset_root, dataset_subpath)
snakes2.from_wrapper(args)
|
panoptic-transformer-main
|
scripts/gen-pathx.py
|
# standard imports
import os
import sys
import pickle
# non-standard imports
import numpy as np
from sklearn import svm
from sqlite3 import dbapi2 as sqlite3
# local imports
from utils import safe_pickle_dump, strip_version, Config
num_recommendations = 500 # papers to recommend per user
# -----------------------------------------------------------------------------
if not os.path.isfile(Config.database_path):
print("the database file as.db should exist. You can create an empty database with sqlite3 as.db < schema.sql")
sys.exit()
sqldb = sqlite3.connect(Config.database_path)
sqldb.row_factory = sqlite3.Row # to return dicts rather than tuples
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = sqldb.execute(query, args)
rv = cur.fetchall()
return (rv[0] if rv else None) if one else rv
# -----------------------------------------------------------------------------
# fetch all users
users = query_db('''select * from user''')
print('number of users: ', len(users))
# load the tfidf matrix and meta
meta = pickle.load(open(Config.meta_path, 'rb'))
out = pickle.load(open(Config.tfidf_path, 'rb'))
X = out['X']
X = X.todense()
xtoi = { strip_version(x):i for x,i in meta['ptoi'].items() }
user_sim = {}
for ii,u in enumerate(users):
print("%d/%d building an SVM for %s" % (ii, len(users), u['username'].encode('utf-8')))
uid = u['user_id']
lib = query_db('''select * from library where user_id = ?''', [uid])
pids = [x['paper_id'] for x in lib] # raw pids without version
posix = [xtoi[p] for p in pids if p in xtoi]
if not posix:
continue # empty library for this user maybe?
print(pids)
y = np.zeros(X.shape[0])
for ix in posix: y[ix] = 1
clf = svm.LinearSVC(class_weight='balanced', verbose=False, max_iter=10000, tol=1e-6, C=0.1)
clf.fit(X,y)
s = clf.decision_function(X)
sortix = np.argsort(-s)
sortix = sortix[:min(num_recommendations, len(sortix))] # crop paper recommendations to save space
user_sim[uid] = [strip_version(meta['pids'][ix]) for ix in list(sortix)]
print('writing', Config.user_sim_path)
safe_pickle_dump(user_sim, Config.user_sim_path)
|
arxiv-sanity-preserver-master
|
buildsvm.py
|
"""
Very simple script that simply iterates over all files data/pdf/f.pdf
and create a file data/txt/f.pdf.txt that contains the raw text, extracted
using the "pdftotext" command. If a pdf cannot be converted, this
script will not produce the output file.
"""
import os
import sys
import time
import shutil
import pickle
from utils import Config
# make sure pdftotext is installed
if not shutil.which('pdftotext'): # needs Python 3.3+
print('ERROR: you don\'t have pdftotext installed. Install it first before calling this script')
sys.exit()
if not os.path.exists(Config.txt_dir):
print('creating ', Config.txt_dir)
os.makedirs(Config.txt_dir)
have = set(os.listdir(Config.txt_dir))
files = os.listdir(Config.pdf_dir)
for i,f in enumerate(files): # there was a ,start=1 here that I removed, can't remember why it would be there. shouldn't be, i think.
txt_basename = f + '.txt'
if txt_basename in have:
print('%d/%d skipping %s, already exists.' % (i, len(files), txt_basename, ))
continue
pdf_path = os.path.join(Config.pdf_dir, f)
txt_path = os.path.join(Config.txt_dir, txt_basename)
cmd = "pdftotext %s %s" % (pdf_path, txt_path)
os.system(cmd)
print('%d/%d %s' % (i, len(files), cmd))
# check output was made
if not os.path.isfile(txt_path):
# there was an error with converting the pdf
print('there was a problem with parsing %s to text, creating an empty text file.' % (pdf_path, ))
os.system('touch ' + txt_path) # create empty file, but it's a record of having tried to convert
time.sleep(0.01) # silly way for allowing for ctrl+c termination
|
arxiv-sanity-preserver-master
|
parse_pdf_to_text.py
|
import os
import json
import time
import pickle
import dateutil.parser
import argparse
from random import shuffle
import numpy as np
from sqlite3 import dbapi2 as sqlite3
from hashlib import md5
from flask import Flask, request, session, url_for, redirect, \
render_template, abort, g, flash, _app_ctx_stack
from flask_limiter import Limiter
from werkzeug.security import check_password_hash, generate_password_hash
from utils import safe_pickle_dump, strip_version, isvalidid, Config
# various globals
# -----------------------------------------------------------------------------
# database configuration
if os.path.isfile('secret_key.txt'):
SECRET_KEY = open('secret_key.txt', 'r').read()
else:
SECRET_KEY = 'devkey, should be in a file'
app = Flask(__name__)
app.config.from_object(__name__)
limiter = Limiter(app, global_limits=["100 per hour", "20 per minute"])
SEARCH_DICT = {}
# -----------------------------------------------------------------------------
# utilities for database interactions
# -----------------------------------------------------------------------------
# to initialize the database: sqlite3 as.db < schema.sql
def connect_db():
sqlite_db = sqlite3.connect(Config.database_path)
sqlite_db.row_factory = sqlite3.Row # to return dicts rather than tuples
return sqlite_db
def query_db(query, args=(), one=False):
"""Queries the database and returns a list of dictionaries."""
cur = g.db.execute(query, args)
rv = cur.fetchall()
return (rv[0] if rv else None) if one else rv
def get_user_id(username):
"""Convenience method to look up the id for a username."""
rv = query_db('select user_id from user where username = ?',
[username], one=True)
return rv[0] if rv else None
def get_username(user_id):
"""Convenience method to look up the username for a user."""
rv = query_db('select username from user where user_id = ?',
[user_id], one=True)
return rv[0] if rv else None
# -----------------------------------------------------------------------------
# connection handlers
# -----------------------------------------------------------------------------
@app.before_request
def before_request():
# this will always request database connection, even if we dont end up using it ;\
g.db = connect_db()
# retrieve user object from the database if user_id is set
g.user = None
if 'user_id' in session:
g.user = query_db('select * from user where user_id = ?',
[session['user_id']], one=True)
@app.teardown_request
def teardown_request(exception):
db = getattr(g, 'db', None)
if db is not None:
db.close()
# -----------------------------------------------------------------------------
# search/sort functionality
# -----------------------------------------------------------------------------
def date_sort():
scores = []
for pid,p in db.items():
timestruct = dateutil.parser.parse(p['updated'])
p['time_updated'] = int(timestruct.strftime("%s")) # store in struct for future convenience
timestruct = dateutil.parser.parse(p['published'])
p['time_published'] = int(timestruct.strftime("%s")) # store in struct for future convenience
scores.append((p['time_updated'], p))
scores.sort(reverse=True, key=lambda x: x[0])
out = [sp[1] for sp in scores]
return out
def papers_search(qraw):
qparts = qraw.lower().strip().split() # split by spaces
# use reverse index and accumulate scores
scores = []
for pid,p in db.items():
score = sum(SEARCH_DICT[pid].get(q,0) for q in qparts)
if score == 0:
continue # no match whatsoever, dont include
# give a small boost to more recent papers
score += 0.0001*p['tscore']
scores.append((score, p))
scores.sort(reverse=True, key=lambda x: x[0]) # descending
out = [x[1] for x in scores if x[0] > 0]
return out
def papers_similar(pid):
rawpid = strip_version(pid)
# check if we have this paper at all, otherwise return empty list
if not rawpid in db:
return []
# check if we have distances to this specific version of paper id (includes version)
if pid in sim_dict:
# good, simplest case: lets return the papers
return [db[strip_version(k)] for k in sim_dict[pid]]
else:
# ok we don't have this specific version. could be a stale URL that points to,
# e.g. v1 of a paper, but due to an updated version of it we only have v2 on file
# now. We want to use v2 in that case.
# lets try to retrieve the most recent version of this paper we do have
kok = [k for k in sim_dict if rawpid in k]
if kok:
# ok we have at least one different version of this paper, lets use it instead
id_use_instead = kok[0]
return [db[strip_version(k)] for k in sim_dict[id_use_instead]]
else:
# return just the paper. we dont have similarities for it for some reason
return [db[rawpid]]
def papers_from_library():
out = []
if g.user:
# user is logged in, lets fetch their saved library data
uid = session['user_id']
user_library = query_db('''select * from library where user_id = ?''', [uid])
libids = [strip_version(x['paper_id']) for x in user_library]
out = [db[x] for x in libids]
out = sorted(out, key=lambda k: k['updated'], reverse=True)
return out
def papers_from_svm(recent_days=None):
out = []
if g.user:
uid = session['user_id']
if not uid in user_sim:
return []
# we want to exclude papers that are already in user library from the result, so fetch them.
user_library = query_db('''select * from library where user_id = ?''', [uid])
libids = {strip_version(x['paper_id']) for x in user_library}
plist = user_sim[uid]
out = [db[x] for x in plist if not x in libids]
if recent_days is not None:
# filter as well to only most recent papers
curtime = int(time.time()) # in seconds
out = [x for x in out if curtime - x['time_published'] < recent_days*24*60*60]
return out
def papers_filter_version(papers, v):
if v != '1':
return papers # noop
intv = int(v)
filtered = [p for p in papers if p['_version'] == intv]
return filtered
def encode_json(ps, n=10, send_images=True, send_abstracts=True):
libids = set()
if g.user:
# user is logged in, lets fetch their saved library data
uid = session['user_id']
user_library = query_db('''select * from library where user_id = ?''', [uid])
libids = {strip_version(x['paper_id']) for x in user_library}
ret = []
for i in range(min(len(ps),n)):
p = ps[i]
idvv = '%sv%d' % (p['_rawid'], p['_version'])
struct = {}
struct['title'] = p['title']
struct['pid'] = idvv
struct['category'] = p['arxiv_primary_category']['term']
struct['authors'] = [a['name'] for a in p['authors']]
struct['link'] = p['link']
struct['in_library'] = 1 if p['_rawid'] in libids else 0
if send_abstracts:
struct['abstract'] = p['summary']
if send_images:
struct['img'] = '/static/thumbs/' + idvv + '.pdf.jpg'
struct['tags'] = [t['term'] for t in p['tags']]
timestruct = dateutil.parser.parse(p['updated'])
struct['published_time'] = '%s/%s/%s' % (timestruct.month, timestruct.day, timestruct.year)
timestruct = dateutil.parser.parse(p['published'])
struct['originally_published_time'] = '%s/%s/%s' % (timestruct.month, timestruct.day, timestruct.year)
cc = p.get('arxiv_comment', '')
if len(cc) > 100:
cc = cc[:100] + '...' # crop very long comments
struct['comment'] = cc
ret.append(struct)
return ret
# -----------------------------------------------------------------------------
# flask request handling
# -----------------------------------------------------------------------------
def default_context(papers, **kws):
top_papers = encode_json(papers, args.num_results)
ans = dict(papers=top_papers, numresults=len(papers), totpapers=len(db), msg='')
ans.update(kws)
return ans
@app.route("/")
def intmain():
vstr = request.args.get('vfilter', 'all')
papers = DATE_SORTED_PAPERS # precomputed
papers = papers_filter_version(papers, vstr)
ctx = default_context(papers, render_format='recent',
msg='Showing most recent Arxiv papers:')
return render_template('main.html', **ctx)
@app.route("/<request_pid>")
def rank(request_pid=None):
if not isvalidid(request_pid):
return '' # these are requests for icons, things like robots.txt, etc
papers = papers_similar(request_pid)
ctx = default_context(papers, render_format='paper')
return render_template('main.html', **ctx)
@app.route("/search", methods=['GET'])
def search():
q = request.args.get('q', '') # get the search request
papers = papers_search(q) # perform the query and get sorted documents
ctx = default_context(papers, render_format="search")
return render_template('main.html', **ctx)
@app.route('/recommend', methods=['GET'])
def recommend():
""" return user's svm sorted list """
ttstr = request.args.get('timefilter', 'week') # default is week
vstr = request.args.get('vfilter', 'all') # default is all (no filter)
legend = {'day':1, '3days':3, 'week':7, 'month':30, 'year':365}
tt = legend.get(ttstr, None)
papers = papers_from_svm(recent_days=tt)
papers = papers_filter_version(papers, vstr)
ctx = default_context(papers, render_format='recommend',
msg='Recommended papers: (based on SVM trained on tfidf of papers in your library, refreshed every day or so)' if g.user else 'You must be logged in and have some papers saved in your library.')
return render_template('main.html', **ctx)
@app.route('/top', methods=['GET'])
def top():
""" return top papers """
ttstr = request.args.get('timefilter', 'week') # default is week
vstr = request.args.get('vfilter', 'all') # default is all (no filter)
legend = {'day':1, '3days':3, 'week':7, 'month':30, 'year':365, 'alltime':10000}
tt = legend.get(ttstr, 7)
curtime = int(time.time()) # in seconds
papers = [p for p in TOP_SORTED_PAPERS if curtime - p['time_published'] < tt*24*60*60]
papers = papers_filter_version(papers, vstr)
ctx = default_context(papers, render_format='top',
msg='Top papers based on people\'s libraries:')
return render_template('main.html', **ctx)
@app.route('/toptwtr', methods=['GET'])
def toptwtr():
""" return top papers """
papers = TWITTER_TOP
ctx = default_context(papers, render_format='toptwtr',
msg='Top papers mentioned on Twitter over last 5 days:')
return render_template('main.html', **ctx)
@app.route('/library')
def library():
""" render user's library """
papers = papers_from_library()
ret = encode_json(papers, 500) # cap at 500 papers in someone's library. that's a lot!
if g.user:
msg = '%d papers in your library:' % (len(ret), )
else:
msg = 'You must be logged in. Once you are, you can save papers to your library (with the save icon on the right of each paper) and they will show up here.'
ctx = default_context(papers, render_format='library', msg=msg)
return render_template('main.html', **ctx)
@app.route('/libtoggle', methods=['POST'])
def review():
""" user wants to toggle a paper in his library """
# make sure user is logged in
if not g.user:
return 'NO' # fail... (not logged in). JS should prevent from us getting here.
idvv = request.form['pid'] # includes version
if not isvalidid(idvv):
return 'NO' # fail, malformed id. weird.
pid = strip_version(idvv)
if not pid in db:
return 'NO' # we don't know this paper. wat
uid = session['user_id'] # id of logged in user
# check this user already has this paper in library
record = query_db('''select * from library where
user_id = ? and paper_id = ?''', [uid, pid], one=True)
print(record)
ret = 'NO'
if record:
# record exists, erase it.
g.db.execute('''delete from library where user_id = ? and paper_id = ?''', [uid, pid])
g.db.commit()
#print('removed %s for %s' % (pid, uid))
ret = 'OFF'
else:
# record does not exist, add it.
rawpid = strip_version(pid)
g.db.execute('''insert into library (paper_id, user_id, update_time) values (?, ?, ?)''',
[rawpid, uid, int(time.time())])
g.db.commit()
#print('added %s for %s' % (pid, uid))
ret = 'ON'
return ret
@app.route('/login', methods=['POST'])
def login():
""" logs in the user. if the username doesn't exist creates the account """
if not request.form['username']:
flash('You have to enter a username')
elif not request.form['password']:
flash('You have to enter a password')
elif get_user_id(request.form['username']) is not None:
# username already exists, fetch all of its attributes
user = query_db('''select * from user where
username = ?''', [request.form['username']], one=True)
if check_password_hash(user['pw_hash'], request.form['password']):
# password is correct, log in the user
session['user_id'] = get_user_id(request.form['username'])
flash('User ' + request.form['username'] + ' logged in.')
else:
# incorrect password
flash('User ' + request.form['username'] + ' already exists, wrong password.')
else:
# create account and log in
creation_time = int(time.time())
g.db.execute('''insert into user (username, pw_hash, creation_time) values (?, ?, ?)''',
[request.form['username'],
generate_password_hash(request.form['password']),
creation_time])
user_id = g.db.execute('select last_insert_rowid()').fetchall()[0][0]
g.db.commit()
session['user_id'] = user_id
flash('New account %s created' % (request.form['username'], ))
return redirect(url_for('intmain'))
@app.route('/logout')
def logout():
session.pop('user_id', None)
flash('You were logged out')
return redirect(url_for('intmain'))
# -----------------------------------------------------------------------------
# int main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-p', '--prod', dest='prod', action='store_true', help='run in prod?')
parser.add_argument('-r', '--num_results', dest='num_results', type=int, default=200, help='number of results to return per query')
parser.add_argument('--port', dest='port', type=int, default=5000, help='port to serve on')
args = parser.parse_args()
print(args)
print('loading the paper database', Config.db_path)
db = pickle.load(open(Config.db_path, 'rb'))
print('loading tfidf_meta', Config.meta_path)
meta = pickle.load(open(Config.meta_path, "rb"))
vocab = meta['vocab']
idf = meta['idf']
print('loading paper similarities', Config.sim_path)
sim_dict = pickle.load(open(Config.sim_path, "rb"))
print('loading user recommendations', Config.user_sim_path)
if os.path.isfile(Config.user_sim_path):
user_sim = pickle.load(open(Config.user_sim_path, 'rb'))
else:
user_sim = {}
print('loading twitter top', Config.tweet_path)
if os.path.isfile(Config.tweet_path):
TWITTER_TOP = pickle.load(open(Config.tweet_path, 'rb'))
TWITTER_TOP = [db[pid] for count,pid in TWITTER_TOP]
else:
TWITTER_TOP = []
print('precomputing papers date sorted...')
DATE_SORTED_PAPERS = date_sort()
if not os.path.isfile(Config.database_path):
print('did not find as.db, trying to create an empty database from schema.sql...')
print('this needs sqlite3 to be installed!')
os.system('sqlite3 as.db < schema.sql')
# compute top papers in peoples' libraries
print('computing top papers...')
def get_popular():
sqldb = sqlite3.connect(Config.database_path)
sqldb.row_factory = sqlite3.Row # to return dicts rather than tuples
libs = sqldb.execute('''select * from library''').fetchall()
counts = {}
for lib in libs:
pid = lib['paper_id']
counts[pid] = counts.get(pid, 0) + 1
return counts
top_counts = get_popular()
top_paper_counts = sorted([(v,k) for k,v in top_counts.items() if v > 0], reverse=True)
print(top_paper_counts[:min(30, len(top_paper_counts))])
TOP_SORTED_PAPERS = [db[q[1]] for q in top_paper_counts]
# compute min and max time for all papers
tts = [time.mktime(dateutil.parser.parse(p['updated']).timetuple()) for pid,p in db.items()]
ttmin = min(tts)*1.0
ttmax = max(tts)*1.0
for pid,p in db.items():
tt = time.mktime(dateutil.parser.parse(p['updated']).timetuple())
p['tscore'] = (tt-ttmin)/(ttmax-ttmin)
# some utilities for creating a search index for faster search
punc = "'!\"#$%&\'()*+,./:;<=>?@[\\]^_`{|}~'" # removed hyphen from string.punctuation
trans_table = {ord(c): None for c in punc}
def makedict(s, forceidf=None, scale=1.0):
words = set(s.lower().translate(trans_table).strip().split())
out = {}
for w in words: # todo: if we're using bigrams in vocab then this won't search over them
if forceidf is None:
if w in vocab:
# we have idf for this
idfval = idf[vocab[w]]*scale
else:
idfval = 1.0*scale # assume idf 1.0 (low)
else:
idfval = forceidf
out[w] = idfval
return out
def merge_dicts(dlist):
out = {}
for d in dlist:
for k,v in d.items():
out[k] = out.get(k,0) + v
return out
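# hypothetical usage sketch (comments only, not part of the original file): makedict
# turns a string into an idf-weighted bag of words, and merge_dicts sums several such
# bags into a single search record per paper, e.g.
#   makedict('Attention Is All You Need', forceidf=5)
#     -> {'attention': 5, 'is': 5, 'all': 5, 'you': 5, 'need': 5}
#   merge_dicts([{'attention': 5}, {'attention': 2.1}, {'need': 5}])
#     -> {'attention': 7.1, 'need': 5}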
# caching: check if db.p is younger than search_dict.p
recompute_index = True
if os.path.isfile(Config.search_dict_path):
db_modified_time = os.path.getmtime(Config.db_path)
search_modified_time = os.path.getmtime(Config.search_dict_path)
if search_modified_time > db_modified_time:
# search index exists and is more recent, no need
recompute_index = False
if recompute_index:
print('building an index for faster search...')
for pid in db:
p = db[pid]
dict_title = makedict(p['title'], forceidf=5, scale=3)
dict_authors = makedict(' '.join(x['name'] for x in p['authors']), forceidf=5)
dict_categories = {x['term'].lower():5 for x in p['tags']}
if 'and' in dict_authors:
# special case for "and" handling in authors list
del dict_authors['and']
dict_summary = makedict(p['summary'])
SEARCH_DICT[pid] = merge_dicts([dict_title, dict_authors, dict_categories, dict_summary])
# and cache it in file
print('writing ', Config.search_dict_path, ' as cache...')
safe_pickle_dump(SEARCH_DICT, Config.search_dict_path)
else:
print('loading cached index for faster search from', Config.search_dict_path)
SEARCH_DICT = pickle.load(open(Config.search_dict_path, 'rb'))
# start
if args.prod:
# run on Tornado instead, since running raw Flask in prod is not recommended
print('starting tornado!')
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
from tornado.log import enable_pretty_logging
enable_pretty_logging()
http_server = HTTPServer(WSGIContainer(app))
http_server.listen(args.port)
IOLoop.instance().start()
else:
print('starting flask!')
app.debug = True
app.run(port=args.port)
|
arxiv-sanity-preserver-master
|
serve.py
|
"""
Queries arxiv API and downloads papers (the query is a parameter).
The script is intended to enrich an existing database pickle (by default db.p),
so this file will be loaded first, and then new results will be added to it.
"""
import os
import time
import pickle
import random
import argparse
import urllib.request
import feedparser
from utils import Config, safe_pickle_dump
def encode_feedparser_dict(d):
"""
helper function to get rid of feedparser bs with a deep copy.
I hate when libs wrap simple things in their own classes.
"""
if isinstance(d, feedparser.FeedParserDict) or isinstance(d, dict):
j = {}
for k in d.keys():
j[k] = encode_feedparser_dict(d[k])
return j
elif isinstance(d, list):
l = []
for k in d:
l.append(encode_feedparser_dict(k))
return l
else:
return d
def parse_arxiv_url(url):
"""
example: http://arxiv.org/abs/1512.08756v2
we want to extract the raw id and the version
"""
ix = url.rfind('/')
idversion = url[ix+1:] # extract just the id (and the version)
parts = idversion.split('v')
assert len(parts) == 2, 'error parsing url ' + url
return parts[0], int(parts[1])
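# hypothetical usage sketch (comments only, not part of the original file):
#   parse_arxiv_url('http://arxiv.org/abs/1512.08756v2') -> ('1512.08756', 2)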
if __name__ == "__main__":
# parse input arguments
parser = argparse.ArgumentParser()
parser.add_argument('--search-query', type=str,
default='cat:cs.CV+OR+cat:cs.AI+OR+cat:cs.LG+OR+cat:cs.CL+OR+cat:cs.NE+OR+cat:stat.ML',
help='query used for arxiv API. See http://arxiv.org/help/api/user-manual#detailed_examples')
parser.add_argument('--start-index', type=int, default=0, help='0 = most recent API result')
parser.add_argument('--max-index', type=int, default=10000, help='upper bound on paper index we will fetch')
parser.add_argument('--results-per-iteration', type=int, default=100, help='passed to arxiv API')
parser.add_argument('--wait-time', type=float, default=5.0, help='lets be gentle to arxiv API (in number of seconds)')
parser.add_argument('--break-on-no-added', type=int, default=1, help='break out early if all returned query papers are already in db? 1=yes, 0=no')
args = parser.parse_args()
# misc hardcoded variables
base_url = 'http://export.arxiv.org/api/query?' # base api query url
print('Searching arXiv for %s' % (args.search_query, ))
# lets load the existing database to memory
try:
db = pickle.load(open(Config.db_path, 'rb'))
except Exception as e:
print('error loading existing database:')
print(e)
print('starting from an empty database')
db = {}
# -----------------------------------------------------------------------------
# main loop where we fetch the new results
print('database has %d entries at start' % (len(db), ))
num_added_total = 0
for i in range(args.start_index, args.max_index, args.results_per_iteration):
print("Results %i - %i" % (i,i+args.results_per_iteration))
query = 'search_query=%s&sortBy=lastUpdatedDate&start=%i&max_results=%i' % (args.search_query,
i, args.results_per_iteration)
with urllib.request.urlopen(base_url+query) as url:
response = url.read()
parse = feedparser.parse(response)
num_added = 0
num_skipped = 0
for e in parse.entries:
j = encode_feedparser_dict(e)
# extract just the raw arxiv id and version for this paper
rawid, version = parse_arxiv_url(j['id'])
j['_rawid'] = rawid
j['_version'] = version
# add to our database if we didn't have it before, or if this is a new version
if not rawid in db or j['_version'] > db[rawid]['_version']:
db[rawid] = j
print('Updated %s added %s' % (j['updated'].encode('utf-8'), j['title'].encode('utf-8')))
num_added += 1
num_added_total += 1
else:
num_skipped += 1
# print some information
print('Added %d papers, already had %d.' % (num_added, num_skipped))
if len(parse.entries) == 0:
print('Received no results from arxiv. Rate limiting? Exiting. Restart later maybe.')
print(response)
break
if num_added == 0 and args.break_on_no_added == 1:
print('No new papers were added. Assuming no new papers exist. Exiting.')
break
print('Sleeping for %i seconds' % (args.wait_time , ))
time.sleep(args.wait_time + random.uniform(0, 3))
# save the database before we quit, if we found anything new
if num_added_total > 0:
print('Saving database with %d papers to %s' % (len(db), Config.db_path))
safe_pickle_dump(db, Config.db_path)
|
arxiv-sanity-preserver-master
|
fetch_papers.py
|
"""
Use imagemagick to convert all pfds to a sequence of thumbnail images
requires: sudo apt-get install imagemagick
"""
import os
import sys
import time
import shutil
from subprocess import Popen
from utils import Config
# make sure imagemagick is installed
if not shutil.which('convert'): # shutil.which needs Python 3.3+
print("ERROR: you don\'t have imagemagick installed. Install it first before calling this script")
sys.exit()
# create if necessary the directories we're using for processing and output
pdf_dir = os.path.join('data', 'pdf')
if not os.path.exists(Config.thumbs_dir): os.makedirs(Config.thumbs_dir)
if not os.path.exists(Config.tmp_dir): os.makedirs(Config.tmp_dir)
# fetch all pdf filenames in the pdf directory
files_in_pdf_dir = os.listdir(pdf_dir)
pdf_files = [x for x in files_in_pdf_dir if x.endswith('.pdf')] # filter to just pdfs, just in case
# iterate over all pdf files and create the thumbnails
for i,p in enumerate(pdf_files):
pdf_path = os.path.join(pdf_dir, p)
thumb_path = os.path.join(Config.thumbs_dir, p + '.jpg')
if os.path.isfile(thumb_path):
print("skipping %s, thumbnail already exists." % (pdf_path, ))
continue
print("%d/%d processing %s" % (i, len(pdf_files), p))
# take first 8 pages of the pdf ([0-7]), since the 9th page onwards is usually references
# tile them horizontally, use JPEG compression 80, trim the borders for each image
#cmd = "montage %s[0-7] -mode Concatenate -tile x1 -quality 80 -resize x230 -trim %s" % (pdf_path, "thumbs/" + f + ".jpg")
#print "EXEC: " + cmd
# nvm, below using a roundabout alternative that is worse and requires temporary files, yuck!
# but i found that it succeeds more often. I can't remember what happened anymore but I remember
# that the version above, while more elegant, had some problem with it on some pdfs. I think.
# erase previous intermediate files thumb-*.png in the tmp directory
if os.path.isfile(os.path.join(Config.tmp_dir, 'thumb-0.png')):
for i in range(8):
f = os.path.join(Config.tmp_dir, 'thumb-%d.png' % (i,))
f2= os.path.join(Config.tmp_dir, 'thumbbuf-%d.png' % (i,))
if os.path.isfile(f):
cmd = 'mv %s %s' % (f, f2)
os.system(cmd)
# okay originally I was going to issue an rm call, but I am too terrified of
# running scripted rm queries, so what we will do is instead issue a "mv" call
# to rename the files. That's a bit safer, right? We have to do this because if
# some papers are shorter than 8 pages, then results from previous paper will
# "leak" over to this result, through the intermediate files.
# spawn async. convert can unfortunately enter an infinite loop, have to handle this.
# this command will generate 8 independent images thumb-0.png ... thumb-7.png of the thumbnails
pp = Popen(['convert', '%s[0-7]' % (pdf_path, ), '-thumbnail', 'x156', os.path.join(Config.tmp_dir, 'thumb.png')])
t0 = time.time()
while time.time() - t0 < 20: # give it 20 seconds deadline
ret = pp.poll()
if not (ret is None):
# process terminated
break
time.sleep(0.1)
ret = pp.poll()
if ret is None:
print("convert command did not terminate in 20 seconds, terminating.")
pp.terminate() # give up
if not os.path.isfile(os.path.join(Config.tmp_dir, 'thumb-0.png')):
# failed to render pdf, replace with missing image
missing_thumb_path = os.path.join('static', 'missing.jpg')
os.system('cp %s %s' % (missing_thumb_path, thumb_path))
print("could not render pdf, creating a missing image placeholder")
else:
cmd = "montage -mode concatenate -quality 80 -tile x1 %s %s" % (os.path.join(Config.tmp_dir, 'thumb-*.png'), thumb_path)
print(cmd)
os.system(cmd)
time.sleep(0.01) # silly way for allowing for ctrl+c termination
|
arxiv-sanity-preserver-master
|
thumb_pdf.py
|
from contextlib import contextmanager
import os
import re
import pickle
import tempfile
# global settings
# -----------------------------------------------------------------------------
class Config(object):
# main paper information repo file
db_path = 'db.p'
# intermediate processing folders
pdf_dir = os.path.join('data', 'pdf')
txt_dir = os.path.join('data', 'txt')
thumbs_dir = os.path.join('static', 'thumbs')
# intermediate pickles
tfidf_path = 'tfidf.p'
meta_path = 'tfidf_meta.p'
sim_path = 'sim_dict.p'
user_sim_path = 'user_sim.p'
tweet_path = 'twitter.p' # written by twitter_daemon.py
# sql database file
database_path = 'as.db'
search_dict_path = 'search_dict.p'
tmp_dir = 'tmp'
# Context managers for atomic writes courtesy of
# http://stackoverflow.com/questions/2333872/atomic-writing-to-file-with-python
@contextmanager
def _tempfile(*args, **kws):
""" Context for temporary file.
Will find a free temporary filename upon entering
and will try to delete the file on leaving
Parameters
----------
suffix : string
optional file suffix
"""
fd, name = tempfile.mkstemp(*args, **kws)
os.close(fd)
try:
yield name
finally:
try:
os.remove(name)
except OSError as e:
if e.errno == 2:
pass
else:
raise e
@contextmanager
def open_atomic(filepath, *args, **kwargs):
""" Open temporary file object that atomically moves to destination upon
exiting.
Allows reading and writing to and from the same filename.
Parameters
----------
filepath : string
the file path to be opened
fsync : bool
whether to force write the file to disk
kwargs : mixed
Any valid keyword arguments for :code:`open`
"""
fsync = kwargs.pop('fsync', False)
with _tempfile(dir=os.path.dirname(filepath)) as tmppath:
with open(tmppath, *args, **kwargs) as f:
yield f
if fsync:
f.flush()
os.fsync(f.fileno())
os.rename(tmppath, filepath)
def safe_pickle_dump(obj, fname):
with open_atomic(fname, 'wb') as f:
pickle.dump(obj, f, -1)
# arxiv utils
# -----------------------------------------------------------------------------
def strip_version(idstr):
""" identity function if arxiv id has no version, otherwise strips it. """
parts = idstr.split('v')
return parts[0]
# "1511.08198v1" is an example of a valid arxiv id that we accept
def isvalidid(pid):
return re.match(r'^\d+\.\d+(v\d+)?$', pid)
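if __name__ == '__main__':
    # minimal self-test sketch (not part of the original file), exercising the helpers
    # above under the assumption that writes to a temporary directory are acceptable
    assert strip_version('1511.08198v1') == '1511.08198'
    assert isvalidid('1511.08198v1') and not isvalidid('robots.txt')
    with tempfile.TemporaryDirectory() as tmp_dir:
        example_path = os.path.join(tmp_dir, 'example.p')
        safe_pickle_dump({'hello': 'world'}, example_path)
        with open(example_path, 'rb') as f:
            assert pickle.load(f) == {'hello': 'world'}
    print('utils.py self-test passed')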
|
arxiv-sanity-preserver-master
|
utils.py
|
import re
import pytz
import time
import pickle
import datetime
from dateutil import parser
import twitter # pip install python-twitter
from utils import Config, safe_pickle_dump
sleep_time = 60*10 # in seconds
max_days_keep = 5 # max number of days to keep a tweet in memory
def get_db_pids():
print('loading the paper database', Config.db_path)
db = pickle.load(open(Config.db_path, 'rb'))
# I know this looks weird, but I don't trust dict_keys to be efficient with "in" operator.
# I also don't trust it to keep some reference to the whole dict, as I'm hoping db here deallocates.
# Can't find good docs here
pid_dict = {p:1 for p in db}
return pid_dict
def get_keys():
lines = open('twitter.txt', 'r').read().splitlines()
return lines
# authenticate
keys = get_keys()
api = twitter.Api(consumer_key=keys[0],
consumer_secret=keys[1],
access_token_key=keys[2],
access_token_secret=keys[3])
print(api.VerifyCredentials())
def extract_arxiv_pids(r):
pids = []
for u in r.urls:
m = re.search('arxiv.org/abs/(.+)', u.expanded_url)
if m:
rawid = m.group(1)
pids.append(rawid)
return pids
db_pids = get_db_pids()
seen = {}
epochd = datetime.datetime(1970,1,1,tzinfo=pytz.utc) # time of epoch
while True:
try:
results = api.GetSearch(raw_query="q=arxiv.org&result_type=recent&count=100")
ok = True
except Exception as e:
print('there was some problem:')
print(e)
time.sleep(sleep_time)
continue
tnow = time.time()
num_processed = 0
parsed = []
for r in results:
arxiv_pids = extract_arxiv_pids(r)
arxiv_pids = [p for p in arxiv_pids if p in db_pids] # filter to those that are in our paper db
if not arxiv_pids: continue # nothing relevant here, lets move on
if r.id in seen: continue # skip, already saw and recorded
seen[r.id] = {'seen':tnow} # mark as seen at this time
num_processed += 1
# collect all arxiv paper ids from valid urls
seen[r.id]['pids'] = arxiv_pids
# parse & records time of this tweet
d = parser.parse(r.created_at)
time_posted = (d - epochd).total_seconds()
seen[r.id]['time_posted'] = time_posted
print('processed %d/%d new tweets. Currently maintaining total %d' % (num_processed, len(results), len(seen)))
# maintain state: if something was seen > few days ago, forget it
maxdt = 60*60*24*max_days_keep
seen_new = { tweetid:d for tweetid,d in seen.items() if tnow - d['time_posted'] < maxdt }
print('previous seen dict had %d tweets, pruning to %d' % (len(seen), len(seen_new)))
seen = seen_new # swap
# compile all votes and write output for serving
votes = {}
for tweetid,d in seen.items():
for pid in d['pids']:
votes[pid] = votes.get(pid, 0) + 1
votes = [(v,k) for k,v in votes.items()]
votes.sort(reverse=True, key=lambda x: x[0]) # descending
print('top votes', votes[:min(len(votes), 10)])
print('writing', Config.tweet_path)
safe_pickle_dump(votes, Config.tweet_path)
# and sleep for a while
print('sleeping', sleep_time)
time.sleep(sleep_time)
|
arxiv-sanity-preserver-master
|
twitter_daemon.py
|
import os
import time
import pickle
import shutil
import random
from urllib.request import urlopen
from utils import Config
timeout_secs = 10 # after this many seconds we give up on a paper
if not os.path.exists(Config.pdf_dir): os.makedirs(Config.pdf_dir)
have = set(os.listdir(Config.pdf_dir)) # get list of all pdfs we already have
numok = 0
numtot = 0
db = pickle.load(open(Config.db_path, 'rb'))
for pid,j in db.items():
pdfs = [x['href'] for x in j['links'] if x['type'] == 'application/pdf']
assert len(pdfs) == 1
pdf_url = pdfs[0] + '.pdf'
basename = pdf_url.split('/')[-1]
fname = os.path.join(Config.pdf_dir, basename)
# try retrieve the pdf
numtot += 1
try:
if not basename in have:
print('fetching %s into %s' % (pdf_url, fname))
req = urlopen(pdf_url, None, timeout_secs)
with open(fname, 'wb') as fp:
shutil.copyfileobj(req, fp)
time.sleep(0.05 + random.uniform(0,0.1))
else:
print('%s exists, skipping' % (fname, ))
numok+=1
except Exception as e:
print('error downloading: ', pdf_url)
print(e)
print('%d/%d of %d downloaded ok.' % (numok, numtot, len(db)))
print('final number of papers downloaded okay: %d/%d' % (numok, len(db)))
|
arxiv-sanity-preserver-master
|
download_pdfs.py
|
"""
Reads txt files of all papers and computes tfidf vectors for all papers.
Dumps results to file tfidf.p
"""
import os
import pickle
from random import shuffle, seed
import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from utils import Config, safe_pickle_dump
seed(1337)
max_train = 10000 # max number of tfidf training documents (chosen randomly), for memory efficiency
# read database
db = pickle.load(open(Config.db_path, 'rb'))
# read all text files for all papers into memory
txt_paths, pids = [], []
n = 0
for pid,j in db.items():
n += 1
idvv = '%sv%d' % (j['_rawid'], j['_version'])
txt_path = os.path.join('data', 'txt', idvv) + '.pdf.txt'
if os.path.isfile(txt_path): # some pdfs dont translate to txt
with open(txt_path, 'r') as f:
txt = f.read()
if len(txt) > 1000 and len(txt) < 500000: # 500K is VERY conservative upper bound
txt_paths.append(txt_path) # todo later: maybe filter or something some of them
pids.append(idvv)
print("read %d/%d (%s) with %d chars" % (n, len(db), idvv, len(txt)))
else:
print("skipped %d/%d (%s) with %d chars: suspicious!" % (n, len(db), idvv, len(txt)))
else:
print("could not find %s in txt folder." % (txt_path, ))
print("in total read in %d text files out of %d db entries." % (len(txt_paths), len(db)))
# compute tfidf vectors with scikits
v = TfidfVectorizer(input='content',
encoding='utf-8', decode_error='replace', strip_accents='unicode',
lowercase=True, analyzer='word', stop_words='english',
token_pattern=r'(?u)\b[a-zA-Z_][a-zA-Z0-9_]+\b',
ngram_range=(1, 2), max_features = 10000,
norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=True,
max_df=1.0, min_df=1)
# create an iterator object to conserve memory
def make_corpus(paths):
for p in paths:
with open(p, 'r') as f:
txt = f.read()
yield txt
# train
train_txt_paths = list(txt_paths) # duplicate
shuffle(train_txt_paths) # shuffle
train_txt_paths = train_txt_paths[:min(len(train_txt_paths), max_train)] # crop
print("training on %d documents..." % (len(train_txt_paths), ))
train_corpus = make_corpus(train_txt_paths)
v.fit(train_corpus)
# transform
print("transforming %d documents..." % (len(txt_paths), ))
corpus = make_corpus(txt_paths)
X = v.transform(corpus)
print(v.vocabulary_)
print(X.shape)
# write full matrix out
out = {}
out['X'] = X # this one is heavy!
print("writing", Config.tfidf_path)
safe_pickle_dump(out, Config.tfidf_path)
# writing lighter metadata information into a separate (smaller) file
out = {}
out['vocab'] = v.vocabulary_
out['idf'] = v._tfidf.idf_
out['pids'] = pids # a full idvv string (id and version number)
out['ptoi'] = { x:i for i,x in enumerate(pids) } # pid to ix in X mapping
print("writing", Config.meta_path)
safe_pickle_dump(out, Config.meta_path)
print("precomputing nearest neighbor queries in batches...")
X = X.todense() # originally it's a sparse matrix
sim_dict = {}
batch_size = 200
for i in range(0,len(pids),batch_size):
i1 = min(len(pids), i+batch_size)
xquery = X[i:i1] # BxD
ds = -np.asarray(np.dot(X, xquery.T)) #NxD * DxB => NxB
IX = np.argsort(ds, axis=0) # NxB
for j in range(i1-i):
sim_dict[pids[i+j]] = [pids[q] for q in list(IX[:50,j])]
print('%d/%d...' % (i, len(pids)))
print("writing", Config.sim_path)
safe_pickle_dump(sim_dict, Config.sim_path)
|
arxiv-sanity-preserver-master
|
analyze.py
|
from setuptools import setup, find_packages
setup(
name = 'deep-linear-network',
packages = find_packages(),
version = '0.0.1',
license='MIT',
description = 'Deep Linear Network - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/deep-linear-network',
keywords = [
'artificial intelligence',
'attention mechanism',
],
install_requires=[
'torch',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
deep-linear-network-main
|
setup.py
|
from deep_linear_network.deep_linear_network import DeepLinear
|
deep-linear-network-main
|
deep_linear_network/__init__.py
|
import torch
from torch import nn
from functools import reduce
def mm(x, y):
return x @ y
class DeepLinear(nn.Module):
def __init__(self, dim_in, *dims):
super().__init__()
dims = [dim_in, *dims]
pairs = list(zip(dims[:-1], dims[1:]))
weights = list(map(lambda d: nn.Parameter(torch.randn(d)), pairs))
self.weights = nn.ParameterList(weights)
self._cache = None
def forward(self, x):
if self.training:
self._cache = None
return reduce(mm, self.weights, x)
if self._cache is not None:
return x @ self._cache
head, *tail = self.weights
weight = reduce(mm, tail, head)
self._cache = weight
return x @ weight
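if __name__ == '__main__':
    # hypothetical usage sketch (not part of the original file): a deep linear network
    # that behaves like a single collapsed linear map at inference time
    net = DeepLinear(512, 256, 128)
    x = torch.randn(2, 512)
    out_train = net(x)       # training mode: multiplies through each weight, no caching
    net.eval()
    out_eval = net(x)        # eval mode: collapses the weights once and caches the product
    assert out_train.shape == out_eval.shape == (2, 128)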
|
deep-linear-network-main
|
deep_linear_network/deep_linear_network.py
|
from setuptools import setup, find_packages
setup(
name = 'molecule-attention-transformer',
packages = find_packages(),
version = '0.0.4',
license='MIT',
description = 'Molecule Attention Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/molecule-attention-transformer',
keywords = [
'artificial intelligence',
'attention mechanism',
'molecules'
],
install_requires=[
'torch>=1.6',
'einops>=0.3'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
molecule-attention-transformer-main
|
setup.py
|
from molecule_attention_transformer.molecule_attention_transformer import MAT
|
molecule-attention-transformer-main
|
molecule_attention_transformer/__init__.py
|
import torch
import torch.nn.functional as F
from functools import partial
from torch import nn, einsum
from einops import rearrange
# constants
DIST_KERNELS = {
'exp': {
'fn': lambda t: torch.exp(-t),
'mask_value_fn': lambda t: torch.finfo(t.dtype).max
},
'softmax': {
'fn': lambda t: torch.softmax(t, dim = -1),
'mask_value_fn': lambda t: -torch.finfo(t.dtype).max
}
}
# helpers
def exists(val):
return val is not None
def default(val, d):
return d if not exists(val) else val
# helper classes
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return x + self.fn(x, **kwargs)
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, dim_out = None, mult = 4):
super().__init__()
dim_out = default(dim_out, dim)
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim_out)
)
def forward(self, x):
return self.net(x)
class Attention(nn.Module):
def __init__(self, dim, heads = 8, dim_head = 64, Lg = 0.5, Ld = 0.5, La = 1, dist_kernel_fn = 'exp'):
super().__init__()
inner_dim = dim_head * heads
self.heads= heads
self.scale = dim_head ** -0.5
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
# hyperparameters controlling the weighted linear combination from
# self-attention (La)
# adjacency graph (Lg)
# pair-wise distance matrix (Ld)
self.La = La
self.Ld = Ld
self.Lg = Lg
self.dist_kernel_fn = dist_kernel_fn
def forward(self, x, mask = None, adjacency_mat = None, distance_mat = None):
h, La, Ld, Lg, dist_kernel_fn = self.heads, self.La, self.Ld, self.Lg, self.dist_kernel_fn
qkv = self.to_qkv(x)
q, k, v = rearrange(qkv, 'b n (h qkv d) -> b h n qkv d', h = h, qkv = 3).unbind(dim = -2)
dots = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
assert dist_kernel_fn in DIST_KERNELS, f'distance kernel function needs to be one of {DIST_KERNELS.keys()}'
dist_kernel_config = DIST_KERNELS[dist_kernel_fn]
if exists(distance_mat):
distance_mat = rearrange(distance_mat, 'b i j -> b () i j')
if exists(adjacency_mat):
adjacency_mat = rearrange(adjacency_mat, 'b i j -> b () i j')
if exists(mask):
mask_value = torch.finfo(dots.dtype).max
mask = mask[:, None, :, None] * mask[:, None, None, :]
# mask attention
dots.masked_fill_(~mask, -mask_value)
if exists(distance_mat):
# mask distance to infinity
# todo - make sure for softmax distance kernel, use -infinity
dist_mask_value = dist_kernel_config['mask_value_fn'](dots)
distance_mat.masked_fill_(~mask, dist_mask_value)
if exists(adjacency_mat):
adjacency_mat.masked_fill_(~mask, 0.)
attn = dots.softmax(dim = -1)
# sum contributions from adjacency and distance tensors
attn = attn * La
if exists(adjacency_mat):
attn = attn + Lg * adjacency_mat
if exists(distance_mat):
distance_mat = dist_kernel_config['fn'](distance_mat)
attn = attn + Ld * distance_mat
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# main class
class MAT(nn.Module):
def __init__(
self,
*,
dim_in,
model_dim,
dim_out,
depth,
heads = 8,
Lg = 0.5,
Ld = 0.5,
La = 1,
dist_kernel_fn = 'exp'
):
super().__init__()
self.embed_to_model = nn.Linear(dim_in, model_dim)
self.layers = nn.ModuleList([])
for _ in range(depth):
layer = nn.ModuleList([
Residual(PreNorm(model_dim, Attention(model_dim, heads = heads, Lg = Lg, Ld = Ld, La = La, dist_kernel_fn = dist_kernel_fn))),
Residual(PreNorm(model_dim, FeedForward(model_dim)))
])
self.layers.append(layer)
self.norm_out = nn.LayerNorm(model_dim)
self.ff_out = FeedForward(model_dim, dim_out)
def forward(
self,
x,
mask = None,
adjacency_mat = None,
distance_mat = None
):
x = self.embed_to_model(x)
for (attn, ff) in self.layers:
x = attn(
x,
mask = mask,
adjacency_mat = adjacency_mat,
distance_mat = distance_mat
)
x = ff(x)
x = self.norm_out(x)
x = x.mean(dim = -2)
x = self.ff_out(x)
return x
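if __name__ == '__main__':
    # hypothetical usage sketch (not part of the original file): random atom features,
    # adjacency and distance matrices for a single 10-atom molecule; dims are arbitrary
    model = MAT(dim_in = 26, model_dim = 64, dim_out = 1, depth = 2, heads = 4)
    atoms = torch.randn(1, 10, 26)
    mask = torch.ones(1, 10).bool()
    adjacency_mat = torch.empty(1, 10, 10).random_(2).float()
    distance_mat = torch.randn(1, 10, 10).abs()
    pred = model(atoms, mask = mask, adjacency_mat = adjacency_mat, distance_mat = distance_mat)
    assert pred.shape == (1, 1)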
|
molecule-attention-transformer-main
|
molecule_attention_transformer/molecule_attention_transformer.py
|
from setuptools import setup, find_packages
setup(
name = 'perfusion-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.23',
license='MIT',
description = 'Perfusion - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/perfusion-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'memory editing',
'text-to-image'
],
install_requires=[
'beartype',
'einops>=0.6.1',
'open-clip-torch',
'opt-einsum',
'torch>=2.0'
],
include_package_data = True,
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
perfusion-pytorch-main
|
setup.py
|
from math import ceil
from copy import deepcopy
from pathlib import Path
from beartype import beartype
from beartype.typing import Union, List, Optional, Tuple
import torch
from torch import nn, einsum, Tensor
from torch.nn import Module
import torch.nn.functional as F
from einops import rearrange, reduce
from opt_einsum import contract as opt_einsum
from perfusion_pytorch.open_clip import OpenClipAdapter
# precomputed covariance paths
# will add for more models going forward, if the paper checks out
CURRENT_DIR = Path(__file__).parents[0]
DATA_DIR = CURRENT_DIR / 'data'
assert DATA_DIR.is_dir()
COVARIANCE_FILENAME_BY_TEXT_IMAGE_MODEL = dict(
SD15 = DATA_DIR / 'covariance_CLIP_ViT-L-14.pt'
)
assert all([filepath.exists() for filepath in COVARIANCE_FILENAME_BY_TEXT_IMAGE_MODEL.values()])
# helpers
def exists(val):
return val is not None
def is_all_unique(arr):
return len(set(arr)) == len(arr)
# function for calculating C - input covariance
@beartype
@torch.no_grad()
def calculate_input_covariance(
clip: OpenClipAdapter,
texts: List[str],
batch_size = 32,
**cov_kwargs
):
num_batches = ceil(len(texts) / batch_size)
all_embeds = []
length = len(texts)
for batch_ind in range(num_batches):
start_index = batch_ind * batch_size
batch_texts = texts[start_index:(start_index + batch_size)]
embeds, mask = clip.embed_texts(batch_texts)
all_embeds.append(embeds[mask])
all_embeds = torch.cat(all_embeds, dim = 0)
return einsum('n d, n e -> d e', all_embeds, all_embeds) / length
# loss weighted by the mask
@beartype
def loss_fn_weighted_by_mask(
pred: Tensor,
target: Tensor,
mask: Tensor,
normalized_mask_min_value = 0.
):
assert mask.shape[-2:] == pred.shape[-2:] == target.shape[-2:]
assert mask.shape[0] == pred.shape[0] == target.shape[0]
assert (mask.amin() >= 0.).all(), 'mask should not have values below 0'
if mask.ndim == 4:
assert mask.shape[1] == 1
mask = rearrange(mask, 'b 1 h w -> b h w')
loss = F.mse_loss(pred, target, reduction = 'none')
loss = reduce(loss, 'b c h w -> b h w')
# normalize mask by max
normalized_mask = mask / mask.amax(dim = -1, keepdim = True).clamp(min = 1e-5)
normalized_mask = normalized_mask.clamp(min = normalized_mask_min_value)
loss = loss * normalized_mask
return loss.mean()
# a module that wraps the keys and values projection of the cross attentions to text encodings
class Rank1EditModule(Module):
@beartype
def __init__(
self,
key_or_values_proj: nn.Linear,
*,
num_concepts: int = 1,
C: Optional[Tensor] = None, # covariance of input, precomputed from 100K laion text
default_model = 'SD15',
text_seq_len: int = 77,
is_key_proj: bool = False,
input_decay = 0.99,
train_beta = 0.75,
train_temperature = 0.1,
eval_beta = 0.70, # in paper, specified a range (0.6 - 0.75) for local-key lock, and (0.4 -0.6) for global-key lock
eval_temperature = 0.15,
frac_gradient_concept_embed = 0.1, # they use a slower learning rate for the embed - this can be achieved by a trick to reduce the gradients going backwards through an operation
multi_concepts_use_cholesky = False # use an approximated technique without Cholesky root for multiple concepts
):
super().__init__()
assert not exists(key_or_values_proj.bias), 'key value projection in attention should not have bias'
self.num_concepts = num_concepts
self.multi_concepts_use_cholesky = multi_concepts_use_cholesky
self.weight = key_or_values_proj.weight
dim_output, dim_input = self.weight.shape
self.train_beta = train_beta
self.train_temperature = train_temperature
self.eval_beta = eval_beta
self.eval_temperature = eval_temperature
self.input_decay = input_decay
self.text_seq_len = text_seq_len
# for the lowered learning rate on the concept embed (0.006 vs 0.03 or something)
assert 0 < frac_gradient_concept_embed <= 1.
self.frac_gradient_concept_embed = frac_gradient_concept_embed
# for exponentially smoothing the inputs
# will smooth both concept and superclass token inputs
self.register_buffer('initted', torch.zeros(num_concepts, 1).bool())
self.register_buffer('ema_concept_text_encs', torch.zeros(num_concepts, dim_input))
# concept outputs - only optimized for values, but not keys
self.is_key_proj = is_key_proj # will lock the output to the super-class, and turn off gradients
self.concept_outputs = nn.Parameter(torch.zeros(num_concepts, dim_output), requires_grad = not is_key_proj)
# input covariance C in the paper, inverse precomputed
# if covariance was not passed in, then use default for SD1.5, precomputed by @BradVidler
if not exists(C):
covariance_filepath = COVARIANCE_FILENAME_BY_TEXT_IMAGE_MODEL.get(default_model, None)
assert exists(covariance_filepath), f'{default_model} not found in the list of precomputed covariances {tuple(COVARIANCE_FILENAME_BY_TEXT_IMAGE_MODEL.keys())}'
C = torch.load(str(covariance_filepath))
print(f'precomputed covariance loaded from {str(covariance_filepath)}')
# calculate C_inv
C_inv = torch.inverse(C)
self.register_buffer('C_inv', C_inv)
@property
def num_concepts(self):
return self._num_concepts
@num_concepts.setter
def num_concepts(self, value):
self._num_concepts = value
if value == 1 or not self.multi_concepts_use_cholesky:
return
# for multiple concepts
# need cholesky decomposed L_t_inv
# Appendix B
try:
L = torch.linalg.cholesky(self.C_inv)
except:
print('unable to perform cholesky. please make sure input covariance matrix is properly calculated')
exit()
L_T = L.T
L_T_inv = torch.inverse(L_T)
self.register_buffer('L_T', L_T, persistent = False)
self.register_buffer('L_T_inv', L_T_inv, persistent = False)
@property
def device(self):
return next(self.buffers()).device
def parameters(self):
if self.is_key_proj:
return []
return [self.concept_outputs]
@beartype
def forward(
self,
text_enc: Tensor,
*,
concept_indices: Optional[Tensor] = None,
text_enc_with_superclass: Optional[Tensor] = None,
concept_id: Union[int, Tuple[int, ...]] = 0
):
assert text_enc.shape[-2] == self.text_seq_len, f'CLIP text sequence length is set to be {self.text_seq_len}, but received text encoding with length {text_enc.shape[-2]}'
"""
following the pseudocode of Algorithm 1 in appendix
einstein notation:
b - batch
n - sequence
d - feature dimension
i - input dimension
o - output dimension
c - concepts dimension (for multiple concepts)
"""
batch, device = text_enc.shape[0], self.C_inv.device
weights, decay, Ci = self.weight, self.input_decay, self.C_inv
# reduce learning rate going back to the text encoder and into the concept embed
text_enc = text_enc * self.frac_gradient_concept_embed + text_enc.detach() * (1 - self.frac_gradient_concept_embed)
# beta and temperature depends on whether training or inference
beta, temperature = (self.train_beta, self.train_temperature) if self.training else (self.eval_beta, self.eval_temperature)
# determine whether it is single (for training) or multi-concept (only at inference)
# may separate into different modules at a future date if too complex in one module
is_multi_concepts = isinstance(concept_id, tuple)
if is_multi_concepts:
assert not self.training, 'multi concepts can only be done at inference'
assert is_all_unique(concept_id)
assert all([cid < self.num_concepts for cid in concept_id])
concept_id_tensor = torch.tensor(concept_id, dtype = torch.long, device = self.device)
else:
assert concept_id < self.num_concepts
concept_id_tensor = torch.tensor([concept_id], dtype = torch.long, device = self.device)
# get the initialization state
if self.training:
initted = self.initted[concept_id].item()
# extract the concept text encoding input
batch_indices = torch.arange(batch, device = device)
batch_indices = rearrange(batch_indices, 'b -> b 1')
concept_indices = rearrange(concept_indices, 'b -> b 1')
concept_text_enc = text_enc[batch_indices, concept_indices]
concept_text_enc = reduce(concept_text_enc, 'b 1 d -> d', 'mean')
# only if training
# do exponential smoothing of the inputs, both concept and superclass
superclass_output = None
if exists(text_enc_with_superclass):
superclass_text_enc = text_enc_with_superclass[batch_indices, concept_indices]
superclass_text_enc = reduce(superclass_text_enc, 'b 1 d -> d', 'mean')
superclass_output = einsum('i, o i -> o', superclass_text_enc, weights)
# store the superclass i* if not all initialized
# else fetch it from the buffer
if not initted:
assert exists(superclass_output), 'text_enc_with_superclass must be passed in for the first batch'
# init concept output with superclass output - fixed for keys, learned for values
self.concept_outputs[concept_id].data.copy_(superclass_output)
elif exists(superclass_output) and self.is_key_proj:
# if text enc with superclass is passed in for more than 1 batch
# just take the opportunity to exponentially average it a bit more for the keys, which have fixed concept output (to superclass)
ema_concept_output = self.concept_outputs[concept_id] * decay + superclass_output * (1. - decay)
self.concept_outputs[concept_id].data.copy_(ema_concept_output)
# if any in the batch is not initialized, initialize
if not initted:
ema_concept_text_enc = concept_text_enc
else:
ema_concept_text_enc = self.ema_concept_text_encs[concept_id]
# exponential moving average for concept input encoding
ema_concept_text_enc = ema_concept_text_enc * decay + concept_text_enc * (1. - decay)
# store
if not initted:
self.initted[concept_id].data.copy_(Tensor([True]))
# update ema i_*
self.ema_concept_text_encs[concept_id].data.copy_(ema_concept_text_enc)
else:
assert self.initted[concept_id_tensor].all(), 'you have not initialized or trained this module for the concepts id given'
# make it easier to match with paper
i, o, W = self.ema_concept_text_encs[concept_id_tensor], self.concept_outputs[concept_id_tensor], weights
# if training, i* needs gradients. use straight-through?
# check with author about this
if self.training:
i = i + concept_text_enc - concept_text_enc.detach()
# main contribution eq (3)
i_energy = opt_einsum('c o, o i, c i -> c', i, Ci, i)
i_energy_inv = i_energy ** -1
sim = opt_einsum('b n o, o i, c i -> c b n', text_enc, Ci, i)
# calculate W_em_orthogonal_term - depends on single or multiple concepts
if is_multi_concepts:
if self.multi_concepts_use_cholesky:
L_T, L_T_inv = self.L_T, self.L_T_inv
# metric - metric space - variable with tilde in Appendix B
# equation (6)
i_metric = einsum('o i, c i -> c o', L_T, i)
u_metric, _ = torch.linalg.qr(i_metric.T)
u = einsum('o i, i c -> c o', L_T_inv, u_metric)
# equation (10)
em_orthogonal = text_enc - opt_einsum('c o, b n i, c i -> b n o', u, text_enc, u)
W_em_orthogonal_term = einsum('b n i, o i -> b n o', em_orthogonal, W)
else:
# an approximated version, without Cholesky root
# author says to use this preferentially, and fallback to Cholesky root if there are issues
text_enc_output = einsum('b n i, o i -> b n o', text_enc, W)
W_em_orthogonal_term = text_enc_output - opt_einsum('c b n, c i, o i, c -> b n o', sim, i, W, i_energy_inv)
else:
text_enc_output = einsum('b n i, o i -> b n o', text_enc, W)
concept_output = einsum('c i, o i -> c o', i, W)
W_em_orthogonal_term = text_enc_output - opt_einsum('c b n, c o, c -> b n o', sim, concept_output, i_energy_inv)
# calculate sigmoid_term (gating)
sim = rearrange(sim, 'c b n -> c b n 1')
i_energy = rearrange(i_energy, 'c -> c 1 1 1')
sigmoid_term = (((sim / i_energy) - beta) / temperature).sigmoid()
gated_term = sigmoid_term * rearrange(o, 'c d -> c 1 1 d')
gated_term = reduce(gated_term, 'c ... -> ...', 'sum')
return W_em_orthogonal_term + gated_term
# for merging trained Rank1EditModule(s) above
@beartype
def merge_rank1_edit_modules(
*modules: Rank1EditModule,
use_cholesky = False
) -> Rank1EditModule:
assert all([m.initted.all() for m in modules]), 'all modules must be initialized and ideally trained'
assert len(set([m.concept_outputs.shape[-1] for m in modules])) == 1, 'concept output dimension must be the same'
assert len(set([m.is_key_proj for m in modules])) == 1, 'all modules must be either for keys, or values. you cannot merge rank 1 edit modules of keys and values together'
first_module = modules[0]
merged_module = deepcopy(first_module)
merged_module.multi_concepts_use_cholesky = use_cholesky
total_concepts = sum([m.num_concepts for m in modules])
merged_module.num_concepts = total_concepts
concept_outputs = torch.cat(tuple(m.concept_outputs.data for m in modules), dim = 0)
merged_module.concept_outputs = nn.Parameter(concept_outputs, requires_grad = not first_module.is_key_proj)
ema_concept_text_encs = torch.cat(tuple(m.ema_concept_text_encs.data for m in modules), dim = 0)
merged_module.register_buffer('ema_concept_text_encs', ema_concept_text_encs)
merged_module.register_buffer('initted', torch.ones(total_concepts, 1).bool())
return merged_module
# function for wiring up the cross attention
@beartype
def make_key_value_proj_rank1_edit_modules_(
cross_attention: nn.Module,
*,
input_covariance: Tensor,
key_proj_name: str,
value_proj_name: str,
**rank1_edit_module_kwargs
):
linear_key = getattr(cross_attention, key_proj_name, None)
linear_values = getattr(cross_attention, value_proj_name, None)
assert isinstance(linear_key, nn.Linear), f'{key_proj_name} must point to where the keys projection is (ex. self.to_keys = nn.Linear(in, out, bias = False) -> key_proj_name = "to_keys")'
assert isinstance(linear_values, nn.Linear), f'{value_proj_name} must point to where the values projection is (ex. self.to_values = nn.Linear(in, out, bias = False) -> value_proj_name = "to_values")'
rank1_edit_module_keys = Rank1EditModule(linear_key, C = input_covariance, is_key_proj = True, **rank1_edit_module_kwargs)
rank1_edit_module_values = Rank1EditModule(linear_values, C = input_covariance, is_key_proj = False, **rank1_edit_module_kwargs)
setattr(cross_attention, key_proj_name, rank1_edit_module_keys)
setattr(cross_attention, value_proj_name, rank1_edit_module_values)
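if __name__ == '__main__':
    # hypothetical usage sketch (not part of the original file): wrap a single values
    # projection with Rank1EditModule, using an identity input covariance purely to
    # exercise the shapes; real usage passes a covariance precomputed from text encodings
    to_values = nn.Linear(768, 320, bias = False)
    edited_to_values = Rank1EditModule(to_values, C = torch.eye(768), num_concepts = 1)
    text_enc = torch.randn(2, 77, 768)
    text_enc_with_superclass = torch.randn(2, 77, 768)
    concept_indices = torch.tensor([5, 5])   # position of the concept token in each prompt
    out = edited_to_values(
        text_enc,
        concept_indices = concept_indices,
        text_enc_with_superclass = text_enc_with_superclass,
        concept_id = 0
    )
    assert out.shape == (2, 77, 320)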
|
perfusion-pytorch-main
|
perfusion_pytorch/perfusion.py
|
from pathlib import Path
import torch
from torch import nn
from torch.nn import Module
from beartype import beartype
from perfusion_pytorch.embedding import EmbeddingWrapper
from perfusion_pytorch.perfusion import Rank1EditModule
# helper functions
def exists(val):
return val is not None
# saving and loading the necessary extra finetuned params
@beartype
def save(
text_image_model: Module,
path: str
):
path = Path(path)
path.parents[0].mkdir(exist_ok = True, parents = True)
embed_params = None
key_value_params = []
C_inv = None
for module in text_image_model.modules():
if isinstance(module, EmbeddingWrapper):
assert not exists(embed_params), 'there should only be one wrapped EmbeddingWrapper'
embed_params = module.concepts.data
elif isinstance(module, Rank1EditModule):
key_value_params.append([
module.ema_concept_text_encs.data,
module.concept_outputs.data
])
C_inv = module.C_inv.data
assert exists(C_inv), 'Rank1EditModule not found. you likely did not wire up the text to image model correctly'
pkg = dict(
embed_params = embed_params,
key_value_params = key_value_params,
C_inv = C_inv
)
torch.save(pkg, f'{str(path)}')
print(f'saved to {str(path)}')
@beartype
def load(
text_image_model: Module,
path: str
):
path = Path(path)
assert path.exists(), f'file not found at {str(path)}'
pkg = torch.load(str(path))
embed_params = pkg['embed_params']
key_value_params = pkg['key_value_params']
C_inv = pkg['C_inv']
for module in text_image_model.modules():
if isinstance(module, EmbeddingWrapper):
module.concepts.data.copy_(embed_params)
elif isinstance(module, Rank1EditModule):
assert len(key_value_params) > 0, 'mismatch between what was saved vs what is being loaded'
concept_input, concept_output = key_value_params.pop(0)
module.ema_concept_text_encs.data.copy_(concept_input)
module.concept_outputs.data.copy_(concept_output)
module.C_inv.copy_(C_inv)
module.initted.copy_(torch.tensor([True]))
print(f'loaded concept params from {str(path)}')
|
perfusion-pytorch-main
|
perfusion_pytorch/save_load.py
|
import torch
from torch import nn, Tensor
from torch.nn import Module
from collections import namedtuple
from beartype import beartype
from beartype.door import is_bearable
from beartype.typing import Optional, Tuple, Union, Callable, List
from einops import rearrange
from open_clip import tokenizer
# constants
EmbeddingReturn = namedtuple('EmbeddingReturn', [
'embed_with_concept',
'embed_with_superclass',
'embed_mask',
'concept_indices'
])
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def is_all_unique(arr):
return len(set(arr)) == len(arr)
def filter_tuple_indices(tup, indices):
return tuple(tup[i] for i in indices)
@beartype
def get_mask(
x: Tensor,
ids: Tuple[int, ...]
):
masks = tuple(x == i for i in ids)
mask, *rest_masks = masks
for rest_mask in rest_masks:
mask = mask | rest_mask
return mask
# embedding wrapper class
class EmbeddingWrapper(Module):
@beartype
def __init__(
self,
embed: nn.Embedding,
num_concepts = 1,
superclass_embed_id: Optional[Union[int, Tuple[int, ...]]] = None,
superclass_string: Optional[str] = None,
tokenize: Callable[[List[str]], Tensor] = tokenizer.tokenize,
tokenizer_pad_id: int = 0,
tokenizer_sos_eos_id: Tuple[int, int] = (49406, 49407)
):
super().__init__()
self.embed = embed
num_embeds, dim = embed.weight.shape
self.num_embeds = num_embeds
self.num_concepts = num_concepts
self.concepts = nn.Parameter(torch.zeros(num_concepts, dim))
assert not (exists(superclass_embed_id) and exists(superclass_string)), 'either superclass embed id is given, or the superclass string'
self.pad_id = tokenizer_pad_id
self.tokenize = None
if exists(superclass_string):
self.tokenize = tokenize
ids = tokenize([superclass_string])[0]
mask_for_ids = get_mask(ids, (tokenizer_pad_id, *tokenizer_sos_eos_id))
ids = ids[~mask_for_ids]
assert ids.shape[-1] == 1, f'your superclass concept string "{superclass_string}" must map to exactly one token id'
superclass_embed_id = ids[0].item()
print(f'superclass embed for "{superclass_string}" set as {superclass_embed_id}')
print(f'you can now pass in a list of strings containing superclass concept, and this wrapper will return the embedding w/ concept and superclass required for finetuning')
self.superclass_embed_id = superclass_embed_id
assert not (exists(superclass_embed_id) and num_concepts > 1), 'cannot do multi concept with superclass embed id given'
if exists(superclass_embed_id):
# author had better results initializing the concept embed to the super class embed, allow for that option
if not isinstance(superclass_embed_id, tuple):
superclass_embed_id = (superclass_embed_id,)
superclass_embed_indices = torch.tensor(list(superclass_embed_id))
superclass_embeds = embed(superclass_embed_indices)
self.concepts.data.copy_(superclass_embeds)
else:
# otherwise initialize to usually small init for embeds
nn.init.normal_(self.concepts, std = 0.02)
self.concept_embed_ids = tuple(range(num_embeds, num_embeds + num_concepts))
def parameters(self):
return [self.concepts]
@property
def device(self):
return self.concepts.device
@beartype
def forward(
self,
x: Union[Tensor, List[str]],
concept_id: Optional[Union[int, Tuple[int, ...]]] = None,
return_embed_with_superclass = True,
clip_transformer_fn: Optional[Callable[[Tensor], Tensor]] = None
) -> EmbeddingReturn:
assert not (self.training and self.num_concepts > 1), 'cannot train with multiple concepts'
if self.training:
concept_id = default(concept_id, 0)
if exists(concept_id):
if not isinstance(concept_id, tuple):
concept_id = (concept_id,)
assert not self.training or len(concept_id) == 1, 'can only train or inference on single concepts if passing in list of superclass strings'
assert not self.training or self.num_concepts == 1
if is_bearable(x, List[str]):
inferred_concept_id = self.concept_embed_ids[0]
x = self.tokenize(x)
x = x.to(self.device)
superclass_mask = x == self.superclass_embed_id
assert superclass_mask.any(dim = -1).all(), 'superclass embed id must be present for all prompts'
# automatically replace the superclass id with the concept id
x = torch.where(superclass_mask, inferred_concept_id, x)
# get the embedding mask, defined as not padding id
# default to open clip tokenizer padding id of 0
embed_mask = x != self.pad_id
# get masks for all concepts (support for multi-concepts)
concept_masks = tuple(concept_id == x for concept_id in self.concept_embed_ids)
if exists(concept_id):
assert is_all_unique(concept_id), 'concept ids must be all unique'
assert all([cid < self.num_concepts for cid in concept_id])
has_concept = tuple(concept_mask.any(dim = -1).all() for concept_mask in concept_masks)
assert all(filter_tuple_indices(has_concept, concept_id)), f'concept ids {filter_tuple_indices(self.concept_embed_ids, concept_id)} not found in ids passed in'
concept_masks = filter_tuple_indices(concept_masks, concept_id)
# just fetch the first embedding, as the concept embeddings are kept external to nn.Embedding
for concept_mask in concept_masks:
x = x.masked_fill(concept_mask, 0)
# get all the embeddings that are not the concept or superclass concept
# rest of embeddings are also not learnable, only concept embedding
with torch.no_grad():
embeds = self.embed(x)
embeds.detach_()
# substitute the concept back into the embeddings
for concept, concept_mask in zip(self.concepts, concept_masks):
embeds = torch.where(
rearrange(concept_mask, '... -> ... 1'),
concept,
embeds
)
# whether to return concept indices for the rank-1-edit modules
concept_indices = None
if self.training and exists(concept_id) and len(concept_id) == 1:
concept_mask, = concept_masks
concept_indices = (concept_mask.cumsum(dim = -1) == 0).sum(dim = -1).long()
# if training, and superclass embed id given
# also return embeddings with superclass, for deriving superclass_text_enc
superclass_embeds = None
if self.training and exists(self.superclass_embed_id) and return_embed_with_superclass:
x = x.masked_fill(concept_masks[0], self.superclass_embed_id)
with torch.no_grad():
superclass_embeds = self.embed(x)
# if the clip transformer function is passed in, transform the embeds and superclass_embeds into the text_enc and superclass_text_enc, to be forwarded by cross attentions into the Rank1EditModules
if exists(clip_transformer_fn):
with torch.no_grad():
embeds = clip_transformer_fn(embeds)
if exists(superclass_embeds):
superclass_embeds = clip_transformer_fn(superclass_embeds)
# return tuple, with
# 1. text embeds | encodings
# 2. superclass text embeds | encoding
# 3. text mask
# 4. concept indices
return EmbeddingReturn(embeds, superclass_embeds, embed_mask, concept_indices)
# a wrapper for clip
# that automatically wraps the token embedding with new concept
# and on forward, passes the concept embeddings + superclass concept embeddings through the text transformer + final layernorm
# as well as make the forward pass the ids and superclass_ids through the modified text encoder twice (will attempt to substitute the nn.Embedding with an nn.Identity)
class OpenClipEmbedWrapper(Module):
@beartype
def __init__(
self,
clip: Module,
text_transformer_path = 'transformer',
ln_final_path = 'ln_final', # in CLIP, they had the final layernorm separate from the transformer
**embedding_wrapper_kwargs
):
super().__init__()
self.wrapped_embed = EmbeddingWrapper(clip.token_embedding, **embedding_wrapper_kwargs)
path_to_modules = dict([(path, mod) for path, mod in clip.named_modules()])
assert text_transformer_path in path_to_modules
text_transformer = path_to_modules[text_transformer_path]
ln_final = path_to_modules.get(ln_final_path, nn.Identity())
self.text_transformer = nn.Sequential(
text_transformer,
ln_final
)
def forward(
self,
x,
**kwargs
) -> EmbeddingWrapper:
text_embeds, superclass_text_embeds, text_mask, concept_indices = self.wrapped_embed(x, **kwargs)
text_enc = self.text_transformer(text_embeds)
superclass_text_enc = None
if exists(superclass_text_embeds):
superclass_text_enc = self.text_transformer(superclass_text_embeds)
return EmbeddingReturn(text_enc, superclass_text_enc, text_mask, concept_indices)
# merging multiple embedding wrappers (each with one concept) into one embedding wrapper with multiple concepts
@beartype
def merge_embedding_wrappers(
*embeds: EmbeddingWrapper
) -> EmbeddingWrapper:
total_concepts = sum([embed.num_concepts for embed in embeds])
assert len(set([tuple(embed.embed.weight.shape) for embed in embeds])) == 1
embed = embeds[0].embed
merged_concepts = EmbeddingWrapper(
embed = embed,
num_concepts = total_concepts
)
merged_concepts.eval()
concepts = torch.cat(tuple(embed.concepts.data for embed in embeds), dim = 0)
merged_concepts.concepts = nn.Parameter(concepts)
return merged_concepts
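# hedged usage sketch (added; not part of the original file). it assumes an
# EmbeddingWrapper can be constructed from just `embed` and `num_concepts`,
# exactly as merge_embedding_wrappers does above; the vocabulary and embedding
# sizes below are arbitrary placeholders

if __name__ == '__main__':
    token_embed = nn.Embedding(49408, 512)

    wrapper_a = EmbeddingWrapper(embed = token_embed, num_concepts = 1)
    wrapper_b = EmbeddingWrapper(embed = token_embed, num_concepts = 1)

    # combine two separately trained single-concept wrappers into one wrapper
    merged = merge_embedding_wrappers(wrapper_a, wrapper_b)
    assert merged.num_concepts == 2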
|
perfusion-pytorch-main
|
perfusion_pytorch/embedding.py
|
from perfusion_pytorch.perfusion import (
Rank1EditModule,
calculate_input_covariance,
loss_fn_weighted_by_mask,
merge_rank1_edit_modules,
make_key_value_proj_rank1_edit_modules_
)
from perfusion_pytorch.embedding import (
EmbeddingWrapper,
OpenClipEmbedWrapper,
merge_embedding_wrappers
)
from perfusion_pytorch.save_load import (
save,
load
)
from perfusion_pytorch.optimizer import (
get_finetune_parameters,
get_finetune_optimizer
)
|
perfusion-pytorch-main
|
perfusion_pytorch/__init__.py
|
from torch.nn import Module
from torch.optim import AdamW, Adam, Optimizer
from beartype import beartype
from perfusion_pytorch.embedding import EmbeddingWrapper
from perfusion_pytorch.perfusion import Rank1EditModule
# function that automatically finds all the parameters necessary for fine tuning
@beartype
def get_finetune_parameters(text_image_model: Module):
params = []
for module in text_image_model.modules():
if isinstance(module, (EmbeddingWrapper, Rank1EditModule)):
params.extend(module.parameters())
return params
@beartype
def get_finetune_optimizer(
text_image_model: Module,
lr = 1e-4,
wd = 1e-2,
betas = (0.9, 0.99),
eps = 1e-8,
**kwargs
) -> Optimizer:
params = get_finetune_parameters(text_image_model)
assert len(params) > 0, 'no finetuneable parameters found'
total_params = sum([p.numel() for p in params])
print(f'optimizing {total_params} parameters')
has_weight_decay = wd > 0
adam_klass = AdamW if has_weight_decay else Adam
adam_kwargs = dict(lr = lr, betas = betas, eps = eps)
if has_weight_decay:
adam_kwargs.update(weight_decay = wd)
return adam_klass(params, **adam_kwargs, **kwargs)
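# hedged usage sketch (added; not part of the original file): any module tree
# containing an EmbeddingWrapper or Rank1EditModule can be handed to
# get_finetune_optimizer; the tiny model below is a hypothetical stand-in

if __name__ == '__main__':
    from torch import nn

    class TinyTextImageModel(nn.Module):
        def __init__(self):
            super().__init__()
            # wrap a placeholder token embedding table (100 tokens, dim 32)
            self.embed = EmbeddingWrapper(nn.Embedding(100, 32), num_concepts = 1)

    model = TinyTextImageModel()
    optimizer = get_finetune_optimizer(model, lr = 1e-4, wd = 1e-2)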
|
perfusion-pytorch-main
|
perfusion_pytorch/optimizer.py
|
from beartype import beartype
from beartype.typing import List, Optional
import torch
from torch import nn, einsum
import torch.nn.functional as F
from einops import rearrange
import open_clip
def exists(val):
return val is not None
def l2norm(t):
return F.normalize(t, dim = -1)
class OpenClipAdapter(nn.Module):
@beartype
def __init__(
self,
name = 'ViT-B/32',
pretrained = 'laion400m_e32',
tokenizer_name = 'ViT-B-32-quickgelu',
eos_id = 49407
):
super().__init__()
clip, _, preprocess = open_clip.create_model_and_transforms(name, pretrained = pretrained)
tokenizer = open_clip.get_tokenizer(tokenizer_name)
self.clip = clip
self.tokenizer = tokenizer
self.eos_id = eos_id
# hook for getting final text representation
text_attention_final = self.find_layer('ln_final')
self._dim_latent = text_attention_final.weight.shape[0]
self.text_handle = text_attention_final.register_forward_hook(self._text_hook)
# normalize fn
self.clip_normalize = preprocess.transforms[-1]
self.cleared = False
@property
def device(self):
return next(self.parameters()).device
def find_layer(self, layer):
modules = dict([*self.clip.named_modules()])
return modules.get(layer, None)
def clear(self):
if self.cleared:
return
self.text_handle.remove()
def _text_hook(self, _, inputs, outputs):
self.text_encodings = outputs
@property
def dim_latent(self):
return self._dim_latent
@property
def max_text_len(self):
return self.clip.positional_embedding.shape[0]
@beartype
def embed_texts(
self,
texts: List[str]
):
ids = self.tokenizer(texts)
ids = ids.to(self.device)
ids = ids[..., :self.max_text_len]
is_eos_id = (ids == self.eos_id)
text_mask_excluding_eos = is_eos_id.cumsum(dim = -1) == 0
text_mask = F.pad(text_mask_excluding_eos, (1, -1), value = True)
text_mask = text_mask & (ids != 0)
assert not self.cleared
text_embed = self.clip.encode_text(ids)
text_encodings = self.text_encodings
text_encodings = text_encodings.masked_fill(~text_mask[..., None], 0.)
return text_encodings.float(), text_mask
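# hedged usage sketch (added; not part of the original file): instantiating the
# adapter downloads pretrained weights, and the default model name is assumed
# to resolve in the installed open_clip version, so treat this purely as an
# illustration of the embed_texts interface

if __name__ == '__main__':
    adapter = OpenClipAdapter()
    text_enc, text_mask = adapter.embed_texts(['a photo of a dog', 'a photo of a cat'])

    # text_enc: (batch, context length, latent dim), zeroed wherever text_mask is False
    print(text_enc.shape, text_mask.shape)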
|
perfusion-pytorch-main
|
perfusion_pytorch/open_clip.py
|
from setuptools import setup, find_packages
setup(
name = 'ponder-transformer',
packages = find_packages(),
version = '0.0.8',
license='MIT',
description = 'Ponder Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/ponder-transformer',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'adaptive computation time'
],
install_requires=[
'einops>=0.3',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
ponder-transformer-main
|
setup.py
|
from ponder_transformer.ponder_transformer import PonderTransformer
|
ponder-transformer-main
|
ponder_transformer/__init__.py
|
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat
from einops.layers.torch import Rearrange, Reduce
# constants
ABS_MAX_STEPS = 100
# helper functions
def exists(val):
return val is not None
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
def FeedForward(dim, mult = 4):
return nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Linear(dim * mult, dim)
)
class Attention(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
causal = False
):
super().__init__()
self.heads = heads
self.causal = causal
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, x, mask = None):
n, h, device = x.shape[1], self.heads, x.device
qkv = self.to_qkv(x).chunk(3, dim = -1)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), qkv)
sim = einsum('b h i d, b h j d -> b h i j', q, k) * self.scale
mask_value = -torch.finfo(sim.dtype).max
if exists(mask):
mask = rearrange(mask, 'b i -> b () i ()') * rearrange(mask, 'b j -> b () () j')
sim = sim.masked_fill(~mask, mask_value)
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), device = device).triu(j - i + 1).bool()
sim = sim.masked_fill(causal_mask, mask_value)
attn = sim.softmax(dim = -1)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
# pondering classes and helper functions
def pad_to(t, padding, dim = -1, value = 0.):
if dim > 0:
dim = dim - t.ndim
zeroes = -dim - 1
return F.pad(t, (*((0, 0) * zeroes), *padding), value = value)
def safe_cumprod(t, eps = 1e-10, dim = -1):
t = torch.clip(t, min = eps, max = 1.)
return torch.exp(torch.cumsum(torch.log(t), dim = dim))
def exclusive_cumprod(t, dim = -1):
cum_prod = safe_cumprod(t, dim = dim)
return pad_to(cum_prod, (1, -1), value = 1., dim = dim)
def calc_geometric(l, dim = -1):
return exclusive_cumprod(1 - l, dim = dim) * l
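# worked example (added; not in the original file): with a constant per-layer
# halting probability lambda = 0.2, calc_geometric recovers the geometric prior
# p_n = (1 - 0.2) ** (n - 1) * 0.2 over layers, i.e.
# calc_geometric(torch.full((4,), 0.2)) -> tensor([0.2000, 0.1600, 0.1280, 0.1024])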
# main class
class Block(nn.Module):
def __init__(
self,
*,
dim,
dim_head = 64,
heads = 8,
causal = False,
ff_mult = 4
):
super().__init__()
self.causal = causal
self.attn = PreNorm(dim, Attention(dim = dim, dim_head = dim_head, heads = heads, causal = causal))
self.ff = PreNorm(dim, FeedForward(dim = dim, mult = ff_mult))
self.to_halt_logits = nn.Sequential(
nn.Linear(dim, 1),
Rearrange('... () -> ...')
)
def forward(self, x, mask = None):
x = self.attn(x, mask = mask) + x
x = self.ff(x) + x
if self.causal:
denom = torch.arange(x.shape[-2], device = x.device)
denom = rearrange(denom, 'n -> () n ()')
halt_input = x.cumsum(dim = 1) / (denom + 1)
else:
halt_input = x.mean(dim = 1)
halt_logits = self.to_halt_logits(halt_input)
return x, halt_logits
class PonderTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
max_seq_len,
causal = True,
dim_head = 64,
heads = 8,
ponder_kl_div_loss_weight = 0.01,
ponder_lambda_p = 0.2,
ponder_epsilon = 0.05,
eps = 1e-20
):
super().__init__()
self.eps = eps
self.causal = causal
self.seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
self.pos_emb = nn.Embedding(max_seq_len, dim)
# calculate max steps
thres = 1 - ponder_epsilon
halt_probs = calc_geometric(torch.full((ABS_MAX_STEPS,), ponder_lambda_p))
cum_halt_probs = halt_probs.cumsum(dim = 0)
self.train_max_steps = (cum_halt_probs < thres).sum().item()
self.ponder_lambda_p = ponder_lambda_p
self.ponder_kl_div_loss_weight = ponder_kl_div_loss_weight
# pondering block
self.block = Block(
dim = dim,
dim_head = dim_head,
heads = heads,
causal = causal
)
# hidden state to 'y' - output
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x, *, labels = None, mask = None):
n, device, eps, max_steps, causal = x.shape[1], x.device, self.eps, self.train_max_steps, self.causal
x = self.token_emb(x)
pos_emb = self.pos_emb(torch.arange(n, device = device))
x = x + rearrange(pos_emb, 'n d -> () n d')
if self.training:
assert exists(labels), 'labels must be passed in during training'
hiddens = []
halting_logits = []
# training mode
for _ in range(max_steps):
x, halt_logits = self.block(x)
hiddens.append(x)
halting_logits.append(halt_logits)
# stack halting probs (lambda) and y
halting_logits = torch.stack(halting_logits, dim = 1)
halting_probs = calc_geometric(halting_logits.sigmoid(), dim = 1)
hiddens = torch.stack(hiddens, dim = 1)
logits = self.to_logits(hiddens)
# calculate kl div with geometric prior
geometric_dist = calc_geometric(torch.full((max_steps,), self.ponder_lambda_p, device = device))
if self.causal:
geometric_dist = repeat(geometric_dist, 'l -> (l n)', n = n)
halting_probs = rearrange(halting_probs, '... l n -> ... (l n)')
kl_div_loss = F.kl_div(
torch.log(geometric_dist + eps),
halting_probs,
None, None,
'batchmean'
)
# calculate cross entropy loss
labels = repeat(labels, 'b n -> b (l n)', l = max_steps)
logits = rearrange(logits, 'b l n d -> b d (l n)')
ce_loss = F.cross_entropy(logits, labels, ignore_index = 0, reduction = 'none')
weighted_ce_loss = ce_loss * halting_probs
# sum loss
loss = weighted_ce_loss.mean() + self.ponder_kl_div_loss_weight * kl_div_loss.mean()
return loss
else:
# evaluation mode
hiddens = []
halting_logits = []
layer_halt = []
for i in range(self.train_max_steps):
is_last = i == (self.train_max_steps - 1)
x, halt_logits = self.block(x)
hiddens.append(x)
if self.causal:
halt_logits = halt_logits[..., -1]
halting_logits.append(halt_logits)
# calculating halting probs
halting_probs = torch.stack(halting_logits, dim = 1).sigmoid()
p = calc_geometric(halting_probs, dim = 1)[:, -1]
should_halt = torch.rand_like(p) <= p
# stack the halting signal across layers and determine whether to stop early
layer_halt.append(should_halt)
# do not exit early if it is the last one
if is_last:
continue
# break if halting has been sampled for all layers
layer_was_halted = torch.any(torch.stack(layer_halt), dim = 0)
if torch.all(layer_was_halted):
break
# calculate max number of layers
max_num_layers = len(layer_halt)
# stack the hiddens and the boolean tensor indicating halting for each layer
hiddens = torch.stack(hiddens, dim = 1)
layer_halt = torch.stack(layer_halt, dim = 1)
# calculate the index of the first halt signal, and make it the last layer if none of them halted
halt_layer_indices = (layer_halt.cumsum(dim = 1) == 0).sum(dim = 1).clamp(max = max_num_layers - 1)
# select out the correct hidden layers to logits
halt_layer_indices_expanded = repeat(halt_layer_indices, 'b -> b () n d', n = hiddens.shape[-2], d = hiddens.shape[-1])
hiddens = hiddens.gather(1, halt_layer_indices_expanded)
hiddens = rearrange(hiddens, 'b () n d -> b n d')
return self.to_logits(hiddens), halt_layer_indices
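# hedged usage sketch (added; not part of the original file): toy shapes and
# hyperparameters chosen only to illustrate the training and eval modes

if __name__ == '__main__':
    model = PonderTransformer(num_tokens = 256, dim = 64, max_seq_len = 128)

    x = torch.randint(1, 256, (2, 128))
    labels = torch.randint(1, 256, (2, 128))

    loss = model(x, labels = labels)     # training mode returns the pondering loss
    loss.backward()

    model.eval()
    logits, halt_layers = model(x)       # eval mode returns logits + halting layer index per sample
    print(logits.shape, halt_layers)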
|
ponder-transformer-main
|
ponder_transformer/ponder_transformer.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Install compare_gan."""
from setuptools import find_packages
from setuptools import setup
setup(
name='compare_gan',
version='3.0',
description=(
'Compare GAN - A modular library for training and evaluating GANs.'),
author='Google LLC',
author_email='no-reply@google.com',
url='https://github.com/google/compare_gan',
license='Apache 2.0',
packages=find_packages(),
package_data={},
install_requires=[
'future',
'gin-config==0.1.4',
'numpy',
'pandas',
'six',
'tensorflow-datasets==1.0.1',
'tensorflow-hub>=0.2.0',
'tensorflow-gan==0.0.0.dev0',
'matplotlib>=1.5.2',
'pstar>=0.1.6',
'scipy>=1.0.0',
],
extras_require={
'tf': ['tensorflow>=1.12'],
# Evaluation of Hub modules with EMA variables requires TF > 1.12.
'tf_gpu': ['tf-nightly-gpu>=1.13.0.dev20190221'],
'pillow': ['pillow>=5.0.0'],
'tensorflow-probability': ['tensorflow-probability>=0.5.0'],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: Apache Software License',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
keywords='tensorflow machine learning gan',
)
|
compare_gan-master
|
setup.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility classes and methods for testing."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from absl import flags
from compare_gan import eval_utils
from compare_gan.architectures import abstract_arch
from compare_gan.architectures import arch_ops
import gin
import mock
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
def create_fake_inception_graph():
"""Creates a graph that mocks inception.
It takes the input, multiplies it through a matrix full of 0.00001 values
and returns the result as logits. It makes sure to match the tensor names of
the real inception model.
Returns:
tf.Graph object with a simple mock inception inside.
"""
fake_inception = tf.Graph()
with fake_inception.as_default():
graph_input = tf.placeholder(
tf.float32, shape=[None, 299, 299, 3], name="Mul")
matrix = tf.ones(shape=[299 * 299 * 3, 10]) * 0.00001
output = tf.matmul(tf.layers.flatten(graph_input), matrix)
output = tf.identity(output, name="pool_3")
output = tf.identity(output, name="logits")
return fake_inception.as_graph_def()
class Generator(abstract_arch.AbstractGenerator):
"""Generator with a single linear layer from z to the output."""
def __init__(self, **kwargs):
super(Generator, self).__init__(**kwargs)
self.call_arg_list = []
def apply(self, z, y, is_training):
self.call_arg_list.append(dict(z=z, y=y, is_training=is_training))
batch_size = z.shape[0].value
out = arch_ops.linear(z, np.prod(self._image_shape), scope="fc_noise")
out = tf.nn.sigmoid(out)
return tf.reshape(out, [batch_size] + list(self._image_shape))
class Discriminator(abstract_arch.AbstractDiscriminator):
"""Discriminator with a single linear layer."""
def __init__(self, **kwargs):
super(Discriminator, self).__init__(**kwargs)
self.call_arg_list = []
def apply(self, x, y, is_training):
self.call_arg_list.append(dict(x=x, y=y, is_training=is_training))
h = tf.reduce_mean(x, axis=[1, 2])
out = arch_ops.linear(h, 1)
return tf.nn.sigmoid(out), out, h
class CompareGanTestCase(tf.test.TestCase):
"""Base class for test cases."""
def setUp(self):
super(CompareGanTestCase, self).setUp()
# Use fake datasets instead of reading real files.
FLAGS.data_fake_dataset = True
# Clear the gin configuration.
gin.clear_config()
# Mock the inception graph.
fake_inception_graph = create_fake_inception_graph()
self.inception_graph_def_mock = mock.patch.object(
eval_utils,
"get_inception_graph_def",
return_value=fake_inception_graph).start()
def _get_empty_model_dir(self):
unused_sub_dir = str(datetime.datetime.now().microsecond)
model_dir = os.path.join(FLAGS.test_tmpdir, unused_sub_dir)
assert not tf.gfile.Exists(model_dir)
return model_dir
|
compare_gan-master
|
compare_gan/test_utils.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains SessionRunHooks for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import logging
import tensorflow as tf
class AsyncCheckpointSaverHook(tf.contrib.tpu.AsyncCheckpointSaverHook):
"""Saves checkpoints every N steps in an asynchronous thread.
This is the same as tf.contrib.tpu.AsyncCheckpointSaverHook but guarantees
that there will be a checkpoint every `save_steps` steps. This helps to have
eval results at fixed step counts, even when training is paused between
regular checkpoint intervals.
"""
def after_create_session(self, session, coord):
super(AsyncCheckpointSaverHook, self).after_create_session(session, coord)
# Interruptions to the training job can cause non-regular checkpoints
# (between every_steps). Modify last triggered step to point to the last
# regular checkpoint step to make sure we trigger on the next regular
# checkpoint step.
step = session.run(self._global_step_tensor)
every_steps = self._timer._every_steps # pylint: disable=protected-access
last_triggered_step = step - step % every_steps
self._timer.update_last_triggered_step(last_triggered_step)
class EveryNSteps(tf.train.SessionRunHook):
"""Base class for hooks that execute callbacks every N steps.
class MyHook(EveryNSteps):
def __init__(self, every_n_steps):
super(MyHook, self).__init__(every_n_steps)
def every_n_steps_after_run(self, step, run_context, run_values):
# Your Implementation
If you override begin(), end(), before_run() or after_run(), make sure to
call super() at the beginning.
"""
def __init__(self, every_n_steps):
"""Initializes an `EveryNSteps` hook.
Args:
every_n_steps: `int`, the number of steps to allow between callbacks.
"""
self._timer = tf.train.SecondOrStepTimer(every_steps=every_n_steps)
self._global_step_tensor = None
def begin(self):
self._global_step_tensor = tf.train.get_global_step()
if self._global_step_tensor is None:
raise RuntimeError("Global step must be created to use EveryNSteps.")
def before_run(self, run_context): # pylint: disable=unused-argument
"""Overrides `SessionRunHook.before_run`.
Args:
run_context: A `SessionRunContext` object.
Returns:
None or a `SessionRunArgs` object.
"""
return tf.train.SessionRunArgs({"global_step": self._global_step_tensor})
def after_run(self, run_context, run_values):
"""Overrides `SessionRunHook.after_run`.
Args:
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
step = run_values.results["global_step"]
if self._timer.should_trigger_for_step(step):
self.every_n_steps_after_run(step, run_context, run_values)
self._timer.update_last_triggered_step(step)
def end(self, sess):
step = sess.run(self._global_step_tensor)
self.every_n_steps_after_run(step, None, None)
def every_n_steps_after_run(self, step, run_context, run_values):
"""Callback after every n-th call to run().
Args:
step: Current global_step value.
run_context: A `SessionRunContext` object.
run_values: A SessionRunValues object.
"""
raise NotImplementedError("Subclasses of EveryNSteps should implement "
"every_n_steps_after_run().")
class ReportProgressHook(EveryNSteps):
"""SessionRunHook that reports progress to a `TaskManager` instance."""
def __init__(self, task_manager, max_steps, every_n_steps=100):
"""Create a new instance of ReportProgressHook.
Args:
task_manager: A `TaskManager` instance that implements report_progress().
max_steps: Maximum number of training steps.
every_n_steps: How frequently the hook should report progress.
"""
super(ReportProgressHook, self).__init__(every_n_steps=every_n_steps)
logging.info("Creating ReportProgressHook to report progress every %d "
"steps.", every_n_steps)
self.max_steps = max_steps
self.task_manager = task_manager
self.start_time = None
self.start_step = None
def every_n_steps_after_run(self, step, run_context, run_values):
if self.start_time is None:
# First call.
self.start_time = time.time()
self.start_step = step
return
time_elapsed = time.time() - self.start_time
steps_per_sec = float(step - self.start_step) / time_elapsed
eta_seconds = (self.max_steps - step) / (steps_per_sec + 0.0000001)
message = "{:.1f}% @{:d}, {:.1f} steps/s, ETA: {:.0f} min".format(
100 * step / self.max_steps, step, steps_per_sec, eta_seconds / 60)
logging.info("Reporting progress: %s", message)
self.task_manager.report_progress(message)
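# Hedged sketch (added; not part of the original file): a minimal EveryNSteps
# subclass following the pattern in the class docstring above; it only logs the
# current global step.
class LogGlobalStepHook(EveryNSteps):
  """Logs the global step every N steps."""

  def every_n_steps_after_run(self, step, run_context, run_values):
    del run_context, run_values
    logging.info("Global step is now %d.", step)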
|
compare_gan-master
|
compare_gan/hooks.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests high level behavior of the runner_lib.py."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl import logging
from absl.testing import flagsaver
from absl.testing import parameterized
from compare_gan import eval_gan_lib
from compare_gan import runner_lib
from compare_gan import test_utils
from compare_gan.architectures import arch_ops
from compare_gan.gans.modular_gan import ModularGAN
import gin
import numpy as np
from six.moves import range
import tensorflow as tf
FLAGS = flags.FLAGS
class RunnerLibTest(parameterized.TestCase, test_utils.CompareGanTestCase):
@parameterized.named_parameters([
("SameSeeds", 42, 42),
("DifferentSeeds", 1, 42),
("NoSeeds", None, None),
])
def testWeightInitialization(self, seed1, seed2):
gin.bind_parameter("dataset.name", "cifar10")
gin.bind_parameter("ModularGAN.g_optimizer_fn",
tf.train.GradientDescentOptimizer)
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 1,
"z_dim": 128,
}
work_dir = self._get_empty_model_dir()
seeds = [seed1, seed2]
for i in range(2):
model_dir = os.path.join(work_dir, str(i))
seed = seeds[i]
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir, tf_random_seed=seed)
task_manager = runner_lib.TaskManager(model_dir)
runner_lib.run_with_schedule(
"train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=False)
checkpoint_path_0 = os.path.join(work_dir, "0/model.ckpt-0")
checkpoint_path_1 = os.path.join(work_dir, "1/model.ckpt-0")
checkpoint_reader_0 = tf.train.load_checkpoint(checkpoint_path_0)
checkpoint_reader_1 = tf.train.load_checkpoint(checkpoint_path_1)
for name, _ in tf.train.list_variables(checkpoint_path_0):
tf.logging.info(name)
t0 = checkpoint_reader_0.get_tensor(name)
t1 = checkpoint_reader_1.get_tensor(name)
zero_initialized_vars = [
"bias", "biases", "beta", "moving_mean", "global_step",
"global_step_disc"
]
one_initialized_vars = ["gamma", "moving_variance"]
if any(name.endswith(e) for e in zero_initialized_vars):
# Variables that are always initialized to 0.
self.assertAllClose(t0, np.zeros_like(t0))
self.assertAllClose(t1, np.zeros_like(t1))
elif any(name.endswith(e) for e in one_initialized_vars):
# Variables that are always initialized to 1.
self.assertAllClose(t0, np.ones_like(t0))
self.assertAllClose(t1, np.ones_like(t1))
elif seed1 is not None and seed1 == seed2:
# Same random seed.
self.assertAllClose(t0, t1)
else:
# Different random seeds.
logging.info("name=%s, t0=%s, t1=%s", name, t0, t1)
self.assertNotAllClose(t0, t1)
@parameterized.named_parameters([
("WithRealData", False),
("WithFakeData", True),
])
@flagsaver.flagsaver
def testTrainingIsDeterministic(self, fake_dataset):
FLAGS.data_fake_dataset = fake_dataset
gin.bind_parameter("dataset.name", "cifar10")
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 3,
"z_dim": 128,
}
work_dir = self._get_empty_model_dir()
for i in range(2):
model_dir = os.path.join(work_dir, str(i))
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir, tf_random_seed=3)
task_manager = runner_lib.TaskManager(model_dir)
runner_lib.run_with_schedule(
"train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=False,
num_eval_averaging_runs=1)
checkpoint_path_0 = os.path.join(work_dir, "0/model.ckpt-3")
checkpoint_path_1 = os.path.join(work_dir, "1/model.ckpt-3")
checkpoint_reader_0 = tf.train.load_checkpoint(checkpoint_path_0)
checkpoint_reader_1 = tf.train.load_checkpoint(checkpoint_path_1)
for name, _ in tf.train.list_variables(checkpoint_path_0):
tf.logging.info(name)
t0 = checkpoint_reader_0.get_tensor(name)
t1 = checkpoint_reader_1.get_tensor(name)
self.assertAllClose(t0, t1, msg=name)
@parameterized.parameters([
{"use_tpu": False},
# {"use_tpu": True},
])
def testTrainAndEval(self, use_tpu):
gin.bind_parameter("dataset.name", "cifar10")
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 1,
"z_dim": 128,
}
model_dir = self._get_empty_model_dir()
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
task_manager = runner_lib.TaskManager(model_dir)
runner_lib.run_with_schedule(
"eval_after_train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=use_tpu,
num_eval_averaging_runs=1,
eval_every_steps=None)
expected_files = [
"TRAIN_DONE", "checkpoint", "model.ckpt-0.data-00000-of-00001",
"model.ckpt-0.index", "model.ckpt-0.meta",
"model.ckpt-1.data-00000-of-00001", "model.ckpt-1.index",
"model.ckpt-1.meta", "operative_config-0.gin", "tfhub"]
self.assertAllInSet(expected_files, tf.gfile.ListDirectory(model_dir))
def testTrainAndEvalWithSpectralNormAndEma(self):
gin.bind_parameter("dataset.name", "cifar10")
gin.bind_parameter("ModularGAN.g_use_ema", True)
gin.bind_parameter("G.spectral_norm", True)
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 1,
"z_dim": 128,
}
model_dir = self._get_empty_model_dir()
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
task_manager = runner_lib.TaskManager(model_dir)
runner_lib.run_with_schedule(
"eval_after_train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=False,
num_eval_averaging_runs=1,
eval_every_steps=None)
expected_files = [
"TRAIN_DONE", "checkpoint", "model.ckpt-0.data-00000-of-00001",
"model.ckpt-0.index", "model.ckpt-0.meta",
"model.ckpt-1.data-00000-of-00001", "model.ckpt-1.index",
"model.ckpt-1.meta", "operative_config-0.gin", "tfhub"]
self.assertAllInSet(expected_files, tf.gfile.ListDirectory(model_dir))
def testTrainAndEvalWithBatchNormAccu(self):
gin.bind_parameter("dataset.name", "cifar10")
gin.bind_parameter("standardize_batch.use_moving_averages", False)
gin.bind_parameter("G.batch_norm_fn", arch_ops.batch_norm)
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 1,
"z_dim": 128,
}
model_dir = self._get_empty_model_dir()
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
task_manager = runner_lib.TaskManager(model_dir)
# Wrap _UpdateBnAccumulators to only perform one accumulator update step.
# Otherwise the test case would time out.
orig_update_bn_accumulators = eval_gan_lib._update_bn_accumulators
def mock_update_bn_accumulators(sess, generated, num_accu_examples):
del num_accu_examples
return orig_update_bn_accumulators(sess, generated, num_accu_examples=64)
eval_gan_lib._update_bn_accumulators = mock_update_bn_accumulators
runner_lib.run_with_schedule(
"eval_after_train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=False,
num_eval_averaging_runs=1,
eval_every_steps=None)
expected_tfhub_files = [
"checkpoint", "model-with-accu.ckpt.data-00000-of-00001",
"model-with-accu.ckpt.index", "model-with-accu.ckpt.meta"]
self.assertAllInSet(
expected_tfhub_files,
tf.gfile.ListDirectory(os.path.join(model_dir, "tfhub/0")))
if __name__ == "__main__":
tf.test.main()
|
compare_gan-master
|
compare_gan/runner_lib_test.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dataset loading utilities.
Creates a thin wrapper around TensorFlow Datasets (TFDS) to enable seamless
CPU/GPU/TPU workloads. The main entry point is 'get_dataset' which takes a
dataset name and a random seed and returns the corresponding tf.data.Dataset
object.
Available datasets are defined in the DATASETS dictionary. To add any dataset
supported by TFDS, simply extend the ImageDatasetV2 class as shown below with
the MNIST example and add it to the DATASETS dictionary. Alternatively, you can
extend the ImageDatasetV2 class and load the datasets from another source.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import inspect
from absl import flags
from absl import logging
from compare_gan.tpu import tpu_random
import gin
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
FLAGS = flags.FLAGS
flags.DEFINE_string(
"tfds_data_dir", None,
"TFDS (TensorFlow Datasets) directory. If not set it will default to "
"'~/tensorflow_datasets'. If the directory does not contain the requested "
"dataset TFDS will download the dataset to this folder.")
flags.DEFINE_boolean(
"data_fake_dataset", False,
"If True don't load datasets from disk but create fake values.")
flags.DEFINE_integer(
"data_shuffle_buffer_size", 10000,
"Number of examples for the shuffle buffer.")
# Deprecated, only used for "replacing labels". TFDS will always use 64 threads.
flags.DEFINE_integer(
"data_reading_num_threads", 64,
"The number of threads used to read the dataset.")
class ImageDatasetV2(object):
"""Interface for Image datasets based on TFDS (TensorFlow Datasets).
This class handles both CPU/GPU and TPU data loading settings. If the flag
--data_fake_dataset is True the methods will create a small fake dataset from
in-memory NumPy arrays and not read from disk.
The pipeline of input operations is as follows:
1) Shuffle filenames (with seed).
2) Load file content from disk. Decode images.
Dataset content after this step is a dictionary.
3) Prefetch call here.
4) Filter examples (e.g. by size or label).
5) Parse example.
Dataset content after this step is a tuple of tensors (image, label).
6) train_only: Repeat dataset.
7) Transform (random cropping with seed, resizing).
8) Preprocess (adding sampled noise/labels with seed).
Dataset content after this step is a tuple (feature dictionary, label tensor).
9) train only: Shuffle examples (with seed).
10) Batch examples.
11) Prefetch examples.
Step 1-3 are done by _load_dataset() and wrap tfds.load().
Step 4-11 are done by train_input_fn() and eval_input_fn().
"""
def __init__(self,
name,
tfds_name,
resolution,
colors,
num_classes,
eval_test_samples,
seed):
logging.info("ImageDatasetV2(name=%s, tfds_name=%s, resolution=%d, "
"colors=%d, num_classes=%s, eval_test_samples=%s, seed=%s)",
name, tfds_name, resolution, colors, num_classes,
eval_test_samples, seed)
self._name = name
self._tfds_name = tfds_name
self._resolution = resolution
self._colors = colors
self._num_classes = num_classes
self._eval_test_sample = eval_test_samples
self._seed = seed
self._train_split = tfds.Split.TRAIN
self._eval_split = tfds.Split.TEST
@property
def name(self):
"""Name of the dataset."""
return self._name
@property
def num_classes(self):
return self._num_classes
@property
def eval_test_samples(self):
"""Number of examples in the "test" split of this dataset."""
if FLAGS.data_fake_dataset:
return 100
return self._eval_test_sample
@property
def image_shape(self):
"""Returns a tuple with the image shape."""
return (self._resolution, self._resolution, self._colors)
def _make_fake_dataset(self, split):
"""Returns a fake data set with the correct shapes."""
np.random.seed(self._seed)
num_samples_per_epoch = 100
num_epochs = self.eval_test_samples // 100 if split == "test" else None
images_shape = [num_samples_per_epoch] + list(self.image_shape)
images = np.random.uniform(size=images_shape).astype(np.float32)
labels = np.ones((num_samples_per_epoch,), dtype=np.int32)
ds = tf.data.Dataset.from_tensor_slices((images, labels))
return ds.repeat(num_epochs)
def _get_per_host_random_seed(self, tpu_context=None):
"""Returns the dataset seed according to the TPUContext.
On CPU/GPU it returns the default seed. For TPUs the input_fn is executed
on every host machine (if per-host input is set, which is set by default).
We use a different (but deterministically computed) random seed on each host
to ensure each host machine sees a different stream of input data.
Args:
tpu_context: TPU execution context.
Returns:
The current seed if CPU/GPU and a host-specific seed for TPU.
"""
if self._seed is None:
logging.warning("Dataset seed not set.")
return None
if tpu_context is None:
logging.warning("No TPUContext, using unmodified dataset seed %s.",
self._seed)
return self._seed
seed = self._seed + tpu_context.current_host
logging.info("Running with %d hosts, modifying dataset seed for "
"host %d to %s.", tpu_context.num_hosts,
tpu_context.current_host, seed)
return seed
@gin.configurable("replace_labels", whitelist=["file_pattern"])
def _replace_labels(self, split, ds, file_pattern=None):
"""Replaces the labels in the dataset with labels from separate files.
This functionality is used if one wants to either replace the labels with
soft labels (i.e. softmax over the logits) or label the instances with
a new classifier.
Args:
split: Dataset split (e.g. train/test/validation).
ds: The underlying TFDS object.
file_pattern: Path to the replacement files.
Returns:
An instance of tf.data.Dataset with the updated labels.
"""
if not file_pattern:
return ds
file_pattern = file_pattern.format(split=split)
logging.warning("Using labels from %s for split %s.", file_pattern, split)
label_ds = tf.data.Dataset.list_files(file_pattern, shuffle=False)
label_ds = label_ds.interleave(
tf.data.TFRecordDataset,
cycle_length=FLAGS.data_reading_num_threads)
ds = tf.data.Dataset.zip((ds, label_ds)).map(self._replace_label)
return ds
def _replace_label(self, feature_dict, new_unparsed_label):
"""Replaces the label from the feature_dict with the new label.
Furthermore, if the feature_dict contains a key for the file_name which
identifies an instance, we double-check that we are replacing the label
of the correct instance.
Args:
feature_dict: A serialized TFRecord containing the old label.
new_unparsed_label: A serialized TFRecord containing the new label.
Returns:
The feature_dict with its label replaced by the new label.
"""
label_spec = {
"file_name": tf.FixedLenFeature((), tf.string),
"label": tf.FixedLenFeature((), tf.int64),
}
parsed_label = tf.parse_single_example(new_unparsed_label, label_spec)
with tf.control_dependencies([
tf.assert_equal(parsed_label["file_name"], feature_dict["file_name"])]):
feature_dict["label"] = tf.identity(parsed_label["label"])
return feature_dict
def _parse_fn(self, features):
image = tf.cast(features["image"], tf.float32) / 255.0
return image, features["label"]
def _load_dataset(self, split):
"""Loads the underlying dataset split from disk.
Args:
split: Name of the split to load.
Returns:
Returns a `tf.data.Dataset` object with a tuple of image and label tensor.
"""
if FLAGS.data_fake_dataset:
return self._make_fake_dataset(split)
ds = tfds.load(
self._tfds_name,
split=split,
data_dir=FLAGS.tfds_data_dir,
as_dataset_kwargs={"shuffle_files": False})
ds = self._replace_labels(split, ds)
ds = ds.map(self._parse_fn)
return ds.prefetch(tf.contrib.data.AUTOTUNE)
def _train_filter_fn(self, image, label):
del image, label
return True
def _train_transform_fn(self, image, label, seed):
del seed
return image, label
def _eval_transform_fn(self, image, label, seed):
del seed
return image, label
def train_input_fn(self, params=None, preprocess_fn=None):
"""Input function for reading data.
Args:
params: Python dictionary with parameters. Must contain the key
"batch_size". TPUEstimator will set this for you!
preprocess_fn: Function to process single examples. This is allowed to
have a `seed` argument.
Returns:
`tf.data.Dataset` with preprocessed and batched examples.
"""
if params is None:
params = {}
seed = self._get_per_host_random_seed(params.get("context", None))
logging.info("train_input_fn(): params=%s seed=%s", params, seed)
ds = self._load_dataset(split=self._train_split)
ds = ds.filter(self._train_filter_fn)
ds = ds.repeat()
ds = ds.map(functools.partial(self._train_transform_fn, seed=seed))
if preprocess_fn is not None:
if "seed" in inspect.getargspec(preprocess_fn).args:
preprocess_fn = functools.partial(preprocess_fn, seed=seed)
ds = ds.map(preprocess_fn)
# Add a feature for the random offset of operations in tpu_random.py.
ds = tpu_random.add_random_offset_to_features(ds)
ds = ds.shuffle(FLAGS.data_shuffle_buffer_size, seed=seed)
if "batch_size" in params:
ds = ds.batch(params["batch_size"], drop_remainder=True)
return ds.prefetch(tf.contrib.data.AUTOTUNE)
def eval_input_fn(self, params=None, split=None):
"""Input function for reading data.
Args:
params: Python dictionary with parameters. Must contain the key
"batch_size". TPUEstimator will set this for you!
split: Name of the split to use. If None will use the default eval split
of the dataset.
Returns:
`tf.data.Dataset` with preprocessed and batched examples.
"""
if params is None:
params = {}
if split is None:
split = self._eval_split
seed = self._get_per_host_random_seed(params.get("context", None))
logging.info("eval_input_fn(): params=%s seed=%s", params, seed)
ds = self._load_dataset(split=split)
# No filter, no repeat.
ds = ds.map(functools.partial(self._eval_transform_fn, seed=seed))
# No shuffle.
if "batch_size" in params:
ds = ds.batch(params["batch_size"], drop_remainder=True)
return ds.prefetch(tf.contrib.data.AUTOTUNE)
# For backwards compatibility ImageDataset.
def input_fn(self, params, mode=tf.estimator.ModeKeys.TRAIN,
preprocess_fn=None):
assert mode == tf.estimator.ModeKeys.TRAIN, mode
return self.train_input_fn(params=params, preprocess_fn=preprocess_fn)
# For backwards compatibility ImageDataset.
def load_dataset(self, split_name):
assert split_name == "test", split_name
return self.eval_input_fn()
class MnistDataset(ImageDatasetV2):
"""Wrapper for the MNIST dataset from TFDS."""
def __init__(self, seed):
super(MnistDataset, self).__init__(
name="mnist",
tfds_name="mnist",
resolution=28,
colors=1,
num_classes=10,
eval_test_samples=10000,
seed=seed)
class FashionMnistDataset(ImageDatasetV2):
"""Wrapper for the Fashion-MNIST dataset from TFDS."""
def __init__(self, seed):
super(FashionMnistDataset, self).__init__(
name="fashion_mnist",
tfds_name="fashion_mnist",
resolution=28,
colors=1,
num_classes=10,
eval_test_samples=10000,
seed=seed)
class Cifar10Dataset(ImageDatasetV2):
"""Wrapper for the CIFAR10 dataset from TFDS."""
def __init__(self, seed):
super(Cifar10Dataset, self).__init__(
name="cifar10",
tfds_name="cifar10",
resolution=32,
colors=3,
num_classes=10,
eval_test_samples=10000,
seed=seed)
class CelebaDataset(ImageDatasetV2):
"""Wrapper for the CelebA dataset from TFDS."""
def __init__(self, seed):
super(CelebaDataset, self).__init__(
name="celeb_a",
tfds_name="celeb_a",
resolution=64,
colors=3,
num_classes=None,
eval_test_samples=10000,
seed=seed)
def _parse_fn(self, features):
"""Returns 64x64x3 image and constant label."""
image = features["image"]
image = tf.image.resize_image_with_crop_or_pad(image, 160, 160)
# Note: possibly consider using NumPy's imresize(image, (64, 64))
image = tf.image.resize_images(image, [64, 64])
image.set_shape(self.image_shape)
image = tf.cast(image, tf.float32) / 255.0
label = tf.constant(0, dtype=tf.int32)
return image, label
class LsunBedroomDataset(ImageDatasetV2):
"""Wrapper for the LSUN Bedrooms dataset from TFDS."""
def __init__(self, seed):
super(LsunBedroomDataset, self).__init__(
name="lsun-bedroom",
tfds_name="lsun/bedroom",
resolution=128,
colors=3,
num_classes=None,
eval_test_samples=30000,
seed=seed)
# As the official LSUN validation set only contains 300 samples, which is
insufficient for FID computation, we're splitting off some training
# samples. The smallest percentage selectable through TFDS is 1%, so we're
# going to use that (corresponding roughly to 30000 samples).
# If you want to use fewer eval samples, just modify eval_test_samples.
self._train_split, self._eval_split = \
tfds.Split.TRAIN.subsplit([99, 1])
def _parse_fn(self, features):
"""Returns a 128x128x3 Tensor with constant label 0."""
image = features["image"]
image = tf.image.resize_image_with_crop_or_pad(
image, target_height=128, target_width=128)
image = tf.cast(image, tf.float32) / 255.0
label = tf.constant(0, dtype=tf.int32)
return image, label
def _transform_imagnet_image(image, target_image_shape, crop_method, seed):
"""Preprocesses ImageNet images to have a target image shape.
Args:
image: 3-D tensor with a single image.
target_image_shape: List/Tuple with target image shape.
crop_method: Method for cropping the image:
One of: distorted, random, middle, none
seed: Random seed, only used for `crop_method=distorted`.
Returns:
Image tensor with shape `target_image_shape`.
"""
if crop_method == "distorted":
begin, size, _ = tf.image.sample_distorted_bounding_box(
tf.shape(image),
tf.zeros([0, 0, 4], tf.float32),
aspect_ratio_range=[1.0, 1.0],
area_range=[0.5, 1.0],
use_image_if_no_bounding_boxes=True,
seed=seed)
image = tf.slice(image, begin, size)
# Unfortunately, the above operation loses the depth-dimension. So we need
# to restore it manually.
image.set_shape([None, None, target_image_shape[-1]])
elif crop_method == "random":
tf.set_random_seed(seed)
shape = tf.shape(image)
h, w = shape[0], shape[1]
size = tf.minimum(h, w)
begin = [h - size, w - size] * tf.random.uniform([2], 0, 1)
begin = tf.cast(begin, tf.int32)
begin = tf.concat([begin, [0]], axis=0) # Add channel dimension.
image = tf.slice(image, begin, [size, size, 3])
elif crop_method == "middle":
shape = tf.shape(image)
h, w = shape[0], shape[1]
size = tf.minimum(h, w)
begin = tf.cast([h - size, w - size], tf.float32) / 2.0
begin = tf.cast(begin, tf.int32)
begin = tf.concat([begin, [0]], axis=0) # Add channel dimension.
image = tf.slice(image, begin, [size, size, 3])
elif crop_method != "none":
raise ValueError("Unsupported crop method: {}".format(crop_method))
image = tf.image.resize_images(
image, [target_image_shape[0], target_image_shape[1]])
image.set_shape(target_image_shape)
return image
@gin.configurable("train_imagenet_transform", whitelist=["crop_method"])
def _train_imagenet_transform(image, target_image_shape, seed,
crop_method="distorted"):
return _transform_imagnet_image(
image,
target_image_shape=target_image_shape,
crop_method=crop_method,
seed=seed)
@gin.configurable("eval_imagenet_transform", whitelist=["crop_method"])
def _eval_imagenet_transform(image, target_image_shape, seed,
crop_method="middle"):
return _transform_imagnet_image(
image,
target_image_shape=target_image_shape,
crop_method=crop_method,
seed=seed)
class ImagenetDataset(ImageDatasetV2):
"""ImageNet2012 as defined by TF Datasets."""
def __init__(self, resolution, seed, filter_unlabeled=False):
if resolution not in [64, 128, 256, 512]:
raise ValueError("Unsupported resolution: {}".format(resolution))
super(ImagenetDataset, self).__init__(
name="imagenet_{}".format(resolution),
tfds_name="imagenet2012",
resolution=resolution,
colors=3,
num_classes=1000,
eval_test_samples=50000,
seed=seed)
self._eval_split = tfds.Split.VALIDATION
self._filter_unlabeled = filter_unlabeled
def _train_filter_fn(self, image, label):
del image
if not self._filter_unlabeled:
return True
logging.warning("Filtering unlabeled examples.")
return tf.math.greater_equal(label, 0)
def _train_transform_fn(self, image, label, seed):
image = _train_imagenet_transform(
image=image, target_image_shape=self.image_shape, seed=seed)
return image, label
def _eval_transform_fn(self, image, label, seed):
image = _eval_imagenet_transform(
image=image, target_image_shape=self.image_shape, seed=seed)
return image, label
class SizeFilteredImagenetDataset(ImagenetDataset):
"""ImageNet from TFDS filtered by image size."""
def __init__(self, resolution, threshold, seed):
super(SizeFilteredImagenetDataset, self).__init__(
resolution=resolution,
seed=seed)
self._name = "imagenet_{}_hq{}".format(resolution, threshold)
self._threshold = threshold
def _train_filter_fn(self, image, label):
"""The minimum image dimension has to be larger than the threshold."""
del label
size = tf.math.reduce_min(tf.shape(image)[:2])
return tf.greater_equal(size, self._threshold)
class SingleClassImagenetDataset(ImagenetDataset):
"""ImageNet from TFDS with all instances having a constant label 0.
It can be used to simulate the setting where no labels are provided.
"""
def __init__(self, resolution, seed):
super(SingleClassImagenetDataset, self).__init__(
resolution=resolution,
seed=seed)
self._name = "single_class_" + self._name
self._num_classes = 1
def _parse_fn(self, features):
image, _ = super(SingleClassImagenetDataset, self)._parse_fn(features)
label = tf.constant(0, dtype=tf.int32)
return image, label
class RandomClassImagenetDataset(ImagenetDataset):
"""ImageNet2012 dataset with random labels."""
def __init__(self, resolution, seed):
super(RandomClassImagenetDataset, self).__init__(
resolution=resolution,
seed=seed)
self._name = "random_class_" + self._name
self._num_classes = 1000
def _parse_fn(self, features):
image, _ = super(RandomClassImagenetDataset, self)._parse_fn(features)
label = tf.random.uniform([], minval=0, maxval=1000, dtype=tf.int32)
return image, label
class SoftLabeledImagenetDataset(ImagenetDataset):
"""ImageNet2012 dataset with soft labels."""
def __init__(self, resolution, seed):
super(SoftLabeledImagenetDataset, self).__init__(
resolution=resolution,
seed=seed)
self._name = "soft_labeled_" + self._name
def _replace_label(self, feature_dict, new_unparsed_label):
"""Replaces the label from the feature_dict with the new (soft) label.
The function assumes that the new_unparsed_label contains a list of logits
which will be converted to a soft label using the softmax.
Args:
feature_dict: A serialized TFRecord containing the old label.
new_unparsed_label: A serialized TFRecord containing the new label.
Returns:
The feature_dict with its label replaced by the new soft label.
"""
label_spec = {
"file_name": tf.FixedLenFeature((), tf.string),
"label": tf.FixedLenFeature([self._num_classes], tf.float32)
}
parsed_label = tf.parse_single_example(new_unparsed_label, label_spec)
with tf.control_dependencies([
tf.assert_equal(parsed_label["file_name"], feature_dict["file_name"])]):
feature_dict["label"] = tf.nn.softmax(logits=parsed_label["label"])
return feature_dict
DATASETS = {
"celeb_a": CelebaDataset,
"cifar10": Cifar10Dataset,
"fashion-mnist": FashionMnistDataset,
"lsun-bedroom": LsunBedroomDataset,
"mnist": MnistDataset,
"imagenet_64": functools.partial(ImagenetDataset, resolution=64),
"imagenet_128": functools.partial(ImagenetDataset, resolution=128),
"imagenet_256": functools.partial(ImagenetDataset, resolution=256),
"imagenet_512": functools.partial(ImagenetDataset, resolution=512),
"imagenet_512_hq400": (functools.partial(
SizeFilteredImagenetDataset, resolution=512, threshold=400)),
"soft_labeled_imagenet_128": functools.partial(
SoftLabeledImagenetDataset, resolution=128),
"single_class_imagenet_128": functools.partial(
SingleClassImagenetDataset, resolution=128),
"random_class_imagenet_128": functools.partial(
RandomClassImagenetDataset, resolution=128),
"labeled_only_imagenet_128": functools.partial(
ImagenetDataset, resolution=128, filter_unlabeled=True),
}
@gin.configurable("dataset")
def get_dataset(name, seed=547):
"""Instantiates a data set and sets the random seed."""
if name not in DATASETS:
raise ValueError("Dataset %s is not available." % name)
return DATASETS[name](seed=seed)
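# Hedged sketch (added; not part of the original file) of the extension pattern
# described in the module docstring: wrap another TFDS dataset and register it.
# The "svhn_cropped" TFDS name and its test-set size are illustrative values.
class SvhnDataset(ImageDatasetV2):
  """Wrapper for the SVHN dataset from TFDS."""

  def __init__(self, seed):
    super(SvhnDataset, self).__init__(
        name="svhn",
        tfds_name="svhn_cropped",
        resolution=32,
        colors=3,
        num_classes=10,
        eval_test_samples=26032,
        seed=seed)


# Registration would mirror the entries above, e.g.:
# DATASETS["svhn"] = SvhnDataset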
|
compare_gan-master
|
compare_gan/datasets.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# coding=utf-8
|
compare_gan-master
|
compare_gan/__init__.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for datasets."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
from compare_gan import datasets
import tensorflow as tf
FLAGS = flags.FLAGS
_TPU_SUPPORTED_TYPES = {
tf.float32, tf.int32, tf.complex64, tf.int64, tf.bool, tf.bfloat16
}
def _preprocess_fn_id(images, labels):
return {"images": images}, labels
def _preprocess_fn_add_noise(images, labels, seed=None):
del labels
tf.set_random_seed(seed)
noise = tf.random.uniform([128], maxval=1.0)
return {"images": images}, noise
class DatasetsTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(DatasetsTest, self).setUp()
FLAGS.data_shuffle_buffer_size = 100
def get_element_and_verify_shape(self, dataset_name, expected_shape):
dataset = datasets.get_dataset(dataset_name)
dataset = dataset.eval_input_fn()
image, label = dataset.make_one_shot_iterator().get_next()
# Check if shape is known at compile time, required for TPUs.
self.assertAllEqual(image.shape.as_list(), expected_shape)
self.assertEqual(image.dtype, tf.float32)
self.assertIn(label.dtype, _TPU_SUPPORTED_TYPES)
with self.cached_session() as session:
image = session.run(image)
self.assertEqual(image.shape, expected_shape)
self.assertGreaterEqual(image.min(), 0.0)
self.assertLessEqual(image.max(), 1.0)
def test_mnist(self):
self.get_element_and_verify_shape("mnist", (28, 28, 1))
def test_fashion_mnist(self):
self.get_element_and_verify_shape("fashion-mnist", (28, 28, 1))
def test_celeba(self):
self.get_element_and_verify_shape("celeb_a", (64, 64, 3))
def test_lsun(self):
self.get_element_and_verify_shape("lsun-bedroom", (128, 128, 3))
def _run_train_input_fn(self, dataset_name, preprocess_fn):
dataset = datasets.get_dataset(dataset_name)
with tf.Graph().as_default():
dataset = dataset.input_fn(params={"batch_size": 1},
preprocess_fn=preprocess_fn)
iterator = dataset.make_initializable_iterator()
with self.session() as sess:
sess.run(iterator.initializer)
next_batch = iterator.get_next()
return [sess.run(next_batch) for _ in range(5)]
@parameterized.named_parameters(
("FakeCifar", _preprocess_fn_id),
("FakeCifarWithRandomNoise", _preprocess_fn_add_noise),
)
@flagsaver.flagsaver
def test_train_input_fn_is_determinsitic(self, preprocess_fn):
FLAGS.data_fake_dataset = True
batches1 = self._run_train_input_fn("cifar10", preprocess_fn)
batches2 = self._run_train_input_fn("cifar10", preprocess_fn)
for i in range(len(batches1)):
# Check that both runs got the same images/noise
self.assertAllClose(batches1[i][0], batches2[i][0])
self.assertAllClose(batches1[i][1], batches2[i][1])
@flagsaver.flagsaver
def test_train_input_fn_noise_changes(self):
FLAGS.data_fake_dataset = True
batches = self._run_train_input_fn("cifar10", _preprocess_fn_add_noise)
for i in range(1, len(batches)):
self.assertNotAllClose(batches[0][1], batches[i][1])
self.assertNotAllClose(batches[i - 1][1], batches[i][1])
if __name__ == "__main__":
tf.test.main()
|
compare_gan-master
|
compare_gan/datasets_test.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Binary to train and evaluate a single GAN configuration."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import csv
import os
import re
import time
from absl import flags
from absl import logging
from compare_gan import datasets
from compare_gan import eval_gan_lib
from compare_gan import hooks
from compare_gan.gans import utils
from compare_gan.metrics import fid_score as fid_score_lib
from compare_gan.metrics import inception_score as inception_score_lib
import gin.tf
import numpy as np
import six
import tensorflow as tf
FLAGS = flags.FLAGS
class _DummyParserDelegate(gin.config_parser.ParserDelegate):
"""Dummy class required to parse Gin configs.
Our use case (just get the config as dictionary) does not require real
  implementations of the two methods.
"""
def configurable_reference(self, scoped_name, evaluate):
return scoped_name
def macro(self, scoped_name):
return scoped_name
def _parse_gin_config(config_path):
"""Parses a Gin config into a dictionary. All values are strings."""
with tf.gfile.Open(config_path) as f:
config_str = f.read()
parser = gin.config_parser.ConfigParser(config_str, _DummyParserDelegate())
config = {}
for statement in parser:
if not isinstance(statement, gin.config_parser.ImportStatement):
name = statement.scope + "/" if statement.scope else ""
      name += statement.selector + "." + statement.arg_name
config[name] = statement.value
return config
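# A hedged usage sketch (not part of the original module): `_parse_gin_config`
# flattens every binding into a "<scope>/<selector>.<arg_name>" key with the
# raw value string. The config path and values below are assumptions for
# illustration only.
#
#   config = _parse_gin_config("/tmp/model_dir/operative_config-0.gin")
#   # e.g. {"options.batch_size": "64", "dataset.name": "'cifar10'"}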
@gin.configurable("options")
def get_options_dict(batch_size=gin.REQUIRED,
gan_class=gin.REQUIRED,
architecture=gin.REQUIRED,
training_steps=gin.REQUIRED,
discriminator_normalization=None,
lamba=1,
disc_iters=1,
z_dim=128):
"""Parse legacy options from Gin configurations into a Python dict.
Args:
batch_size: The (global) batch size to use. On TPUs each core will get a
fraction of this.
    gan_class: Reference to the GAN class to use. This must implement the
      AbstractGAN interface.
    architecture: Name of the architecture to use for G and D. This should be
      a value from consts.ARCHITECTURES and be supported by the GAN class.
training_steps: The number of training steps. These are discriminator steps.
discriminator_normalization: Deprecated. Ignored, but kept to read old
configs.
lamba: Weight for gradient penalty.
disc_iters: How often the discriminator is trained before training G for one
step. G will be trained for `training_steps // disc_iters` steps.
z_dim: Length of the latent variable z fed to the generator.
Returns:
A Python dictionary with the options.
"""
del discriminator_normalization
return {
"use_tpu": FLAGS.use_tpu, # For compatibility with AbstractGAN.
"batch_size": batch_size,
"gan_class": gan_class,
"architecture": architecture,
"training_steps": training_steps,
"lambda": lamba, # Different spelling intended.
"disc_iters": disc_iters,
"z_dim": z_dim,
}
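# A hedged illustration (the values below are assumptions, not taken from the
# original configs): `get_options_dict` is Gin-configurable, so the required
# arguments are usually bound in a .gin file and the function is then called
# without arguments, e.g.:
#
#   options.batch_size = 64
#   options.gan_class = @ModularGAN
#   options.architecture = "resnet_cifar_arch"
#   options.training_steps = 200000
#   options.disc_iters = 5
#
# After parsing such bindings, calling `get_options_dict()` returns the
# corresponding Python dictionary.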
class TaskManager(object):
"""Interface for managing a task."""
def __init__(self, model_dir):
self._model_dir = model_dir
@property
def model_dir(self):
return self._model_dir
def mark_training_done(self):
with tf.gfile.Open(os.path.join(self.model_dir, "TRAIN_DONE"), "w") as f:
f.write("")
def is_training_done(self):
return tf.gfile.Exists(os.path.join(self.model_dir, "TRAIN_DONE"))
def add_eval_result(self, checkpoint_path, result_dict, default_value):
pass
def get_checkpoints_with_results(self):
return set()
def unevaluated_checkpoints(self, timeout=0, eval_every_steps=None):
"""Generator for checkpoints without evaluation results.
Args:
timeout: Optional timeout for waiting for new checkpoints. Set this to
        do continuous evaluation.
eval_every_steps: Only evaluate checkpoints from steps divisible by this
integer.
Yields:
Path to checkpoints that have not yet been evaluated.
"""
logging.info("Looking for checkpoints in %s", self._model_dir)
evaluated_checkpoints = self.get_checkpoints_with_results()
last_eval = time.time()
while True:
unevaluated_checkpoints = []
checkpoint_state = tf.train.get_checkpoint_state(self.model_dir)
if checkpoint_state:
checkpoints = set(checkpoint_state.all_model_checkpoint_paths)
# Remove already evaluated checkpoints and sort ascending by step
# number.
unevaluated_checkpoints = checkpoints - evaluated_checkpoints
step_and_ckpt = sorted(
[(int(x.split("-")[-1]), x) for x in unevaluated_checkpoints])
if eval_every_steps:
step_and_ckpt = [(step, ckpt) for step, ckpt in step_and_ckpt
if step > 0 and step % eval_every_steps == 0]
unevaluated_checkpoints = [ckpt for _, ckpt in step_and_ckpt]
logging.info(
"Found checkpoints: %s\nEvaluated checkpoints: %s\n"
"Unevaluated checkpoints: %s", checkpoints, evaluated_checkpoints,
unevaluated_checkpoints)
for checkpoint_path in unevaluated_checkpoints:
yield checkpoint_path
if unevaluated_checkpoints:
evaluated_checkpoints |= set(unevaluated_checkpoints)
last_eval = time.time()
continue
      # No new checkpoints. Stop if the timeout passed or training finished;
      # otherwise wait 1 minute.
if time.time() - last_eval > timeout or self.is_training_done():
break
time.sleep(60)
def report_progress(self, message):
pass
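# A minimal usage sketch (the model directory below is hypothetical):
# `unevaluated_checkpoints` yields checkpoint paths as they appear and stops
# once the timeout passes or `mark_training_done` was called.
#
#   task_manager = TaskManager("/tmp/model_dir")
#   for ckpt in task_manager.unevaluated_checkpoints(timeout=3600):
#     print("would evaluate", ckpt)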
class TaskManagerWithCsvResults(TaskManager):
"""Task Manager that writes results to a CSV file."""
def __init__(self, model_dir, score_file=None):
super(TaskManagerWithCsvResults, self).__init__(model_dir)
if score_file is None:
score_file = os.path.join(model_dir, "scores.csv")
self._score_file = score_file
def _get_config_for_step(self, step):
"""Returns the latest operative config for the global step as dictionary."""
saved_configs = tf.gfile.Glob(
os.path.join(self.model_dir, "operative_config-*.gin"))
get_step = lambda fn: int(re.findall(r"operative_config-(\d+).gin", fn)[0])
config_steps = [get_step(fn) for fn in saved_configs]
assert config_steps
last_config_step = sorted([s for s in config_steps if s <= step])[-1]
config_path = os.path.join(
self.model_dir, "operative_config-{}.gin".format(last_config_step))
return _parse_gin_config(config_path)
def add_eval_result(self, checkpoint_path, result_dict, default_value):
    step = int(os.path.basename(checkpoint_path).split("-")[-1])
config = self._get_config_for_step(step)
csv_header = (
["checkpoint_path", "step"] + sorted(result_dict) + sorted(config))
write_header = not tf.gfile.Exists(self._score_file)
if write_header:
with tf.gfile.Open(self._score_file, "w") as f:
writer = csv.DictWriter(f, fieldnames=csv_header, extrasaction="ignore")
writer.writeheader()
row = dict(checkpoint_path=checkpoint_path, step=step, **config)
for k, v in six.iteritems(result_dict):
if isinstance(v, float):
v = "{:.3f}".format(v)
row[k] = v
with tf.gfile.Open(self._score_file, "a") as f:
writer = csv.DictWriter(f, fieldnames=csv_header, extrasaction="ignore")
writer.writerow(row)
def get_checkpoints_with_results(self):
if not tf.gfile.Exists(self._score_file):
return set()
with tf.gfile.Open(self._score_file) as f:
reader = csv.DictReader(f)
return {r["checkpoint_path"] for r in reader}
def _run_eval(module_spec, checkpoints, task_manager, run_config,
use_tpu, num_averaging_runs):
"""Evaluates the given checkpoints and add results to a result writer.
Args:
module_spec: `ModuleSpec` of the model.
    checkpoints: Generator for checkpoint paths.
task_manager: `TaskManager`. init_eval() will be called before adding
results.
run_config: `RunConfig` to use. Values for master and tpu_config are
currently ignored.
use_tpu: Whether to use TPU for evaluation.
num_averaging_runs: Determines how many times each metric is computed.
"""
# By default, we compute FID and Inception scores. Other tasks defined in
# the metrics folder (such as the one in metrics/kid_score.py) can be added
# to this list if desired.
eval_tasks = [
inception_score_lib.InceptionScoreTask(),
fid_score_lib.FIDScoreTask()
]
logging.info("eval_tasks: %s", eval_tasks)
for checkpoint_path in checkpoints:
step = os.path.basename(checkpoint_path).split("-")[-1]
    if int(step) == 0:
continue
export_path = os.path.join(run_config.model_dir, "tfhub", str(step))
if not tf.gfile.Exists(export_path):
module_spec.export(export_path, checkpoint_path=checkpoint_path)
default_value = -1.0
try:
result_dict = eval_gan_lib.evaluate_tfhub_module(
export_path, eval_tasks, use_tpu=use_tpu,
num_averaging_runs=num_averaging_runs)
except ValueError as nan_found_error:
result_dict = {}
logging.exception(nan_found_error)
default_value = eval_gan_lib.NAN_DETECTED
logging.info("Evaluation result for checkpoint %s: %s (default value: %s)",
checkpoint_path, result_dict, default_value)
task_manager.add_eval_result(checkpoint_path, result_dict, default_value)
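# A hedged note (the class name below is an assumption based on the comment at
# the top of `_run_eval`): further metrics such as KID can be enabled by
# appending their task to the `eval_tasks` list inside `_run_eval`, e.g.:
#
#   from compare_gan.metrics import kid_score as kid_score_lib
#   eval_tasks.append(kid_score_lib.KIDScoreTask())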
def run_with_schedule(schedule, run_config, task_manager, options, use_tpu,
num_eval_averaging_runs=1, eval_every_steps=-1):
"""Run the schedule with the given options.
Available schedules:
- train: Train up to options["training_steps"], continuing from existing
checkpoints if available.
- eval_after_train: First train up to options["training_steps"] then
evaluate all checkpoints.
- continuous_eval: Waiting for new checkpoints and evaluate them as they
become available. This is meant to run in parallel with a job running
the training schedule but can also run after it.
Args:
schedule: Schedule to run. One of: train, continuous_eval, train_and_eval.
run_config: `tf.contrib.tpu.RunConfig` to use.
task_manager: `TaskManager` for this run.
    options: Python dictionary with run parameters.
use_tpu: Boolean whether to use TPU.
num_eval_averaging_runs: Determines how many times each metric is computed.
eval_every_steps: Integer determining which checkpoints to evaluate.
"""
logging.info("Running schedule '%s' with options: %s", schedule, options)
if run_config.tf_random_seed:
logging.info("Setting NumPy random seed to %s.", run_config.tf_random_seed)
np.random.seed(run_config.tf_random_seed)
result_dir = os.path.join(run_config.model_dir, "result")
utils.check_folder(result_dir)
dataset = datasets.get_dataset()
gan = options["gan_class"](dataset=dataset,
parameters=options,
model_dir=run_config.model_dir)
if schedule not in {"train", "eval_after_train", "continuous_eval"}:
raise ValueError("Schedule {} not supported.".format(schedule))
if schedule in {"train", "eval_after_train"}:
train_hooks = [
gin.tf.GinConfigSaverHook(run_config.model_dir),
hooks.ReportProgressHook(task_manager,
max_steps=options["training_steps"]),
]
if run_config.save_checkpoints_steps:
# This replaces the default checkpoint saver hook in the estimator.
logging.info("Using AsyncCheckpointSaverHook.")
train_hooks.append(
hooks.AsyncCheckpointSaverHook(
checkpoint_dir=run_config.model_dir,
save_steps=run_config.save_checkpoints_steps))
# (b/122782388): Remove hotfix.
run_config = run_config.replace(save_checkpoints_steps=1000000)
estimator = gan.as_estimator(
run_config, batch_size=options["batch_size"], use_tpu=use_tpu)
estimator.train(
input_fn=gan.input_fn,
max_steps=options["training_steps"],
hooks=train_hooks)
task_manager.mark_training_done()
if schedule == "continuous_eval":
# Continuous eval with up to 24 hours between checkpoints.
checkpoints = task_manager.unevaluated_checkpoints(
timeout=24 * 3600, eval_every_steps=eval_every_steps)
if schedule == "eval_after_train":
checkpoints = task_manager.unevaluated_checkpoints(
eval_every_steps=eval_every_steps)
if schedule in {"continuous_eval", "eval_after_train"}:
_run_eval(
gan.as_module_spec(),
checkpoints=checkpoints,
task_manager=task_manager,
run_config=run_config,
use_tpu=use_tpu,
num_averaging_runs=num_eval_averaging_runs)
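# A hedged end-to-end sketch (argument values are assumptions for
# illustration; it presumes the Gin bindings required by `get_options_dict`
# have already been parsed):
#
#   run_config = tf.contrib.tpu.RunConfig(model_dir="/tmp/model_dir")
#   task_manager = TaskManagerWithCsvResults(run_config.model_dir)
#   run_with_schedule("eval_after_train", run_config, task_manager,
#                     get_options_dict(), use_tpu=False,
#                     eval_every_steps=1000)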
|
compare_gan-master
|
compare_gan/runner_lib.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for eval_gan_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import os.path
from absl import flags
from absl.testing import flagsaver
from absl.testing import parameterized
from compare_gan import datasets
from compare_gan import eval_gan_lib
from compare_gan import eval_utils
from compare_gan.gans import consts as c
from compare_gan.gans.modular_gan import ModularGAN
from compare_gan.metrics import fid_score
from compare_gan.metrics import fractal_dimension
from compare_gan.metrics import inception_score
from compare_gan.metrics import ms_ssim_score
import gin
import mock
import tensorflow as tf
FLAGS = flags.FLAGS
def create_fake_inception_graph():
"""Creates a `GraphDef` with that mocks the Inception V1 graph.
It takes the input, multiplies it through a matrix full of 0.00001 values,
and provides the results in the endpoints 'pool_3' and 'logits'. This
  matches the tensor names in the real Inception V1 model.
Returns:
`tf.GraphDef` for the mocked Inception V1 graph.
"""
fake_inception = tf.Graph()
with fake_inception.as_default():
inputs = tf.placeholder(
tf.float32, shape=[None, 299, 299, 3], name="Mul")
w = tf.ones(shape=[299 * 299 * 3, 10]) * 0.00001
outputs = tf.matmul(tf.layers.flatten(inputs), w)
tf.identity(outputs, name="pool_3")
tf.identity(outputs, name="logits")
return fake_inception.as_graph_def()
class EvalGanLibTest(parameterized.TestCase, tf.test.TestCase):
def setUp(self):
super(EvalGanLibTest, self).setUp()
gin.clear_config()
FLAGS.data_fake_dataset = True
self.mock_get_graph = mock.patch.object(
eval_utils, "get_inception_graph_def").start()
self.mock_get_graph.return_value = create_fake_inception_graph()
@parameterized.parameters(c.ARCHITECTURES)
@flagsaver.flagsaver
def test_end2end_checkpoint(self, architecture):
"""Takes real GAN (trained for 1 step) and evaluate it."""
if architecture in {c.RESNET_STL_ARCH, c.RESNET30_ARCH}:
      # RESNET_STL_ARCH and RESNET30_ARCH do not support CIFAR image shape.
return
gin.bind_parameter("dataset.name", "cifar10")
dataset = datasets.get_dataset("cifar10")
options = {
"architecture": architecture,
"z_dim": 120,
"disc_iters": 1,
"lambda": 1,
}
model_dir = os.path.join(tf.test.get_temp_dir(), self.id())
tf.logging.info("model_dir: %s" % model_dir)
run_config = tf.contrib.tpu.RunConfig(model_dir=model_dir)
gan = ModularGAN(dataset=dataset,
parameters=options,
conditional="biggan" in architecture,
model_dir=model_dir)
estimator = gan.as_estimator(run_config, batch_size=2, use_tpu=False)
estimator.train(input_fn=gan.input_fn, steps=1)
export_path = os.path.join(model_dir, "tfhub")
checkpoint_path = os.path.join(model_dir, "model.ckpt-1")
module_spec = gan.as_module_spec()
module_spec.export(export_path, checkpoint_path=checkpoint_path)
eval_tasks = [
fid_score.FIDScoreTask(),
fractal_dimension.FractalDimensionTask(),
inception_score.InceptionScoreTask(),
ms_ssim_score.MultiscaleSSIMTask()
]
result_dict = eval_gan_lib.evaluate_tfhub_module(
export_path, eval_tasks, use_tpu=False, num_averaging_runs=1)
tf.logging.info("result_dict: %s", result_dict)
for score in ["fid_score", "fractal_dimension", "inception_score",
"ms_ssim"]:
for stats in ["mean", "std", "list"]:
required_key = "%s_%s" % (score, stats)
self.assertIn(required_key, result_dict, "Missing: %s." % required_key)
if __name__ == "__main__":
tf.test.main()
|
compare_gan-master
|
compare_gan/eval_gan_lib_test.py
|
# coding=utf-8
# Copyright 2018 Google LLC & Hwalsuk Lee.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utilities library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect
from absl import logging
import six
# In Python 2 the inspect module does not have FullArgSpec. Define a named tuple
# instead.
if hasattr(inspect, "FullArgSpec"):
_FullArgSpec = inspect.FullArgSpec # pylint: disable=invalid-name
else:
_FullArgSpec = collections.namedtuple("FullArgSpec", [
"args", "varargs", "varkw", "defaults", "kwonlyargs", "kwonlydefaults",
"annotations"
])
def _getfullargspec(fn):
"""Python 2/3 compatible version of the inspect.getfullargspec method.
Args:
fn: The function object.
Returns:
A FullArgSpec. For Python 2 this is emulated by a named tuple.
"""
arg_spec_fn = inspect.getfullargspec if six.PY3 else inspect.getargspec
try:
arg_spec = arg_spec_fn(fn)
except TypeError:
# `fn` might be a callable object.
arg_spec = arg_spec_fn(fn.__call__)
if six.PY3:
assert isinstance(arg_spec, _FullArgSpec)
return arg_spec
return _FullArgSpec(
args=arg_spec.args,
varargs=arg_spec.varargs,
varkw=arg_spec.keywords,
defaults=arg_spec.defaults,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
def _has_arg(fn, arg_name):
"""Returns True if `arg_name` might be a valid parameter for `fn`.
Specifically, this means that `fn` either has a parameter named
`arg_name`, or has a `**kwargs` parameter.
Args:
fn: The function to check.
    arg_name: The name of the parameter.
Returns:
Whether `arg_name` might be a valid argument of `fn`.
"""
while isinstance(fn, functools.partial):
fn = fn.func
while hasattr(fn, "__wrapped__"):
fn = fn.__wrapped__
arg_spec = _getfullargspec(fn)
if arg_spec.varkw:
return True
return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs
def call_with_accepted_args(fn, **kwargs):
"""Calls `fn` only with the keyword arguments that `fn` accepts."""
kwargs = {k: v for k, v in six.iteritems(kwargs) if _has_arg(fn, k)}
logging.debug("Calling %s with args %s.", fn, kwargs)
return fn(**kwargs)
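# A small illustrative sketch (the function `fn` below is hypothetical):
# keyword arguments that `fn` does not accept are silently dropped.
#
#   def fn(a, b=2):
#     return a + b
#
#   call_with_accepted_args(fn, a=1, b=3, unused=42)  # returns 4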
def get_parameter_overview(variables, limit=40):
"""Returns a string with variables names, their shapes, count, and types.
To get all trainable parameters pass in `tf.trainable_variables()`.
Args:
variables: List of `tf.Variable`(s).
limit: If not `None`, the maximum number of variables to include.
Returns:
A string with a table like in the example.
+----------------+---------------+------------+---------+
| Name | Shape | Size | Type |
+----------------+---------------+------------+---------+
| FC_1/weights:0 | (63612, 1024) | 65,138,688 | float32 |
| FC_1/biases:0 | (1024,) | 1,024 | float32 |
| FC_2/weights:0 | (1024, 32) | 32,768 | float32 |
| FC_2/biases:0 | (32,) | 32 | float32 |
+----------------+---------------+------------+---------+
Total: 65,172,512
"""
max_name_len = max([len(v.name) for v in variables] + [len("Name")])
max_shape_len = max([len(str(v.get_shape())) for v in variables] + [len(
"Shape")])
max_size_len = max([len("{:,}".format(v.get_shape().num_elements()))
for v in variables] + [len("Size")])
max_type_len = max([len(v.dtype.base_dtype.name) for v in variables] + [len(
"Type")])
var_line_format = "| {: <{}s} | {: >{}s} | {: >{}s} | {: <{}s} |"
sep_line_format = var_line_format.replace(" ", "-").replace("|", "+")
header = var_line_format.replace(">", "<").format("Name", max_name_len,
"Shape", max_shape_len,
"Size", max_size_len,
"Type", max_type_len)
separator = sep_line_format.format("", max_name_len, "", max_shape_len, "",
max_size_len, "", max_type_len)
lines = [separator, header, separator]
total_weights = sum(v.get_shape().num_elements() for v in variables)
  # Create lines for up to `limit` variables.
for v in variables:
if limit is not None and len(lines) >= limit:
lines.append("[...]")
break
lines.append(var_line_format.format(
v.name, max_name_len,
str(v.get_shape()), max_shape_len,
"{:,}".format(v.get_shape().num_elements()), max_size_len,
v.dtype.base_dtype.name, max_type_len))
lines.append(separator)
lines.append("Total: {:,}".format(total_weights))
return "\n".join(lines)
def log_parameter_overview(variables, msg):
"""Writes a table with variables name and shapes to INFO log.
See get_parameter_overview for details.
Args:
variables: List of `tf.Variable`(s).
msg: Message to be logged before the table.
"""
table = get_parameter_overview(variables, limit=None)
  # The table can be too large to fit into one log entry.
lines = [msg] + table.split("\n")
for i in range(0, len(lines), 80):
logging.info("\n%s", "\n".join(lines[i:i + 80]))
|
compare_gan-master
|
compare_gan/utils.py
|