python_code | repo_name | file_path
---|---|---|
import torch
import torch.nn as nn
from operator import itemgetter
from torch.autograd.function import Function
from torch.utils.checkpoint import get_device_states, set_device_states
# for routing arguments into the functions of the reversible layer
def route_args(router, args, depth):
routed_args = [(dict(), dict()) for _ in range(depth)]
matched_keys = [key for key in args.keys() if key in router]
for key in matched_keys:
val = args[key]
for depth, ((f_args, g_args), routes) in enumerate(zip(routed_args, router[key])):
new_f_args, new_g_args = map(lambda route: ({key: val} if route else {}), routes)
routed_args[depth] = ({**f_args, **new_f_args}, {**g_args, **new_g_args})
return routed_args
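# e.g. with router = {'mask': ((True, False), (True, False))} and args = {'mask': m},
# route_args(router, args, 2) yields [({'mask': m}, {}), ({'mask': m}, {})], i.e. the
# mask is routed to every f block and withheld from every g block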
def layer_drop(layers, prob):
to_drop = torch.empty(len(layers)).uniform_(0, 1) < prob
blocks = [block for block, drop in zip(layers, to_drop) if not drop]
blocks = layers[:1] if len(blocks) == 0 else blocks
return blocks
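# e.g. layer_drop(blocks, 0.1) independently drops each block with probability 0.1,
# but always keeps at least the first block so the sequence is never empty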
# following example for saving and setting rng here https://pytorch.org/docs/stable/_modules/torch/utils/checkpoint.html
class Deterministic(nn.Module):
def __init__(self, net):
super().__init__()
self.net = net
self.cpu_state = None
self.cuda_in_fwd = None
self.gpu_devices = None
self.gpu_states = None
def record_rng(self, *args):
self.cpu_state = torch.get_rng_state()
if torch.cuda._initialized:
self.cuda_in_fwd = True
self.gpu_devices, self.gpu_states = get_device_states(*args)
def forward(self, *args, record_rng = False, set_rng = False, **kwargs):
if record_rng:
self.record_rng(*args)
if not set_rng:
return self.net(*args, **kwargs)
rng_devices = []
if self.cuda_in_fwd:
rng_devices = self.gpu_devices
with torch.random.fork_rng(devices=rng_devices, enabled=True):
torch.set_rng_state(self.cpu_state)
if self.cuda_in_fwd:
set_device_states(self.gpu_devices, self.gpu_states)
return self.net(*args, **kwargs)
# heavily inspired by https://github.com/RobinBruegger/RevTorch/blob/master/revtorch/revtorch.py
# once multi-GPU is confirmed working, refactor and send PR back to source
class ReversibleBlock(nn.Module):
def __init__(self, f, g):
super().__init__()
self.f = Deterministic(f)
self.g = Deterministic(g)
def forward(self, x, f_args = {}, g_args = {}):
x1, x2 = torch.chunk(x, 2, dim=2)
y1, y2 = None, None
with torch.no_grad():
y1 = x1 + self.f(x2, record_rng=self.training, **f_args)
y2 = x2 + self.g(y1, record_rng=self.training, **g_args)
return torch.cat([y1, y2], dim=2)
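    # the additive coupling above is invertible: given (y1, y2) the inputs can be
    # recovered as x2 = y2 - g(y1) and x1 = y1 - f(x2), which is what backward_pass
    # below exploits so activations never need to be stored during the forward pass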
def backward_pass(self, y, dy, f_args = {}, g_args = {}):
y1, y2 = torch.chunk(y, 2, dim=2)
del y
dy1, dy2 = torch.chunk(dy, 2, dim=2)
del dy
with torch.enable_grad():
y1.requires_grad = True
gy1 = self.g(y1, set_rng=True, **g_args)
torch.autograd.backward(gy1, dy2)
with torch.no_grad():
x2 = y2 - gy1
del y2, gy1
dx1 = dy1 + y1.grad
del dy1
y1.grad = None
with torch.enable_grad():
x2.requires_grad = True
fx2 = self.f(x2, set_rng=True, **f_args)
torch.autograd.backward(fx2, dx1, retain_graph=True)
with torch.no_grad():
x1 = y1 - fx2
del y1, fx2
dx2 = dy2 + x2.grad
del dy2
x2.grad = None
x = torch.cat([x1, x2.detach()], dim=2)
dx = torch.cat([dx1, dx2], dim=2)
return x, dx
class _ReversibleFunction(Function):
@staticmethod
def forward(ctx, x, blocks, args):
ctx.args = args
for block, kwarg in zip(blocks, args):
x = block(x, **kwarg)
ctx.y = x.detach()
ctx.blocks = blocks
return x
@staticmethod
def backward(ctx, dy):
y = ctx.y
args = ctx.args
for block, kwargs in zip(ctx.blocks[::-1], args[::-1]):
y, dy = block.backward_pass(y, dy, **kwargs)
return dy, None, None
class SequentialSequence(nn.Module):
def __init__(self, layers, args_route = {}, layer_dropout = 0.):
super().__init__()
assert all(len(route) == len(layers) for route in args_route.values()), 'each argument route map must have the same depth as the number of sequential layers'
self.layers = layers
self.args_route = args_route
self.layer_dropout = layer_dropout
def forward(self, x, **kwargs):
args = route_args(self.args_route, kwargs, len(self.layers))
layers_and_args = list(zip(self.layers, args))
if self.training and self.layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, self.layer_dropout)
for (f, g), (f_args, g_args) in layers_and_args:
x = x + f(x, **f_args)
x = x + g(x, **g_args)
return x
class ReversibleSequence(nn.Module):
def __init__(self, blocks, args_route = {}, layer_dropout = 0.):
super().__init__()
self.args_route = args_route
self.layer_dropout = layer_dropout
self.blocks = nn.ModuleList([ReversibleBlock(f=f, g=g) for f, g in blocks])
def forward(self, x, **kwargs):
x = torch.cat([x, x], dim=-1)
blocks = self.blocks
args = route_args(self.args_route, kwargs, len(blocks))
args = list(map(lambda x: {'f_args': x[0], 'g_args': x[1]}, args))
layers_and_args = list(zip(blocks, args))
if self.training and self.layer_dropout > 0:
layers_and_args = layer_drop(layers_and_args, self.layer_dropout)
blocks, args = map(lambda ind: list(map(itemgetter(ind), layers_and_args)), (0, 1))
out = _ReversibleFunction.apply(x, blocks, args)
return torch.stack(out.chunk(2, dim=-1)).sum(dim=0)
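# a minimal usage sketch, assuming f and g are simple dimension-preserving nn.Linear
# blocks and toy sizes (dim 16, sequence length 8); checks that the reversible path
# runs forward and backward end to end
if __name__ == '__main__':
    dim = 16
    blocks = [(nn.Linear(dim, dim), nn.Linear(dim, dim)) for _ in range(2)]
    seq = ReversibleSequence(blocks)
    x = torch.randn(1, 8, dim, requires_grad = True)
    out = seq(x)            # (1, 8, dim) - input is duplicated, halves are summed back at the end
    out.sum().backward()    # gradients are reconstructed block by block in _ReversibleFunction
    assert x.grad is not None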
| sinkhorn-transformer-master | sinkhorn_transformer/reversible.py |
from sinkhorn_transformer.sinkhorn_transformer import SinkhornTransformer, SinkhornTransformerLM, SinkhornSelfAttention
from sinkhorn_transformer.autoregressive_wrapper import AutoregressiveWrapper
from sinkhorn_transformer.autopadder import Autopadder
| sinkhorn-transformer-master | sinkhorn_transformer/__init__.py |
import math
import torch
from torch import nn
from operator import mul
from math import gcd
import torch.nn.functional as F
from inspect import isfunction
from functools import partial, wraps, reduce
from local_attention import LocalAttention
from axial_positional_embedding import AxialPositionalEmbedding
from product_key_memory import PKM
from sinkhorn_transformer.reversible import ReversibleSequence, SequentialSequence
# helper functions
def identity(x, *args, **kwargs): return x
def default(x, d):
if x is None:
return d if not isfunction(d) else d()
return x
def cast_tuple(x):
return x if isinstance(x, tuple) else (x,)
def divisible_by(num, divisor):
return num % divisor == 0
def lcm(*numbers):
return int(reduce(lambda x, y: int((x * y) / gcd(x, y)), numbers, 1))
def all_none(*arr):
return all(el is None for el in arr)
def cache_fn(f):
cache = None
@wraps(f)
def cached_fn(*args, **kwargs):
nonlocal cache
if cache is not None:
return cache
cache = f(*args, **kwargs)
return cache
return cached_fn
def rotate_left(t, n, dim=0):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(n, None))
r = (*pre_slices, slice(0, n))
return torch.cat((t[l], t[r]), dim=dim)
def rotate_right(t, n, dim=0):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(-n, None))
r = (*pre_slices, slice(None, -n))
return torch.cat((t[l], t[r]), dim=dim)
def merge_dims(ind_from, ind_to, tensor):
shape = list(tensor.shape)
arr_slice = slice(ind_from, ind_to + 1)
shape[arr_slice] = [reduce(mul, shape[arr_slice])]
return tensor.reshape(*shape)
def merge_heads(h, v):
b, t, d = v.shape
return v.view(b, t, h, -1).transpose(1, 2).reshape(b, h, t, -1)
def split_heads(h, v):
*_, t, d = v.shape
return v.view(-1, h, t, d).transpose(1, 2).reshape(-1, t, d * h)
def split_at_index(dim, index, t):
pre_slices = (slice(None),) * dim
l = (*pre_slices, slice(None, index))
r = (*pre_slices, slice(index, None))
return t[l], t[r]
def bucket(buckets, t, dim=1):
shape = list(t.shape)
shape[dim:dim+1] = [buckets, -1]
return t.reshape(*shape)
def unbucket(t, dim=1):
shape = list(t.shape)
shape[dim:dim+2] = [-1]
return t.reshape(*shape)
def sample_gumbel(shape, device, dtype, eps=1e-6):
u = torch.empty(shape, device=device, dtype=dtype).uniform_(0, 1)
return -log(-log(u, eps), eps)
def sinkhorn_sorting_operator(r, n_iters=8):
n = r.shape[1]
for _ in range(n_iters):
r = r - torch.logsumexp(r, dim=2, keepdim=True)
r = r - torch.logsumexp(r, dim=1, keepdim=True)
return torch.exp(r)
def gumbel_sinkhorn(r, n_iters=8, temperature=0.7):
r = log(r)
gumbel = sample_gumbel(r.shape, r.device, r.dtype)
r = (r + gumbel) / temperature
return sinkhorn_sorting_operator(r, n_iters)
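# sinkhorn_sorting_operator alternately normalizes rows and columns in log space, so the
# output approaches a doubly stochastic matrix; gumbel_sinkhorn adds gumbel noise and a
# temperature on top, giving a differentiable relaxation of sampling a permutation matrix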
def reorder_buckets(t, r):
return torch.einsum('buv,bvtd->butd', r, t)
def log(t, eps = 1e-6):
return torch.log(t + eps)
def max_neg_value(tensor):
return -torch.finfo(tensor.dtype).max
def cumavg(t, dim):
r = torch.arange(1, t.shape[dim] + 1, device=t.device, dtype=t.dtype)
expand_slice = [None] * len(t.shape)
expand_slice[dim] = slice(None, None)
return t.cumsum(dim=dim) / r[tuple(expand_slice)]
def batched_index_select(values, indices):
last_dim = values.shape[-1]
return values.gather(1, indices[:, :, None].expand(-1, -1, last_dim))
def expand_dim(t, dim, k):
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
def expand_batch_and_merge_head(b, t):
shape = list(t.squeeze(0).shape)
t = expand_dim(t, 0, b)
shape[0] = shape[0] * b
return t.reshape(*shape)
def differentiable_topk(x, k, temperature=1.):
*_, n, dim = x.shape
topk_tensors = []
for i in range(k):
is_last = i == (k - 1)
values, indices = (x / temperature).softmax(dim=-1).topk(1, dim=-1)
topks = torch.zeros_like(x).scatter_(-1, indices, values)
topk_tensors.append(topks)
if not is_last:
x.scatter_(-1, indices, float('-inf'))
topks = torch.cat(topk_tensors, dim=-1)
return topks.reshape(*_, k * n, dim)
# helper classes
class Chunk(nn.Module):
def __init__(self, chunks, fn, along_dim = -1):
super().__init__()
self.dim = along_dim
self.chunks = chunks
self.fn = fn
def forward(self, x):
chunks = x.chunk(self.chunks, dim = self.dim)
return torch.cat([self.fn(c) for c in chunks], dim = self.dim)
class GELU_(nn.Module):
def forward(self, x):
return 0.5 * x * (1 + torch.tanh(math.sqrt(2 / math.pi) * (x + 0.044715 * torch.pow(x, 3))))
GELU = nn.GELU if hasattr(nn, 'GELU') else GELU_
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0., activation = None, glu = False):
super().__init__()
activation = default(activation, GELU)
self.glu = glu
self.w1 = nn.Linear(dim, dim * mult * (2 if glu else 1))
self.act = activation()
self.dropout = nn.Dropout(dropout)
self.w2 = nn.Linear(dim * mult, dim)
def forward(self, x, **kwargs):
if not self.glu:
x = self.w1(x)
x = self.act(x)
else:
x, v = self.w1(x).chunk(2, dim=-1)
x = self.act(x) * v
x = self.dropout(x)
x = self.w2(x)
return x
class ReZero(nn.Module):
def __init__(self, fn):
super().__init__()
self.g = nn.Parameter(torch.zeros(1))
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) * self.g
class PreNorm(nn.Module):
def __init__(self, norm_class, dim, fn):
super().__init__()
self.norm = norm_class(dim)
self.fn = fn
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class ProjectInOut(nn.Module):
def __init__(self, fn, dim_in, dim_out, project_out = True):
super().__init__()
self.fn = fn
self.project_in = nn.Linear(dim_in, dim_out)
self.project_out = nn.Linear(dim_out, dim_in) if project_out else identity
def forward(self, x, **kwargs):
x = self.project_in(x)
x = self.fn(x, **kwargs)
x = self.project_out(x)
return x
# non-causal sortnet and sinkhorn attention
class SimpleSortNet(nn.Module):
def __init__(self, heads, bucket_size, max_buckets, dim, non_permutative, temperature, sinkhorn_iter):
super().__init__()
self.dim = dim
self.heads = heads
self.max_buckets = max_buckets
self.bucket_size = bucket_size
self.non_permutative = non_permutative
self.temperature = temperature
self.sinkhorn_iter = sinkhorn_iter
self.linear = nn.Parameter(torch.randn(1, heads, dim, max_buckets))
self.act = nn.ReLU()
def forward(self, q, k, topk=1):
bh, t, _ = q.shape
b = bh // self.heads
buckets = t // self.bucket_size
b_q, b_k = bucket(buckets, q), bucket(buckets, k)
x = torch.cat((b_q.sum(dim=2), b_k.sum(dim=2)), dim=-1)
W = expand_batch_and_merge_head(b, self.linear)
R = self.act(x @ W)
return differentiable_topk(R, k=topk, temperature=self.temperature) if self.non_permutative else gumbel_sinkhorn(R, self.sinkhorn_iter, self.temperature)
class AttentionSortNet(nn.Module):
def __init__(self, heads, bucket_size, kv_bucket_size, dim, non_permutative, temperature, sinkhorn_iter, n_sortcut = 0):
super().__init__()
self.heads = heads
self.bucket_size = bucket_size
self.kv_bucket_size = kv_bucket_size
self.dim = dim
self.non_permutative = non_permutative
self.temperature = temperature
self.sinkhorn_iter = sinkhorn_iter
self.n_sortcut = n_sortcut
def forward(self, q, k, topk=1):
bh, *_, bucket_size, kv_bucket_size, device, dtype, dim = *q.shape, self.bucket_size, self.kv_bucket_size, q.device, q.dtype, self.dim
b = bh // self.heads
buckets = q.shape[1] // bucket_size
kv_buckets = k.shape[1] // kv_bucket_size
b_q = bucket(buckets, q) if self.n_sortcut == 0 else bucket(1, q)
b_k = bucket(kv_buckets, k)
sq = b_q.mean(dim=2)
sk = b_k.mean(dim=2)
R = torch.einsum('bie,bje->bij', sq, sk).to(q) * (dim ** -0.5)
if self.non_permutative:
k = topk if self.n_sortcut == 0 else self.n_sortcut
return differentiable_topk(R, k=k)
return gumbel_sinkhorn(F.relu(R), self.sinkhorn_iter, self.temperature)
class SinkhornAttention(nn.Module):
def __init__(self, bucket_size, dim, dim_heads, heads, max_seq_len, temperature = 0.75, non_permutative = True, sinkhorn_iter = 7, n_sortcut = 0, dropout = 0., kv_bucket_size = None, use_simple_sort_net = False, n_top_buckets = 1):
super().__init__()
self.bucket_size = bucket_size
self.kv_bucket_size = default(kv_bucket_size, bucket_size)
self.dim = dim
self.heads = heads
self.temperature = temperature
self.non_permutative = non_permutative
self.sinkhorn_iter = sinkhorn_iter
self.n_sortcut = n_sortcut
if use_simple_sort_net:
self.sort_net = SimpleSortNet(heads, self.kv_bucket_size, max_seq_len // self.kv_bucket_size, dim_heads * 2, non_permutative = non_permutative, temperature = temperature, sinkhorn_iter = sinkhorn_iter)
else:
self.sort_net = AttentionSortNet(heads, self.bucket_size, self.kv_bucket_size, dim_heads, non_permutative = non_permutative, temperature = temperature, sinkhorn_iter = sinkhorn_iter, n_sortcut = n_sortcut)
self.n_top_buckets = n_top_buckets
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, q_mask = None, kv_mask = None):
b, h, t, d_h, n_top, d, heads, temperature, bucket_size, kv_bucket_size, device = *q.shape, self.n_top_buckets, self.dim, self.heads, self.temperature, self.bucket_size, self.kv_bucket_size, q.device
bh = b * h
buckets = q.shape[2] // bucket_size
kv_buckets = k.shape[2] // kv_bucket_size
n_top = min(n_top, kv_buckets)
merge_batch_head = partial(merge_dims, 0, 1)
q, k, v = map(merge_batch_head, (q, k, v))
# bucket query, key, values
b_q = bucket(buckets, q)
b_k, b_v = map(partial(bucket, kv_buckets), (k, v))
bsz = b_k.shape[2]
        # calculate reordering matrix R with the chosen sort net
R = self.sort_net(q, k, topk=n_top)
R = R.type_as(q).to(q)
# concatenate reordered buckets
b_k_r = reorder_buckets(b_k, R)
b_v_r = reorder_buckets(b_v, R)
# choose the top n ranked buckets for all query buckets
if self.n_sortcut > 0:
b_k_r = b_k_r[:, 0:self.n_sortcut].reshape(bh, 1, -1, d_h)
b_v_r = b_v_r[:, 0:self.n_sortcut].reshape(bh, 1, -1, d_h)
b_k_r = expand_dim(b_k_r, 1, buckets)
b_v_r = expand_dim(b_v_r, 1, buckets)
else:
b_k_r = b_k_r.reshape(bh, buckets, -1, d_h)
            b_v_r = b_v_r.reshape(bh, buckets, -1, d_h)
b_k = torch.cat((b_k_r, b_k), dim=2) if buckets == kv_buckets else b_k_r
b_v = torch.cat((b_v_r, b_v), dim=2) if buckets == kv_buckets else b_v_r
dots = torch.einsum('buie,buje->buij', b_q, b_k) * (d_h ** -0.5)
# mask
mask_value = max_neg_value(dots)
if not all_none(q_mask, kv_mask):
q_mask = default(q_mask, lambda: torch.ones((b, t), device=device).bool())
kv_mask = default(kv_mask, q_mask)
mq, mk = bucket(buckets, q_mask), bucket(kv_buckets, kv_mask)
expand_head_and_merge_into_batch = lambda x: merge_dims(0, 1, expand_dim(x.unsqueeze(1), 1, h))
mq, mk = map(expand_head_and_merge_into_batch, (mq, mk))
mk_r = batched_index_select(mk, R.abs().argmax(dim=-1))
if self.n_sortcut > 0:
mk_r = mk_r[:, 0:self.n_sortcut].reshape(-1, 1, bsz * self.n_sortcut)
mk_r = expand_dim(mk_r, 1, buckets)
else:
mk_r = mk_r.reshape(bh, buckets, -1)
mk = torch.cat((mk_r, mk), dim=2) if buckets == kv_buckets else mk_r
mask = mq[:, :, :, None] * mk[:, :, None, :]
dots.masked_fill_(~mask, mask_value)
del mask
# attention
dots = dots.softmax(dim=-1)
dots = self.dropout(dots)
out = torch.einsum('buij,buje->buie', dots, b_v)
out = unbucket(out)
out = out.reshape(b, h, t, d_h)
return out
# causal sort net and reordered bucketing attention
def mask_reordering_matrix(R, topk, temperature):
buckets = R.shape[1]
mask_value = max_neg_value(R)
mask = torch.zeros(R.shape, device=R.device).bool()
i, j = torch.triu_indices(buckets, buckets)
mask[:, i, j + topk] = True
R.masked_fill_(mask, mask_value)
return differentiable_topk(R, topk, temperature)
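# masks the reordering logits so each query bucket can only select the learned null buckets
# and buckets strictly before it, then applies a differentiable top-k over what remains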
class CausalSimpleSortNet(nn.Module):
def __init__(self, heads, bucket_size, max_buckets, n_top_buckets, dim, temperature):
super().__init__()
self.dim = dim
self.heads = heads
self.bucket_size = bucket_size
self.max_buckets = max_buckets
self.n_top_buckets = n_top_buckets
self.temperature = temperature
self.linear = nn.Parameter(torch.randn(1, heads, dim, max_buckets + n_top_buckets))
self.act = nn.LeakyReLU()
def forward(self, q, k, topk=1):
bh, *_, h, max_buckets = *q.shape, self.heads, self.max_buckets
b = bh // h
buckets = k.shape[1] // self.bucket_size
k_r = torch.cat((cumavg(k, dim=1), k), dim=-1)
k_r = bucket(buckets, k_r)
# for causal sort net, take the first token of each bucket to prevent leaking of future to past
x = k_r[:, :, 0]
W = expand_batch_and_merge_head(b, self.linear)
R = self.act(x @ W)
R = R[:, 0:buckets, 0:(buckets + self.n_top_buckets)]
return mask_reordering_matrix(R, topk, self.temperature)
class CausalAttentionSortNet(nn.Module):
def __init__(self, heads, bucket_size, dim, temperature):
super().__init__()
self.heads = heads
self.bucket_size = bucket_size
self.dim = dim
self.temperature = temperature
def forward(self, q, k, topk=1):
bh, *_, h, dim = *q.shape, self.heads, self.dim
b = bh // h
buckets = q.shape[1] // self.bucket_size
kv_buckets = k.shape[1] // self.bucket_size
q_r = bucket(buckets, cumavg(q, dim=1))
k_r = bucket(kv_buckets, cumavg(k, dim=1))
sq = q_r[:, :, 0]
sk = k_r.sum(dim=2)
sk = F.pad(sk, (0, 0, topk, 0))
R = torch.einsum('bie,bje->bij', sq, sk) * (dim ** -0.5)
return mask_reordering_matrix(R, topk, self.temperature)
def apply_fn_after_split_ind(dim, ind, fn, t):
l, r = split_at_index(dim, ind, t)
return torch.cat((l, fn(r)), dim=dim)
class SinkhornCausalAttention(nn.Module):
def __init__(self, bucket_size, dim, dim_heads, heads, max_seq_len, dropout = 0., kv_bucket_size = None, use_simple_sort_net = False, n_top_buckets = 2, temperature = 1.):
super().__init__()
assert kv_bucket_size is None or bucket_size == kv_bucket_size, 'different bucketing for key/values for causal reordering not supported yet'
self.dim = dim
self.heads = heads
self.bucket_size = bucket_size
# a learned null key / value for the first bucket (which has nothing in the past to sort to)
self.null_keys = nn.Parameter(torch.randn(heads, 1, dim_heads))
self.null_values = nn.Parameter(torch.randn(heads, 1, dim_heads))
if use_simple_sort_net:
self.sort_net = CausalSimpleSortNet(heads, bucket_size, max_seq_len // bucket_size, n_top_buckets, dim_heads * 2, temperature)
else:
self.sort_net = CausalAttentionSortNet(heads, bucket_size, dim_heads, temperature)
self.n_top_buckets = n_top_buckets
self.dropout = nn.Dropout(dropout)
def forward(self, q, k, v, q_mask = None, kv_mask = None):
b, h, t, d_h, n_top, d, bsz, device = *q.shape, self.n_top_buckets, self.dim, self.bucket_size, q.device
bh = b * h
hh = h // 2
buckets = t // bsz
n_top = min(n_top, buckets)
hh_slice = (slice(None), slice(hh, None))
rotate_fn = partial(apply_fn_after_split_ind, 1, hh, lambda t: rotate_left(t, bsz-1, dim=2))
q, k, v = map(rotate_fn, (q, k, v))
# merge batch and head
merge_batch_head = partial(merge_dims, 0, 1)
q, k, v = map(merge_batch_head, (q, k, v))
# bucket qkv
b_q, b_k, b_v = map(partial(bucket, buckets), (q, k, v))
# calculate R
R = self.sort_net(q, k, topk=n_top)
R = R.type_as(q).to(q)
# add null key / values
b_null_k = self.null_keys[None, :, None, :, :].expand(b, h, n_top, bsz, -1).reshape(bh, n_top, bsz, -1).to(k)
b_null_v = self.null_values[None, :, None, :, :].expand(b, h, n_top, bsz, -1).reshape(bh, n_top, bsz, -1).to(v)
b_k_r = torch.cat((b_null_k, b_k), dim=1)
b_v_r = torch.cat((b_null_v, b_v), dim=1)
# reorder buckets to buckets of the past
b_k_r = reorder_buckets(b_k_r, R)
b_v_r = reorder_buckets(b_v_r, R)
b_k_r = b_k_r.reshape(bh, buckets, bsz * n_top, -1)
b_v_r = b_v_r.reshape(bh, buckets, bsz * n_top, -1)
# and concatenate to original buckets themselves for local attention
b_k = torch.cat((b_k_r, b_k), dim=2)
b_v = torch.cat((b_v_r, b_v), dim=2)
dots = torch.einsum('buie,buje->buij', b_q, b_k) * (d_h ** -0.5)
# mask
mask_value = max_neg_value(q)
if not all_none(q_mask, kv_mask):
q_mask = default(q_mask, lambda: torch.ones((b, t), device=device).bool())
kv_mask = default(kv_mask, q_mask)
expand_head = lambda x: x.unsqueeze(1).repeat(1, h, 1)
q_mask, kv_mask = map(expand_head, (q_mask, kv_mask))
q_mask[hh_slice] = rotate_left(q_mask[hh_slice], bsz-1, dim=2)
kv_mask[hh_slice] = rotate_left(kv_mask[hh_slice], bsz-1, dim=2)
q_mask, kv_mask = map(lambda x: merge_dims(0, 1, x), (q_mask, kv_mask))
mq, mk = bucket(buckets, q_mask), bucket(buckets, kv_mask)
mk_with_null = F.pad(mk, (0, 0, 2, 0), value=True)
mk_r = batched_index_select(mk_with_null, R.abs().argmax(dim=-1))
mk_r = mk_r.reshape(bh, buckets, -1)
mk = torch.cat((mk_r, mk), dim=2)
mask = mq[:, :, :, None] * mk[:, :, None, :]
dots.masked_fill_(~mask, mask_value)
del mask
# masking for half head rotations
shift = n_top * bsz
total_shift = shift + bsz
mask = torch.ones((b, h, buckets, bsz, total_shift), device=device).bool()
i, j = torch.triu_indices(bsz, bsz, 1)
mask[:, :, :, i, j + shift] = False
mask[:, hh:, -1, 0:shift, 0:shift+1] = False
mask[:, hh:, -1, 0, 0:shift+1] = True
mask = mask.reshape(b * h, buckets, bsz, total_shift)
dots.masked_fill_(~mask, mask_value)
del mask
# attention
dots = dots.softmax(dim=-1)
dots = self.dropout(dots)
out = torch.einsum('buij,buje->buie', dots, b_v)
out = unbucket(out)
out = out.reshape(b, h, t, d_h)
out = apply_fn_after_split_ind(1, hh, lambda t: rotate_right(t, bsz-1, dim=2), out)
return out
class SinkhornSelfAttention(nn.Module):
def __init__(self, dim, bucket_size, max_seq_len, heads = 8, dim_head = None, kv_bucket_size = None, causal = False, non_permutative = True, sinkhorn_iter = 5, n_sortcut = 0, temperature = 0.75, attn_dropout = 0., dropout = 0., context_only = False, use_simple_sort_net = False, n_local_attn_heads = 0, n_top_buckets = 1):
super().__init__()
assert dim_head or divisible_by(dim, heads), f'If dim_head is None, dimension {dim} must be divisible by the number of heads {heads}'
assert not (causal and n_sortcut > 0), 'sortcut can only be used for non causal attention'
assert not (causal and context_only), 'context only self attention layer cannot be causal'
assert n_local_attn_heads <= heads, 'number of local attention heads cannot exceed total heads'
dim_head = default(dim_head, dim // heads)
dim_heads = dim_head * heads
self.dim_head = dim_head
self.heads = heads
self.bucket_size = bucket_size
self.kv_bucket_size = default(kv_bucket_size, bucket_size)
self.context_only = context_only
self.to_q = nn.Linear(dim, dim_heads, bias=False)
self.to_kv = nn.Linear(dim, dim_heads * 2, bias=False) if not context_only else None
self.to_out = nn.Linear(dim_heads, dim)
self.n_local_attn_heads = n_local_attn_heads
self.local_attention = LocalAttention(bucket_size, causal, dropout = attn_dropout, look_forward=(1 if not causal else 0))
sink_heads = heads - n_local_attn_heads
if causal:
attn = SinkhornCausalAttention(bucket_size, dim, dim_head, sink_heads, max_seq_len, dropout = attn_dropout, kv_bucket_size = kv_bucket_size, use_simple_sort_net = use_simple_sort_net, n_top_buckets = n_top_buckets, temperature = temperature)
else:
attn = SinkhornAttention(bucket_size, dim, dim_head, sink_heads, max_seq_len, non_permutative = non_permutative, sinkhorn_iter = sinkhorn_iter, n_sortcut = n_sortcut, temperature = temperature, dropout = attn_dropout, kv_bucket_size = kv_bucket_size, use_simple_sort_net = use_simple_sort_net, n_top_buckets = n_top_buckets)
self.sinkhorn_attention = attn
self.dropout = nn.Dropout(dropout)
def forward(self, x, input_mask = None, context = None, context_mask = None):
b, t, d, h, dh, l_h = *x.shape, self.heads, self.dim_head, self.n_local_attn_heads
assert divisible_by(t, self.bucket_size), f'sequence {t} needs to be divisible by bucket size {self.bucket_size}'
assert not (self.context_only and context is None), 'context key / values must be supplied if context self attention layer'
assert not (context is not None and (context.shape[0], context.shape[2]) != (b, d)), 'contextual key / values must have the same batch and dimensions as the decoder'
q = self.to_q(x)
kv = self.to_kv(x).chunk(2, dim=-1) if not self.context_only else (context, context)
kv_mask = input_mask if not self.context_only else context_mask
assert divisible_by(kv[0].shape[1], self.kv_bucket_size), 'key/value sequences need to be divisible by key/value bucket size'
qkv = (q, *kv)
merge_heads_fn = partial(merge_heads, h)
q, k, v = map(merge_heads_fn, qkv)
split_index_fn = partial(split_at_index, 1, l_h)
(lq, q), (lk, k), (lv, v) = map(split_index_fn, (q, k, v))
has_local, has_sinkhorn = map(lambda x: x.shape[1] > 0, (lq, q))
out = []
if has_local > 0:
out.append(self.local_attention(lq, lk, lv, input_mask = input_mask))
if has_sinkhorn > 0:
out.append(self.sinkhorn_attention(q, k, v, q_mask = input_mask, kv_mask = kv_mask))
out = torch.cat(out, dim=1)
out = split_heads(h, out)
out = self.to_out(out)
out = self.dropout(out)
return out
class SinkhornTransformer(nn.Module):
def __init__(self, dim, depth, max_seq_len = None, causal = False, heads = 8, dim_head = None, bucket_size = 64, kv_bucket_size = None, context_bucket_size = None, non_permutative = False, sinkhorn_iter = 5, n_sortcut = 0, temperature = 0.75, reversible = False, ff_chunks = 1, ff_dropout = 0., attn_dropout = 0., attn_layer_dropout = 0., layer_dropout = 0., weight_tie = False, ff_glu = False, use_simple_sort_net = None, receives_context = False, context_n_sortcut = 2, n_local_attn_heads = 0, use_rezero = False, n_top_buckets = 1, pkm_layers = tuple(), pkm_num_keys = 128):
super().__init__()
layers = nn.ModuleList([])
kv_bucket_size = default(kv_bucket_size, bucket_size)
context_bucket_size = default(context_bucket_size, bucket_size)
get_attn = lambda: SinkhornSelfAttention(dim, bucket_size, max_seq_len, causal = causal, heads = heads, dim_head = dim_head, kv_bucket_size = kv_bucket_size, non_permutative = non_permutative, sinkhorn_iter = sinkhorn_iter, n_sortcut = n_sortcut, temperature = temperature, attn_dropout = attn_dropout, dropout = attn_layer_dropout, use_simple_sort_net = use_simple_sort_net, n_local_attn_heads = n_local_attn_heads, n_top_buckets = n_top_buckets)
get_ff = lambda: Chunk(ff_chunks, FeedForward(dim, dropout = ff_dropout, glu = ff_glu), along_dim=1)
get_pkm = lambda: PKM(dim, num_keys = pkm_num_keys)
get_attn_context = lambda: SinkhornSelfAttention(dim, bucket_size, max_seq_len, context_only = True, heads = heads, dim_head = dim_head, kv_bucket_size = context_bucket_size, non_permutative = non_permutative, sinkhorn_iter = sinkhorn_iter, n_sortcut = context_n_sortcut, temperature = temperature, attn_dropout = attn_dropout, dropout = attn_layer_dropout, n_top_buckets = n_top_buckets)
get_ff_context = lambda: FeedForward(dim, dropout = ff_dropout, glu = ff_glu)
if weight_tie:
get_attn, get_attn_context, get_ff, get_ff_context = map(cache_fn, (get_attn, get_attn_context, get_ff, get_ff_context))
fn_wrapper = partial(PreNorm, nn.LayerNorm, dim) if not use_rezero else ReZero
for ind in range(depth):
layer_num = ind + 1
use_pkm = layer_num in pkm_layers
get_parallel_fn = get_ff if not use_pkm else get_pkm
layers.append(nn.ModuleList([
fn_wrapper(get_attn()),
fn_wrapper(get_parallel_fn())
]))
if not receives_context:
continue
layers.append(nn.ModuleList([
fn_wrapper(get_attn_context()),
fn_wrapper(get_ff_context())
]))
execute_type = ReversibleSequence if reversible else SequentialSequence
attn_context_layer = ((True, False),) if receives_context else tuple()
route_attn = ((True, False), *attn_context_layer) * depth
route_context = ((False, False), *attn_context_layer) * depth
context_route_map = {'context': route_context, 'context_mask': route_context} if receives_context else {}
attn_route_map = {'input_mask': route_attn}
self.layers = execute_type(layers, args_route = {**context_route_map, **attn_route_map}, layer_dropout = layer_dropout)
self.receives_context = receives_context
self.max_seq_len = max_seq_len
self.pad_to_bucket_size = lcm(bucket_size, kv_bucket_size)
self.context_bucket_size = context_bucket_size
self.is_fixed_length = use_simple_sort_net and not causal
# if not using attention sort and also not causal, force fixed sequence length
assert not (self.is_fixed_length and self.max_seq_len is None), 'maximum sequence length must be specified if length is fixed'
def forward(self, x, **kwargs):
assert not (self.is_fixed_length and x.shape[1] != self.max_seq_len), f'you must supply a sequence of length {self.max_seq_len}'
assert ('context' not in kwargs or self.receives_context), 'needs to be initted with receives_context True if passing contextual key / values'
return self.layers(x, **kwargs)
class SinkhornTransformerLM(nn.Module):
def __init__(self, num_tokens, dim, max_seq_len, depth, heads = 8, dim_head = None, bucket_size = 64, kv_bucket_size = None, context_bucket_size = None, causal = False, non_permutative = True, sinkhorn_iter = 5, n_sortcut = 0, temperature = 0.75, reversible = False, ff_chunks = 1, ff_glu = False, return_embeddings = False, ff_dropout = 0., attn_dropout = 0., attn_layer_dropout = 0., layer_dropout = 0., emb_dropout = 0., weight_tie = False, emb_dim = None, use_simple_sort_net = None, receives_context = False, context_n_sortcut = 0, n_local_attn_heads = 0, use_rezero = False, n_top_buckets = 2, pkm_layers = tuple(), pkm_num_keys = 128):
super().__init__()
emb_dim = default(emb_dim, dim)
self.max_seq_len = max_seq_len
self.to_token_emb = nn.Embedding(num_tokens, emb_dim)
self.axial_pos_emb = AxialPositionalEmbedding(emb_dim, axial_shape = (max_seq_len // bucket_size, bucket_size))
self.emb_dropout = nn.Dropout(emb_dropout)
self.sinkhorn_transformer = SinkhornTransformer(dim, depth, max_seq_len = max_seq_len, causal = causal, heads = heads, dim_head = dim_head, bucket_size = bucket_size, kv_bucket_size = kv_bucket_size, context_bucket_size = context_bucket_size, non_permutative = non_permutative, sinkhorn_iter = sinkhorn_iter, n_sortcut = n_sortcut, temperature = temperature, reversible = reversible, ff_chunks = ff_chunks, ff_dropout = ff_dropout, attn_dropout = attn_dropout, attn_layer_dropout = attn_layer_dropout, layer_dropout = layer_dropout, weight_tie = weight_tie, ff_glu = ff_glu, use_simple_sort_net = use_simple_sort_net, receives_context = receives_context, context_n_sortcut = context_n_sortcut, n_local_attn_heads = n_local_attn_heads, use_rezero = use_rezero, n_top_buckets = n_top_buckets, pkm_layers = pkm_layers, pkm_num_keys = pkm_num_keys)
if emb_dim != dim:
self.sinkhorn_transformer = ProjectInOut(self.sinkhorn_transformer, emb_dim, dim, project_out =(not return_embeddings))
self.norm = nn.LayerNorm(emb_dim)
self.to_logits = identity if return_embeddings else nn.Linear(emb_dim, num_tokens)
def forward(self, x, **kwargs):
_, t, device = *x.shape, x.device
assert t <= self.max_seq_len, f'sequence length {t} is greater than maximum sequence length {self.max_seq_len}'
x = self.to_token_emb(x)
x = self.axial_pos_emb(x) + x
x = self.emb_dropout(x)
x = self.sinkhorn_transformer(x, **kwargs)
x = self.norm(x)
return self.to_logits(x)
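# a minimal usage sketch with assumed toy hyperparameters (dim 64, bucket size 16,
# 128 tokens); mirrors the intended way of calling the language model class
if __name__ == '__main__':
    model = SinkhornTransformerLM(
        num_tokens = 256,
        dim = 64,
        depth = 2,
        heads = 4,
        max_seq_len = 256,
        bucket_size = 16,
        causal = False
    )
    tokens = torch.randint(0, 256, (1, 128))
    logits = model(tokens)                 # (1, 128, 256)
    assert logits.shape == (1, 128, 256)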
| sinkhorn-transformer-master | sinkhorn_transformer/sinkhorn_transformer.py |
import os
from copy import deepcopy
import torch
import torch.multiprocessing as mp
import torch.distributed as dist
from torch.nn.parallel import DistributedDataParallel as DDP
from st_moe_pytorch.st_moe_pytorch import Experts, Expert
from st_moe_pytorch.distributed import all_gather_variable_dim
def setup(rank, world_size):
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = '12355'
dist.init_process_group("gloo", rank = rank, world_size = world_size)
def cleanup():
dist.destroy_process_group()
def start(
rank,
world_size,
batch_size,
batch_size_var_len,
num_experts,
tokens_per_expert,
dim,
use_cuda
):
setup(rank, world_size)
net = Experts([Expert(dim) for _ in range(num_experts)])
if batch_size_var_len:
batch_size = batch_size + rank
seq = torch.randn(batch_size, num_experts, tokens_per_expert, dim)
# locally
local_net = deepcopy(net)
local_inputs, _ = all_gather_variable_dim(seq)
local_out = local_net(
local_inputs,
is_distributed = False
)
local_out.mean().backward()
# distributed
model = DDP(net)
ddp_inputs = seq
if use_cuda:
model.cuda(rank)
ddp_inputs = seq.cuda(rank)
out = model(ddp_inputs)
out.mean().backward()
ddp_all_out, _ = all_gather_variable_dim(out)
if rank == 0:
# validate output is the same for local vs distributed
model.cpu()
ddp_all_out.cpu()
assert torch.allclose(local_out, ddp_all_out.cpu(), atol = 1e-3), 'output is not the same'
# validate gradients of first expert is the same for local vs distributed
get_first_expert_grad = lambda t: t.experts[0].net[0].weight.grad
assert torch.allclose(
get_first_expert_grad(net).cpu(),
get_first_expert_grad(local_net),
atol = 1e-2
), 'grad is not the same'
print('✅ outputs and gradients are same between local and ddp')
cleanup()
if __name__ == '__main__':
world_size = 8
num_experts = 3
batch_size = 2
batch_size_var_len = True
use_cuda = False
assert not use_cuda or torch.cuda.device_count() <= world_size
seq_len = 32
dim = 8
mp.spawn(
start,
args = (
world_size,
batch_size,
batch_size_var_len,
num_experts,
seq_len,
dim,
use_cuda
),
nprocs = world_size,
join = True
)
| st-moe-pytorch-main | assert.py |
from setuptools import setup, find_packages
setup(
name = 'st-moe-pytorch',
packages = find_packages(exclude=[]),
version = '0.1.1',
license='MIT',
description = 'ST - Mixture of Experts - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/st-moe-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'mixture of experts'
],
install_requires=[
'beartype',
'CoLT5-attention>=0.10.15',
'einops>=0.6',
'torch>=2.0',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| st-moe-pytorch-main | setup.py |
from st_moe_pytorch.st_moe_pytorch import (
MoE,
SparseMoEBlock
)
| st-moe-pytorch-main | st_moe_pytorch/__init__.py |
import torch
from torch import nn
import torch.nn.functional as F
from torch.autograd import Function
import torch.distributed as dist
from einops import rearrange, pack, unpack
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def divisible_by(num, den):
return (num % den) == 0
def pad_dim_to(t, length, dim = 0):
pad_length = length - t.shape[dim]
zero_pairs = (-dim - 1) if dim < 0 else (t.ndim - dim - 1)
return F.pad(t, (*((0, 0) * zero_pairs), 0, pad_length))
def all_gather_same_dim(t):
world_size = dist.get_world_size()
gathered_tensors = [torch.empty_like(t, device = t.device, dtype = t.dtype) for i in range(world_size)]
dist.all_gather(gathered_tensors, t)
return gathered_tensors
def gather_sizes(t, *, dim):
size = torch.tensor(t.shape[dim], device = t.device, dtype = torch.long)
sizes = all_gather_same_dim(size)
return torch.stack(sizes)
def has_only_one_value(t):
return (t == t[0]).all()
def all_gather_variable_dim(t, dim = 0, sizes = None):
device, rank, world_size = t.device, dist.get_rank(), dist.get_world_size()
if not exists(sizes):
sizes = gather_sizes(t, dim = dim)
if has_only_one_value(sizes):
gathered_tensors = all_gather_same_dim(t)
gathered_tensors = torch.cat(gathered_tensors, dim = dim)
return gathered_tensors, sizes
max_size = sizes.amax().item()
padded_t = pad_dim_to(t, max_size, dim = dim)
gathered_tensors = all_gather_same_dim(padded_t)
gathered_tensors = torch.cat(gathered_tensors, dim = dim)
seq = torch.arange(max_size, device = device)
mask = rearrange(seq, 'j -> 1 j') < rearrange(sizes, 'i -> i 1')
mask = rearrange(mask, 'i j -> (i j)')
seq = torch.arange(mask.shape[-1], device = device)
indices = seq[mask]
gathered_tensors = gathered_tensors.index_select(dim, indices)
return gathered_tensors, sizes
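# the variable-dim gather above pads every rank's tensor to the largest size, performs a
# regular all_gather, then uses the gathered sizes to build a mask and drop the padded
# rows, so every rank ends up with the exact concatenation of all real rows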
class AllGatherFunction(Function):
@staticmethod
def forward(ctx, x, dim, sizes):
x, batch_sizes = all_gather_variable_dim(x, dim = dim, sizes = sizes)
ctx.batch_sizes = batch_sizes.tolist()
ctx.dim = dim
return x, batch_sizes
@staticmethod
def backward(ctx, grads, _):
batch_sizes, rank = ctx.batch_sizes, dist.get_rank()
grads_by_rank = grads.split(batch_sizes, dim = ctx.dim)
return grads_by_rank[rank], None, None
class AllGather(nn.Module):
def __init__(self, *, dim = 0):
super().__init__()
self.dim = dim
def forward(self, x, sizes = None):
return AllGatherFunction.apply(x, self.dim, sizes)
class SplitByRankFunction(Function):
@staticmethod
def forward(ctx, x):
rank = dist.get_rank()
out = x[rank]
if isinstance(x, tuple):
sizes = tuple(map(lambda t: t.shape[0], x))
else:
sizes = (x.shape[1],) * x.shape[0]
sizes = torch.tensor(sizes, device = out.device, dtype = torch.long)
ctx.sizes = sizes
return out, sizes
@staticmethod
def backward(ctx, grads, _):
grads = rearrange(grads, '... -> 1 ...')
grads = all_gather_variable_dim(grads, sizes = ctx.sizes)
return grads
split_by_rank = SplitByRankFunction.apply
| st-moe-pytorch-main | st_moe_pytorch/distributed.py |
from functools import partial
from collections import namedtuple
from typing import Optional, Tuple, Union
import torch
from torch.nn import Module, ModuleList
from torch import nn, einsum
import torch.nn.functional as F
from beartype import beartype
from einops import rearrange, repeat, reduce, pack, unpack
from colt5_attention import topk as maybe_differentiable_topk
import torch.distributed as dist
from st_moe_pytorch.distributed import (
    AllGather,
    split_by_rank,
    gather_sizes,
    has_only_one_value,
    pad_dim_to
)
# constants
MIN_EXPERT_CAPACITY = 4
MixtureOfExpertsReturn = namedtuple('MixtureOfExpertsReturn', [
'outputs',
'total_aux_loss',
'balance_loss',
'router_z_loss'
])
# helper functions
def exists(val):
return val is not None
def default(val, default):
if exists(val):
return val
return default() if callable(default) else default
def divisible_by(num, den):
return (num % den) == 0
def chunk_num(num, chunks):
num_per_chunk, remainder = divmod(num, chunks)
out = []
for i in range(chunks):
n = num_per_chunk
out.append(n + int(i < remainder))
return out
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def cast_tuple(el, len = 1):
return el if isinstance(el, tuple) else ((el,) * len)
def Sequential(*modules):
return nn.Sequential(*filter(exists, modules))
# tensor related helper functions
def cumsum_exclusive(t, dim = -3):
assert dim < 0
num_pad_dims = -dim - 1
pre_padding = (0, 0) * num_pad_dims
return F.pad(t, (*pre_padding, 1, -1)).cumsum(dim = dim)
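# e.g. along dim = -2, cumsum_exclusive turns [[1], [2], [3]] into [[0], [1], [3]],
# i.e. each position sees the sum of everything strictly before it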
# pytorch one hot throws an error if there are out of bound indices.
# tensorflow, in contrast, does not throw an error
def safe_one_hot(indexes, max_length):
max_index = indexes.max() + 1
one_hot_classes = max(max_index + 1, max_length)
return F.one_hot(indexes, one_hot_classes)[..., :max_length]
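# e.g. safe_one_hot(torch.tensor([0, 3]), 3) gives [[1, 0, 0], [0, 0, 0]] - the out of
# bound index 3 maps to an all-zero row instead of raising like F.one_hot would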
# rms normalization
class RMSNorm(Module):
def __init__(self, dim):
super().__init__()
self.scale = dim ** 0.5
self.gamma = nn.Parameter(torch.ones(dim))
def forward(self, x):
return F.normalize(x, dim = -1) * self.gamma * self.scale
# expert class
# best performing was ff geglu with multiplicative bias (just after gating)
class GEGLU(Module):
def __init__(
self,
dim,
mult_bias = True
):
super().__init__()
self.mult_bias = nn.Parameter(torch.ones(dim)) if mult_bias else 1.
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x * self.mult_bias
class Expert(Module):
def __init__(
self,
dim,
hidden_mult = 4,
mult_bias = True,
prenorm = False
):
super().__init__()
dim_hidden = int(dim * hidden_mult * 2 / 3)
self.net = Sequential(
RMSNorm(dim) if prenorm else None,
nn.Linear(dim, dim_hidden * 2),
GEGLU(dim_hidden, mult_bias = mult_bias),
nn.Linear(dim_hidden, dim)
)
self.apply(self.init_)
def init_(self, module):
if isinstance(module, nn.Linear):
dim = module.weight.shape[0]
std = dim ** -0.5
module.weight.data.uniform_(-std, std)
module.bias.data.uniform_(-std, std)
def forward(self, x):
return self.net(x)
class Experts(nn.Module):
def __init__(
self,
experts,
is_distributed = None,
allow_var_seq_len = False # whether to handle variable sequence length
):
super().__init__()
self.num_experts = len(experts)
self.experts = nn.ModuleList(experts)
# distributed related settings
self.is_distributed = is_distributed
if not exists(self.is_distributed):
self.is_distributed = dist.is_initialized() and dist.get_world_size() > 1
self.all_gather = AllGather()
self.allow_var_seq_len = allow_var_seq_len
        # device tracker, since experts not in use need to be manually moved to CPU in the distributed setting
self.register_buffer('dummy', torch.ones(1), persistent = False)
@property
def device(self):
return self.dummy.device
def all_experts_to_cpu_besides(self, selection):
if isinstance(selection, int):
experts = [self.experts[selection]]
        elif isinstance(selection, slice):
experts = self.experts[selection]
else:
experts = selection
experts_set = set(experts)
for expert in self.experts:
device = self.device if expert in experts_set else 'cpu'
expert.to(device)
def forward(
self,
x,
is_distributed = None
):
"""
einops notation:
b - batch
r - rank (device / machines)
e - experts
n - sequence (number of tokens per expert)
d - feature dimension
"""
# declare some variables
is_distributed = default(is_distributed, self.is_distributed)
shape, num_experts = x.shape, self.num_experts
seq_len = shape[-2]
# for now naively all gather across batch dimension if distributed, optimize later
world_size = 1
rank = 0
if is_distributed:
seq_sizes = gather_sizes(x, dim = -2)
var_seq_len = not has_only_one_value(seq_sizes)
assert self.allow_var_seq_len or not var_seq_len, 'number of tokens per expert must be the same - if you want the framework to handle it, set `allow_var_seq_len = True` on `Experts`'
# if variable sequence length, pad
if var_seq_len:
max_seq_size = seq_sizes.amax().item()
x = pad_dim_to(x, max_seq_size, dim = -2)
# gather and concat across batches, accounting for variable batch sizes
x, batch_sizes = self.all_gather(x)
total_batch_size = batch_sizes.sum().item()
world_size = dist.get_world_size()
rank = dist.get_rank()
# the experts in use on the rank
num_experts_per_rank = num_experts
expert_slice = slice(0, num_experts)
if is_distributed:
if world_size <= num_experts:
num_experts_across_ranks = chunk_num(num_experts, world_size)
start_indices = cumsum_exclusive(torch.tensor(num_experts_across_ranks), dim = -1)
num_experts_per_rank = num_experts_across_ranks[rank]
num_experts_batches_across_ranks = tuple(i * total_batch_size for i in num_experts_across_ranks)
expert_start_index = start_indices[rank].item()
else:
num_batch_chunks = world_size // num_experts
total_ranks_in_use = num_batch_chunks * num_experts
expert_start_index = rank // num_batch_chunks
batch_splits = chunk_num(total_batch_size, num_batch_chunks)
num_experts_batches_across_ranks = batch_splits * num_experts
# for now, remaining machines just process nothing
remain_ranks = world_size % num_experts
num_experts_batches_across_ranks += (0,) * remain_ranks
num_experts_per_rank = int(rank < total_ranks_in_use)
assert len(num_experts_batches_across_ranks) == world_size
expert_slice = slice(expert_start_index, expert_start_index + num_experts_per_rank)
# if distributed, each machine only handles subset of experts and batch
x = rearrange(x, 'b e n d -> e b n d')
if is_distributed:
x, expert_batch_packed_shape = pack_one(x, '* n d')
x = x.split(num_experts_batches_across_ranks, dim = 0)
x, experts_per_rank_sizes = split_by_rank(x)
if num_experts_per_rank > 0:
x = rearrange(x, '(e b) n d -> e b n d', e = num_experts_per_rank)
else:
x = x.reshape(num_experts, *x.shape)
# get the experts in use
self.all_experts_to_cpu_besides(expert_slice)
experts = self.experts[expert_slice]
# route tokens to appropriate experts
outs = []
for expert, expert_input in zip(experts, x):
out = expert(expert_input)
outs.append(out)
if len(outs) > 0:
outs = torch.stack(outs)
else:
outs = torch.empty_like(x, requires_grad = self.training)
# all gather across merged expert batches dimensions
# then split the batch dimension back
if is_distributed:
outs = rearrange(outs, 'e b n d -> (e b) n d')
outs, _ = self.all_gather(outs, sizes = experts_per_rank_sizes)
outs = unpack_one(outs, expert_batch_packed_shape, '* n d')
outs = rearrange(outs, 'e b n d -> b e n d')
if is_distributed:
outs = outs.split(batch_sizes.tolist())
outs, _ = split_by_rank(outs)
# account for padded sequence length
outs = outs[..., :seq_len, :]
assert outs.shape == shape
return outs
# the code below is almost entirely transcribed from the official tensorflow version, on which the papers are based
# https://github.com/tensorflow/tensor2tensor/blob/master/tensor2tensor/models/research/moe.py
# gating network
class TopNGating(Module):
@beartype
def __init__(
self,
dim,
num_gates,
eps = 1e-9,
top_n = 2,
threshold_train: Union[float, Tuple[float, ...]] = 0.2,
threshold_eval: Union[float, Tuple[float, ...]] = 0.2,
capacity_factor_train = 1.25,
capacity_factor_eval = 2.,
straight_through_dispatch_tensor = True,
differentiable_topk = False,
differentiable_topk_fused = True
):
super().__init__()
self.eps = eps
self.num_gates = num_gates
self.to_gates = nn.Linear(dim, num_gates, bias = False)
self.differentiable_topk = differentiable_topk
self.topk = partial(
maybe_differentiable_topk,
non_differentiable = not differentiable_topk,
fused = differentiable_topk_fused # use triton fused coordinate descent if possible by default
)
assert top_n >= 2, 'must be 2 or more experts'
self.top_n = top_n
top_n_minus_1 = top_n - 1
threshold_train = cast_tuple(threshold_train, top_n_minus_1)
threshold_eval = cast_tuple(threshold_eval, top_n_minus_1)
assert len(threshold_train) == len(threshold_eval) == top_n_minus_1
self.register_buffer('threshold_train', torch.tensor([eps, *threshold_train]))
self.register_buffer('threshold_eval', torch.tensor([eps, *threshold_eval]))
self.capacity_factor_train = capacity_factor_train
self.capacity_factor_eval = capacity_factor_eval
self.straight_through_dispatch_tensor = straight_through_dispatch_tensor
self.register_buffer('zero', torch.zeros((1,)), persistent = False)
def forward(self, x):
"""
einstein notation:
b - batch
n - sequence
e - experts
k - top-n experts
"""
*_, b, group_size, dim, dtype, top_n, num_gates, eps = *x.shape, x.dtype, self.top_n, self.num_gates, self.eps
# threshold, capacity depending on training or eval
suffix = 'train' if self.training else 'eval'
threshold = getattr(self, f'threshold_{suffix}')
capacity_factor = getattr(self, f'capacity_factor_{suffix}')
# Each sequence sends (at most?) expert_capacity positions to each expert.
# Static expert_capacity dimension is needed for expert batch sizes
expert_capacity = min(group_size, int((group_size * capacity_factor) / num_gates))
expert_capacity = max(expert_capacity, MIN_EXPERT_CAPACITY)
expert_capacity_f = float(expert_capacity)
# gate logits and gates
gate_logits = self.to_gates(x)
raw_gates = gate_logits.softmax(dim = -1)
# find top N experts per position
topk_return = self.topk(raw_gates, k = top_n)
gate_indices = topk_return.indices
if self.differentiable_topk:
# allow for differentiable topk using coordinate descent
# used successfully for routing from CoLT5 paper https://github.com/lucidrains/CoLT5-attention
gates = topk_return.coor_descent_values
else:
gates = topk_return.values
# move the top-n dimension to be first
gates = rearrange(gates, '... k -> k ...')
gate_indices = rearrange(gate_indices, '... k -> k ...')
# masks
one_hot_gate_indices = F.one_hot(gate_indices, num_gates)
mask = one_hot_gate_indices.float()
mask_1 = mask[0] # needed for balancing loss
# normalize top-n gate scores
denom = reduce(gates, 'k ... -> 1 ...', 'sum').clamp(min = eps)
gates = gates / denom
# best performing policy was to route to the second expert, with probability of min(1., score / threshold), where score = gate2 / (gate1 + gate2)
# optimal threshold was ~ 0.2
# generalized to more than 2 experts
probs = torch.zeros_like(gates).uniform_(0., 1.)
threshold = rearrange(threshold, 'k -> k 1 1')
should_route = probs < (gates / threshold.clamp(min = eps))
# tokens should always be routed to first expert
# threshold for first expert already set to very small number, but just in case
should_route[0, ...] = True
mask *= rearrange(should_route.float(), '... -> ... 1')
mask_cumsum = cumsum_exclusive(mask, dim = -2) # along sequence dimension
# compute assignment to experts - (batch, seq, experts)
# This is the position within the expert's mini-batch for this sequence
positions = []
prev_expert_count = 0.
for n in range(self.top_n):
position_in_expert = (mask_cumsum[n] + prev_expert_count) * mask[n]
# Remove the elements that don't fit. (batch, sequence, experts)
mask[n] *= (position_in_expert < expert_capacity_f).float()
# How many examples in this sequence go to this expert - needed for the next iteration as offset
prev_expert_count = reduce(mask[n], '... n e -> ... 1 e', 'sum')
# (batch, sequence)
position_in_expert = reduce(position_in_expert, '... n e -> ... n', 'sum')
positions.append(position_in_expert)
positions = torch.stack(positions)
# (k, batch, sequence) - mostly ones, but zeros where something didn't fit
mask_flat = reduce(mask, '... n e -> ... n', 'sum')
# (k, batch, sequence) - weighted assignment
# following https://github.com/tensorflow/mesh/blob/master/mesh_tensorflow/transformer/moe.py#L1903
gates = gates * mask_flat
# (batch, sequence, experts, expert_capacity)
N = None
gates = gates[..., N, N]
mask_flat = mask_flat[..., N, N]
one_hot_gate_indices = one_hot_gate_indices[..., N]
safe_one_hot_gates = safe_one_hot(positions.long(), expert_capacity)[..., N, :]
combine_tensor = reduce(
gates
* mask_flat
* one_hot_gate_indices
* safe_one_hot_gates
, 'k ... -> ...', 'sum')
# dispatch tensor
dispatch_tensor = combine_tensor.bool().type(dtype)
if self.straight_through_dispatch_tensor:
dispatch_tensor = dispatch_tensor + combine_tensor - combine_tensor.detach()
# balance losses - (batch, experts)
# We want to equalize the fraction of the batch assigned to each expert
if self.training:
density_1 = reduce(mask_1, '... n e -> ... e', 'mean')
density_1_proxy = reduce(raw_gates, '... n e -> ... e', 'mean') # Something continuous that is correlated with what we want to equalize.
balance_loss = (density_1_proxy * density_1).mean() * float(num_gates ** 2)
else:
balance_loss = self.zero
# calculate the router z-loss proposed in paper
if self.training:
router_z_loss = torch.logsumexp(gate_logits, dim = -1)
router_z_loss = torch.square(router_z_loss)
router_z_loss = router_z_loss.mean()
else:
router_z_loss = self.zero
return dispatch_tensor, combine_tensor, balance_loss, router_z_loss
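# per token, the gating module therefore returns: a dispatch tensor (which slot of which
# expert the token occupies), a combine tensor (the gate-weighted counterpart used to merge
# expert outputs back), and the balance / router z-losses that keep routing well behaved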
# plain mixture of experts
class MoE(Module):
@beartype
def __init__(self,
dim,
num_experts = 16,
expert_hidden_mult = 4,
threshold_train = 0.2,
threshold_eval = 0.2,
capacity_factor_train = 1.25,
capacity_factor_eval = 2.,
gating_top_n = 2,
balance_loss_coef = 1e-2,
router_z_loss_coef = 1e-3,
experts: Optional[Module] = None,
straight_through_dispatch_tensor = True,
differentiable_topk = False,
differentiable_topk_fused = True,
is_distributed = None,
allow_var_seq_len = False
):
super().__init__()
self.dim = dim
self.num_experts = num_experts
self.gate = TopNGating(
dim,
top_n = gating_top_n,
num_gates = num_experts,
straight_through_dispatch_tensor = straight_through_dispatch_tensor,
differentiable_topk = differentiable_topk,
threshold_train = threshold_train,
threshold_eval = threshold_eval,
capacity_factor_train = capacity_factor_train,
capacity_factor_eval = capacity_factor_eval
)
experts = default(experts, lambda: [Expert(dim = dim, hidden_mult = expert_hidden_mult) for _ in range(num_experts)])
self.experts = Experts(
experts,
is_distributed = is_distributed,
allow_var_seq_len = allow_var_seq_len
)
self.balance_loss_coef = balance_loss_coef
self.router_z_loss_coef = router_z_loss_coef
def forward(self, x):
dispatch_tensor, combine_tensor, balance_loss, router_z_loss = self.gate(x)
# dispatch
expert_inputs = einsum('b n d, b n e c -> b e c d', x, dispatch_tensor)
# feed the expert inputs through the experts.
expert_outputs = self.experts(expert_inputs)
# combine
output = einsum('b e c d, b n e c -> b n d', expert_outputs, combine_tensor)
# losses
weighted_balance_loss = balance_loss * self.balance_loss_coef
weighted_router_z_loss = router_z_loss * self.router_z_loss_coef
# combine the losses
total_aux_loss = weighted_balance_loss + weighted_router_z_loss
return MixtureOfExpertsReturn(output, total_aux_loss, balance_loss, router_z_loss)
# sparse moe block
# in particular, they found that adding a feedforward before or after greatly stabilized the training and improved results
class SparseMoEBlock(Module):
@beartype
def __init__(
self,
moe: MoE,
*,
add_ff_before = False,
add_ff_after = True
):
super().__init__()
dim = moe.dim
self.moe = moe
self.moe_prenorm = RMSNorm(dim)
self.ff_before = Expert(dim, prenorm = True) if add_ff_before else None
self.ff_after = Expert(dim, prenorm = True) if add_ff_after else None
def forward(self, x):
# feedforward before
if exists(self.ff_before):
x = self.ff_before(x) + x
# mixture of experts layer
residual = x
moe_out, total_aux_loss, balance_loss, router_z_loss = self.moe(self.moe_prenorm(x))
x = moe_out + residual
# feedforward after
if exists(self.ff_after):
x = self.ff_after(x) + x
return MixtureOfExpertsReturn(x, total_aux_loss, balance_loss, router_z_loss)
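# a minimal usage sketch with assumed toy sizes (dim 64, 4 experts, batch of 2, 32 tokens);
# shows the intended single-machine call path for MoE wrapped in a SparseMoEBlock
if __name__ == '__main__':
    moe = MoE(dim = 64, num_experts = 4, gating_top_n = 2)
    block = SparseMoEBlock(moe, add_ff_before = True, add_ff_after = True)
    x = torch.randn(2, 32, 64)
    out, total_aux_loss, balance_loss, router_z_loss = block(x)
    assert out.shape == (2, 32, 64)         # output keeps the input shape
    (out.sum() + total_aux_loss).backward() # aux loss is added to the main objective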
| st-moe-pytorch-main | st_moe_pytorch/st_moe_pytorch.py |
from setuptools import setup, find_packages
setup(
name = 'block-recurrent-transformer-pytorch',
packages = find_packages(exclude=[]),
version = '0.4.3',
license='MIT',
description = 'Block Recurrent Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
long_description_content_type = 'text/markdown',
url = 'https://github.com/lucidrains/block-recurrent-transformer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'attention mechanism',
'recurrence'
],
install_requires=[
'beartype',
'einops>=0.6.1',
'memorizing-transformers-pytorch>=0.4.0',
'torch>=1.6',
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
| block-recurrent-transformer-pytorch-main | setup.py |
import gzip
import random
import tqdm
import numpy as np
import torch
from torch.optim import Adam
from torch.nn import functional as F
from torch.utils.data import DataLoader, Dataset
from accelerate import Accelerator
from block_recurrent_transformer_pytorch import BlockRecurrentTransformer, RecurrentTrainerWrapper
# constants
NUM_BATCHES = int(1e5)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 1e-4
VALIDATE_EVERY = 100
PRIME_LENGTH = 128
GENERATE_EVERY = 250
GENERATE_LENGTH = 2048
SEQ_LEN = 2048
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return "".join(list(map(decode_token, tokens)))
# accelerator
accelerator = Accelerator()
device = accelerator.device
acc_print = accelerator.print
# instantiate model
model = BlockRecurrentTransformer(
num_tokens = 256,
dim = 512,
depth = 6,
dim_head = 64,
heads = 8,
max_seq_len = 1024,
block_width = 512,
num_state_vectors = 512,
recurrent_layers = (4,),
use_flash_attn = True
)
train_wrapper = RecurrentTrainerWrapper(
model,
xl_memories_dropout = 0.1,
state_dropout = 0.1,
)
model.to(device)
# prepare enwik8 data
with gzip.open("./data/enwik8.gz") as file:
data = np.frombuffer(file.read(int(95e6)), dtype=np.uint8).copy()
np_train, np_valid = np.split(data, [int(90e6)])
data_train, data_val = torch.from_numpy(np_train), torch.from_numpy(np_valid)
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len, (1,))
full_seq = self.data[rand_start : rand_start + self.seq_len + 1].long()
return full_seq.to(device)
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size=BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size=BATCH_SIZE))
# optimizer
optim = Adam(model.parameters(), lr = LEARNING_RATE)
model, optim, train_loader, val_loader = accelerator.prepare(
model, optim, train_loader, val_loader
)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval=10.0, desc="training"):
model.train()
for _ in range(GRADIENT_ACCUMULATE_EVERY):
loss = train_wrapper(next(train_loader))
accelerator.backward(loss / GRADIENT_ACCUMULATE_EVERY)
acc_print(f"training loss: {loss.item()}")
accelerator.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = train_wrapper(next(val_loader))
acc_print(f"validation loss: {loss.item()}")
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:PRIME_LENGTH]
prime = decode_tokens(inp)
acc_print(f"%s \n\n %s", (prime, "*" * 100))
sample = train_wrapper.generate(inp[None, ...], length = GENERATE_LENGTH)
output_str = decode_tokens(sample[0])
acc_print(output_str, "\n")
|
block-recurrent-transformer-pytorch-main
|
train.py
|
import torch
from packaging import version
if version.parse(torch.__version__) >= version.parse('2.0.0'):
from einops._torch_specific import allow_ops_in_compiled_graph
allow_ops_in_compiled_graph()
from block_recurrent_transformer_pytorch.block_recurrent_transformer_pytorch import BlockRecurrentTransformer, RecurrentTrainerWrapper
|
block-recurrent-transformer-pytorch-main
|
block_recurrent_transformer_pytorch/__init__.py
|
import math
from random import random
from functools import wraps, partial
from itertools import zip_longest
from collections import namedtuple, defaultdict
from packaging import version
import torch
import torch.nn.functional as F
from torch import nn, einsum
from einops import rearrange, repeat, pack, unpack
from einops.layers.torch import Rearrange
from beartype import beartype
from beartype.door import is_bearable
from beartype.typing import Optional, List, Tuple
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def is_empty(t: torch.Tensor):
return t.numel() == 0
def cast_tuple(t, length = 1):
return t if isinstance(t, tuple) else ((t,) * length)
def all_unique(arr):
return len(arr) == len(set(arr))
def eval_decorator(fn):
def inner(self, *args, **kwargs):
was_training = self.training
self.eval()
out = fn(self, *args, **kwargs)
self.train(was_training)
return out
return inner
def once(fn):
called = False
@wraps(fn)
def inner(x):
nonlocal called
if called:
return
called = True
return fn(x)
return inner
print_once = once(print)
def compact(arr):
return [*filter(exists, arr)]
def and_reduce(arr: List[torch.Tensor]):
if len(arr) == 0:
return None
head, *rest = arr
for t in rest:
head = head & t
return head
def safe_cat(*args, dim = 1):
args = compact(args)
if len(args) == 0:
return None
return torch.cat(args, dim = dim)
def divisible_by(numer, denom):
return (numer % denom) == 0
def l2norm(t):
return F.normalize(t, dim = -1)
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def pad_at_dim(t, pad, dim = -1, value = 0.):
dims_from_right = (- dim - 1) if dim < 0 else (t.ndim - dim - 1)
zeros = ((0, 0) * dims_from_right)
return F.pad(t, (*zeros, *pad), value = value)
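# illustrative example - pad the second-to-last dimension by one on the left and two on the right
#
#   t = torch.randn(2, 3, 4)
#   pad_at_dim(t, (1, 2), dim = -2).shape   # torch.Size([2, 6, 4])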
# bias-less layernorm
class LayerNorm(nn.Module):
def __init__(self, dim):
super().__init__()
self.gamma = nn.Parameter(torch.ones(dim))
self.register_buffer("beta", torch.zeros(dim))
def forward(self, x):
return F.layer_norm(x, x.shape[-1:], self.gamma, self.beta)
# sampling helpers
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1., dim = -1):
return ((t / max(temperature, 1e-10)) + gumbel_noise(t)).argmax(dim = dim)
def top_k(logits, thres = 0.9):
k = math.ceil((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
# rotary positional embedding w/ xpos
# https://arxiv.org/abs/2104.09864
# https://arxiv.org/abs/2212.10554v1
class RotaryEmbedding(nn.Module):
def __init__(
self,
dim,
width,
scale_base = 512,
theta = 10000
):
super().__init__()
self.width = width
inv_freq = 1.0 / (theta ** (torch.arange(0, dim, 2).float() / dim))
self.register_buffer("inv_freq", inv_freq, persistent = False)
self.scale_base = scale_base
scale = (torch.arange(0, dim, 2) + 0.4 * dim) / (1.4 * dim)
self.register_buffer('scale', scale, persistent = False)
self.register_buffer('cached_freqs', None, persistent = False)
self.register_buffer('cached_scales', None, persistent = False)
@property
def device(self):
return next(self.buffers()).device
def forward(self):
device, seq_len = self.device, self.width
if exists(self.cached_freqs):
cached_seq_len = self.cached_freqs.shape[-2]
if cached_seq_len >= seq_len:
return self.cached_freqs[:seq_len], self.cached_scales[:seq_len]
t = torch.arange(seq_len, device = device).type_as(self.inv_freq)
freqs = torch.einsum('i , j -> i j', t, self.inv_freq)
freqs = torch.cat((freqs, freqs), dim = -1)
power = (t - (seq_len // 2)) / self.scale_base
scale = self.scale ** rearrange(power, 'n -> n 1')
scale = torch.cat((scale, scale), dim = -1)
self.register_buffer('cached_freqs', freqs, persistent = False)
self.register_buffer('cached_scales', scale, persistent = False)
return freqs, scale
def rotate_half(x):
x1, x2 = x.chunk(2, dim=-1)
return torch.cat((-x2, x1), dim=-1)
def apply_rotary_pos_emb(t, pos, scale = 1.):
scale = default(scale, 1.)
seq_len = t.shape[-2]
assert pos.shape[-2] >= seq_len
pos = pos[-seq_len:]
if isinstance(scale, torch.Tensor):
assert scale.shape[-2] >= seq_len
scale = scale[-seq_len:]
return (t * pos.cos() * scale) + (rotate_half(t) * pos.sin() * scale)
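# usage sketch (illustrative only) - mirrors how Attention further below applies it:
# queries are scaled by the xpos scale, keys by its inverse, for length extrapolation
#
#   rotary = RotaryEmbedding(dim = 64, width = 1024)
#   freqs, xpos_scale = rotary()
#   q = torch.randn(1, 8, 512, 64)
#   k = torch.randn(1, 8, 512, 64)
#   q = apply_rotary_pos_emb(q, freqs, xpos_scale)
#   k = apply_rotary_pos_emb(k, freqs, xpos_scale ** -1)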
# memory management
class MemoryManager(nn.Module):
def __init__(
self,
dim,
*,
layers = 1,
mem_lengths = 512,
compress_factors = 1
):
super().__init__()
mem_lengths = cast_tuple(mem_lengths)
compress_factors = cast_tuple(compress_factors)
assert all([mem_length > 0 for mem_length in mem_lengths])
assert len(mem_lengths) == len(compress_factors)
assert layers >= 1
self.mem_lengths = mem_lengths
self.compress_factors = compress_factors
self.layers = nn.ModuleList([])
for _ in range(layers):
compress_fns = nn.ModuleList([])
for compress_factor in compress_factors:
compress_fn = nn.Identity()
if compress_factor > 1:
compress_fn = nn.Sequential(
Rearrange('b n d -> b d n'),
nn.Conv1d(
dim * 2,
dim * 2,
compress_factor,
stride = compress_factor,
groups = 2
),
Rearrange('b d n -> b n d'),
)
compress_fns.append(compress_fn)
self.layers.append(compress_fns)
def forward(
self,
past_memories: List[torch.Tensor],
new_memories: List[torch.Tensor]
):
next_memories = []
for past_memory, new_memory, compress_fns in zip_longest(past_memories, new_memories, self.layers):
# edge case if neither memories exist
if not (exists(past_memory) or exists(new_memory)):
next_memories.append(None)
continue
next_memory = None
for mem_length, compress_factor, compress_fn in zip(self.mem_lengths, self.compress_factors, compress_fns):
# first get the memories for the given compression factor "current_memory"
current_memory = None
if exists(past_memory):
past_memory, current_memory = past_memory[..., :-mem_length, :], past_memory[..., -mem_length:, :]
# compress the new memories coming in, based on the compression factors set at init
if (not is_empty(new_memory)) and compress_factor > 1:
# make sure memory length is divisible by compression factor
new_mem_length = new_memory.shape[-2]
curtailed_length = (new_mem_length // compress_factor) * compress_factor
curtailed_slice = slice(-curtailed_length, None) if curtailed_length > 0 else slice(0, 0)
new_memory = new_memory[..., curtailed_slice, :]
# compress the memory pushed to the next stage
if new_memory.shape[-2] > 0:
new_memory = rearrange(new_memory, 'm b n d -> b n (m d)')
new_memory = compress_fn(new_memory)
new_memory = rearrange(new_memory, 'b n (m d) -> m b n d', m = 2)
# fifo memory queue
# add the new memory on the right
current_memory = safe_cat(current_memory, new_memory, dim = -2)
# "new" memory is new with respect to the next compressed segment
new_memory, current_memory = current_memory[..., :-mem_length, :], current_memory[..., -mem_length:, :]
# concat the new memory to the left into the past
next_memory = safe_cat(current_memory, next_memory, dim = -2)
next_memories.append(next_memory)
return next_memories
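# usage sketch (illustrative only) - each entry is the stacked (key, value) memory from an
# attention layer, shaped (2, batch, length, dim_head); with compression off, the manager
# simply keeps a fifo queue of the last `mem_lengths` timesteps per layer
#
#   manager = MemoryManager(dim = 64, layers = 2, mem_lengths = 512)
#   past = [None, None]
#   new = [torch.randn(2, 1, 512, 64) for _ in range(2)]
#   past = manager(past, new)   # list of two (2, 1, 512, 64) tensors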
# maybe flash attention, if using pytorch 2.0
# constants
Config = namedtuple('EfficientAttentionConfig', ['enable_flash', 'enable_math', 'enable_mem_efficient'])
# state container
class StateContainer(nn.Module):
def __init__(
self,
dim,
*,
num_state_vectors,
dim_head = 64,
heads = 8,
qk_rmsnorm = False,
qk_rmsnorm_scale = 8,
use_flash_attn = False
):
super().__init__()
assert num_state_vectors > 0
self.heads = heads
inner_dim = dim_head * heads
self.state_norm = LayerNorm(dim)
self.q_to_state = nn.Linear(dim, inner_dim, bias = False)
self.q_from_state = nn.Linear(dim, inner_dim, bias = False)
self.state_to_q = nn.Linear(dim, inner_dim, bias = False)
self.state_to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.init_state = nn.Parameter(torch.randn(num_state_vectors, dim))
self.state_pos_ids = nn.Parameter(torch.randn(num_state_vectors, dim))
self.to_state_out = nn.Linear(inner_dim * 2, dim, bias = False)
self.to_state_cross_attn = Attention(dim_head, qk_rmsnorm = qk_rmsnorm, qk_rmsnorm_scale = qk_rmsnorm_scale, use_flash_attn = use_flash_attn)
self.state_self_attn = Attention(dim_head, qk_rmsnorm = qk_rmsnorm, qk_rmsnorm_scale = qk_rmsnorm_scale, use_flash_attn = use_flash_attn)
self.from_state_cross_attn = Attention(dim_head, qk_rmsnorm = qk_rmsnorm, qk_rmsnorm_scale = qk_rmsnorm_scale, use_flash_attn = use_flash_attn)
# gating related parameters - using the fixed simple config
self.state_out_to_gate = nn.Linear(dim, dim)
self.learned_ema_beta = nn.Parameter(torch.randn(dim))
# since each read should be followed by a write, just store cache in the container
self.cache = None
self.next_read_state = None
def set_next_read_state(
self,
states
):
if not exists(states):
states = self.init_state
self.next_read_state = (states,)
def read(self, x):
assert exists(self.next_read_state), 'states to be read must be set with .set_next_read_state'
states, = self.next_read_state
self.next_read_state = None
# pre norm state for attention
normed_states = self.state_norm(states)
        # add the positional ids, which the paper states are critical for it to work
normed_states = normed_states + self.state_pos_ids
# get queries for cross attention, which they do not share, although they share key / values. another intriguing detail
q_to_state = self.q_to_state(x)
q_to_state = rearrange(q_to_state, '... n (h d) -> ... h n d', h = self.heads)
# self attention qkv for states
state_k, state_v = self.state_to_kv(normed_states).chunk(2, dim = -1)
# cross attend to the past states key values
to_state_out = self.to_state_cross_attn(q_to_state, state_k, state_v)
to_state_out = rearrange(to_state_out, 'b h n d -> b n (h d)')
# cache for next write
self.cache = (states, normed_states, state_k, state_v)
return to_state_out
def write(
self,
*,
memories
):
assert exists(self.cache)
k, v = memories
batch = k.shape[0]
# get cached values from the previous read
states, normed_states, state_k, state_v = self.cache
self.cache = None
# derive queries
q_from_state = self.q_from_state(normed_states)
q_from_state = rearrange(q_from_state, '... n (h d) -> ... h n d', h = self.heads)
state_q = self.state_to_q(normed_states)
state_q_einsum = 'n (h d)' if state_q.ndim == 2 else 'b n (h d)'
state_q = repeat(state_q, f'{state_q_einsum} -> b h n d', h = self.heads, b = batch)
# states must also undergo self attention
if q_from_state.ndim == 3:
q_from_state = repeat(q_from_state, '... -> b ...', b = batch)
state_out = self.state_self_attn(state_q, state_k, state_v)
from_state_out = self.from_state_cross_attn(q_from_state, k, v)
state_out = torch.cat((state_out, from_state_out), dim = -1)
state_out = rearrange(state_out, 'b h n d -> b n (h d)')
state_out = self.to_state_out(state_out)
# use the best performing configuration
# fixed simple gate - nothing more than a learned EMA with some resemblance to highway networks
z = self.state_out_to_gate(state_out)
learned_ema_decay = self.learned_ema_beta.sigmoid()
# set new state with the learned EMA gating
return learned_ema_decay * z + (1 - learned_ema_decay) * states
def forward(self, x):
raise NotImplementedError
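# usage sketch (illustrative only) - the container is driven by AttentionBlock further below:
# the next state is set, read during attention, then written back with the block's key / values
#
#   container = StateContainer(512, num_state_vectors = 512, dim_head = 64, heads = 8)
#   container.set_next_read_state(None)                    # None falls back to the learned init state
#   read_out = container.read(torch.randn(1, 512, 512))    # (1, 512, 512) - concatted to attention output
#   k = v = torch.randn(1, 512, 64)                        # single headed key / values from the block
#   new_states = container.write(memories = (k, v))        # (1, 512, 512) - the next recurrent state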
# main class
class Attend(nn.Module):
def __init__(
self,
causal = False,
use_flash_attn = False
):
super().__init__()
self.causal = causal
self.register_buffer("mask", None, persistent=False)
self.use_flash_attn = use_flash_attn
assert not (use_flash_attn and version.parse(torch.__version__) < version.parse('2.0.0')), 'in order to use flash attention, you must be using pytorch 2.0 or above'
# determine efficient attention configs for cuda and cpu
self.cpu_config = Config(True, True, True)
self.cuda_config = None
if not torch.cuda.is_available() or not use_flash_attn:
return
device_properties = torch.cuda.get_device_properties(torch.device('cuda'))
if device_properties.major == 8 and device_properties.minor == 0:
print_once('A100 GPU detected, using flash attention if input tensor is on cuda')
self.cuda_config = Config(True, False, False)
else:
print_once('Non-A100 GPU detected, using math or mem efficient attention if input tensor is on cuda')
self.cuda_config = Config(False, True, True)
def get_mask(self, n, device):
if exists(self.mask) and self.mask.shape[-1] >= n:
return self.mask[:n, :n]
mask = torch.ones((n, n), device=device, dtype=torch.bool).triu(1)
self.register_buffer("mask", mask, persistent=False)
return mask
def flash_attn(self, q, k, v, mask = None):
_, heads, q_len, _, k_len, is_cuda = *q.shape, k.shape[-2], q.is_cuda
# Recommended for multi-query single-key-value attention by Tri Dao
# kv shape torch.Size([1, 512, 64]) -> torch.Size([1, 8, 512, 64])
if k.ndim == 3:
k = repeat(k, 'b ... -> b h ...', h = q.shape[1])
if v.ndim == 3:
v = repeat(v, 'b ... -> b h ...', h = q.shape[1])
# Check if mask exists and expand to compatible shape
# The mask is B L, so it would have to be expanded to B H N L
masks = []
if self.causal:
i, j = q_len, k_len
causal_mask = torch.ones((i, j), dtype = torch.bool, device = q.device).triu(j - i + 1)
masks.append(~causal_mask)
if exists(mask):
if mask.ndim != 2:
mask = repeat(mask, 'w ... -> (b w) ...', b = q.shape[0] // mask.shape[0])
masks.append(mask)
attn_mask = and_reduce(masks)
# Check if there is a compatible device for flash attention
config = self.cuda_config if is_cuda else self.cpu_config
# pytorch 2.0 flash attn: q, k, v, mask, dropout, causal, softmax_scale
with torch.backends.cuda.sdp_kernel(**config._asdict()):
out = F.scaled_dot_product_attention(
q, k, v,
attn_mask = attn_mask
)
return out
def forward(self, q, k, v, mask = None, use_flash_attn = None):
use_flash_attn = default(use_flash_attn, self.use_flash_attn)
b, n, device = q.shape[0], q.shape[-2], q.device
q, ps = pack_one(q, '* h n d')
k, _ = pack_one(k, '* n d')
v, _ = pack_one(v, '* n d')
if use_flash_attn:
out = self.flash_attn(q, k, v, mask = mask)
return unpack_one(out, ps, '* h n d')
scale = q.shape[-1] ** -0.5
k_einsum = 'b j d' if k.ndim == 3 else 'b h j d'
v_einsum = 'b j d' if v.ndim == 3 else 'b h j d'
# similarity
sim = einsum(f"b h i d, {k_einsum} -> b h i j", q, k) * scale
# key padding mask
if exists(mask):
if mask.ndim != 2:
mask = repeat(mask, 'w ... -> (b w) ...', b = b)
sim = sim.masked_fill(~mask, -torch.finfo(sim.dtype).max)
# causal mask
if self.causal:
i, j = sim.shape[-2:]
causal_mask = torch.ones((i, j), dtype = torch.bool, device = q.device).triu(j - i + 1)
sim = sim.masked_fill(causal_mask, -torch.finfo(sim.dtype).max)
# attention
attn = sim.softmax(dim=-1)
# aggregate values
out = einsum(f"b h i j, {v_einsum} -> b h i d", attn, v)
return unpack_one(out, ps, '* h n d')
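# usage sketch (illustrative only) - queries are multi-headed, keys / values may stay single
# headed since they are shared across heads in this repository
#
#   attend = Attend(causal = True)
#   q = torch.randn(1, 8, 1024, 64)   # (batch, heads, seq, dim_head)
#   k = torch.randn(1, 1024, 64)
#   v = torch.randn(1, 1024, 64)
#   out = attend(q, k, v)             # (1, 8, 1024, 64)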
# geglu feedforward
class GEGLU(nn.Module):
def forward(self, x):
x, gate = x.chunk(2, dim = -1)
return F.gelu(gate) * x
def FeedForward(dim, mult = 4):
inner_dim = int(dim * mult * 2 / 3)
return nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, inner_dim * 2, bias = False),
GEGLU(),
nn.Linear(inner_dim, dim, bias = False)
)
# attention
class Attention(nn.Module):
def __init__(
self,
dim_head,
causal = False,
qk_rmsnorm = False,
qk_rmsnorm_scale = 8,
use_flash_attn = False
):
super().__init__()
self.causal = causal
self.qk_rmsnorm = qk_rmsnorm
self.qk_rmsnorm_scale = qk_rmsnorm_scale
self.attend = Attend(causal = causal, use_flash_attn = use_flash_attn)
if qk_rmsnorm:
self.q_scale = nn.Parameter(torch.ones(dim_head))
self.k_scale = nn.Parameter(torch.ones(dim_head))
def forward(
self,
q, k, v,
mask = None,
rotary_pos_emb = None,
xpos_scale = None
):
scale = q.shape[-1] ** -0.5
if self.qk_rmsnorm:
q, k = map(l2norm, (q, k))
scale = self.qk_rmsnorm_scale
if self.qk_rmsnorm:
q = q * self.q_scale
k = k * self.k_scale
# rotary positional embedding with xpos for length extrapolation
if exists(rotary_pos_emb):
q = apply_rotary_pos_emb(q, rotary_pos_emb, xpos_scale)
k = apply_rotary_pos_emb(k, rotary_pos_emb, xpos_scale ** -1)
# attention
out = self.attend(q, k, v, mask = mask)
return out
class AttentionBlock(nn.Module):
def __init__(
self,
dim,
block_width,
dim_head = 64,
heads = 8,
qk_rmsnorm = False,
qk_rmsnorm_scale = 8,
use_flash_attn = False,
num_state_vectors = 0,
num_external_state_reads = 0,
        state_read_before_write = True # defaults to on, as in the paper, but can be turned off if the researcher wants to test reading the state at a lower layer
):
super().__init__()
inner_dim = dim_head * heads
self.heads = heads
self.norm = LayerNorm(dim)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, dim_head * 2, bias = False)
self.attn = Attention(dim_head, qk_rmsnorm = qk_rmsnorm, qk_rmsnorm_scale = qk_rmsnorm_scale, use_flash_attn = use_flash_attn)
self.block_width = block_width
self.is_recurrent_layer = num_state_vectors > 0
# decide how many states this attention layer is going to read from
num_state_reads = int(self.is_recurrent_layer and state_read_before_write) + num_external_state_reads
self.to_out = nn.Linear(inner_dim * (1 + num_state_reads), dim, bias = False)
if not self.is_recurrent_layer:
return
self.state_read_before_write = state_read_before_write
self.state_container = StateContainer(
dim,
dim_head = dim_head,
heads = heads,
num_state_vectors = num_state_vectors,
qk_rmsnorm = qk_rmsnorm,
qk_rmsnorm_scale = qk_rmsnorm_scale,
use_flash_attn = use_flash_attn
)
@property
def device(self):
return next(self.parameters()).device
def forward(
self,
x,
rotary_pos_emb = None,
xpos_scale = None,
attn_mask = None,
xl_memories: Optional[torch.Tensor] = None,
read_from_state_containers: List[StateContainer] = []
):
batch, seq_len, _, width, device = *x.shape, self.block_width, self.device
# pre normalization
x = self.norm(x)
# queries, keys, values and split out heads
q, k, v = (self.to_q(x), *self.to_kv(x).chunk(2, dim = -1))
split_head = partial(rearrange, pattern = 'b n (h d) -> b h n d', h = self.heads)
q = split_head(q)
# save the last key / values as memories for recurrence
memories = torch.stack((k, v))
mem_len = 0
if exists(xl_memories):
# if past memories are passed in, concat as the first bucket
mem_len = xl_memories.shape[-2]
past_k, past_v = xl_memories
k = torch.cat((past_k, k), dim = 1)
v = torch.cat((past_v, v), dim = 1)
# handle cropping of attention mask and positional embeddings
if exists(attn_mask):
attn_mask = attn_mask[:seq_len, :seq_len]
attn_mask = F.pad(attn_mask, (mem_len, 0), value = True)
# attention, but of course
out = self.attn(
q, k, v,
rotary_pos_emb = rotary_pos_emb,
xpos_scale = xpos_scale,
mask = attn_mask
)
# merge heads
out = rearrange(out, 'b h n d -> b n (h d)')
# early return if not a recurrent layer
if not self.is_recurrent_layer and len(read_from_state_containers) == 0:
return self.to_out(out), memories, None
# whether to read from own state container, default to on, but may pass in more
if self.is_recurrent_layer and self.state_read_before_write:
read_from_state_containers = [self.state_container, *read_from_state_containers]
for read_state_container in read_from_state_containers:
# read from the states ...
to_state_out = read_state_container.read(x)
# and concat it to the output of self-attention
out = torch.cat((out, to_state_out), dim = -1)
new_states = None
if self.is_recurrent_layer:
# then write to the states as well if need be
new_states = self.state_container.write(memories = memories)
return self.to_out(out), memories, new_states
# classes
@beartype
class BlockRecurrentTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
dim_head = 64,
heads = 8,
all_layers_qk_rmsnorm = False,
ff_mult = 4,
max_seq_len = 1024,
block_width = 512,
recurrent_layers: Optional[Tuple[int, ...]] = None,
read_recurrent_layers: Optional[Tuple[int, ...]] = None,
num_state_vectors = None,
ignore_index = -100,
use_flash_attn = False,
use_compressed_mem = False,
compressed_mem_factor = 4
):
super().__init__()
num_state_vectors = default(num_state_vectors, block_width)
# set recurrent layers
        recurrent_layers = default(recurrent_layers, (depth // 2,)) # default to one recurrent layer at the middle of the network
assert all([0 < layer <= depth for layer in recurrent_layers]), f'recurrent layers must range from 1 to the depth {depth}'
assert all_unique(recurrent_layers), 'recurrent layers must be all unique. no duplicate layers'
self.recurrent_layers = recurrent_layers
# set read recurrent layers
read_recurrent_layers = default(read_recurrent_layers, recurrent_layers)
assert all([read_layer <= write_layer for read_layer, write_layer in zip(read_recurrent_layers, recurrent_layers)]), 'the recurrent read layer must be always less than or equal to the write layer'
assert all([0 < layer <= depth for layer in read_recurrent_layers])
assert len(read_recurrent_layers) == len(recurrent_layers)
self.read_recurrent_layers = read_recurrent_layers
# token embedding
self.token_emb = nn.Embedding(num_tokens, dim)
self.rotary_pos_emb = RotaryEmbedding(dim = dim_head, width = (2 if not use_compressed_mem else 3) * block_width)
self.layers = nn.ModuleList([])
self.write_to_read_map = {write_layer: read_layer for write_layer, read_layer in zip(recurrent_layers, read_recurrent_layers)}
self.read_state_router = defaultdict(list)
for layer in range(1, depth + 1):
is_recurrent_layer = layer in self.recurrent_layers
layer_num_state_vectors = num_state_vectors if is_recurrent_layer else 0
num_external_state_reads = sum([int(layer == read_layer) for read_layer in read_recurrent_layers])
            # only layers with xl memories
            # or with recurrence in the horizontal direction
            # use qk rmsnorm (in the paper they use cosine sim attention, but i think qk rmsnorm is more proven given the ViT-22B paper)
# one can also override to use all qk rmsnorm by setting all_layers_qk_rmsnorm = True
qk_rmsnorm = all_layers_qk_rmsnorm or is_recurrent_layer
attn_block = AttentionBlock(
dim,
block_width = block_width,
dim_head = dim_head,
heads = heads,
qk_rmsnorm = qk_rmsnorm,
num_state_vectors = layer_num_state_vectors,
use_flash_attn = use_flash_attn,
num_external_state_reads = num_external_state_reads,
state_read_before_write = False,
)
ff_block = FeedForward(dim, mult = ff_mult)
if is_recurrent_layer:
read_layer = self.write_to_read_map[layer]
self.read_state_router[read_layer].append(attn_block.state_container)
self.layers.append(nn.ModuleList([
attn_block,
ff_block
]))
# (compressed) memory management
self.mem_manager = MemoryManager(
dim = dim_head,
layers = depth,
mem_lengths = block_width if not use_compressed_mem else (block_width, block_width // 2),
compress_factors = 1 if not use_compressed_mem else (1, compressed_mem_factor)
)
# to logits
self.to_logits = nn.Sequential(
LayerNorm(dim),
nn.Linear(dim, num_tokens, bias = False)
)
self.max_seq_len = max_seq_len
self.block_width = block_width
assert divisible_by(max_seq_len, block_width)
self.ignore_index = ignore_index
self.register_buffer('cached_causal_attn_mask', None, persistent = False)
@property
def device(self):
return next(self.parameters()).device
def get_causal_attn_mask(self, width):
if exists(self.cached_causal_attn_mask):
cached_mask = self.cached_causal_attn_mask
cached_width = cached_mask.shape[-2]
padding = (width - cached_width) // 2
j_slice = Ellipsis if padding == 0 else slice(padding, -padding)
return cached_mask[:cached_width, j_slice]
device = self.device
causal_mask = torch.ones((width, width), device = device, dtype = torch.bool).triu(1)
return ~causal_mask
@torch.no_grad()
@eval_decorator
def generate(
self,
prime,
length = None,
xl_memories: List[torch.Tensor] = [],
states: List[torch.Tensor] = [],
temperature = 1.,
filter_thres = 0.9,
return_memories_and_states = False
):
length = default(length, self.max_seq_len + 1)
start_len = prime.shape[-1]
assert start_len < self.max_seq_len
assert length <= (self.max_seq_len + 1)
assert start_len < length
output = prime
memories = []
for ind in range(length - start_len):
logits, next_memories, next_states = self.forward(
output,
xl_memories = xl_memories,
states = states
)
logits = logits[:, -1]
filtered_logits = top_k(logits, thres = filter_thres)
sampled = gumbel_sample(filtered_logits, temperature = temperature)
sampled = rearrange(sampled, 'b -> b 1')
output = torch.cat((output, sampled), dim = -1)
if divisible_by(output.shape[-1] - 1, self.max_seq_len): # on the sampling of the last token in the current window, set new memories and states
memories = next_memories
states = next_states
output = output[:, start_len:]
if return_memories_and_states:
return output, memories, states
return output
def forward(
self,
x,
return_loss = False,
xl_memories: List[torch.Tensor] = [],
states: List[torch.Tensor] = [],
return_memories_and_states = None # can force to either return memory + state or not. by default will only return when number of tokens == max_seq_len
):
device = x.device
if return_loss:
x, labels = x[:, :-1], x[:, 1:]
# get sequence length i and j for dynamic pos bias
assert x.shape[-1] <= self.max_seq_len
w = self.block_width
# token embedding
x = self.token_emb(x)
# dynamic pos bias
attn_mask = self.get_causal_attn_mask(w)
rotary_pos_emb, xpos_scale = self.rotary_pos_emb()
# only return memories and state if at the full block width, but can be overridden
return_memories_and_states = default(return_memories_and_states, self.max_seq_len == x.shape[-2])
# ready output tensor, to be concatted to block by block
batch, _, dim = x.shape
out = torch.empty(batch, 0, dim, dtype = x.dtype, device = self.device)
# split input into blocks of width w
input_blocks = x.split(w, dim = -2)
# process each block at a time
for input_block in input_blocks:
input_block_length = input_block.shape[-2]
# ready xl memories and states
iter_xl_memories = iter(xl_memories)
iter_states = iter(states)
next_xl_memories = []
next_states = []
# set the states on the appropriate state containers
for attn, _ in self.layers:
if not attn.is_recurrent_layer:
continue
attn.state_container.set_next_read_state(next(iter_states, None))
# go through layers
for ind, (attn, ff) in enumerate(self.layers):
# determine if the layer requires transformer xl memories
layer = ind + 1
# whether to pass in xl memories
attn_kwargs = dict(
rotary_pos_emb = rotary_pos_emb,
xpos_scale = xpos_scale,
attn_mask = attn_mask,
xl_memories = next(iter_xl_memories, None),
read_from_state_containers = self.read_state_router[layer]
)
# attention layer
residual = input_block
attn_branch_out, layer_xl_memories, layer_next_states = attn(input_block, **attn_kwargs)
if exists(layer_xl_memories):
next_xl_memories.append(layer_xl_memories)
if exists(layer_next_states):
next_states.append(layer_next_states)
input_block = attn_branch_out + residual
# feedforward layer
input_block = ff(input_block) + input_block
# concat to output
out = torch.cat((out, input_block), dim = -2)
# set new xl memories and states
states = next_states
if input_block_length == w:
xl_memories = self.mem_manager(xl_memories, next_xl_memories)
# project to logits
logits = self.to_logits(out)
# detach the states and memories
returned_next_states = list(map(torch.detach, states)) if return_memories_and_states else None
returned_next_xl_memories = list(map(torch.detach, xl_memories)) if return_memories_and_states else None
# whether to return logits
if not return_loss:
return logits, returned_next_xl_memories, returned_next_states
# cross entropy loss
logits = rearrange(logits, 'b n c -> b c n')
loss = F.cross_entropy(logits, labels, ignore_index = self.ignore_index)
return loss, returned_next_xl_memories, returned_next_states
# recurrent trainer wrapper
@beartype
class RecurrentTrainerWrapper(nn.Module):
def __init__(
self,
transformer: BlockRecurrentTransformer,
xl_memories_dropout = 0.,
state_dropout = 0.
):
super().__init__()
self.transformer = transformer
self.seq_len = transformer.max_seq_len
self.xl_memories_dropout = xl_memories_dropout
self.state_dropout = state_dropout
@eval_decorator
@torch.no_grad()
def generate(
self,
prime,
length,
**kwargs
):
seq_len = self.seq_len
start_len = prime.shape[-1]
assert start_len < length
output = prime
current_len = start_len
memories = []
states = []
# determine lengths
has_remainder = not divisible_by(length, seq_len)
remainder_amount = length % seq_len
total_segments = math.ceil(length / seq_len)
if not has_remainder:
lengths = (*((seq_len + 1,) * (total_segments - 1)), seq_len)
elif remainder_amount == 1:
lengths = (seq_len + 1,) * (total_segments - 1)
else:
lengths = (*((seq_len + 1,) * (total_segments - 1)), remainder_amount)
# loop through lengths
for next_length in lengths:
segment_output, memories, states = self.transformer.generate(
output[:, -current_len:],
length = next_length,
xl_memories = memories,
states = states,
return_memories_and_states = True,
**kwargs
)
output = torch.cat((output, segment_output), dim = -1)
current_len = 1
return output[:, start_len:]
def forward(
self,
x,
return_memories_and_states = False
):
total_seq_len, seq_len = x.shape[1], self.seq_len
assert divisible_by(total_seq_len - 1, seq_len), f'length of sequence ({total_seq_len}) must be equal to a multiple of {seq_len} + 1 (one extra token) during training'
segments = total_seq_len // seq_len
total_loss = 0.
memories = []
states = []
for ind in range(segments):
start = ind * seq_len
end = start + seq_len + 1
if self.training and random() < self.xl_memories_dropout:
memories.clear()
if self.training and random() < self.state_dropout:
states.clear()
loss, memories, states = self.transformer(
x[:, start:end],
xl_memories = memories,
states = states,
return_loss = True
)
total_loss = total_loss + (loss / segments)
if return_memories_and_states:
return total_loss, memories, states
return total_loss
|
block-recurrent-transformer-pytorch-main
|
block_recurrent_transformer_pytorch/block_recurrent_transformer_pytorch.py
|
from setuptools import setup, find_packages
setup(
name = 'long-short-transformer',
packages = find_packages(),
version = '0.0.5',
license='MIT',
description = 'Long Short Transformer - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/long-short-transformer',
keywords = [
'artificial intelligence',
'deep learning',
'transformers',
'efficient attention'
],
install_requires=[
'einops>=0.3',
'rotary-embedding-torch',
'torch>=1.6'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
long-short-transformer-main
|
setup.py
|
from long_short_transformer import LongShortTransformer
from long_short_transformer.autoregressive_wrapper import AutoregressiveWrapper
import random
import tqdm
import gzip
import numpy as np
import torch
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
# constants
NUM_BATCHES = int(1e6)
BATCH_SIZE = 4
GRADIENT_ACCUMULATE_EVERY = 4
LEARNING_RATE = 3e-4
VALIDATE_EVERY = 100
GENERATE_EVERY = 500
GENERATE_LENGTH = 512
SEQ_LEN = 1024
# helpers
def cycle(loader):
while True:
for data in loader:
yield data
def decode_token(token):
return str(chr(max(32, token)))
def decode_tokens(tokens):
return ''.join(list(map(decode_token, tokens)))
# instantiate GPT-like decoder model
model = LongShortTransformer(
num_tokens = 256,
dim = 512,
depth = 8,
heads = 8,
max_seq_len = SEQ_LEN,
causal = True,
window_size = 128
)
model = AutoregressiveWrapper(model)
model.cuda()
# prepare enwik8 data
with gzip.open('./data/enwik8.gz') as file:
    data = np.frombuffer(file.read(int(95e6)), dtype = np.uint8).copy()
data_train, data_val = map(torch.from_numpy, np.split(data, [int(90e6)]))
class TextSamplerDataset(Dataset):
def __init__(self, data, seq_len):
super().__init__()
self.data = data
self.seq_len = seq_len
def __getitem__(self, index):
rand_start = torch.randint(0, self.data.size(0) - self.seq_len - 1, (1,))
full_seq = self.data[rand_start: rand_start + self.seq_len + 1].long()
return full_seq.cuda()
def __len__(self):
return self.data.size(0) // self.seq_len
train_dataset = TextSamplerDataset(data_train, SEQ_LEN)
val_dataset = TextSamplerDataset(data_val, SEQ_LEN)
train_loader = cycle(DataLoader(train_dataset, batch_size = BATCH_SIZE))
val_loader = cycle(DataLoader(val_dataset, batch_size = BATCH_SIZE))
# optimizer
optim = torch.optim.Adam(model.parameters(), lr = LEARNING_RATE)
# training
for i in tqdm.tqdm(range(NUM_BATCHES), mininterval = 10., desc = 'training'):
model.train()
for __ in range(GRADIENT_ACCUMULATE_EVERY):
loss = model(next(train_loader))
loss.backward()
print(f'training loss: {loss.item()}')
torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
optim.step()
optim.zero_grad()
if i % VALIDATE_EVERY == 0:
model.eval()
with torch.no_grad():
loss = model(next(val_loader))
print(f'validation loss: {loss.item()}')
if i % GENERATE_EVERY == 0:
model.eval()
inp = random.choice(val_dataset)[:-1]
prime = decode_tokens(inp)
        print(f'{prime} \n\n {"*" * 100}')
sample = model.generate(inp, GENERATE_LENGTH)
output_str = decode_tokens(sample)
print(output_str)
|
long-short-transformer-main
|
train.py
|
from math import gcd, ceil
import functools
import torch
from torch import nn, einsum
import torch.nn.functional as F
from rotary_embedding_torch import RotaryEmbedding, apply_rotary_emb
from einops import rearrange, repeat
# helpers
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def lcm(*numbers):
return int(functools.reduce(lambda x, y: int((x * y) / gcd(x, y)), numbers, 1))
def pad_to_multiple(tensor, multiple, dim = -1, value = 0):
seqlen = tensor.shape[dim]
m = seqlen / multiple
if m.is_integer():
return tensor
remainder = ceil(m) * multiple - seqlen
pad_offset = (0,) * (-1 - dim) * 2
return F.pad(tensor, (*pad_offset, 0, remainder), value=value)
def look_around(x, backward = 1, forward = 0, pad_value = -1, dim = 2):
t = x.shape[1]
dims = (len(x.shape) - dim) * (0, 0)
padded_x = F.pad(x, (*dims, backward, forward), value= pad_value)
tensors = [padded_x[:, ind:(ind + t), ...] for ind in range(forward + backward + 1)]
return torch.cat(tensors, dim=dim)
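# illustrative example - pad_to_multiple rounds the sequence up to whole windows, then
# look_around lets each window see its neighbor by concatenating shifted copies
#
#   x = torch.randn(1, 1000, 512)
#   x = pad_to_multiple(x, 128, dim = -2)                       # (1, 1024, 512)
#   buckets = rearrange(x, 'b (w n) d -> b w n d', n = 128)     # (1, 8, 128, 512)
#   look_around(buckets, backward = 1, forward = 0).shape       # (1, 8, 256, 512)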
# classes
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.fn = fn
self.norm = nn.LayerNorm(dim)
def forward(self, x, **kwargs):
x = self.norm(x)
return self.fn(x, **kwargs)
class FeedForward(nn.Module):
def __init__(self, dim, mult = 4, dropout = 0.):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return self.net(x)
class LongShortAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
causal = True,
window_size = 128,
pos_emb = None,
segment_size = 16,
r = 1,
dropout = 0.
):
super().__init__()
assert not (causal and r >= segment_size), 'r should be less than segment size, if autoregressive'
inner_dim = heads * dim_head
self.scale = dim_head ** -0.5
self.heads = heads
self.causal = causal
self.window_size = window_size
self.segment_size = segment_size
self.pad_to_multiple = window_size if not causal else lcm(window_size, segment_size)
self.to_dynamic_proj = nn.Linear(dim_head, r, bias = False)
self.local_norm = nn.LayerNorm(dim_head)
self.global_norm = nn.LayerNorm(dim_head)
self.pos_emb = default(pos_emb, RotaryEmbedding(dim_head))
self.attn_dropout = nn.Dropout(dropout)
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(dim, inner_dim, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
def forward(self, x, mask = None):
b, n, *_, h, device, causal, w, s = *x.shape, self.heads, x.device, self.causal, self.window_size, self.segment_size
# pad input sequence to multiples of window size (or window size and segment length if causal)
x = pad_to_multiple(x, self.pad_to_multiple, dim = -2, value = 0.)
# derive from variables
padded_len = x.shape[-2]
windows = padded_len // w
is_padded = padded_len != n
mask_value = -torch.finfo(x.dtype).max
# handle mask if padding was needed and mask was not given
if is_padded:
mask = default(mask, torch.ones((b, n), device = device).bool())
mask = pad_to_multiple(mask, w, dim = -1, value = False)
# get queries, keys, values
qkv = (self.to_q(x), self.to_kv(x))
# get sequence range, for calculating mask
seq_range = torch.arange(padded_len, device = device)
# split heads
q, kv = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h = h), qkv)
# rotary embedding
if exists(self.pos_emb):
rotary_emb = self.pos_emb(seq_range, cache_key = padded_len)
rotary_emb = rearrange(rotary_emb, 'n d -> () n d')
q, kv = map(lambda t: apply_rotary_emb(rotary_emb, t), (q, kv))
# scale queries
q = q * self.scale
# get local queries and keys similarity scores
window_fn = lambda t: rearrange(t, 'b (w n) d -> b w n d', n = w)
lq, lkv = map(window_fn, (q, kv))
lookaround_kwargs = {'backward': 1, 'forward': (0 if causal else 1)}
lkv = look_around(lkv, **lookaround_kwargs)
lkv = self.local_norm(lkv)
lsim = einsum('b w i d, b w j d -> b w i j', lq, lkv)
# prepare global key / values
if self.causal:
# autoregressive global attention is handled in segments
# later on, these segments are carefully masked to prevent leakage
gkv = rearrange(kv, 'b (n s) d -> b n s d', s = s)
pkv = self.to_dynamic_proj(gkv)
if exists(mask):
pmask = repeat(mask, 'b (n s) -> (b h) n s', s = s, h = h)
pkv.masked_fill_(~pmask[..., None], mask_value)
pkv = pkv.softmax(dim = -2)
gkv = einsum('b n s d, b n s r -> b n r d', gkv, pkv)
gkv = rearrange(gkv, 'b n r d -> b (n r) d')
else:
# equation (3) in the paper
pkv = self.to_dynamic_proj(kv)
if exists(mask):
pkv.masked_fill_(~mask[..., None], mask_value)
pkv = pkv.softmax(dim = -2)
gkv = einsum('b n d, b n r -> b r d', kv, pkv)
# calculate global queries and keys similarity scores
gkv = self.global_norm(gkv)
gsim = einsum('b n d, b r d -> b n r', q, gkv)
# concat values together (same as keys)
gkv = repeat(gkv, 'b r d -> b w r d', w = windows)
v = torch.cat((gkv, lkv), dim = -2)
# masking
buckets, i, j = lsim.shape[-3:]
if exists(mask):
mask = repeat(mask, 'b (w n) -> (b h) w n', n = w, h = h)
mask = look_around(mask, pad_value = False, **lookaround_kwargs)
mask = rearrange(mask, 'b w n -> b w () n')
lsim.masked_fill_(~mask, mask_value)
# mask out padding
seq_range_windowed = rearrange(seq_range, '(w n) -> () w n', w = windows)
pad_mask = look_around(seq_range_windowed, pad_value = -1, **lookaround_kwargs) == -1
lsim.masked_fill_(pad_mask[:, :, None], mask_value)
# calculate causal masking for both global and local
if self.causal:
g_range = rearrange(seq_range, '(n s) -> n s', s = s)
g_range_max = g_range.amax(dim = -1)
g_mask = seq_range[:, None] >= g_range_max[None, :]
g_mask = rearrange(g_mask, 'i j -> () i j')
gsim.masked_fill_(~g_mask, mask_value)
causal_mask = torch.ones(i, j, device = device).triu_(j - i + 1).bool()
causal_mask = repeat(causal_mask, 'i j -> () u i j', u = buckets)
lsim.masked_fill_(causal_mask, mask_value)
# concat local and global similarities together to ready for attention
gsim = rearrange(gsim, 'b (w n) r -> b w n r', w = windows)
sim = torch.cat((gsim, lsim), dim = -1)
# attention
attn = sim.softmax(dim = -1)
attn = self.attn_dropout(attn)
# aggregate values (same as keys, since tied) and project out
out = einsum('b w i j, b w j d -> b w i d', attn, v)
out = rearrange(out, '(b h) w n d -> b (w n) (h d)', h = h)
out = out[:, :n]
return self.to_out(out)
# main class
class LongShortTransformer(nn.Module):
def __init__(
self,
*,
num_tokens,
dim,
depth,
max_seq_len,
window_size = 128,
causal = True,
dim_head = 64,
heads = 8,
ff_mult = 4,
segment_size = None,
r = None,
ff_dropout = 0.,
attn_dropout = 0.
):
super().__init__()
self.max_seq_len = max_seq_len
self.token_emb = nn.Embedding(num_tokens, dim)
pos_emb = RotaryEmbedding(dim_head)
# handle autoregressive default variables differently
# specifically, segments are only used for autoregressive case
# r is the projected r << n in the non-autoregressive case, and the projected r per segment for the autoregressive case
# yea, it is confusing, i know
segment_size = default(segment_size, 16 if causal else None)
r = default(r, 1 if causal else 128)
self.layers = nn.ModuleList([])
for _ in range(depth):
self.layers.append(nn.ModuleList([
PreNorm(dim, LongShortAttention(dim = dim, heads = heads, dim_head = dim_head, window_size = window_size, causal = causal, pos_emb = pos_emb, segment_size = segment_size, r = r, dropout = attn_dropout)),
PreNorm(dim, FeedForward(dim = dim, mult = ff_mult, dropout = ff_dropout))
]))
self.to_logits = nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, num_tokens)
)
def forward(self, x, mask = None):
x = self.token_emb(x)
for attn, ff in self.layers:
x = attn(x, mask = mask) + x
x = ff(x) + x
return self.to_logits(x)
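# usage sketch (illustrative only) - the transformer can also run non-causally as an encoder,
# in which case a boolean key padding mask may be passed in (batch size 1 shown here)
#
#   model = LongShortTransformer(num_tokens = 20000, dim = 512, depth = 6, heads = 8, causal = False, max_seq_len = 4096, window_size = 128)
#   x = torch.randint(0, 20000, (1, 4096))
#   mask = torch.ones(1, 4096).bool()
#   logits = model(x, mask = mask)   # (1, 4096, 20000)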
|
long-short-transformer-main
|
long_short_transformer/long_short_transformer.py
|
import torch
from torch import nn
import torch.nn.functional as F
# helper function
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# top k filtering
def top_k(logits, thres = 0.9):
k = int((1 - thres) * logits.shape[-1])
val, ind = torch.topk(logits, k)
probs = torch.full_like(logits, float('-inf'))
probs.scatter_(1, ind, val)
return probs
class AutoregressiveWrapper(nn.Module):
def __init__(self, net, ignore_index = -100, pad_value = 0):
super().__init__()
self.pad_value = pad_value
self.ignore_index = ignore_index
self.net = net
self.max_seq_len = net.max_seq_len
@torch.no_grad()
@eval_decorator
def generate(self, start_tokens, seq_len, eos_token = None, temperature = 1., filter_logits_fn = top_k, filter_thres = 0.9, **kwargs):
device = start_tokens.device
num_dims = len(start_tokens.shape)
if num_dims == 1:
start_tokens = start_tokens[None, :]
b, t = start_tokens.shape
out = start_tokens
mask = kwargs.pop('mask', None)
if mask is None:
mask = torch.full_like(out, True, dtype=torch.bool, device=out.device)
for _ in range(seq_len):
x = out[:, -self.max_seq_len:]
mask = mask[:, -self.max_seq_len:]
logits = self.net(x, mask=mask, **kwargs)[:, -1, :]
filtered_logits = top_k(logits, thres = filter_thres)
probs = F.softmax(filtered_logits / temperature, dim=-1)
sample = torch.multinomial(probs, 1)
out = torch.cat((out, sample), dim=-1)
mask = F.pad(mask, (0, 1), value=True)
if eos_token is not None and (sample == eos_token).all():
break
out = out[:, t:]
if num_dims == 1:
out = out.squeeze(0)
return out
def forward(self, x, **kwargs):
xi = x[:, :-1]
xo = x[:, 1:]
        # help auto-solve a frequent area of confusion around input masks in auto-regressive training
# if user supplies a mask that is only off by one from the source sequence, resolve it for them
mask = kwargs.get('mask', None)
if mask is not None and mask.shape[1] == x.shape[1]:
mask = mask[:, :-1]
kwargs.update(mask = mask)
out = self.net(xi, **kwargs)
loss = F.cross_entropy(out.transpose(1, 2), xo, ignore_index = self.ignore_index)
return loss
|
long-short-transformer-main
|
long_short_transformer/autoregressive_wrapper.py
|
from long_short_transformer.long_short_transformer import LongShortTransformer, LongShortAttention
|
long-short-transformer-main
|
long_short_transformer/__init__.py
|
from setuptools import setup, find_packages
setup(
name = 'scattering-transform',
packages = find_packages(),
version = '0.0.7',
license='MIT',
description = 'Scattering Transform module from the paper Scattering Compositional Learner',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/scattering-compositional-learner',
keywords = ['artificial intelligence', 'deep learning', 'reasoning'],
install_requires=[
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
scattering-compositional-learner-master
|
setup.py
|
from scattering_transform.scattering_transform import SCL, ScatteringTransform, SCLTrainingWrapper
|
scattering-compositional-learner-master
|
scattering_transform/__init__.py
|
import torch
from torch import nn
import torch.nn.functional as F
# helper functions
def default(val, default_val):
return val if val is not None else default_val
def expand_dim(t, dim, k):
t = t.unsqueeze(dim)
expand_shape = [-1] * len(t.shape)
expand_shape[dim] = k
return t.expand(*expand_shape)
# simple MLP with ReLU activation
class MLP(nn.Module):
def __init__(self, *dims, activation = None):
super().__init__()
assert len(dims) > 2, 'must have at least 3 dimensions, for dimension in and dimension out'
activation = default(activation, nn.ReLU)
layers = []
pairs = list(zip(dims[:-1], dims[1:]))
for ind, (dim_in, dim_out) in enumerate(pairs):
is_last = ind >= (len(pairs) - 1)
layers.append(nn.Linear(dim_in, dim_out))
if not is_last:
layers.append(activation())
self.net = nn.Sequential(*layers)
def forward(self, x):
return self.net(x)
# the feedforward residual block mentioned in the paper
# used after extracting the visual features, as well as post-extraction of attribute information
class FeedForwardResidual(nn.Module):
def __init__(self, dim, mult = 4):
super().__init__()
self.net = nn.Sequential(
nn.Linear(dim, dim * mult),
nn.LayerNorm(dim * mult),
nn.ReLU(inplace = True),
nn.Linear(dim * mult, dim)
)
def forward(self, x):
return x + self.net(x)
# convolutional net
# todo, make customizable and add Evonorm for batch independent normalization
class ConvNet(nn.Module):
def __init__(self, image_size, chans, output_dim):
super().__init__()
num_conv_layers = len(chans) - 1
conv_output_size = image_size // (2 ** num_conv_layers)
convolutions = []
channel_pairs = list(zip(chans[:-1], chans[1:]))
for ind, (chan_in, chan_out) in enumerate(channel_pairs):
is_last = ind >= (len(channel_pairs) - 1)
convolutions.append(nn.Conv2d(chan_in, chan_out, 3, padding=1, stride=2))
if not is_last:
convolutions.append(nn.BatchNorm2d(chan_out))
self.net = nn.Sequential(
*convolutions,
nn.Flatten(1),
nn.Linear(chans[-1] * (conv_output_size ** 2), output_dim),
nn.ReLU(inplace=True),
FeedForwardResidual(output_dim)
)
def forward(self, x):
return self.net(x)
# scattering transform
class ScatteringTransform(nn.Module):
def __init__(self, dims, heads, activation = None):
super().__init__()
assert len(dims) > 2, 'must have at least 3 dimensions, for dimension in, the hidden dimension, and dimension out'
dim_in, *hidden_sizes, dim_out = dims
dim_in //= heads
dim_out //= heads
self.heads = heads
self.mlp = MLP(dim_in, *hidden_sizes, dim_out, activation = activation)
def forward(self, x):
shape, heads = x.shape, self.heads
dim = shape[-1]
assert (dim % heads) == 0, f'the dimension {dim} must be divisible by the number of heads {heads}'
x = x.reshape(-1, heads, dim // heads)
x = self.mlp(x)
return x.reshape(shape)
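# illustrative example - one small shared MLP applied across 10 heads of an 80 dimensional feature
#
#   st = ScatteringTransform([80, 128, 80], heads = 10)
#   x = torch.randn(4, 9, 80)
#   st(x).shape   # torch.Size([4, 9, 80])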
# main scattering compositional learner class
class SCL(nn.Module):
def __init__(
self,
image_size = 160,
set_size = 9,
conv_channels = [1, 16, 16, 32, 32, 32],
conv_output_dim = 80,
attr_heads = 10,
attr_net_hidden_dims = [128],
rel_heads = 80,
rel_net_hidden_dims = [64, 23, 5]):
super().__init__()
self.vision = ConvNet(image_size, conv_channels, conv_output_dim)
self.attr_heads = attr_heads
self.attr_net = ScatteringTransform([conv_output_dim, *attr_net_hidden_dims, conv_output_dim], heads = attr_heads)
self.ff_residual = FeedForwardResidual(conv_output_dim)
self.rel_heads = rel_heads
self.rel_net = MLP(set_size * (conv_output_dim // rel_heads), *rel_net_hidden_dims)
self.to_logit = nn.Linear(rel_net_hidden_dims[-1] * rel_heads, 1)
def forward(self, sets):
b, m, n, c, h, w = sets.shape
images = sets.view(-1, c, h, w)
features = self.vision(images)
attrs = self.attr_net(features)
attrs = self.ff_residual(attrs)
attrs = attrs.reshape(b, m, n, self.rel_heads, -1).transpose(-2, -3).flatten(3)
rels = self.rel_net(attrs)
rels = rels.flatten(2)
logits = self.to_logit(rels).flatten(1)
return logits
# wrapper for easier training
class SCLTrainingWrapper(nn.Module):
def __init__(self, scl):
super().__init__()
self.scl = scl
def forward(self, questions, answers):
answers = answers.unsqueeze(2)
questions = expand_dim(questions, dim=1, k=8)
permutations = torch.cat((questions, answers), dim=2)
return self.scl(permutations)
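# quick shape check (illustrative sketch) - 8 question panels and 8 candidate answers of
# 1 x 160 x 160 images, scored with the default SCL hyperparameters
if __name__ == '__main__':
    questions = torch.randn(1, 8, 1, 160, 160)  # (batch, panels, channels, height, width)
    answers = torch.randn(1, 8, 1, 160, 160)    # (batch, candidates, channels, height, width)
    model = SCLTrainingWrapper(SCL())
    logits = model(questions, answers)          # (1, 8) - one logit per candidate answer
    print(logits.shape)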
|
scattering-compositional-learner-master
|
scattering_transform/scattering_transform.py
|
from dotenv import load_dotenv
# set the path to the cache in .env and uncomment the next line
# load_dotenv()
from enformer_pytorch import Enformer
from tf_bind_transformer import AdapterModel, Trainer
# instantiate enformer or load pretrained
enformer = Enformer.from_hparams(
dim = 768,
depth = 4,
heads = 8,
target_length = -1,
use_convnext = True,
num_downsamples = 6 # resolution of 2 ^ 6 == 64bp
)
# instantiate model wrapper that takes in enformer
model = AdapterModel(
enformer = enformer,
use_aa_embeds = True,
use_free_text_context = True,
free_text_embed_method = 'mean_pool',
binary_target = True,
target_mse_loss = False,
use_squeeze_excite = True,
aa_embed_encoder = 'protalbert'
).cuda()
# training constants
BATCH_SIZE = 2
GRAD_ACCUM_STEPS = 8
# effective batch size of BATCH_SIZE * GRAD_ACCUM_STEPS = 16
VALIDATE_EVERY = 250
GRAD_CLIP_MAX_NORM = 1.5
REMAP_FILE_PATH = './remap2022_all.bed'
TFACTOR_FOLDER = './tfactor.fastas'
FASTA_FILE_PATH = './hg38.ml.fa'
NON_PEAK_PATH = './generated-non-peaks.bed'
CONTEXT_LENGTH = 4096
SCOPED_NEGS_REMAP_PATH = './neg-npy/remap2022.bed'
SCOPED_NEGS_PATH = './neg-npy'
TRAIN_CHROMOSOMES = [*range(1, 24, 2), 'X'] # train on odd chromosomes
VALID_CHROMOSOMES = [*range(2, 24, 2)] # validate on even
HELD_OUT_TARGET = ['AFF4']
# trainer class for fine-tuning
trainer = Trainer(
model,
context_length = CONTEXT_LENGTH,
batch_size = BATCH_SIZE,
validate_every = VALIDATE_EVERY,
grad_clip_norm = GRAD_CLIP_MAX_NORM,
grad_accum_every = GRAD_ACCUM_STEPS,
remap_bed_file = REMAP_FILE_PATH,
negative_bed_file = NON_PEAK_PATH,
factor_fasta_folder = TFACTOR_FOLDER,
fasta_file = FASTA_FILE_PATH,
train_chromosome_ids = TRAIN_CHROMOSOMES,
valid_chromosome_ids = VALID_CHROMOSOMES,
held_out_targets = HELD_OUT_TARGET,
include_scoped_negs = True,
scoped_negs_remap_bed_path = SCOPED_NEGS_REMAP_PATH,
scoped_negs_path = SCOPED_NEGS_PATH,
)
# do gradient steps in a while loop
while True:
_ = trainer(finetune_enformer_ln_only = False)
|
tf-bind-transformer-main
|
finetune_binary_pred.py
|
import click
from tqdm import tqdm
from pathlib import Path
from Bio import SeqIO
from tf_bind_transformer.protein_utils import get_protein_embedder
@click.command()
@click.option('--model-name', default = 'protalbert', help = 'Protein model name')
@click.option('--fasta-folder', help = 'Path to factor fastas', required = True)
def cache_embeddings(
model_name,
fasta_folder
):
fn = get_protein_embedder(model_name)['fn']
fastas = [*Path(fasta_folder).glob('**/*.fasta')]
assert len(fastas) > 0, f'no fasta files found at {fasta_folder}'
for fasta in tqdm(fastas):
seq = SeqIO.read(fasta, 'fasta')
seq_str = str(seq.seq)
fn([seq_str], device = 'cpu')
if __name__ == '__main__':
cache_embeddings()
|
tf-bind-transformer-main
|
precache_proteins.py
|
from setuptools import setup, find_packages
setup(
name = 'tf-bind-transformer',
packages = find_packages(exclude=[]),
version = '0.0.118',
license='MIT',
description = 'Transformer for Transcription Factor Binding',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/tf-bind-transformer',
long_description_content_type = 'text/markdown',
keywords = [
'artificial intelligence',
'deep learning',
'attention mechanism',
'transformers',
'transcription factors',
'gene expression'
],
install_requires=[
'bidirectional-cross-attention',
'biopython',
'click',
'einops>=0.3',
'enformer-pytorch>=0.5',
'fair-esm',
'logavgexp-pytorch',
'polars',
'python-dotenv',
'sentencepiece',
'torch>=1.6',
'transformers>=4.0',
'tqdm'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
tf-bind-transformer-main
|
setup.py
|
from dotenv import load_dotenv
# set the path to the cache in .env and uncomment the next line
# load_dotenv()
from enformer_pytorch import Enformer
from tf_bind_transformer import AdapterModel, BigWigTrainer
# training constants
BATCH_SIZE = 1
GRAD_ACCUM_STEPS = 8
LEARNING_RATE = 1e-4 # Deepmind used 1e-4 for fine-tuning of Enformer
# effective batch size of BATCH_SIZE * GRAD_ACCUM_STEPS = 16
VALIDATE_EVERY = 250
GRAD_CLIP_MAX_NORM = 1.5
TFACTOR_FOLDER = './tfactor.fastas'
HUMAN_FASTA_FILE_PATH = './hg38.ml.fa'
MOUSE_FASTA_FILE_PATH = './mm10.ml.fa'
HUMAN_LOCI_PATH = './chip_atlas/human_sequences.bed'
MOUSE_LOCI_PATH = './chip_atlas/mouse_sequences.bed'
BIGWIG_PATH = './chip_atlas/bigwig'
BIGWIG_TRACKS_ONLY_PATH = './chip_atlas/bigwig_tracks_only'
ANNOT_FILE_PATH = './chip_atlas/annot.tab'
TARGET_LENGTH = 896
HELD_OUT_TARGET = ['GATA2']
# instantiate enformer or load pretrained
enformer = Enformer.from_pretrained('EleutherAI/enformer-official-rough', target_length = TARGET_LENGTH)
# instantiate model wrapper that takes in enformer
model = AdapterModel(
enformer = enformer,
use_aa_embeds = True,
use_free_text_context = True,
free_text_embed_method = 'mean_pool',
aa_embed_encoder = 'esm',
finetune_output_heads = dict(
human = 12,
mouse = 24
)
).cuda()
# trainer class for fine-tuning
trainer = BigWigTrainer(
model,
human_loci_path = HUMAN_LOCI_PATH,
mouse_loci_path = MOUSE_LOCI_PATH,
human_fasta_file = HUMAN_FASTA_FILE_PATH,
mouse_fasta_file = MOUSE_FASTA_FILE_PATH,
bigwig_folder_path = BIGWIG_PATH,
bigwig_tracks_only_folder_path = BIGWIG_TRACKS_ONLY_PATH,
annot_file_path = ANNOT_FILE_PATH,
target_length = TARGET_LENGTH,
lr = LEARNING_RATE,
batch_size = BATCH_SIZE,
shuffle = True,
validate_every = VALIDATE_EVERY,
grad_clip_norm = GRAD_CLIP_MAX_NORM,
grad_accum_every = GRAD_ACCUM_STEPS,
human_factor_fasta_folder = TFACTOR_FOLDER,
mouse_factor_fasta_folder = TFACTOR_FOLDER,
held_out_targets = HELD_OUT_TARGET
)
# do gradient steps in a while loop
while True:
_ = trainer()
|
tf-bind-transformer-main
|
finetune_track.py
|
import torch
from torch import nn
from tf_bind_transformer.optimizer import get_optimizer
from tf_bind_transformer.data_bigwig import BigWigDataset, BigWigTracksOnlyDataset, get_bigwig_dataloader, get_bigwig_tracks_dataloader
from enformer_pytorch.modeling_enformer import poisson_loss, pearson_corr_coef
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# helpers for logging and accumulating values across gradient steps
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
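# illustrative sketch (not part of the original file): because values are pre-divided by the
# number of accumulation steps before being passed in, accum_log ends up holding a running mean
#
# log = {}
# log = accum_log(log, {'loss': 0.5 / 2})
# log = accum_log(log, {'loss': 0.3 / 2})
# log['loss']  # 0.4, the mean loss over the two accumulation steps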
# simple Trainer class
class BigWigTrainer(nn.Module):
def __init__(
self,
model,
*,
human_factor_fasta_folder,
annot_file_path,
human_loci_path,
mouse_loci_path,
human_fasta_file,
mouse_fasta_file,
batch_size,
bigwig_tracks_only_folder_path = None,
bigwig_folder_path = None,
train_chromosome_ids = None,
valid_chromosome_ids = None,
mouse_factor_fasta_folder = None,
downsample_factor = 128,
target_length = 896,
lr = 3e-4,
wd = 0.1,
validate_every = 250,
grad_clip_norm = None,
grad_accum_every = 1,
held_out_targets_human = [],
held_out_targets_mouse = [],
held_out_cell_types_human = [],
held_out_cell_types_mouse = [],
context_length = 4096,
shuffle = False,
shift_aug_range = (-2, 2),
rc_aug = False,
checkpoint_filename = './checkpoint.pt',
include_biotypes_metadata_in_context = False,
biotypes_metadata_path = None,
include_biotypes_metadata_columns = ['germ_layer', 'cellline_cat'],
biotypes_metadata_delimiter = ' | ',
bigwig_reduction_type = 'sum',
enformer_train_valid_split = True
):
super().__init__()
assert exists(bigwig_folder_path) or exists(bigwig_tracks_only_folder_path)
self.model = model
mouse_factor_fasta_folder = default(mouse_factor_fasta_folder, human_factor_fasta_folder)
self.human_ds = BigWigDataset(
filter_chromosome_ids = train_chromosome_ids,
factor_fasta_folder = human_factor_fasta_folder,
bigwig_folder = bigwig_folder_path,
enformer_loci_path = human_loci_path,
annot_file = annot_file_path,
fasta_file = human_fasta_file,
exclude_targets = held_out_targets_human,
exclude_cell_types = held_out_cell_types_human,
target_length = target_length,
context_length = context_length,
downsample_factor = downsample_factor,
shift_augs = shift_aug_range,
rc_aug = rc_aug,
bigwig_reduction_type = bigwig_reduction_type,
filter_sequences_by = ('column_4', 'train') if enformer_train_valid_split else None,
only_ref = ['hg38'],
factor_species_priority = ['human', 'mouse', 'unknown']
)
self.valid_human_ds = BigWigDataset(
filter_chromosome_ids = valid_chromosome_ids,
factor_fasta_folder = human_factor_fasta_folder,
bigwig_folder = bigwig_folder_path,
enformer_loci_path = human_loci_path,
annot_file = annot_file_path,
fasta_file = human_fasta_file,
include_targets = held_out_targets_human,
include_cell_types = held_out_cell_types_human,
target_length = target_length,
context_length = context_length,
downsample_factor = downsample_factor,
shift_augs = shift_aug_range,
rc_aug = rc_aug,
bigwig_reduction_type = bigwig_reduction_type,
filter_sequences_by = ('column_4', 'valid') if enformer_train_valid_split else None,
only_ref = ['hg38'],
factor_species_priority = ['human', 'mouse', 'unknown']
)
self.mouse_ds = BigWigDataset(
filter_chromosome_ids = train_chromosome_ids,
factor_fasta_folder = mouse_factor_fasta_folder,
bigwig_folder = bigwig_folder_path,
enformer_loci_path = mouse_loci_path,
annot_file = annot_file_path,
fasta_file = mouse_fasta_file,
exclude_targets = held_out_targets_mouse,
exclude_cell_types = held_out_cell_types_mouse,
target_length = target_length,
context_length = context_length,
downsample_factor = downsample_factor,
shift_augs = shift_aug_range,
rc_aug = rc_aug,
bigwig_reduction_type = bigwig_reduction_type,
filter_sequences_by = ('column_4', 'train') if enformer_train_valid_split else None,
only_ref = ['mm10'],
factor_species_priority = ['mouse', 'human', 'unknown']
)
self.valid_mouse_ds = BigWigDataset(
filter_chromosome_ids = valid_chromosome_ids,
factor_fasta_folder = mouse_factor_fasta_folder,
bigwig_folder = bigwig_folder_path,
enformer_loci_path = mouse_loci_path,
annot_file = annot_file_path,
fasta_file = mouse_fasta_file,
include_targets = held_out_targets_mouse,
include_cell_types = held_out_cell_types_mouse,
target_length = target_length,
context_length = context_length,
downsample_factor = downsample_factor,
shift_augs = shift_aug_range,
rc_aug = rc_aug,
bigwig_reduction_type = bigwig_reduction_type,
filter_sequences_by = ('column_4', 'valid') if enformer_train_valid_split else None,
only_ref = ['mm10'],
factor_species_priority = ['mouse', 'human', 'unknown']
)
self.human_head_ds = BigWigTracksOnlyDataset(
ref = 'hg38',
bigwig_folder = bigwig_tracks_only_folder_path,
enformer_loci_path = human_loci_path,
fasta_file = human_fasta_file,
annot_file = annot_file_path,
downsample_factor = downsample_factor,
target_length = target_length,
filter_sequences_by = ('column_4', 'train')
)
self.valid_human_head_ds = BigWigTracksOnlyDataset(
ref = 'hg38',
bigwig_folder = bigwig_tracks_only_folder_path,
enformer_loci_path = human_loci_path,
fasta_file = human_fasta_file,
annot_file = annot_file_path,
downsample_factor = downsample_factor,
target_length = target_length,
filter_sequences_by = ('column_4', 'valid')
)
self.mouse_head_ds = BigWigTracksOnlyDataset(
ref = 'mm10',
bigwig_folder = bigwig_tracks_only_folder_path,
enformer_loci_path = mouse_loci_path,
fasta_file = mouse_fasta_file,
annot_file = annot_file_path,
downsample_factor = downsample_factor,
target_length = target_length,
filter_sequences_by = ('column_4', 'train')
)
self.valid_mouse_head_ds = BigWigTracksOnlyDataset(
ref = 'mm10',
bigwig_folder = bigwig_tracks_only_folder_path,
enformer_loci_path = mouse_loci_path,
fasta_file = mouse_fasta_file,
annot_file = annot_file_path,
downsample_factor = downsample_factor,
target_length = target_length,
filter_sequences_by = ('column_4', 'valid')
)
len_train_human = len(self.human_ds)
len_train_mouse = len(self.mouse_ds)
len_valid_human = len(self.valid_human_ds)
len_valid_mouse = len(self.valid_mouse_ds)
len_train_human_head = len(self.human_head_ds)
len_valid_human_head = len(self.valid_human_head_ds)
len_train_mouse_head = len(self.mouse_head_ds)
len_valid_mouse_head = len(self.valid_mouse_head_ds)
self.has_train = len_train_human > 0 or len_train_mouse > 0 or len_train_human_head > 0 or len_train_mouse_head > 0
self.has_valid = len_valid_human > 0 or len_valid_mouse > 0 or len_valid_human_head > 0 or len_valid_mouse_head > 0
if self.has_train:
            print(f'training with {self.human_ds.ntargets} human targets and {self.mouse_ds.ntargets} mouse targets')
print(f'training independent tracks with {self.human_head_ds.ntargets} human targets and {self.mouse_head_ds.ntargets} mouse targets')
if self.has_valid:
            print(f'validating with {self.valid_human_ds.ntargets} human targets and {self.valid_mouse_ds.ntargets} mouse targets')
print(f'validating independent tracks with {self.valid_human_head_ds.ntargets} human targets and {self.valid_mouse_head_ds.ntargets} mouse targets')
assert self.has_train and self.has_valid, 'must have training and validation samples in order to proceed'
self.train_human_dl = get_bigwig_dataloader(self.human_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size) if len_train_human > 0 else None
self.train_mouse_dl = get_bigwig_dataloader(self.mouse_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size) if len_train_mouse > 0 else None
self.valid_human_dl = get_bigwig_dataloader(self.valid_human_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size) if len_valid_human > 0 else None
self.valid_mouse_dl = get_bigwig_dataloader(self.valid_mouse_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size) if len_valid_mouse > 0 else None
        # dataloaders for the independent track heads, which use sequence and target only (no transcription factor or cell type conditioning)
self.train_human_head_dl = get_bigwig_tracks_dataloader(self.human_head_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size) if len_train_human_head > 0 else None
self.train_mouse_head_dl = get_bigwig_tracks_dataloader(self.mouse_head_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size) if len_train_mouse_head > 0 else None
self.valid_human_head_dl = get_bigwig_tracks_dataloader(self.valid_human_head_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size) if len_valid_human_head > 0 else None
self.valid_mouse_head_dl = get_bigwig_tracks_dataloader(self.valid_mouse_head_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size) if len_valid_mouse_head > 0 else None
# optimizer
self.optim = get_optimizer(model.parameters(), lr = lr, wd = wd)
self.grad_accum_every = grad_accum_every
self.grad_clip_norm = grad_clip_norm
self.validate_every = validate_every
self.register_buffer('steps', torch.Tensor([0.]))
self.checkpoint_filename = checkpoint_filename
def forward(
self,
finetune_enformer_ln_only = True,
**kwargs
):
grad_accum_every = self.grad_accum_every
curr_step = int(self.steps.item())
self.model.train()
log = {}
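        # note (inferred from the code below): each active dataloader contributes one objective, and every
        # loss is divided by grad_accum_every times the number of active objectives, presumably so the summed
        # gradient magnitude stays comparable no matter how many datasets are configured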
loss_divisor = int(exists(self.train_human_dl)) + int(exists(self.train_mouse_dl)) + int(exists(self.train_human_head_dl)) + int(exists(self.train_mouse_head_dl))
if exists(self.train_human_dl):
for _ in range(grad_accum_every):
seq, tf_aa, contextual_texts, target = next(self.train_human_dl)
seq, target = seq.cuda(), target.cuda()
loss = self.model(
seq,
aa = tf_aa,
contextual_free_text = contextual_texts,
target = target,
finetune_enformer_ln_only = finetune_enformer_ln_only,
**kwargs
)
log = accum_log(log, {'human_loss': loss.item() / grad_accum_every})
(loss / self.grad_accum_every / loss_divisor).backward()
print(f'{curr_step} human loss: {log["human_loss"]}')
if exists(self.train_mouse_dl):
for _ in range(grad_accum_every):
seq, tf_aa, contextual_texts, target = next(self.train_mouse_dl)
seq, target = seq.cuda(), target.cuda()
loss = self.model(
seq,
aa = tf_aa,
contextual_free_text = contextual_texts,
target = target,
finetune_enformer_ln_only = finetune_enformer_ln_only,
**kwargs
)
log = accum_log(log, {'mouse_loss': loss.item() / grad_accum_every})
(loss / self.grad_accum_every / loss_divisor).backward()
print(f'{curr_step} mouse loss: {log["mouse_loss"]}')
if exists(self.train_human_head_dl):
for _ in range(grad_accum_every):
seq, target = next(self.train_human_head_dl)
seq, target = seq.cuda(), target.cuda()
loss = self.model(
seq,
target = target,
head = 'human',
finetune_enformer_ln_only = finetune_enformer_ln_only,
**kwargs
)
log = accum_log(log, {'human_head_loss': loss.item() / grad_accum_every})
(loss / self.grad_accum_every / loss_divisor).backward()
print(f'{curr_step} human head loss: {log["human_head_loss"]}')
if exists(self.train_mouse_head_dl):
for _ in range(grad_accum_every):
seq, target = next(self.train_mouse_head_dl)
seq, target = seq.cuda(), target.cuda()
loss = self.model(
seq,
target = target,
head = 'mouse',
finetune_enformer_ln_only = finetune_enformer_ln_only,
**kwargs
)
log = accum_log(log, {'mouse_head_loss': loss.item() / grad_accum_every})
(loss / self.grad_accum_every / loss_divisor).backward()
print(f'{curr_step} mouse head loss: {log["mouse_head_loss"]}')
# gradient clipping
if exists(self.grad_clip_norm):
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip_norm)
# take a gradient step
self.optim.step()
self.optim.zero_grad()
# validation
if (curr_step % self.validate_every) == 0:
self.model.eval()
if exists(self.valid_human_dl):
for _ in range(grad_accum_every):
seq, tf_aa, contextual_texts, target = next(self.valid_human_dl)
seq, target = seq.cuda(), target.cuda()
pred = self.model(
seq,
aa = tf_aa,
contextual_free_text = contextual_texts,
)
valid_loss = self.model.loss_fn(pred, target)
valid_corr_coef = pearson_corr_coef(pred, target)
log = accum_log(log, {
'human_valid_loss': valid_loss.item() / grad_accum_every,
'human_valid_corr_coef': valid_corr_coef.item() / grad_accum_every
})
print(f'{curr_step} human valid loss: {log["human_valid_loss"]}')
print(f'{curr_step} human valid pearson R: {log["human_valid_corr_coef"]}')
if exists(self.valid_mouse_dl):
for _ in range(grad_accum_every):
seq, tf_aa, contextual_texts, target = next(self.valid_mouse_dl)
seq, target = seq.cuda(), target.cuda()
pred = self.model(
seq,
aa = tf_aa,
contextual_free_text = contextual_texts,
)
valid_loss = self.model.loss_fn(pred, target)
valid_corr_coef = pearson_corr_coef(pred, target)
log = accum_log(log, {
'mouse_valid_loss': valid_loss.item() / grad_accum_every,
'mouse_valid_corr_coef': valid_corr_coef.item() / grad_accum_every
})
print(f'{curr_step} mouse valid loss: {log["mouse_valid_loss"]}')
print(f'{curr_step} mouse valid pearson R: {log["mouse_valid_corr_coef"]}')
if exists(self.valid_human_head_dl):
for _ in range(grad_accum_every):
seq, target = next(self.valid_human_head_dl)
seq, target = seq.cuda(), target.cuda()
pred = self.model(seq, head = 'human')
valid_loss = self.model.loss_fn(pred, target)
valid_corr_coef = pearson_corr_coef(pred, target).mean()
log = accum_log(log, {
'human_head_valid_loss': valid_loss.item() / grad_accum_every,
'human_head_valid_corr_coef': valid_corr_coef.item() / grad_accum_every
})
print(f'{curr_step} human head valid loss: {log["human_head_valid_loss"]}')
print(f'{curr_step} human head valid pearson R: {log["human_head_valid_corr_coef"]}')
if exists(self.valid_mouse_head_dl):
for _ in range(grad_accum_every):
seq, target = next(self.valid_mouse_head_dl)
seq, target = seq.cuda(), target.cuda()
pred = self.model(seq, head = 'mouse')
valid_loss = self.model.loss_fn(pred, target)
valid_corr_coef = pearson_corr_coef(pred, target).mean()
log = accum_log(log, {
'mouse_head_valid_loss': valid_loss.item() / grad_accum_every,
'mouse_head_valid_corr_coef': valid_corr_coef.item() / grad_accum_every
})
print(f'{curr_step} mouse head valid loss: {log["mouse_head_valid_loss"]}')
print(f'{curr_step} mouse head valid pearson R: {log["mouse_head_valid_corr_coef"]}')
if curr_step > 0:
torch.save(self.model.state_dict(), self.checkpoint_filename)
self.steps += 1
return log
|
tf-bind-transformer-main
|
tf_bind_transformer/training_utils_bigwig.py
|
import torch
from torch import nn
from einops import rearrange
from torch import einsum
from bidirectional_cross_attention import BidirectionalCrossAttention
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# classes
def FeedForward(dim, mult = 4, dropout = 0.):
return nn.Sequential(
nn.LayerNorm(dim),
nn.Linear(dim, dim * mult),
nn.GELU(),
nn.Dropout(dropout),
nn.Linear(dim * mult, dim)
)
# self attention
class SelfAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
dropout = 0.
):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_qkv = nn.Linear(dim, inner_dim * 3, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(
self,
x,
mask = None,
):
h = self.heads
x = self.norm(x)
q, k, v = self.to_qkv(x).chunk(3, dim = -1)
q = q * self.scale
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
sim = einsum('b h i d, b h j d -> b h i j', q, k)
if exists(mask):
mask_value = -torch.finfo(sim.dtype).max
mask = rearrange(mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~mask, mask_value)
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class SelfAttentionBlock(nn.Module):
def __init__(
self,
*,
dim,
dropout = 0.,
ff_mult = 4,
**kwargs
):
super().__init__()
self.attn = SelfAttention(dim = dim, dropout = dropout, **kwargs)
self.ff = FeedForward(dim = dim, mult = ff_mult, dropout = dropout)
def forward(self, x, mask = None):
x = self.attn(x, mask = mask) + x
x = self.ff(x) + x
return x
# directional cross attention
class CrossAttention(nn.Module):
def __init__(
self,
*,
dim,
heads = 8,
dim_head = 64,
context_dim = None,
dropout = 0.
):
super().__init__()
context_dim = default(context_dim, dim)
self.norm = nn.LayerNorm(dim)
self.context_norm = nn.LayerNorm(context_dim)
self.heads = heads
self.scale = dim_head ** -0.5
inner_dim = dim_head * heads
self.to_q = nn.Linear(dim, inner_dim, bias = False)
self.to_kv = nn.Linear(context_dim, inner_dim * 2, bias = False)
self.to_out = nn.Linear(inner_dim, dim)
self.dropout = nn.Dropout(dropout)
def forward(
self,
x,
context,
mask = None,
context_mask = None
):
h = self.heads
x = self.norm(x)
context = self.context_norm(context)
q, k, v = (self.to_q(x), *self.to_kv(context).chunk(2, dim = -1))
q = q * self.scale
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b h n d', h = h), (q, k, v))
sim = einsum('b h i d, b h j d -> b h i j', q, k)
if exists(context_mask):
mask_value = -torch.finfo(sim.dtype).max
context_mask = rearrange(context_mask, 'b j -> b 1 1 j')
sim = sim.masked_fill(~context_mask, mask_value)
attn = sim.softmax(dim = -1)
attn = self.dropout(attn)
out = einsum('b h i j, b h j d -> b h i d', attn, v)
out = rearrange(out, 'b h n d -> b n (h d)')
return self.to_out(out)
class JointCrossAttentionBlock(nn.Module):
def __init__(
self,
*,
dim,
context_dim = None,
ff_mult = 4,
dropout = 0.,
**kwargs
):
super().__init__()
context_dim = default(context_dim, dim)
self.attn = BidirectionalCrossAttention(dim = dim, context_dim = context_dim, dropout = dropout, prenorm = True, **kwargs)
self.ff = FeedForward(dim, mult = ff_mult, dropout = dropout)
self.context_ff = FeedForward(context_dim, mult = ff_mult, dropout = dropout)
def forward(
self,
x,
context,
mask = None,
context_mask = None
):
attn_out, context_attn_out = self.attn(x, context, mask = mask, context_mask = context_mask)
x = x + attn_out
context = context + context_attn_out
x = self.ff(x) + x
context = self.context_ff(context) + context
return x, context
|
tf-bind-transformer-main
|
tf_bind_transformer/attention.py
|
# for fetching transcription factor sequences
GENE_IDENTIFIER_MAP = {
'RXR': 'RXRA'
}
NAMES_WITH_HYPHENS = {
'NKX3-1',
'NKX2-1',
'NKX2-5',
'SS18-SSX'
}
def parse_gene_name(name):
if '-' not in name or name in NAMES_WITH_HYPHENS:
name = GENE_IDENTIFIER_MAP.get(name, name)
if '_' in name:
# for now, if target with modification
# just search for the target factor name to the left of the underscore
name, *_ = name.split('_')
return (name,)
first, *rest = name.split('-')
parsed_rest = []
for name in rest:
if len(name) == 1:
name = f'{first[:-1]}{name}'
parsed_rest.append(name)
return tuple([first, *parsed_rest])
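# worked examples traced from the logic above (comments only, inputs are illustrative):
#   parse_gene_name('RXR')           -> ('RXRA',)          remapped via GENE_IDENTIFIER_MAP
#   parse_gene_name('NKX2-1')        -> ('NKX2-1',)        hyphen kept, name is in NAMES_WITH_HYPHENS
#   parse_gene_name('STAT1_phospho') -> ('STAT1',)         modification suffix after '_' is dropped
#   parse_gene_name('PAX3-FOXO1')    -> ('PAX3', 'FOXO1')  hyphen treated as a complex of two factors
#   parse_gene_name('GATA1-2')       -> ('GATA1', 'GATA2') single trailing character expanded from the first name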
|
tf-bind-transformer-main
|
tf_bind_transformer/gene_utils.py
|
import torch
import os
from transformers import AutoTokenizer, AutoModelForMaskedLM, logging
from tf_bind_transformer.cache_utils import cache_fn, run_once
logging.set_verbosity_error()
def exists(val):
return val is not None
def map_values(fn, dictionary):
return {k: fn(v) for k, v in dictionary.items()}
CONTEXT_EMBED_USE_CPU = os.getenv('CONTEXT_EMBED_USE_CPU', None) is not None
if CONTEXT_EMBED_USE_CPU:
print('calculating context embed only on cpu')
MODELS = dict(
pubmed = dict(
dim = 768,
path = 'microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract',
)
)
GLOBAL_VARIABLES = dict(model = None, tokenizer = None)
def get_contextual_dim(model_name):
assert model_name in MODELS
return MODELS[model_name]['dim']
@run_once('init_transformer')
def init_transformer(model_name):
path = MODELS[model_name]['path']
GLOBAL_VARIABLES['tokenizer'] = AutoTokenizer.from_pretrained(path)
model = AutoModelForMaskedLM.from_pretrained(path)
if not CONTEXT_EMBED_USE_CPU:
model = model.cuda()
GLOBAL_VARIABLES['model'] = model
@torch.no_grad()
def tokenize_text(
text,
max_length = 256,
model_name = 'pubmed',
hidden_state_index = -1,
return_cls_token = True
):
init_transformer(model_name)
model = GLOBAL_VARIABLES['model']
tokenizer = GLOBAL_VARIABLES['tokenizer']
encoding = tokenizer.batch_encode_plus(
[text],
add_special_tokens = True,
padding = True,
truncation = True,
max_length = max_length,
return_attention_mask = True,
return_tensors = 'pt'
)
if not CONTEXT_EMBED_USE_CPU:
encoding = map_values(lambda t: t.cuda(), encoding)
model.eval()
with torch.no_grad():
outputs = model(**encoding, output_hidden_states = True)
hidden_state = outputs.hidden_states[hidden_state_index][0]
if return_cls_token:
return hidden_state[0]
return hidden_state.mean(dim = 0)
def get_text_repr(
texts,
*,
device,
max_length = 256,
model_name = 'pubmed',
hidden_state_index = -1,
return_cls_token = True,
):
assert model_name in MODELS, f'{model_name} not found in available text transformers to use'
if isinstance(texts, str):
texts = [texts]
get_context_repr_fn = cache_fn(tokenize_text, path = f'contexts/{model_name}')
representations = [get_context_repr_fn(text, max_length = max_length, model_name = model_name, hidden_state_index = hidden_state_index, return_cls_token = return_cls_token) for text in texts]
return torch.stack(representations).to(device)
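# minimal usage sketch (illustrative only, the strings are hypothetical): embeds a list of free text
# cell type / experiment descriptions into a (batch, 768) tensor with the default 'pubmed' encoder,
# caching each embedding on disk keyed by the md5 of the text
#
# embeds = get_text_repr(
#     ['GM12878 lymphoblastoid cell line', 'K562 erythroleukemia cell line'],
#     device = torch.device('cpu')
# )
# embeds.shape  # torch.Size([2, 768])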
|
tf-bind-transformer-main
|
tf_bind_transformer/context_utils.py
|
from tf_bind_transformer.tf_bind_transformer import AdapterModel
from tf_bind_transformer.training_utils import Trainer
from tf_bind_transformer.training_utils_bigwig import BigWigTrainer
|
tf-bind-transformer-main
|
tf_bind_transformer/__init__.py
|
import torch
import os
import re
from pathlib import Path
from functools import partial
import esm
from torch.nn.utils.rnn import pad_sequence
from transformers import AlbertTokenizer, AutoModelForMaskedLM, logging
from tf_bind_transformer.cache_utils import cache_fn, run_once, md5_hash_fn
def exists(val):
return val is not None
def map_values(fn, dictionary):
return {k: fn(v) for k, v in dictionary.items()}
def to_device(t, *, device):
return t.to(device)
def cast_tuple(t):
return (t,) if not isinstance(t, tuple) else t
PROTEIN_EMBED_USE_CPU = os.getenv('PROTEIN_EMBED_USE_CPU', None) is not None
if PROTEIN_EMBED_USE_CPU:
print('calculating protein embed only on cpu')
# global variables
GLOBAL_VARIABLES = {
'model': None,
'tokenizer': None
}
# general helper functions
def calc_protein_representations_with_subunits(proteins, get_repr_fn, *, device):
representations = []
for subunits in proteins:
subunits = cast_tuple(subunits)
subunits_representations = list(map(get_repr_fn, subunits))
subunits_representations = list(map(partial(to_device, device = device), subunits_representations))
subunits_representations = torch.cat(subunits_representations, dim = 0)
representations.append(subunits_representations)
lengths = [seq_repr.shape[0] for seq_repr in representations]
masks = torch.arange(max(lengths), device = device)[None, :] < torch.tensor(lengths, device = device)[:, None]
padded_representations = pad_sequence(representations, batch_first = True)
return padded_representations.to(device), masks.to(device)
# esm related functions
ESM_MAX_LENGTH = 1024
ESM_EMBED_DIM = 1280
INT_TO_AA_STR_MAP = {
0: 'A',
1: 'C',
2: 'D',
3: 'E',
4: 'F',
5: 'G',
6: 'H',
7: 'I',
8: 'K',
9: 'L',
10: 'M',
11: 'N',
12: 'P',
13: 'Q',
14: 'R',
15: 'S',
16: 'T',
17: 'V',
18: 'W',
19: 'Y',
20: '_'
}
def tensor_to_aa_str(t):
str_seqs = []
for int_seq in t.unbind(dim = 0):
str_seq = list(map(lambda t: INT_TO_AA_STR_MAP[t] if t != 20 else '', int_seq.tolist()))
str_seqs.append(''.join(str_seq))
return str_seqs
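# example traced from INT_TO_AA_STR_MAP above (comments only): index 20 is padding and is dropped
# tensor_to_aa_str(torch.tensor([[0, 1, 2, 20, 20]]))  # -> ['ACD']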
@run_once('init_esm')
def init_esm():
model, alphabet = esm.pretrained.esm1b_t33_650M_UR50S()
batch_converter = alphabet.get_batch_converter()
if not PROTEIN_EMBED_USE_CPU:
model = model.cuda()
GLOBAL_VARIABLES['model'] = (model, batch_converter)
def get_single_esm_repr(protein_str):
init_esm()
model, batch_converter = GLOBAL_VARIABLES['model']
data = [('protein', protein_str)]
batch_labels, batch_strs, batch_tokens = batch_converter(data)
if batch_tokens.shape[1] > ESM_MAX_LENGTH:
        print(f'warning: protein exceeds ESM max length of {ESM_MAX_LENGTH}, truncating: {protein_str}')
batch_tokens = batch_tokens[:, :ESM_MAX_LENGTH]
if not PROTEIN_EMBED_USE_CPU:
batch_tokens = batch_tokens.cuda()
with torch.no_grad():
results = model(batch_tokens, repr_layers=[33])
token_representations = results['representations'][33]
representation = token_representations[0][1 : len(protein_str) + 1]
return representation
def get_esm_repr(proteins, device):
if isinstance(proteins, torch.Tensor):
proteins = tensor_to_aa_str(proteins)
get_protein_repr_fn = cache_fn(get_single_esm_repr, path = 'esm/proteins')
return calc_protein_representations_with_subunits(proteins, get_protein_repr_fn, device = device)
# prot-albert 2048 context length
PROT_ALBERT_PATH = 'Rostlab/prot_albert'
PROT_ALBERT_DIM = 4096
PROT_ALBERT_MAX_LENGTH = 2048
def protein_str_with_spaces(protein_str):
protein_str = re.sub(r"[UZOB]", 'X', protein_str)
return ' '.join([*protein_str])
@run_once('init_prot_albert')
def init_prot_albert():
GLOBAL_VARIABLES['tokenizer'] = AlbertTokenizer.from_pretrained(PROT_ALBERT_PATH, do_lower_case = False)
model = AutoModelForMaskedLM.from_pretrained(PROT_ALBERT_PATH)
if not PROTEIN_EMBED_USE_CPU:
model = model.cuda()
GLOBAL_VARIABLES['model'] = model
def get_single_prot_albert_repr(
protein_str,
max_length = PROT_ALBERT_MAX_LENGTH,
hidden_state_index = -1
):
init_prot_albert()
model = GLOBAL_VARIABLES['model']
tokenizer = GLOBAL_VARIABLES['tokenizer']
encoding = tokenizer.batch_encode_plus(
[protein_str_with_spaces(protein_str)],
add_special_tokens = True,
padding = True,
truncation = True,
max_length = max_length,
return_attention_mask = True,
return_tensors = 'pt'
)
if not PROTEIN_EMBED_USE_CPU:
encoding = map_values(lambda t: t.cuda(), encoding)
model.eval()
with torch.no_grad():
outputs = model(**encoding, output_hidden_states = True)
hidden_state = outputs.hidden_states[hidden_state_index][0]
return hidden_state
def get_prot_albert_repr(
proteins,
device,
max_length = PROT_ALBERT_MAX_LENGTH,
hidden_state_index = -1
):
if isinstance(proteins, str):
proteins = [proteins]
if isinstance(proteins, torch.Tensor):
proteins = tensor_to_aa_str(proteins)
get_protein_repr_fn = cache_fn(get_single_prot_albert_repr, path = f'proteins/prot_albert')
return calc_protein_representations_with_subunits(proteins, get_protein_repr_fn, device = device)
# alphafold2 functions
AF2_MAX_LENGTH = 2500
AF2_EMBEDDING_DIM = 384
AF2_DIRECTORY = os.getenv('TF_BIND_AF2_DIRECTORY', os.path.expanduser('~/.cache.tf.bind.transformer/.af2_embeddings'))
AF2_DIRECTORY_PATH = Path(AF2_DIRECTORY)
def get_single_alphafold2_repr(
protein_str,
max_length = AF2_MAX_LENGTH,
):
md5 = md5_hash_fn(protein_str)
embedding_path = AF2_DIRECTORY_PATH / f'{md5}.pt'
assert embedding_path.exists(), f'af2 embedding not found for {protein_str}'
tensor = torch.load(str(embedding_path))
return tensor[:max_length]
def get_alphafold2_repr(
proteins,
device,
max_length = AF2_MAX_LENGTH,
**kwargs
):
representations = []
for subunits in proteins:
subunits = cast_tuple(subunits)
subunits = list(map(lambda t: get_single_alphafold2_repr(t, max_length = max_length), subunits))
subunits = torch.cat(subunits, dim = 0)
representations.append(subunits)
lengths = [seq_repr.shape[0] for seq_repr in representations]
masks = torch.arange(max(lengths), device = device)[None, :] < torch.tensor(lengths, device = device)[:, None]
padded_representations = pad_sequence(representations, batch_first = True)
return padded_representations.to(device), masks.to(device)
# factory functions
PROTEIN_REPR_CONFIG = {
'esm': {
'dim': ESM_EMBED_DIM,
'fn': get_esm_repr
},
'protalbert': {
'dim': PROT_ALBERT_DIM,
'fn': get_prot_albert_repr
},
'alphafold2': {
'dim': AF2_EMBEDDING_DIM,
'fn': get_alphafold2_repr
}
}
def get_protein_embedder(name):
allowed_protein_embedders = list(PROTEIN_REPR_CONFIG.keys())
assert name in allowed_protein_embedders, f"must be one of {', '.join(allowed_protein_embedders)}"
config = PROTEIN_REPR_CONFIG[name]
return config
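# minimal usage sketch (illustrative, the protein string is hypothetical): the returned config exposes
# the embedding dimension and a function that yields (padded_embeddings, mask) on the given device,
# which is how AdapterModel consumes it through self.aa_embed_config['fn']
#
# config = get_protein_embedder('esm')
# embeds, mask = config['fn'](['MKTAYIAKQRQISFVKSHFSRQ'], device = torch.device('cpu'))
# embeds.shape[-1] == config['dim']  # True, 1280 for ESM-1b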
|
tf-bind-transformer-main
|
tf_bind_transformer/protein_utils.py
|
import copy
import math
import torch
import torch.nn.functional as F
from torch import nn, einsum
from functools import wraps
from einops import rearrange, reduce, repeat
from einops.layers.torch import Rearrange, Reduce
from contextlib import contextmanager
from enformer_pytorch import Enformer
from enformer_pytorch.modeling_enformer import poisson_loss, pearson_corr_coef
from enformer_pytorch.finetune import freeze_batchnorms_, freeze_all_but_layernorms_, unfreeze_last_n_layers_, unfreeze_all_layers_
from logavgexp_pytorch import logavgexp
from tf_bind_transformer.cache_utils import cache_fn
from tf_bind_transformer.protein_utils import get_protein_embedder
from tf_bind_transformer.context_utils import get_text_repr, get_contextual_dim
from tf_bind_transformer.attention import FeedForward, JointCrossAttentionBlock, CrossAttention, SelfAttentionBlock
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def identity(fn, *args, **kwargs):
return fn
@contextmanager
def null_context():
yield
# tensor helpers
def l2norm(t):
return F.normalize(t, dim = -1)
def prob_mask_like(t, prob):
return torch.zeros_like(t).float().uniform_(0, 1) < prob
def fourier_encode(x, dims, theta = 20000):
device, dtype = x.device, x.dtype
emb = math.log(theta) / (dims // 2)
emb = torch.exp(torch.arange(dims // 2, device = device) * -emb)
emb = rearrange(x, 'n -> n 1') * rearrange(emb, 'd -> 1 d')
emb = torch.cat((emb.sin(), emb.cos()), dim = -1)
return emb
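# shape note (derived from the rearranges above): for x of shape (n,) and an even dims = D,
# fourier_encode returns an (n, D) tensor - the first D // 2 columns are sines, the rest cosines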
def corr_coef_loss(pred, target):
return 1 - pearson_corr_coef(pred, target).mean()
# genetic sequence caching enformer forward decorator
def cache_enformer_forward(fn):
cached_forward = cache_fn(fn, clear = True, path = 'genetic')
@wraps(fn)
def inner(seqs, *args, **kwargs):
if seqs.ndim == 3:
seqs = seqs.argmax(dim = -1)
seq_list = seqs.unbind(dim = 0)
seq_cache_keys = [''.join(list(map(str, one_seq.tolist()))) for one_seq in seq_list]
outputs = [cached_forward(one_seq, *args, __cache_key = seq_cache_key, **kwargs) for one_seq, seq_cache_key in zip(seq_list, seq_cache_keys)]
return torch.stack(outputs)
return inner
# model
class FiLM(nn.Module):
def __init__(
self,
dim,
conditioned_dim
):
super().__init__()
self.to_gamma = nn.Linear(dim, conditioned_dim)
self.to_bias = nn.Linear(dim, conditioned_dim)
def forward(self, x, condition, mask = None):
gamma = self.to_gamma(condition)
bias = self.to_bias(condition)
x = x * rearrange(gamma, 'b d -> b 1 d')
x = x + rearrange(bias, 'b d -> b 1 d')
return x
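# note: FiLM applies a per-sample, feature-wise affine transform - gamma and bias are predicted from the
# contextual embedding and broadcast across the sequence dimension; the mask argument is accepted only for
# interface parity with SqueezeExcitation and is not used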
class SqueezeExcitation(nn.Module):
def __init__(
self,
dim,
conditioned_dim,
eps = 1e-8
):
super().__init__()
self.eps = eps
self.to_gate = nn.Linear(dim + conditioned_dim, conditioned_dim)
def forward(self, x, condition, mask = None):
if exists(mask):
            numer = x.masked_fill(~mask[..., None], 0.).sum(dim = 1)
denom = mask.sum(dim = 1)[..., None].clamp(min = self.eps)
mean_x = numer / denom
else:
mean_x = x.mean(dim = 1)
condition = torch.cat((condition, mean_x), dim = -1)
gate = self.to_gate(condition)
x = x * rearrange(gate, 'b d -> b 1 d').sigmoid()
return x
# read value MLP for calculating auxiliary loss
class ReadValueMLP(nn.Module):
def __init__(
self,
dim,
*,
fourier_dims = 256,
norm_factor_fourier = 50,
norm_factor_linear = 8000,
eps = 1e-20
):
super().__init__()
self.eps = eps
self.fourier_dims = fourier_dims
self.norm_factor_fourier = norm_factor_fourier
self.norm_factor_linear = norm_factor_linear
self.logits_norm = nn.Sequential(
Reduce('b n d -> b d', 'mean'),
nn.LayerNorm(dim)
)
self.mlp = nn.Sequential(
nn.Linear(dim + fourier_dims + 2, dim * 2),
nn.GELU(),
nn.Linear(dim * 2, 1),
Rearrange('... 1 -> ...')
)
def forward(self, logits, peaks_nr, read_value):
logits = self.logits_norm(logits)
        peaks_nr = rearrange(peaks_nr, '... -> (...)')
        peaks_nr_log_space = rearrange(torch.log(peaks_nr + self.eps), '... -> ... 1')
        peaks_nr_encoded = fourier_encode(peaks_nr / self.norm_factor_fourier, self.fourier_dims)
        peaks_nr_normed = rearrange(peaks_nr, '... -> ... 1') / self.norm_factor_linear
peaks_nr_encoded_with_self = torch.cat((peaks_nr_normed, peaks_nr_log_space, peaks_nr_encoded), dim = -1)
logits_with_peaks = torch.cat((logits, peaks_nr_encoded_with_self), dim = -1)
pred = self.mlp(logits_with_peaks)
read_value = rearrange(read_value, '... -> (...)')
return F.smooth_l1_loss(pred, read_value)
class HypergridLinear(nn.Module):
def __init__(
self,
dim,
dim_out,
*,
context_dim
):
super().__init__()
self.weights = nn.Parameter(torch.randn(dim, dim_out))
self.contextual_projection = nn.Linear(context_dim, dim * dim_out)
def forward(self, x, context):
# derive contextual gating, from hypergrids paper
gating = self.contextual_projection(context).sigmoid()
gating = rearrange(gating, 'b (i o) -> b i o', i = int(math.sqrt(gating.shape[-1])))
# gate interactions projection with context
to_logits_w = rearrange(self.weights, 'i o -> 1 i o') * gating
return einsum('b n d, b d e -> b n e', x, to_logits_w)
# FILIP adapter model
class FILIP(nn.Module):
def __init__(
self,
dim,
context_dim,
heads,
dim_head = 64,
dropout = 0.
):
super().__init__()
self.heads = heads
inner_latent_dim = heads * dim_head
self.to_latent_w = nn.Parameter(torch.randn(dim, inner_latent_dim))
self.to_latent_b = nn.Parameter(torch.randn(inner_latent_dim))
self.pre_attn_dropout = dropout
self.null_context = nn.Parameter(torch.randn(heads, dim_head))
self.context_to_latent_w = nn.Parameter(torch.randn(context_dim, inner_latent_dim))
self.context_to_latent_b = nn.Parameter(torch.randn(inner_latent_dim))
def forward(
self,
x,
context,
context_mask = None
):
b, heads, device = x.shape[0], self.heads, x.device
x = einsum('b n d, d e -> b n e', x, self.to_latent_w)
x = x + self.to_latent_b
x = rearrange(x, 'b n (h d) -> b h n d', h = heads)
context = einsum('b n d, d e -> b n e', context, self.context_to_latent_w)
context = context + self.context_to_latent_b
context = rearrange(context, 'b n (h d) -> b h n d', h = heads)
context, x = map(l2norm, (context, x))
# fine grained interaction between dna and protein sequences
# FILIP https://arxiv.org/abs/2111.07783
if x.shape[0] == 1:
# in the case one passes in 1 genomic sequence track
# but multiple factors + contexts, as in enformer training
x = rearrange(x, '1 ... -> ...')
einsum_eq = 'h i d, b h j d -> b h i j'
else:
einsum_eq = 'b h i d, b h j d -> b h i j'
# create context mask if not exist
if not exists(context_mask):
            context_mask = torch.ones((b, context.shape[-2]), device = device).bool()
# dropout mask by dropout prob
if self.training:
keep_mask = prob_mask_like(context_mask, 1 - self.pre_attn_dropout)
context_mask = context_mask & keep_mask
# add null context and modify mask
context_mask = F.pad(context_mask, (1, 0), value = True)
context_mask = rearrange(context_mask, 'b j -> b 1 1 j')
null_context = repeat(self.null_context, 'h d -> b h 1 d', b = b)
context = torch.cat((null_context, context), dim = -2)
# differentiable max, as in FILIP paper
interactions = einsum(einsum_eq, x, context)
interactions = logavgexp(interactions, mask = context_mask, dim = -1, temp = 0.05)
interactions = rearrange(interactions, 'b h i -> b i h')
return interactions
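# shape summary for FILIP.forward (derived from the einsums above): x (b, n, dim) and context (b, m, context_dim)
# are projected to per-head latents of size dim_head and l2-normalized; every genomic position is scored against
# every context token (with a learned null token prepended so empty contexts stay well defined), and logavgexp
# acts as a smooth maximum over context tokens, yielding interactions of shape (b, n, heads)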
class AdapterModel(nn.Module):
def __init__(
self,
*,
enformer,
latent_dim = 64,
latent_heads = 32,
aa_embed_dim = None,
aa_embed_encoder = 'esm',
contextual_embed_dim = None,
use_aa_embeds = False,
use_free_text_context = False,
free_text_context_encoder = 'pubmed',
free_text_embed_method = 'cls',
dropout = 0.,
binary_target = False,
target_mse_loss = False,
aux_read_value_loss = False,
read_value_aux_loss_weight = 0.05,
joint_cross_attn_depth = 1,
genome_self_attn_depth = 0,
fourier_dims = 256,
condition_squeeze_excite = False,
condition_film = False,
condition_hypergrid = True,
use_corr_coef_loss = False,
finetune_output_heads = None,
**kwargs
):
super().__init__()
assert isinstance(enformer, Enformer), 'enformer must be an instance of Enformer'
self.enformer = enformer
enformer_dim = enformer.dim * 2
if exists(finetune_output_heads):
self.enformer.add_heads(**finetune_output_heads)
self.norm_seq_embed = nn.LayerNorm(enformer_dim)
# contextual embedding related variables
assert free_text_embed_method in {'cls', 'mean_pool'}, 'must be either cls or mean_pool'
self.free_text_embed_method = free_text_embed_method
self.use_free_text_context = use_free_text_context
if use_free_text_context:
contextual_embed_dim = get_contextual_dim(free_text_context_encoder)
else:
assert exists(contextual_embed_dim), 'contextual embedding dimension must be given if not using transformer encoder'
# protein embedding related variables
self.use_aa_embeds = use_aa_embeds
self.aa_embed_config = get_protein_embedder(aa_embed_encoder)
self.get_aa_embed = self.aa_embed_config['fn']
if use_aa_embeds:
aa_embed_dim = self.aa_embed_config['dim']
else:
assert exists(aa_embed_dim), 'AA embedding dimensions must be set if not using ESM'
# conditioning
self.cond_genetic = None
self.cond_protein = None
if condition_squeeze_excite or condition_film:
condition_klass = SqueezeExcitation if condition_squeeze_excite else FiLM
self.cond_genetic = condition_klass(contextual_embed_dim, enformer_dim)
self.cond_protein = condition_klass(contextual_embed_dim, aa_embed_dim)
# genome self attn
self.genome_self_attns = nn.ModuleList([])
for _ in range(genome_self_attn_depth):
attn = SelfAttentionBlock(
dim = enformer_dim,
dropout = dropout
)
self.genome_self_attns.append(attn)
# joint attn
self.joint_cross_attns = nn.ModuleList([])
for _ in range(joint_cross_attn_depth):
attn = JointCrossAttentionBlock(
dim = enformer_dim,
context_dim = aa_embed_dim,
dropout = dropout
)
self.joint_cross_attns.append(attn)
# latents
self.filip = FILIP(
dim = enformer_dim,
context_dim = aa_embed_dim,
dim_head = latent_dim,
heads = latent_heads,
dropout = dropout
)
# hypergrid conditioning
        if condition_hypergrid:
            self.linear_with_hypergrid = HypergridLinear(latent_heads, latent_heads, context_dim = contextual_embed_dim)
            self.linear_to_logits = None
        else:
            # keep the attribute defined so the `exists(self.linear_with_hypergrid)` check in forward works in both branches
            self.linear_with_hypergrid = None
            self.linear_to_logits = nn.Linear(latent_heads, latent_heads)
# to prediction
self.binary_target = binary_target
self.aux_read_value_loss = aux_read_value_loss
self.read_value_aux_loss_weight = read_value_aux_loss_weight
if binary_target:
self.loss_fn = F.binary_cross_entropy_with_logits if not target_mse_loss else F.mse_loss
self.to_pred = nn.Sequential(
Reduce('... n d -> ... d', 'mean'),
nn.LayerNorm(latent_heads),
nn.Linear(latent_heads, 1),
Rearrange('... 1 -> ...')
)
self.to_read_value_aux_loss = ReadValueMLP(
dim = latent_heads,
fourier_dims = fourier_dims
)
else:
self.loss_fn = poisson_loss if not use_corr_coef_loss else corr_coef_loss
self.to_pred = nn.Sequential(
nn.Linear(latent_heads, 1),
Rearrange('... 1 -> ...'),
nn.Softplus()
)
def combine_losses(self, loss, aux_loss):
if not self.aux_read_value_loss:
return loss
return loss + self.read_value_aux_loss_weight * aux_loss
def forward_enformer_head(
self,
seq_embed,
*,
head,
target = None,
return_corr_coef = False
):
assert not self.binary_target, 'cannot finetune on tracks if binary_target training is turned on'
unfreeze_all_layers_(self.enformer._heads)
assert head in self.enformer._heads, f'{head} head not found in enformer'
pred = self.enformer._heads[head](seq_embed)
if not exists(target):
return pred
assert pred.shape[-1] == target.shape[-1], f'{head} head on enformer produced {pred.shape[-1]} tracks, but the supplied target only has {target.shape[-1]}'
if exists(target) and return_corr_coef:
return pearson_corr_coef(pred, target)
return self.loss_fn(pred, target)
def forward(
self,
seq,
*,
aa = None,
aa_embed = None,
contextual_embed = None,
contextual_free_text = None,
aa_mask = None,
target = None,
read_value = None,
peaks_nr = None,
return_corr_coef = False,
finetune_enformer = False,
finetune_enformer_ln_only = False,
unfreeze_enformer_last_n_layers = 0,
head = None
):
device = seq.device
# prepare enformer for training
# - set to eval and no_grad if not fine-tuning
# - always freeze the batchnorms
freeze_batchnorms_(self.enformer)
enformer_forward = self.enformer.forward
if finetune_enformer:
enformer_context = null_context()
elif finetune_enformer_ln_only:
enformer_context = null_context()
freeze_all_but_layernorms_(self.enformer)
else:
self.enformer.eval()
enformer_context = torch.no_grad()
enformer_forward_wrapper = cache_enformer_forward if self.training else identity
enformer_forward = enformer_forward_wrapper(enformer_forward)
# if unfreezing last N layers of enformer
if unfreeze_enformer_last_n_layers > 0:
unfreeze_last_n_layers_(self.enformer, unfreeze_enformer_last_n_layers)
# genetic sequence embedding
with enformer_context:
seq_embed = enformer_forward(seq, return_only_embeddings = True)
# if training off an enformer head
if exists(head):
return self.forward_enformer_head(seq_embed, head = head, target = target)
# norm sequence embedding
seq_embed = self.norm_seq_embed(seq_embed)
for self_attn_block in self.genome_self_attns:
seq_embed = self_attn_block(seq_embed)
# protein related embeddings
if self.use_aa_embeds:
assert exists(aa), 'aa must be passed in as tensor of integers from 0 - 20 (20 being padding)'
aa_embed, aa_mask = self.get_aa_embed(aa, device = seq.device)
else:
assert exists(aa_embed), 'protein embeddings must be given as aa_embed'
# free text embeddings, for cell types and experimental params
if not exists(contextual_embed):
assert self.use_free_text_context, 'use_free_text_context must be set to True if one is not passing in contextual_embed tensor'
assert exists(contextual_free_text), 'context must be supplied as array of strings as contextual_free_text if contextual_embed is not supplied'
contextual_embed = get_text_repr(
contextual_free_text,
return_cls_token = (self.free_text_embed_method == 'cls'),
device = seq.device
)
# contextual conditioning
# film or squeeze-excite for both genetic and protein sequences
if exists(self.cond_genetic):
seq_embed = self.cond_genetic(seq_embed, contextual_embed)
if exists(self.cond_protein):
aa_embed = self.cond_protein(aa_embed, contextual_embed, mask = aa_mask)
# joint cross attention
for cross_attn in self.joint_cross_attns:
seq_embed, aa_embed = cross_attn(
seq_embed,
context = aa_embed,
context_mask = aa_mask
)
# project both embeddings into shared latent space
interactions = self.filip(
seq_embed,
aa_embed,
context_mask = aa_mask
)
# linear with hypergrid conditioning
if exists(self.linear_with_hypergrid):
logits = self.linear_with_hypergrid(interactions, context = contextual_embed)
else:
logits = self.linear_to_logits(interactions)
# to *-seq prediction
pred = self.to_pred(logits)
if not exists(target):
return pred
if exists(target) and return_corr_coef:
return pearson_corr_coef(pred, target)
if exists(target) and not self.binary_target:
return self.loss_fn(pred, target)
# binary loss w/ optional auxiliary loss
loss = self.loss_fn(pred, target.float())
if not self.aux_read_value_loss:
return loss, torch.Tensor([0.]).to(device)
        # auxiliary read value prediction loss
assert exists(read_value) and exists(peaks_nr), 'peaks NR must be supplied if doing auxiliary read value loss'
aux_loss = self.to_read_value_aux_loss(
logits,
peaks_nr,
read_value = read_value
)
return loss, aux_loss
|
tf-bind-transformer-main
|
tf_bind_transformer/tf_bind_transformer.py
|
import os
from shutil import rmtree
import torch
import hashlib
from functools import wraps
from pathlib import Path
def exists(val):
return val is not None
# constants
CACHE_PATH = Path(os.getenv('TF_BIND_CACHE_PATH', os.path.expanduser('~/.cache.tf.bind.transformer')))
CACHE_PATH.mkdir(exist_ok = True, parents = True)
CLEAR_CACHE = exists(os.getenv('CLEAR_CACHE', None))
VERBOSE = exists(os.getenv('VERBOSE', None))
# helper functions
def log(s):
if not VERBOSE:
return
print(s)
def md5_hash_fn(s):
encoded = s.encode('utf-8')
return hashlib.md5(encoded).hexdigest()
# run once function
GLOBAL_RUN_RECORDS = dict()
def run_once(global_id = None):
def outer(fn):
has_ran_local = False
output = None
@wraps(fn)
def inner(*args, **kwargs):
nonlocal has_ran_local
nonlocal output
has_ran = GLOBAL_RUN_RECORDS.get(global_id, False) if exists(global_id) else has_ran_local
if has_ran:
return output
output = fn(*args, **kwargs)
if exists(global_id):
GLOBAL_RUN_RECORDS[global_id] = True
            has_ran_local = True
return output
return inner
return outer
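# usage sketch (hypothetical function name): guard one-time initialization; later calls return the cached output
#
# @run_once('load_heavy_model')
# def load_heavy_model():
#     print('loading...')  # printed only on the first call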
# caching function
def cache_fn(
fn,
path = '',
hash_fn = md5_hash_fn,
clear = False or CLEAR_CACHE,
should_cache = True
):
if not should_cache:
return fn
(CACHE_PATH / path).mkdir(parents = True, exist_ok = True)
@run_once(path)
def clear_cache_folder_():
        rmtree(str(CACHE_PATH / path))
(CACHE_PATH / path).mkdir(parents = True, exist_ok = True)
@wraps(fn)
def inner(t, *args, __cache_key = None, **kwargs):
if clear:
clear_cache_folder_()
cache_str = __cache_key if exists(__cache_key) else t
key = hash_fn(cache_str)
entry_path = CACHE_PATH / path / f'{key}.pt'
if entry_path.exists():
log(f'cache hit: fetching {t} from {str(entry_path)}')
return torch.load(str(entry_path))
out = fn(t, *args, **kwargs)
log(f'saving: {t} to {str(entry_path)}')
torch.save(out, str(entry_path))
return out
return inner
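# usage sketch (hypothetical function name): cache_fn memoizes a string -> tensor function on disk,
# keyed by the md5 of the input (or of __cache_key when provided), under CACHE_PATH / path
#
# def embed_text(s):
#     return torch.randn(4)              # stand-in for an expensive computation
#
# cached_embed_text = cache_fn(embed_text, path = 'demo')
# a = cached_embed_text('hello')         # computed and saved as <md5>.pt under the cache folder
# b = cached_embed_text('hello')         # subsequent calls load the saved tensor from disk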
|
tf-bind-transformer-main
|
tf_bind_transformer/cache_utils.py
|
import torch
from torch import nn
from tf_bind_transformer.optimizer import get_optimizer
from tf_bind_transformer.data import read_bed, collate_dl_outputs, get_dataloader, remap_df_add_experiment_target_cell
from tf_bind_transformer.data import RemapAllPeakDataset, NegativePeakDataset, ScopedNegativePeakDataset
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
# helpers for logging and accumulating values across gradient steps
def accum_log(log, new_logs):
for key, new_value in new_logs.items():
old_value = log.get(key, 0.)
log[key] = old_value + new_value
return log
# simple Trainer class
class Trainer(nn.Module):
def __init__(
self,
model,
*,
remap_bed_file,
negative_bed_file,
factor_fasta_folder,
fasta_file,
train_chromosome_ids,
valid_chromosome_ids,
batch_size,
context_length,
lr = 3e-4,
wd = 0.1,
validate_every = 250,
grad_clip_norm = None,
grad_accum_every = 1,
held_out_targets = [],
held_out_cell_types = [],
exclude_targets = [],
exclude_cell_types = [],
shuffle = False,
train_sample_frac = 1.,
valid_sample_frac = 1.,
remap_sample_frac = 1.,
shift_aug_range = (-2, 2),
rc_aug = False,
experiments_json_path = None,
read_value_aux_loss = False,
checkpoint_filename = './checkpoint.pt',
include_scoped_negs = False,
scoped_negs_remap_bed_path = None,
scoped_negs_path = None,
scoped_negs_exts = '.bed.bool.npy',
include_biotypes_metadata_in_context = False,
biotypes_metadata_path = None,
include_biotypes_metadata_columns = ['germ_layer', 'cellline_cat'],
biotypes_metadata_delimiter = ' | ',
balance_sampling_by_target = True,
valid_balance_sampling_by_target = None,
):
super().__init__()
self.model = model
valid_balance_sampling_by_target = default(valid_balance_sampling_by_target, balance_sampling_by_target)
remap_df = read_bed(remap_bed_file)
if remap_sample_frac < 1:
remap_df = remap_df.sample(frac = remap_sample_frac)
remap_df = remap_df_add_experiment_target_cell(remap_df)
neg_df = read_bed(negative_bed_file)
self.ds = RemapAllPeakDataset(
remap_df = remap_df,
fasta_file = fasta_file,
factor_fasta_folder = factor_fasta_folder,
filter_chromosome_ids = train_chromosome_ids,
exclude_targets = [*held_out_targets, *exclude_targets],
exclude_cell_types = [*held_out_cell_types, *exclude_cell_types],
context_length = context_length,
remap_df_frac = train_sample_frac,
shift_augs = shift_aug_range,
rc_aug = rc_aug,
experiments_json_path = experiments_json_path,
include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,
biotypes_metadata_path = biotypes_metadata_path,
include_biotypes_metadata_columns = include_biotypes_metadata_columns,
biotypes_metadata_delimiter = biotypes_metadata_delimiter,
balance_sampling_by_target = balance_sampling_by_target
)
self.neg_ds = NegativePeakDataset(
remap_df = remap_df,
negative_df = neg_df,
fasta_file = fasta_file,
factor_fasta_folder = factor_fasta_folder,
filter_chromosome_ids = train_chromosome_ids,
exclude_targets = [*held_out_targets, *exclude_targets],
exclude_cell_types = [*held_out_cell_types, *exclude_cell_types],
context_length = context_length,
experiments_json_path = experiments_json_path,
include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,
biotypes_metadata_path = biotypes_metadata_path,
include_biotypes_metadata_columns = include_biotypes_metadata_columns,
biotypes_metadata_delimiter = biotypes_metadata_delimiter,
balance_sampling_by_target = balance_sampling_by_target
)
self.valid_ds = RemapAllPeakDataset(
remap_df = remap_df,
fasta_file = fasta_file,
factor_fasta_folder = factor_fasta_folder,
include_targets = held_out_targets,
include_cell_types = held_out_cell_types,
exclude_targets = exclude_targets,
exclude_cell_types = exclude_cell_types,
filter_chromosome_ids = valid_chromosome_ids,
context_length = context_length,
remap_df_frac = valid_sample_frac,
shift_augs = shift_aug_range,
rc_aug = rc_aug,
experiments_json_path = experiments_json_path,
include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,
biotypes_metadata_path = biotypes_metadata_path,
include_biotypes_metadata_columns = include_biotypes_metadata_columns,
biotypes_metadata_delimiter = biotypes_metadata_delimiter,
balance_sampling_by_target = valid_balance_sampling_by_target
)
self.valid_neg_ds = NegativePeakDataset(
remap_df = remap_df,
negative_df = neg_df,
fasta_file = fasta_file,
factor_fasta_folder = factor_fasta_folder,
filter_chromosome_ids = valid_chromosome_ids,
include_targets = held_out_targets,
include_cell_types = held_out_cell_types,
exclude_targets = exclude_targets,
exclude_cell_types = exclude_cell_types,
context_length = context_length,
experiments_json_path = experiments_json_path,
include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,
biotypes_metadata_path = biotypes_metadata_path,
include_biotypes_metadata_columns = include_biotypes_metadata_columns,
biotypes_metadata_delimiter = biotypes_metadata_delimiter,
balance_sampling_by_target = valid_balance_sampling_by_target
)
self.include_scoped_negs = include_scoped_negs
self.dl = get_dataloader(self.ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)
self.neg_dl = get_dataloader(self.neg_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)
if include_scoped_negs:
self.scoped_neg_ds = ScopedNegativePeakDataset(
fasta_file = fasta_file,
factor_fasta_folder = factor_fasta_folder,
numpy_folder_with_scoped_negatives = scoped_negs_path,
remap_bed_file = scoped_negs_remap_bed_path,
exts = scoped_negs_exts,
exclude_targets = [*held_out_targets, *exclude_targets],
exclude_cell_types = [*held_out_cell_types, *exclude_cell_types],
filter_chromosome_ids = train_chromosome_ids,
include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,
biotypes_metadata_path = biotypes_metadata_path,
include_biotypes_metadata_columns = include_biotypes_metadata_columns,
biotypes_metadata_delimiter = biotypes_metadata_delimiter,
balance_sampling_by_target = balance_sampling_by_target
)
self.scoped_neg_dl = get_dataloader(self.scoped_neg_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)
self.valid_dl = get_dataloader(self.valid_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)
self.valid_neg_dl = get_dataloader(self.valid_neg_ds, cycle_iter = True, shuffle = shuffle, batch_size = batch_size)
self.aux_read_value_loss = model.aux_read_value_loss
if self.aux_read_value_loss:
print(f'training with read value aux loss')
self.optim = get_optimizer(model.parameters(), lr = lr, wd = wd)
self.grad_accum_every = grad_accum_every
self.grad_clip_norm = grad_clip_norm
self.validate_every = validate_every
self.register_buffer('steps', torch.Tensor([0.]))
self.checkpoint_filename = checkpoint_filename
def forward(
self,
finetune_enformer_ln_only = True,
**kwargs
):
grad_accum_every = self.grad_accum_every
curr_step = int(self.steps.item())
self.model.train()
log = {}
for _ in range(self.grad_accum_every):
dl_outputs = [next(self.dl), next(self.neg_dl)]
if self.include_scoped_negs:
dl_outputs.append(next(self.scoped_neg_dl))
seq, tf_aa, contextual_texts, peaks_nr, read_value, binary_target = collate_dl_outputs(*dl_outputs)
seq, binary_target, read_value, peaks_nr = seq.cuda(), binary_target.cuda(), read_value.cuda(), peaks_nr.cuda()
loss, aux_loss = self.model(
seq,
target = binary_target,
aa = tf_aa,
contextual_free_text = contextual_texts,
finetune_enformer_ln_only = finetune_enformer_ln_only,
read_value = read_value,
peaks_nr = peaks_nr,
**kwargs
)
total_loss = self.model.combine_losses(loss, aux_loss)
log = accum_log(log, {
'loss': loss.item() / grad_accum_every,
'aux_loss': aux_loss.item() / grad_accum_every,
'total_loss': total_loss.item() / grad_accum_every
})
(total_loss / self.grad_accum_every).backward()
print(f'{curr_step} loss: {log["total_loss"]}')
if exists(self.grad_clip_norm):
nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip_norm)
self.optim.step()
self.optim.zero_grad()
if (curr_step % self.validate_every) == 0:
self.model.eval()
for _ in range(self.grad_accum_every):
seq, tf_aa, contextual_texts, peaks_nr, read_value, binary_target = collate_dl_outputs(next(self.valid_dl), next(self.valid_neg_dl))
seq, binary_target = seq.cuda(), binary_target.cuda()
valid_logits = self.model(
seq,
aa = tf_aa,
contextual_free_text = contextual_texts,
)
valid_loss = self.model.loss_fn(valid_logits, binary_target.float())
valid_accuracy = ((valid_logits.sigmoid() > 0.5).int() == binary_target).sum() / (binary_target.numel())
log = accum_log(log, {
'valid_loss': valid_loss.item() / grad_accum_every,
'valid_accuracy': valid_accuracy.item() / grad_accum_every
})
print(f'{curr_step} valid loss: {log["valid_loss"]}')
print(f'{curr_step} valid accuracy: {log["valid_accuracy"]}')
if curr_step > 0:
torch.save(self.model.state_dict(), self.checkpoint_filename)
self.steps += 1
return log
|
tf-bind-transformer-main
|
tf_bind_transformer/training_utils.py
|
from pathlib import Path
import polars as pl
import numpy as np
import torch
from torch.utils.data import Dataset, DataLoader
from tf_bind_transformer.data import FactorProteinDataset, ContextDataset, cast_list, filter_df_by_tfactor_fastas
from tf_bind_transformer.data import pl_isin, pl_notin, fetch_experiments_index, parse_exp_target_cell, read_bed, cycle, filter_by_col_isin
from tf_bind_transformer.data import CHR_IDS, CHR_NAMES, get_chr_names
from enformer_pytorch import FastaInterval
try:
import pyBigWig
except ImportError:
print('pyBigWig needs to be installed - conda install pyBigWig')
exit()
def exists(val):
return val is not None
def chip_atlas_add_experiment_target_cell(
df,
col_target = 'column_4',
col_cell_type = 'column_5'
):
df = df.clone()
targets = df.select(col_target)
targets = targets.to_series(0).str.to_uppercase().rename('target')
df.insert_at_idx(2, targets)
cell_type = df.select(col_cell_type)
cell_type = cell_type.rename({col_cell_type: 'cell_type'}).to_series(0)
df.insert_at_idx(2, cell_type)
return df
# dataset for CHIP ATLAS - all peaks
class BigWigDataset(Dataset):
def __init__(
self,
*,
factor_fasta_folder,
bigwig_folder,
enformer_loci_path,
fasta_file,
annot_file = None,
filter_chromosome_ids = None,
exclude_targets = None,
include_targets = None,
exclude_cell_types = None,
include_cell_types = None,
df_frac = 1.,
experiments_json_path = None,
include_biotypes_metadata_in_context = False,
biotypes_metadata_path = None,
filter_sequences_by = None,
include_biotypes_metadata_columns = [],
biotypes_metadata_delimiter = ' | ',
only_ref = ['mm10', 'hg38'],
factor_species_priority = ['human', 'mouse'],
downsample_factor = 128,
target_length = 896,
bigwig_reduction_type = 'sum',
**kwargs
):
super().__init__()
assert exists(annot_file)
if not exists(bigwig_folder):
self.invalid = True
self.ntargets = 0
return
bigwig_folder = Path(bigwig_folder)
assert bigwig_folder.exists(), 'bigwig folder does not exist'
bw_experiments = [p.stem for p in bigwig_folder.glob('*.bw')]
assert len(bw_experiments) > 0, 'no bigwig files found in bigwig folder'
loci = read_bed(enformer_loci_path)
annot_df = pl.read_csv(annot_file, sep = "\t", has_headers = False, columns = list(map(lambda i: f'column_{i + 1}', range(17))))
annot_df = annot_df.filter(pl_isin('column_2', only_ref))
annot_df = filter_by_col_isin(annot_df, 'column_1', bw_experiments)
if df_frac < 1:
annot_df = annot_df.sample(frac = df_frac)
dataset_chr_ids = CHR_IDS
if exists(filter_chromosome_ids):
dataset_chr_ids = dataset_chr_ids.intersection(set(filter_chromosome_ids))
# filtering loci by chromosomes
# as well as training or validation
loci = loci.filter(pl_isin('column_1', get_chr_names(dataset_chr_ids)))
if exists(filter_sequences_by):
col_name, col_val = filter_sequences_by
loci = loci.filter(pl.col(col_name) == col_val)
self.factor_ds = FactorProteinDataset(factor_fasta_folder, species_priority = factor_species_priority)
exp_ids = set(annot_df.get_column('column_1').to_list())
annot_df = chip_atlas_add_experiment_target_cell(annot_df)
annot_df = filter_df_by_tfactor_fastas(annot_df, factor_fasta_folder)
filtered_exp_ids = set(annot_df.get_column('column_1').to_list())
filtered_out_exp_ids = exp_ids - filtered_exp_ids
print(f'{", ".join(only_ref)} - {len(filtered_out_exp_ids)} experiments filtered out by lack of transcription factor fastas', filtered_out_exp_ids)
# filter dataset by inclusion and exclusion list of targets
# (<all available targets> intersect <include targets>) subtract <exclude targets>
include_targets = cast_list(include_targets)
exclude_targets = cast_list(exclude_targets)
if include_targets:
annot_df = annot_df.filter(pl_isin('target', include_targets))
if exclude_targets:
annot_df = annot_df.filter(pl_notin('target', exclude_targets))
# filter dataset by inclusion and exclusion list of cell types
# same logic as for targets
include_cell_types = cast_list(include_cell_types)
exclude_cell_types = cast_list(exclude_cell_types)
        # TODO: reformulate this
        # cell_type should probably be column_6
if include_cell_types:
annot_df = annot_df.filter(pl_isin('cell_type', include_cell_types))
if exclude_cell_types:
annot_df = annot_df.filter(pl_notin('cell_type', exclude_cell_types))
self.fasta = FastaInterval(fasta_file = fasta_file, **kwargs)
self.df = loci
self.annot = annot_df
self.ntargets = self.annot.shape[0]
# bigwigs
self.bigwigs = [pyBigWig.open(str(bigwig_folder / f'{str(i)}.bw')) for i in self.annot.get_column("column_1")]
self.downsample_factor = downsample_factor
self.target_length = target_length
self.bigwig_reduction_type = bigwig_reduction_type
self.invalid = False
def __len__(self):
if self.invalid:
return 0
return len(self.df) * self.ntargets
def __getitem__(self, ind):
# TODO return all targets from an individual enformer loci
chr_name, begin, end, _ = self.df.row(ind % self.df.shape[0])
targets = self.annot.select('target').to_series(0)
cell_types = self.annot.select('cell_type').to_series(0)
ix_target = ind // self.df.shape[0]
#experiment, target, cell_type = parse_exp_target_cell(experiment_target_cell_type)
target = targets[ix_target]
context_str = cell_types[ix_target]
exp_bw = self.bigwigs[ix_target]
# figure out ref and fetch appropriate sequence
aa_seq = self.factor_ds[target]
seq = self.fasta(chr_name, begin, end)
# calculate bigwig
# properly downsample and then crop
output = np.array(exp_bw.values(chr_name, begin, end))
output = output.reshape((-1, self.downsample_factor))
if self.bigwig_reduction_type == 'mean':
om = np.nanmean(output, axis = 1)
elif self.bigwig_reduction_type == 'sum':
om = np.nansum(output, axis = 1)
else:
raise ValueError(f'unknown reduction type {self.bigwig_reduction_type}')
output_length = output.shape[0]
        assert output_length >= self.target_length, f'the bigwig output length {output_length} cannot be less than the target length {self.target_length}'
        trim = (output_length - self.target_length) // 2
        if trim > 0:
            om = om[trim:-trim]
np.nan_to_num(om, copy = False)
label = torch.Tensor(om)
return seq, aa_seq, context_str, label
# BigWig dataset for tracks only
class BigWigTracksOnlyDataset(Dataset):
def __init__(
self,
*,
bigwig_folder,
enformer_loci_path,
fasta_file,
ref,
annot_file = None,
filter_chromosome_ids = None,
downsample_factor = 128,
target_length = 896,
bigwig_reduction_type = 'sum',
filter_sequences_by = None,
**kwargs
):
super().__init__()
assert exists(annot_file)
if not exists(bigwig_folder):
self.invalid = True
self.ntargets = 0
return
bigwig_folder = Path(bigwig_folder)
assert bigwig_folder.exists(), 'bigwig folder does not exist'
bw_experiments = [p.stem for p in bigwig_folder.glob('*.bw')]
assert len(bw_experiments) > 0, 'no bigwig files found in bigwig folder'
loci = read_bed(enformer_loci_path)
annot_df = pl.read_csv(annot_file, sep = "\t", has_headers = False, columns = list(map(lambda i: f'column_{i + 1}', range(17))))
annot_df = annot_df.filter(pl.col('column_2') == ref)
annot_df = filter_by_col_isin(annot_df, 'column_1', bw_experiments)
dataset_chr_ids = CHR_IDS
if exists(filter_chromosome_ids):
dataset_chr_ids = dataset_chr_ids.intersection(set(filter_chromosome_ids))
# filtering loci by chromosomes
# as well as training or validation
loci = loci.filter(pl_isin('column_1', get_chr_names(dataset_chr_ids)))
if exists(filter_sequences_by):
col_name, col_val = filter_sequences_by
loci = loci.filter(pl.col(col_name) == col_val)
self.fasta = FastaInterval(fasta_file = fasta_file, **kwargs)
self.df = loci
self.annot = annot_df
self.ntargets = self.annot.shape[0]
# bigwigs
self.bigwigs = [(str(i), pyBigWig.open(str(bigwig_folder / f'{str(i)}.bw'))) for i in self.annot.get_column("column_1")]
self.downsample_factor = downsample_factor
self.target_length = target_length
self.bigwig_reduction_type = bigwig_reduction_type
self.invalid = False
def __len__(self):
if self.invalid:
return 0
return len(self.df) * int(self.ntargets > 0)
def __getitem__(self, ind):
chr_name, begin, end, _ = self.df.row(ind)
# figure out ref and fetch appropriate sequence
seq = self.fasta(chr_name, begin, end)
# calculate bigwig
# properly downsample and then crop
all_bw_values = []
for bw_path, bw in self.bigwigs:
try:
bw_values = bw.values(chr_name, begin, end)
all_bw_values.append(bw_values)
            except Exception:
print(f'hitting invalid range for {bw_path} - ({chr_name}, {begin}, {end})')
exit()
output = np.stack(all_bw_values, axis = -1)
output = output.reshape((-1, self.downsample_factor, self.ntargets))
if self.bigwig_reduction_type == 'mean':
om = np.nanmean(output, axis = 1)
elif self.bigwig_reduction_type == 'sum':
om = np.nansum(output, axis = 1)
else:
raise ValueError(f'unknown reduction type {self.bigwig_reduction_type}')
output_length = output.shape[0]
        assert output_length >= self.target_length, f'the bigwig output length {output_length} cannot be less than the target length {self.target_length}'
        trim = (output_length - self.target_length) // 2
        if trim > 0:
            om = om[trim:-trim]
np.nan_to_num(om, copy = False)
label = torch.Tensor(om)
return seq, label
# data loader
def bigwig_collate_fn(data):
seq, aa_seq, context_str, labels = list(zip(*data))
return torch.stack(seq), tuple(aa_seq), tuple(context_str), torch.stack(labels)
def get_bigwig_dataloader(ds, cycle_iter = False, **kwargs):
dataset_len = len(ds)
    batch_size = kwargs.get('batch_size', 1)
    drop_last = dataset_len > batch_size
dl = DataLoader(ds, collate_fn = bigwig_collate_fn, drop_last = drop_last, **kwargs)
wrapper = cycle if cycle_iter else iter
return wrapper(dl)
def get_bigwig_tracks_dataloader(ds, cycle_iter = False, **kwargs):
dataset_len = len(ds)
    batch_size = kwargs.get('batch_size', 1)
    drop_last = dataset_len > batch_size
dl = DataLoader(ds, drop_last = drop_last, **kwargs)
wrapper = cycle if cycle_iter else iter
return wrapper(dl)
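# a minimal usage sketch (illustrative only, not part of the original file) - every path below is a
# hypothetical placeholder for real ChIP-Atlas bigwigs, an Enformer loci bed file, a genome fasta
# and the transcription factor fasta folder
#
# ds = BigWigDataset(
#     factor_fasta_folder = './tfactor.fastas',
#     bigwig_folder = './bigwigs',
#     enformer_loci_path = './sequences.bed',
#     fasta_file = './hg38.fa',
#     annot_file = './chip_atlas_annotations.tsv'
# )
# dl = get_bigwig_dataloader(ds, cycle_iter = True, batch_size = 2)
# seq, aa_seq, context_str, label = next(dl)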
|
tf-bind-transformer-main
|
tf_bind_transformer/data_bigwig.py
|
from torch.optim import AdamW
def separate_weight_decayable_params(params):
no_wd_params = set([param for param in params if param.ndim < 2])
wd_params = set(params) - no_wd_params
return wd_params, no_wd_params
def get_optimizer(params, lr = 3e-4, wd = 1e-1, filter_by_requires_grad = False):
if filter_by_requires_grad:
params = list(filter(lambda t: t.requires_grad, params))
params = set(params)
wd_params, no_wd_params = separate_weight_decayable_params(params)
param_groups = [
{'params': list(wd_params)},
{'params': list(no_wd_params), 'weight_decay': 0},
]
return AdamW(param_groups, lr = lr, weight_decay = wd)
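# minimal usage sketch (illustrative, not part of the original file) - parameters with
# ndim < 2 (biases, layernorm gains) are routed into the no-weight-decay group
if __name__ == '__main__':
    import torch.nn as nn
    model = nn.Sequential(nn.Linear(16, 32), nn.LayerNorm(32), nn.Linear(32, 1))
    opt = get_optimizer(model.parameters(), lr = 3e-4, wd = 1e-1)
    print(opt)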
|
tf-bind-transformer-main
|
tf_bind_transformer/optimizer.py
|
from Bio import SeqIO
from random import choice, randrange
from pathlib import Path
import functools
import polars as pl
from collections import defaultdict
import os
import json
import shutil
import numpy as np
import torch
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from tf_bind_transformer.gene_utils import parse_gene_name
from enformer_pytorch import FastaInterval
from pyfaidx import Fasta
import pybedtools
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def find_first_index(cond, arr):
for ind, el in enumerate(arr):
if cond(el):
return ind
return -1
def cast_list(val = None):
if not exists(val):
return []
return [val] if not isinstance(val, (tuple, list)) else val
def read_bed(path):
return pl.read_csv(path, sep = '\t', has_headers = False)
def save_bed(df, path):
df.to_csv(path, sep = '\t', has_header = False)
def parse_exp_target_cell(exp_target_cell):
experiment, target, *cell_type = exp_target_cell.split('.')
cell_type = '.'.join(cell_type) # handle edge case where cell type contains periods
return experiment, target, cell_type
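# e.g. (hypothetical identifiers) parse_exp_target_cell('GSE123.CTCF.K-562.lymphoblast')
# returns ('GSE123', 'CTCF', 'K-562.lymphoblast') - everything after the second period is treated as the cell type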
# fetch index of datasets, for providing the sequencing reads
# for auxiliary read value prediction
def fetch_experiments_index(path):
if not exists(path):
return dict()
exp_path = Path(path)
assert exp_path.exists(), 'path to experiments json must exist'
root_json = json.loads(exp_path.read_text())
experiments = root_json['experiments']
index = {}
for experiment in experiments:
exp_id = experiment['accession']
if 'details' not in experiment:
continue
details = experiment['details']
if 'datasets' not in details:
continue
datasets = details['datasets']
for dataset in datasets:
dataset_name = dataset['dataset_name']
index[dataset_name] = dataset['peaks_NR']
return index
# fetch protein sequences by gene name and uniprot id
class FactorProteinDatasetByUniprotID(Dataset):
def __init__(
self,
folder,
species_priority = ['human', 'mouse']
):
super().__init__()
fasta_paths = [*Path(folder).glob('*.fasta')]
assert len(fasta_paths) > 0, f'no fasta files found at {folder}'
self.paths = fasta_paths
self.index_by_id = dict()
for path in fasta_paths:
gene, uniprotid, *_ = path.stem.split('.')
self.index_by_id[uniprotid] = path
def __len__(self):
return len(self.paths)
def __getitem__(self, uid):
index = self.index_by_id
if uid not in index:
return None
entry = index[uid]
fasta = SeqIO.read(entry, 'fasta')
return str(fasta.seq)
# fetch
class FactorProteinDataset(Dataset):
def __init__(
self,
folder,
species_priority = ['human', 'mouse', 'unknown'],
return_tuple_only = False
):
super().__init__()
fasta_paths = [*Path(folder).glob('*.fasta')]
assert len(fasta_paths) > 0, f'no fasta files found at {folder}'
self.paths = fasta_paths
index_by_gene = defaultdict(list)
self.return_tuple_only = return_tuple_only # whether to return tuple even if there is only one subunit
for path in fasta_paths:
gene, uniprotid, *_ = path.stem.split('.')
index_by_gene[gene].append(path)
# prioritize fasta files of certain species
# but allow for appropriate fallback, by order of species_priority
get_species_from_path = lambda p: p.stem.split('_')[-1].lower() if '_' in p.stem else 'unknown'
filtered_index_by_gene = defaultdict(list)
for gene, gene_paths in index_by_gene.items():
species_count = list(map(lambda specie: len(list(filter(lambda p: get_species_from_path(p) == specie, gene_paths))), species_priority))
species_ind_non_zero = find_first_index(lambda t: t > 0, species_count)
if species_ind_non_zero == -1:
continue
species = species_priority[species_ind_non_zero]
filtered_index_by_gene[gene] = list(filter(lambda p: get_species_from_path(p) == species, gene_paths))
self.index_by_gene = filtered_index_by_gene
def __len__(self):
return len(self.paths)
def __getitem__(self, unparsed_gene_name):
index = self.index_by_gene
genes = parse_gene_name(unparsed_gene_name)
seqs = []
for gene in genes:
entry = index[gene]
if len(entry) == 0:
print(f'no entries for {gene}')
continue
path = choice(entry) if isinstance(entry, list) else entry
fasta = SeqIO.read(path, 'fasta')
seqs.append(str(fasta.seq))
seqs = tuple(seqs)
if len(seqs) == 1 and not self.return_tuple_only:
return seqs[0]
return seqs
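# e.g. FactorProteinDataset('./tfactor.fastas')['CTCF'] would return the amino acid sequence for CTCF,
# assuming a CTCF.<uniprot id>.fasta file exists in the folder; gene names that parse into multiple
# subunits come back as a tuple of sequences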
# remap dataframe functions
def get_chr_names(ids):
return set(map(lambda t: f'chr{t}', ids))
CHR_IDS = set([*range(1, 23), 'X'])
CHR_NAMES = get_chr_names(CHR_IDS)
def remap_df_add_experiment_target_cell(df, col = 'column_4'):
df = df.clone()
exp_id = df.select([pl.col(col).str.extract(r"^([\w\-]+)\.*")])
exp_id = exp_id.rename({col: 'experiment'}).to_series(0)
df.insert_at_idx(3, exp_id)
targets = df.select([pl.col(col).str.extract(r"[\w\-]+\.([\w\-]+)\.[\w\-]+")])
targets = targets.rename({col: 'target'}).to_series(0)
df.insert_at_idx(3, targets)
cell_type = df.select([pl.col(col).str.extract(r"^.*\.([\w\-]+)$")])
cell_type = cell_type.rename({col: 'cell_type'}).to_series(0)
df.insert_at_idx(3, cell_type)
return df
def pl_isin(col, arr):
equalities = list(map(lambda t: pl.col(col) == t, arr))
return functools.reduce(lambda a, b: a | b, equalities)
def pl_notin(col, arr):
equalities = list(map(lambda t: pl.col(col) != t, arr))
return functools.reduce(lambda a, b: a & b, equalities)
def filter_by_col_isin(df, col, arr, chunk_size = 25):
"""
    polars seems to have a bug where OR-ing more than 25 conditions together freezes (for pl_isin)
    so filter in chunks of 25 and then concatenate the results instead
"""
dataframes = []
for i in range(0, len(arr), chunk_size):
sub_arr = arr[i:(i + chunk_size)]
filtered_df = df.filter(pl_isin(col, sub_arr))
dataframes.append(filtered_df)
return pl.concat(dataframes)
def filter_bed_file_by_(bed_file_1, bed_file_2, output_file):
# generated by OpenAI Codex
bed_file_1_bedtool = pybedtools.BedTool(bed_file_1)
bed_file_2_bedtool = pybedtools.BedTool(bed_file_2)
bed_file_1_bedtool_intersect_bed_file_2_bedtool = bed_file_1_bedtool.intersect(bed_file_2_bedtool, v = True)
bed_file_1_bedtool_intersect_bed_file_2_bedtool.saveas(output_file)
def filter_df_by_tfactor_fastas(df, folder):
files = [*Path(folder).glob('**/*.fasta')]
present_target_names = set([f.stem.split('.')[0] for f in files])
all_df_targets = df.get_column('target').unique().to_list()
all_df_targets_with_parsed_name = [(target, parse_gene_name(target)) for target in all_df_targets]
unknown_targets = [target for target, parsed_target_name in all_df_targets_with_parsed_name for parsed_target_name_sub_el in parsed_target_name if parsed_target_name_sub_el not in present_target_names]
if len(unknown_targets) > 0:
df = df.filter(pl_notin('target', unknown_targets))
return df
def generate_random_ranges_from_fasta(
fasta_file,
*,
output_filename = 'random-ranges.bed',
context_length,
filter_bed_files = [],
num_entries_per_key = 10,
keys = None,
):
fasta = Fasta(fasta_file)
tmp_file = f'/tmp/{output_filename}'
with open(tmp_file, 'w') as f:
for chr_name in sorted(CHR_NAMES):
print(f'generating ranges for {chr_name}')
if chr_name not in fasta:
print(f'{chr_name} not found in fasta file')
continue
chromosome = fasta[chr_name]
chromosome_length = len(chromosome)
start = np.random.randint(0, chromosome_length - context_length, (num_entries_per_key,))
end = start + context_length
start_and_end = np.stack((start, end), axis = -1)
for row in start_and_end.tolist():
start, end = row
f.write('\t'.join((chr_name, str(start), str(end))) + '\n')
for file in filter_bed_files:
filter_bed_file_by_(tmp_file, file, tmp_file)
shutil.move(tmp_file, f'./{output_filename}')
print('success')
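# usage sketch (hypothetical paths):
# generate_random_ranges_from_fasta(
#     './hg38.fa',
#     context_length = 4096,
#     filter_bed_files = ['./remap2022_crm_macs2_hg38_v1_0.bed'],
#     num_entries_per_key = 10
# )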
# context string creator class
class ContextDataset(Dataset):
def __init__(
self,
*,
biotypes_metadata_path = None,
include_biotypes_metadata_in_context = False,
include_biotypes_metadata_columns = [],
biotypes_metadata_delimiter = ' | ',
):
self.include_biotypes_metadata_in_context = include_biotypes_metadata_in_context
self.include_biotypes_metadata_columns = include_biotypes_metadata_columns
self.biotypes_metadata_delimiter = biotypes_metadata_delimiter
if include_biotypes_metadata_in_context:
            assert len(self.include_biotypes_metadata_columns) > 0, 'must have at least one biotype metadata column to include'
assert exists(biotypes_metadata_path), 'biotypes metadata path must be supplied if to be included in context string'
p = Path(biotypes_metadata_path)
if p.suffix == '.csv':
sep = ','
elif p.suffix == '.tsv':
sep = '\t'
else:
raise ValueError(f'invalid suffix {p.suffix} for biotypes')
self.df = pl.read_csv(str(p), sep = sep)
    def __len__(self):
        return len(self.df) if self.include_biotypes_metadata_in_context else -1
def __getitem__(self, biotype):
if not self.include_biotypes_metadata_in_context:
return biotype
col_indices = list(map(self.df.columns.index, self.include_biotypes_metadata_columns))
filtered = self.df.filter(pl.col('biotype') == biotype)
if len(filtered) == 0:
print(f'no rows found for {biotype} in biotype metadata file')
return biotype
row = filtered.row(0)
columns = list(map(lambda t: row[t], col_indices))
context_string = self.biotypes_metadata_delimiter.join([biotype, *columns])
return context_string
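# e.g. ContextDataset()['K562'] simply echoes the cell type string back, since no biotype metadata
# is configured; with include_biotypes_metadata_in_context = True the selected metadata columns get
# joined onto it with the delimiter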
# dataset for remap data - all peaks
class RemapAllPeakDataset(Dataset):
def __init__(
self,
*,
factor_fasta_folder,
bed_file = None,
remap_df = None,
filter_chromosome_ids = None,
exclude_targets = None,
include_targets = None,
exclude_cell_types = None,
include_cell_types = None,
remap_df_frac = 1.,
experiments_json_path = None,
include_biotypes_metadata_in_context = False,
biotypes_metadata_path = None,
include_biotypes_metadata_columns = [],
biotypes_metadata_delimiter = ' | ',
balance_sampling_by_target = False,
**kwargs
):
super().__init__()
assert exists(remap_df) ^ exists(bed_file), 'either remap bed file or remap dataframe must be passed in'
if not exists(remap_df):
remap_df = read_bed(bed_file)
if remap_df_frac < 1:
remap_df = remap_df.sample(frac = remap_df_frac)
dataset_chr_ids = CHR_IDS
if exists(filter_chromosome_ids):
dataset_chr_ids = dataset_chr_ids.intersection(set(filter_chromosome_ids))
remap_df = remap_df.filter(pl_isin('column_1', get_chr_names(dataset_chr_ids)))
remap_df = filter_df_by_tfactor_fastas(remap_df, factor_fasta_folder)
self.factor_ds = FactorProteinDataset(factor_fasta_folder)
# filter dataset by inclusion and exclusion list of targets
# (<all available targets> intersect <include targets>) subtract <exclude targets>
include_targets = cast_list(include_targets)
exclude_targets = cast_list(exclude_targets)
if include_targets:
remap_df = remap_df.filter(pl_isin('target', include_targets))
if exclude_targets:
remap_df = remap_df.filter(pl_notin('target', exclude_targets))
# filter dataset by inclusion and exclusion list of cell types
# same logic as for targets
include_cell_types = cast_list(include_cell_types)
exclude_cell_types = cast_list(exclude_cell_types)
if include_cell_types:
remap_df = remap_df.filter(pl_isin('cell_type', include_cell_types))
if exclude_cell_types:
remap_df = remap_df.filter(pl_notin('cell_type', exclude_cell_types))
assert len(remap_df) > 0, 'dataset is empty by filter criteria'
self.df = remap_df
self.fasta = FastaInterval(**kwargs)
self.experiments_index = fetch_experiments_index(experiments_json_path)
# balanced target sampling logic
self.balance_sampling_by_target = balance_sampling_by_target
if self.balance_sampling_by_target:
self.df_indexed_by_target = []
for target in self.df.get_column('target').unique().to_list():
df_by_target = self.df.filter(pl.col('target') == target)
self.df_indexed_by_target.append(df_by_target)
# context string creator
self.context_ds = ContextDataset(
include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,
biotypes_metadata_path = biotypes_metadata_path,
include_biotypes_metadata_columns = include_biotypes_metadata_columns,
biotypes_metadata_delimiter = biotypes_metadata_delimiter
)
def __len__(self):
if self.balance_sampling_by_target:
return len(self.df_indexed_by_target)
else:
return len(self.df)
def __getitem__(self, ind):
# if balancing by target, randomly draw sample from indexed dataframe
if self.balance_sampling_by_target:
filtered_df = self.df_indexed_by_target[ind]
rand_ind = randrange(0, len(filtered_df))
sample = filtered_df.row(rand_ind)
else:
sample = self.df.row(ind)
chr_name, begin, end, _, _, _, experiment_target_cell_type, reading, *_ = sample
# now aggregate all the data
experiment, target, cell_type = parse_exp_target_cell(experiment_target_cell_type)
seq = self.fasta(chr_name, begin, end)
aa_seq = self.factor_ds[target]
context_str = self.context_ds[cell_type]
read_value = torch.Tensor([reading])
peaks_nr = self.experiments_index.get(experiment_target_cell_type, 0.)
peaks_nr = torch.Tensor([peaks_nr])
label = torch.Tensor([1.])
return seq, aa_seq, context_str, peaks_nr, read_value, label
# filter functions for exp-target-cells based on heldouts
def filter_exp_target_cell(
arr,
*,
exclude_targets = None,
include_targets = None,
exclude_cell_types = None,
include_cell_types = None,
):
out = []
for el in arr:
experiment, target, cell_type = parse_exp_target_cell(el)
if exists(include_targets) and len(include_targets) > 0 and target not in include_targets:
continue
if exists(exclude_targets) and target in exclude_targets:
continue
if exists(include_cell_types) and len(include_cell_types) > 0 and cell_type not in include_cell_types:
continue
if exists(exclude_cell_types) and cell_type in exclude_cell_types:
continue
out.append(el)
return out
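# e.g. (hypothetical identifiers)
# filter_exp_target_cell(['GSE1.CTCF.K562', 'GSE2.GATA2.HepG2'], include_targets = ['CTCF'])
# returns ['GSE1.CTCF.K562']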
# dataset for negatives scoped to a specific exp-target-celltype
class ScopedNegativePeakDataset(Dataset):
def __init__(
self,
*,
fasta_file,
factor_fasta_folder,
numpy_folder_with_scoped_negatives,
exts = '.bed.bool.npy',
remap_bed_file = None,
remap_df = None,
filter_chromosome_ids = None,
experiments_json_path = None,
exclude_targets = None,
include_targets = None,
exclude_cell_types = None,
include_cell_types = None,
include_biotypes_metadata_in_context = False,
biotypes_metadata_path = None,
include_biotypes_metadata_columns = [],
biotypes_metadata_delimiter = ' | ',
balance_sampling_by_target = False,
**kwargs
):
super().__init__()
assert exists(remap_df) ^ exists(remap_bed_file), 'either remap bed file or remap dataframe must be passed in'
if not exists(remap_df):
remap_df = read_bed(remap_bed_file)
dataset_chr_ids = CHR_IDS
if exists(filter_chromosome_ids):
dataset_chr_ids = dataset_chr_ids.intersection(set(filter_chromosome_ids))
filter_map_df = remap_df.with_column(pl.when(pl_isin('column_1', get_chr_names(dataset_chr_ids))).then(True).otherwise(False).alias('mask'))
mask = filter_map_df.get_column('mask').to_numpy()
num_scoped_negs = mask.sum()
print(f'{num_scoped_negs} scoped negative rows found for training')
assert num_scoped_negs > 0, 'all remap rows filtered out for scoped negative peak dataset'
self.df = remap_df
self.chromosome_mask = mask
# get dictionary with exp-target-cell to boolean numpy indicating which ones are negatives
npys_paths = [*Path(numpy_folder_with_scoped_negatives).glob('**/*.npy')]
        # strip the whole suffix (str.rstrip removes a set of characters, not a suffix)
        exp_target_cell_negatives = [(path.name[:-len(exts)] if path.name.endswith(exts) else path.name, path) for path in npys_paths]
exp_target_cells = [el[0] for el in exp_target_cell_negatives]
exp_target_cells = filter_exp_target_cell(
exp_target_cells,
include_targets = include_targets,
exclude_targets = exclude_targets,
include_cell_types = include_cell_types,
exclude_cell_types = exclude_cell_types
)
filtered_exp_target_cell_negatives = list(filter(lambda el: el[0] in exp_target_cells, exp_target_cell_negatives))
self.exp_target_cell_negatives = filtered_exp_target_cell_negatives
assert len(self.exp_target_cell_negatives) > 0, 'no experiment-target-cell scoped negatives to select from after filtering'
# balanced target sampling
self.balance_sampling_by_target = balance_sampling_by_target
if balance_sampling_by_target:
self.exp_target_cell_by_target = defaultdict(list)
for exp_target_cell, filepath in self.exp_target_cell_negatives:
_, target, *_ = parse_exp_target_cell(exp_target_cell)
self.exp_target_cell_by_target[target].append((exp_target_cell, filepath))
# tfactor dataset
self.factor_ds = FactorProteinDataset(factor_fasta_folder)
self.fasta = FastaInterval(fasta_file = fasta_file, **kwargs)
self.experiments_index = fetch_experiments_index(experiments_json_path)
# context string creator
self.context_ds = ContextDataset(
include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,
biotypes_metadata_path = biotypes_metadata_path,
include_biotypes_metadata_columns = include_biotypes_metadata_columns,
biotypes_metadata_delimiter = biotypes_metadata_delimiter
)
def __len__(self):
if self.balance_sampling_by_target:
return len(self.exp_target_cell_by_target)
else:
return len(self.exp_target_cell_negatives)
def __getitem__(self, idx):
if self.balance_sampling_by_target:
negatives = list(self.exp_target_cell_by_target.values())[idx]
sample = choice(negatives)
else:
sample = self.exp_target_cell_negatives[idx]
exp_target_cell, bool_numpy_path = sample
experiment, target, cell_type = parse_exp_target_cell(exp_target_cell)
# load boolean numpy array
# and select random peak that is a negative
np_arr = np.load(str(bool_numpy_path))
np_arr_noised = np_arr.astype(np.float32) + np.random.uniform(low = -1e-1, high = 1e-1, size = np_arr.shape[0])
# mask with chromosomes allowed
np_arr_noised *= self.chromosome_mask.astype(np.float32)
# select random negative peak
random_neg_peak_index = np_arr_noised.argmax()
chr_name, begin, end, *_ = self.df.row(random_neg_peak_index)
seq = self.fasta(chr_name, begin, end)
aa_seq = self.factor_ds[target]
context_str = self.context_ds[cell_type]
peaks_nr = self.experiments_index.get(exp_target_cell, 0.)
peaks_nr = torch.Tensor([peaks_nr])
read_value = torch.Tensor([0.])
label = torch.Tensor([0.])
return seq, aa_seq, context_str, peaks_nr, read_value, label
# dataset for hard negatives (negatives to all peaks)
class NegativePeakDataset(Dataset):
def __init__(
self,
*,
factor_fasta_folder,
negative_bed_file = None,
remap_bed_file = None,
remap_df = None,
negative_df = None,
filter_chromosome_ids = None,
exclude_targets = None,
include_targets = None,
exclude_cell_types = None,
include_cell_types = None,
exp_target_cell_column = 'column_4',
experiments_json_path = None,
include_biotypes_metadata_in_context = False,
biotypes_metadata_path = None,
include_biotypes_metadata_columns = [],
biotypes_metadata_delimiter = ' | ',
balance_sampling_by_target = False,
**kwargs
):
super().__init__()
assert exists(remap_df) ^ exists(remap_bed_file), 'either remap bed file or remap dataframe must be passed in'
assert exists(negative_df) ^ exists(negative_bed_file), 'either negative bed file or negative dataframe must be passed in'
# instantiate dataframes if not passed in
if not exists(remap_df):
remap_df = read_bed(remap_bed_file)
neg_df = negative_df
if not exists(negative_df):
neg_df = read_bed(negative_bed_file)
# filter remap dataframe
remap_df = filter_df_by_tfactor_fastas(remap_df, factor_fasta_folder)
dataset_chr_ids = CHR_IDS
if exists(filter_chromosome_ids):
dataset_chr_ids = dataset_chr_ids.intersection(set(filter_chromosome_ids))
neg_df = neg_df.filter(pl_isin('column_1', get_chr_names(dataset_chr_ids)))
assert len(neg_df) > 0, 'dataset is empty by filter criteria'
self.neg_df = neg_df
# get all exp-target-cells and filter by above
exp_target_cells = remap_df.get_column(exp_target_cell_column).unique().to_list()
self.filtered_exp_target_cells = filter_exp_target_cell(
exp_target_cells,
include_targets = include_targets,
exclude_targets = exclude_targets,
include_cell_types = include_cell_types,
exclude_cell_types = exclude_cell_types
)
assert len(self.filtered_exp_target_cells), 'no experiment-target-cell left for hard negative set'
# balanced sampling of targets
self.balance_sampling_by_target = balance_sampling_by_target
if balance_sampling_by_target:
self.exp_target_cell_by_target = defaultdict(list)
for exp_target_cell in self.filtered_exp_target_cells:
_, target, *_ = parse_exp_target_cell(exp_target_cell)
self.exp_target_cell_by_target[target].append(exp_target_cell)
# factor ds
self.factor_ds = FactorProteinDataset(factor_fasta_folder)
self.fasta = FastaInterval(**kwargs)
self.experiments_index = fetch_experiments_index(experiments_json_path)
# context string creator
self.context_ds = ContextDataset(
include_biotypes_metadata_in_context = include_biotypes_metadata_in_context,
biotypes_metadata_path = biotypes_metadata_path,
include_biotypes_metadata_columns = include_biotypes_metadata_columns,
biotypes_metadata_delimiter = biotypes_metadata_delimiter
)
def __len__(self):
return len(self.neg_df)
def __getitem__(self, ind):
chr_name, begin, end = self.neg_df.row(ind)
if self.balance_sampling_by_target:
rand_ind = randrange(0, len(self.exp_target_cell_by_target))
exp_target_cell_by_target_list = list(self.exp_target_cell_by_target.values())
random_exp_target_cell_type = choice(exp_target_cell_by_target_list[rand_ind])
else:
random_exp_target_cell_type = choice(self.filtered_exp_target_cells)
experiment, target, cell_type = parse_exp_target_cell(random_exp_target_cell_type)
seq = self.fasta(chr_name, begin, end)
aa_seq = self.factor_ds[target]
context_str = self.context_ds[cell_type]
read_value = torch.Tensor([0.])
peaks_nr = self.experiments_index.get(random_exp_target_cell_type, 0.)
peaks_nr = torch.Tensor([peaks_nr])
label = torch.Tensor([0.])
return seq, aa_seq, context_str, peaks_nr, read_value, label
# dataloader related functions
def collate_fn(data):
seq, aa_seq, context_str, peaks_nr, read_values, labels = list(zip(*data))
return torch.stack(seq), tuple(aa_seq), tuple(context_str), torch.stack(peaks_nr, dim = 0), torch.stack(read_values, dim = 0), torch.cat(labels, dim = 0)
def collate_dl_outputs(*dl_outputs):
outputs = list(zip(*dl_outputs))
ret = []
for entry in outputs:
if isinstance(entry[0], torch.Tensor):
entry = torch.cat(entry, dim = 0)
else:
            entry = tuple(sub_el for el in entry for sub_el in el)
ret.append(entry)
return tuple(ret)
def cycle(loader):
while True:
for data in loader:
yield data
def get_dataloader(ds, cycle_iter = False, **kwargs):
dataset_len = len(ds)
    batch_size = kwargs.get('batch_size', 1)
    drop_last = dataset_len > batch_size
dl = DataLoader(ds, collate_fn = collate_fn, drop_last = drop_last, **kwargs)
wrapper = cycle if cycle_iter else iter
return wrapper(dl)
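# usage sketch (hypothetical paths and batch sizes, not part of the original file) - draw a batch
# from a cycled positive and negative dataloader and merge them with collate_dl_outputs
#
# pos_ds = RemapAllPeakDataset(bed_file = './remap.bed', factor_fasta_folder = './tfactor.fastas', fasta_file = './hg38.fa')
# neg_ds = NegativePeakDataset(remap_bed_file = './remap.bed', negative_bed_file = './negatives.bed', factor_fasta_folder = './tfactor.fastas', fasta_file = './hg38.fa')
# pos_dl = get_dataloader(pos_ds, cycle_iter = True, batch_size = 2)
# neg_dl = get_dataloader(neg_ds, cycle_iter = True, batch_size = 2)
# seq, aa_seq, context_str, peaks_nr, read_value, label = collate_dl_outputs(next(pos_dl), next(neg_dl))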
|
tf-bind-transformer-main
|
tf_bind_transformer/data.py
|
import polars as pl
from pathlib import Path
from tf_bind_transformer.data import read_bed, save_bed
def generate_separate_exp_target_cell_beds(
remap_file,
*,
output_folder = './negative-peaks-per-target',
exp_target_cell_type_col = 'column_4'
):
output_folder = Path(output_folder)
output_folder.mkdir(exist_ok = True, parents = True)
df = read_bed(remap_file)
target_experiments = df.get_column(exp_target_cell_type_col).unique().to_list()
for target_experiment in target_experiments:
filtered_df = df.filter(pl.col(exp_target_cell_type_col) == target_experiment)
target_bed_path = str(output_folder / f'{target_experiment}.bed')
save_bed(filtered_df, target_bed_path)
print('success')
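# usage sketch (hypothetical path): split a remap bed file into one bed file per experiment-target-cell
# generate_separate_exp_target_cell_beds('./remap.bed', output_folder = './negative-peaks-per-target')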
|
tf-bind-transformer-main
|
scripts/remap_to_separate_exp_target_cell_beds.py
|
import json
import tqdm
import requests
NCBI_TAX_ID = dict(
human = 9606,
mouse = 10090
)
SPECIES = 'human'
API_URL = 'https://remap.univ-amu.fr/api/v1/'
def get_json(url, params = dict()):
headers = dict(Accept = 'application/json')
resp = requests.get(url, params = params, headers = headers)
return resp.json()
def get_experiments(species):
assert species in NCBI_TAX_ID
taxid = NCBI_TAX_ID[species]
experiments = get_json(f'{API_URL}list/experiments/taxid={taxid}')
return experiments
def get_experiment(experiment_id, species):
assert species in NCBI_TAX_ID
taxid = NCBI_TAX_ID[species]
experiment = get_json(f'http://remap.univ-amu.fr/api/v1/info/byExperiment/experiment={experiment_id}&taxid={taxid}')
return experiment
experiments = get_experiments(SPECIES)
for experiment in tqdm.tqdm(experiments['experiments']):
experiment_details = get_experiment(experiment['accession'], SPECIES)
experiment['details'] = experiment_details
with open('data/experiments.json', 'w+') as f:
contents = json.dumps(experiments, indent = 4, sort_keys = True)
f.write(contents)
print('success')
|
tf-bind-transformer-main
|
scripts/download_experiments.py
|
#!/usr/bin/python
import polars as pl
import numpy as np
from pathlib import Path
import sys
NEGATIVE_PEAK_PATH = sys.argv[1]
NUMROWS = int(sys.argv[2])
ID_COLUMN = 'column_6'
df = pl.read_csv(NEGATIVE_PEAK_PATH, sep = '\t', has_headers = False)
np_array = df.get_column(ID_COLUMN).to_numpy()
to_save = np.full((NUMROWS,), False)
to_save[np_array - 1] = True
p = Path(NEGATIVE_PEAK_PATH)
filename = f'{p.stem}.bool'
np.save(filename, to_save)
print(f'{filename} saved')
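# usage sketch (hypothetical arguments):
#   python negative_peak_to_bool_npy.py ./negative-peaks/GSE1.CTCF.K562.bed 1000000
# writes a boolean numpy mask (<stem>.bool.npy) of length NUMROWS into the working directory,
# with True at the (1-indexed) row ids listed in column_6 of the bed file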
|
tf-bind-transformer-main
|
scripts/negative_peak_to_bool_npy.py
|
import requests
from pathlib import Path
import click
import polars as pl
from tqdm import tqdm
from tf_bind_transformer.gene_utils import parse_gene_name
from tf_bind_transformer.data import read_bed
# constants
UNIPROT_URL = 'http://www.uniprot.org'
DEFAULT_REMAP_PATH = dict(
HUMAN = './remap2022_crm_macs2_hg38_v1_0.bed',
MOUSE = './remap2022_crm_macs2_mm10_v1_0.bed',
)
GENE_NAME_TO_ID_OVERRIDE = {
'SS18-SSX': ['Q8IZH1'],
'TFIIIC': ['A6ZV34'] # todo: figure out where the human entry is in Uniprot
}
# helper functions
def uniprot_mapping(fromtype, totype, identifier):
params = {
'from': fromtype,
'to': totype,
'format': 'tab',
'query': identifier,
}
response = requests.get(f'{UNIPROT_URL}/mapping', params = params)
return response.text
# main functions
@click.command()
@click.option('--species', help = 'Species', default = 'human', type = click.Choice(['human', 'mouse']))
@click.option('--remap-bed-path', help = 'Path to species specific remap file')
@click.option('--fasta-folder', help = 'Path to factor fastas', default = './tfactor.fastas')
def fetch_factors(
species,
remap_bed_path,
fasta_folder
):
species = species.upper()
if remap_bed_path is None:
remap_bed_path = DEFAULT_REMAP_PATH[species]
remap_bed_path = Path(remap_bed_path)
assert remap_bed_path.exists(), f'remap file does not exist at {str(remap_bed_path)}'
# load bed file and get all unique targets from column 3
df = read_bed(remap_bed_path)
genes = set([target for targets in df[:, 3] for target in targets.split(',')])
print(f'{len(genes)} factors found')
# load all saved fasta files, so can resume gracefully
fasta_files = [str(path) for path in Path('./').glob('*.fasta')]
processed_genes = set([*map(lambda t: str(t).split('.')[0], fasta_files)])
results_folder = Path(fasta_folder)
results_folder.mkdir(exist_ok = True, parents = True)
for unparsed_gene_name in tqdm(genes):
for gene_name in parse_gene_name(unparsed_gene_name):
if gene_name in processed_genes:
continue
# fetch uniprot id based on gene id
if gene_name not in GENE_NAME_TO_ID_OVERRIDE:
uniprot_resp = uniprot_mapping('GENENAME', 'ID', gene_name)
# only get the human ones (todo: make species agnostic)
entries = list(filter(lambda t: f'_{species}' in t, uniprot_resp.split('\n')))
entries = list(map(lambda t: t.split('\t')[1], entries))
else:
entries = GENE_NAME_TO_ID_OVERRIDE[gene_name]
if len(entries) == 0:
print(f'no entries found for {gene_name}')
continue
# save all hits
for entry in entries:
response = requests.get(f'{UNIPROT_URL}/uniprot/{entry}.fasta')
if response.status_code != 200:
print(f'<{response.status_code}> error fetching fasta file from gene {gene_name} {entry}')
continue
fasta_path = str(results_folder / f'{gene_name}.{entry}.fasta')
with open(fasta_path, 'w') as f:
f.write(response.text)
print(f'gene {gene_name} written')
# main function
if __name__ == '__main__':
fetch_factors()
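# usage sketch (hypothetical paths):
#   python fetch_factor_fastas.py --species human --remap-bed-path ./remap2022_crm_macs2_hg38_v1_0.bed --fasta-folder ./tfactor.fastas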
|
tf-bind-transformer-main
|
scripts/fetch_factor_fastas.py
|
from setuptools import setup, find_packages
setup(
name = 'vector_quantize_pytorch',
packages = find_packages(),
version = '1.7.1',
license='MIT',
description = 'Vector Quantization - Pytorch',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/vector-quantizer-pytorch',
keywords = [
'artificial intelligence',
'deep learning',
'pytorch',
'quantization'
],
install_requires=[
'einops>=0.6.1',
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
vector-quantize-pytorch-master
|
setup.py
|
import torch
from torch import nn, einsum
import torch.nn.functional as F
from vector_quantize_pytorch.vector_quantize_pytorch import VectorQuantize
from einops import rearrange, repeat, pack, unpack
def exists(val):
return val is not None
class RandomProjectionQuantizer(nn.Module):
""" https://arxiv.org/abs/2202.01855 """
def __init__(
self,
*,
dim,
codebook_size,
codebook_dim,
num_codebooks = 1,
norm = True,
**kwargs
):
super().__init__()
self.num_codebooks = num_codebooks
rand_projs = torch.empty(num_codebooks, dim, codebook_dim)
nn.init.xavier_normal_(rand_projs)
self.register_buffer('rand_projs', rand_projs)
# in section 3 of https://arxiv.org/abs/2202.01855
# "The input data is normalized to have 0 mean and standard deviation of 1 ... to prevent collapse"
self.norm = nn.LayerNorm(dim, elementwise_affine = False) if norm else nn.Identity()
self.vq = VectorQuantize(
dim = codebook_dim * num_codebooks,
heads = num_codebooks,
codebook_size = codebook_size,
use_cosine_sim = True,
separate_codebook_per_head = True,
**kwargs
)
def forward(
self,
x,
indices = None
):
return_loss = exists(indices)
x = self.norm(x)
x = einsum('b n d, h d e -> b n h e', x, self.rand_projs)
x, ps = pack([x], 'b n *')
self.vq.eval()
out = self.vq(x, indices = indices)
if return_loss:
_, ce_loss = out
return ce_loss
_, indices, _ = out
return indices
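# minimal usage sketch (illustrative, not part of the original file)
if __name__ == '__main__':
    quantizer = RandomProjectionQuantizer(
        dim = 512,
        codebook_size = 1024,
        codebook_dim = 16,
        num_codebooks = 2
    )
    x = torch.randn(1, 128, 512)
    indices = quantizer(x)  # codebook indices, one set per codebook
    print(indices.shape)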
|
vector-quantize-pytorch-master
|
vector_quantize_pytorch/random_projection_quantizer.py
|
from vector_quantize_pytorch.vector_quantize_pytorch import VectorQuantize
from vector_quantize_pytorch.residual_vq import ResidualVQ, GroupedResidualVQ
from vector_quantize_pytorch.random_projection_quantizer import RandomProjectionQuantizer
|
vector-quantize-pytorch-master
|
vector_quantize_pytorch/__init__.py
|
from functools import partial
import torch
from torch import nn, einsum
import torch.nn.functional as F
import torch.distributed as distributed
from torch.optim import Optimizer
from torch.cuda.amp import autocast
from einops import rearrange, repeat, reduce, pack, unpack
from typing import Callable
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def noop(*args, **kwargs):
pass
def identity(t):
return t
def l2norm(t):
return F.normalize(t, p = 2, dim = -1)
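# batched pairwise euclidean distances between x (b, i, d) and y (b, j, d), returning (b, i, j)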
def cdist(x, y):
x2 = reduce(x ** 2, 'b n d -> b n', 'sum')
y2 = reduce(y ** 2, 'b n d -> b n', 'sum')
xy = einsum('b i d, b j d -> b i j', x, y) * -2
return (rearrange(x2, 'b i -> b i 1') + rearrange(y2, 'b j -> b 1 j') + xy).sqrt()
def log(t, eps = 1e-20):
return torch.log(t.clamp(min = eps))
def ema_inplace(old, new, decay):
is_mps = str(old.device).startswith('mps:')
if not is_mps:
old.lerp_(new, 1 - decay)
else:
old.mul_(decay).add_(new * (1 - decay))
def pack_one(t, pattern):
return pack([t], pattern)
def unpack_one(t, ps, pattern):
return unpack(t, ps, pattern)[0]
def uniform_init(*shape):
t = torch.empty(shape)
nn.init.kaiming_uniform_(t)
return t
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(
logits,
temperature = 1.,
stochastic = False,
straight_through = False,
reinmax = False,
dim = -1,
training = True
):
dtype, size = logits.dtype, logits.shape[dim]
if training and stochastic and temperature > 0:
sampling_logits = (logits / temperature) + gumbel_noise(logits)
else:
sampling_logits = logits
ind = sampling_logits.argmax(dim = dim)
one_hot = F.one_hot(ind, size).type(dtype)
assert not (reinmax and not straight_through), 'reinmax can only be turned on if using straight through gumbel softmax'
if not straight_through or temperature <= 0. or not training:
return ind, one_hot
# use reinmax for better second-order accuracy - https://arxiv.org/abs/2304.08612
# algorithm 2
if reinmax:
π0 = logits.softmax(dim = dim)
π1 = (one_hot + (logits / temperature).softmax(dim = dim)) / 2
        π1 = ((log(π1) - logits).detach() + logits).softmax(dim = dim)
π2 = 2 * π1 - 0.5 * π0
one_hot = π2 - π2.detach() + one_hot
else:
π1 = (logits / temperature).softmax(dim = dim)
one_hot = one_hot + π1 - π1.detach()
return ind, one_hot
def laplace_smoothing(x, n_categories, eps = 1e-5, dim = -1):
denom = x.sum(dim = dim, keepdim = True)
return (x + eps) / (denom + n_categories * eps)
def sample_vectors(samples, num):
num_samples, device = samples.shape[0], samples.device
if num_samples >= num:
indices = torch.randperm(num_samples, device = device)[:num]
else:
indices = torch.randint(0, num_samples, (num,), device = device)
return samples[indices]
def batched_sample_vectors(samples, num):
return torch.stack([sample_vectors(sample, num) for sample in samples.unbind(dim = 0)], dim = 0)
def pad_shape(shape, size, dim = 0):
return [size if i == dim else s for i, s in enumerate(shape)]
def sample_multinomial(total_count, probs):
device = probs.device
probs = probs.cpu()
total_count = probs.new_full((), total_count)
remainder = probs.new_ones(())
sample = torch.empty_like(probs, dtype = torch.long)
for i, p in enumerate(probs):
s = torch.binomial(total_count, p / remainder)
sample[i] = s
total_count -= s
remainder -= p
return sample.to(device)
def all_gather_sizes(x, dim):
size = torch.tensor(x.shape[dim], dtype = torch.long, device = x.device)
all_sizes = [torch.empty_like(size) for _ in range(distributed.get_world_size())]
distributed.all_gather(all_sizes, size)
return torch.stack(all_sizes)
def all_gather_variably_sized(x, sizes, dim = 0):
rank = distributed.get_rank()
all_x = []
for i, size in enumerate(sizes):
t = x if i == rank else x.new_empty(pad_shape(x.shape, size, dim))
distributed.broadcast(t, src = i, async_op = True)
all_x.append(t)
distributed.barrier()
return all_x
def sample_vectors_distributed(local_samples, num):
local_samples = rearrange(local_samples, '1 ... -> ...')
rank = distributed.get_rank()
all_num_samples = all_gather_sizes(local_samples, dim = 0)
if rank == 0:
samples_per_rank = sample_multinomial(num, all_num_samples / all_num_samples.sum())
else:
samples_per_rank = torch.empty_like(all_num_samples)
distributed.broadcast(samples_per_rank, src = 0)
samples_per_rank = samples_per_rank.tolist()
local_samples = sample_vectors(local_samples, samples_per_rank[rank])
all_samples = all_gather_variably_sized(local_samples, samples_per_rank, dim = 0)
out = torch.cat(all_samples, dim = 0)
return rearrange(out, '... -> 1 ...')
def batched_bincount(x, *, minlength):
batch, dtype, device = x.shape[0], x.dtype, x.device
target = torch.zeros(batch, minlength, dtype = dtype, device = device)
values = torch.ones_like(x)
target.scatter_add_(-1, x, values)
return target
def kmeans(
samples,
num_clusters,
num_iters = 10,
use_cosine_sim = False,
sample_fn = batched_sample_vectors,
all_reduce_fn = noop
):
num_codebooks, dim, dtype, device = samples.shape[0], samples.shape[-1], samples.dtype, samples.device
means = sample_fn(samples, num_clusters)
for _ in range(num_iters):
if use_cosine_sim:
dists = samples @ rearrange(means, 'h n d -> h d n')
else:
dists = -torch.cdist(samples, means, p = 2)
buckets = torch.argmax(dists, dim = -1)
bins = batched_bincount(buckets, minlength = num_clusters)
all_reduce_fn(bins)
zero_mask = bins == 0
bins_min_clamped = bins.masked_fill(zero_mask, 1)
new_means = buckets.new_zeros(num_codebooks, num_clusters, dim, dtype = dtype)
new_means.scatter_add_(1, repeat(buckets, 'h n -> h n d', d = dim), samples)
new_means = new_means / rearrange(bins_min_clamped, '... -> ... 1')
all_reduce_fn(new_means)
if use_cosine_sim:
new_means = l2norm(new_means)
means = torch.where(
rearrange(zero_mask, '... -> ... 1'),
means,
new_means
)
return means, bins
def batched_embedding(indices, embeds):
batch, dim = indices.shape[1], embeds.shape[-1]
indices = repeat(indices, 'h b n -> h b n d', d = dim)
embeds = repeat(embeds, 'h c d -> h b c d', b = batch)
return embeds.gather(2, indices)
# regularization losses
def orthogonal_loss_fn(t):
# eq (2) from https://arxiv.org/abs/2112.00384
h, n = t.shape[:2]
normed_codes = l2norm(t)
cosine_sim = einsum('h i d, h j d -> h i j', normed_codes, normed_codes)
return (cosine_sim ** 2).sum() / (h * n ** 2) - (1 / n)
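# note: for a perfectly orthonormal set of codes this loss is exactly 0 - the squared cosine
# similarity matrix reduces to the identity, summing to h * n, and (h * n) / (h * n ** 2) = 1 / n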
# distance types
class EuclideanCodebook(nn.Module):
def __init__(
self,
dim,
codebook_size,
num_codebooks = 1,
kmeans_init = False,
kmeans_iters = 10,
sync_kmeans = True,
decay = 0.8,
eps = 1e-5,
threshold_ema_dead_code = 2,
reset_cluster_size = None,
use_ddp = False,
learnable_codebook = False,
gumbel_sample = gumbel_sample,
sample_codebook_temp = 1.,
ema_update = True,
affine_param = False,
sync_affine_param = False,
affine_param_batch_decay = 0.99,
affine_param_codebook_decay = 0.9
):
super().__init__()
self.transform_input = identity
self.decay = decay
self.ema_update = ema_update
init_fn = uniform_init if not kmeans_init else torch.zeros
embed = init_fn(num_codebooks, codebook_size, dim)
self.codebook_size = codebook_size
self.num_codebooks = num_codebooks
self.kmeans_iters = kmeans_iters
self.eps = eps
self.threshold_ema_dead_code = threshold_ema_dead_code
self.reset_cluster_size = default(reset_cluster_size, threshold_ema_dead_code)
assert callable(gumbel_sample)
self.gumbel_sample = gumbel_sample
self.sample_codebook_temp = sample_codebook_temp
assert not (use_ddp and num_codebooks > 1 and kmeans_init), 'kmeans init is not compatible with multiple codebooks in distributed environment for now'
self.sample_fn = sample_vectors_distributed if use_ddp and sync_kmeans else batched_sample_vectors
self.kmeans_all_reduce_fn = distributed.all_reduce if use_ddp and sync_kmeans else noop
self.all_reduce_fn = distributed.all_reduce if use_ddp else noop
self.register_buffer('initted', torch.Tensor([not kmeans_init]))
self.register_buffer('cluster_size', torch.zeros(num_codebooks, codebook_size))
self.register_buffer('embed_avg', embed.clone())
self.learnable_codebook = learnable_codebook
if learnable_codebook:
self.embed = nn.Parameter(embed)
else:
self.register_buffer('embed', embed)
# affine related params
self.affine_param = affine_param
self.sync_affine_param = sync_affine_param
if not affine_param:
return
self.affine_param_batch_decay = affine_param_batch_decay
self.affine_param_codebook_decay = affine_param_codebook_decay
self.register_buffer('batch_mean', None)
self.register_buffer('batch_variance', None)
self.register_buffer('codebook_mean_needs_init', torch.Tensor([True]))
self.register_buffer('codebook_mean', torch.empty(num_codebooks, 1, dim))
self.register_buffer('codebook_variance_needs_init', torch.Tensor([True]))
self.register_buffer('codebook_variance', torch.empty(num_codebooks, 1, dim))
@torch.jit.ignore
def init_embed_(self, data, mask = None):
if self.initted:
return
if exists(mask):
c = data.shape[0]
data = rearrange(data[mask], '(c n) d -> c n d', c = c)
embed, cluster_size = kmeans(
data,
self.codebook_size,
self.kmeans_iters,
sample_fn = self.sample_fn,
all_reduce_fn = self.kmeans_all_reduce_fn
)
embed_sum = embed * rearrange(cluster_size, '... -> ... 1')
self.embed.data.copy_(embed)
self.embed_avg.data.copy_(embed_sum)
self.cluster_size.data.copy_(cluster_size)
self.initted.data.copy_(torch.Tensor([True]))
@torch.jit.ignore
def update_with_decay(self, buffer_name, new_value, decay):
old_value = getattr(self, buffer_name)
needs_init = getattr(self, buffer_name + "_needs_init", False)
if needs_init:
self.register_buffer(buffer_name + "_needs_init", torch.Tensor([False]))
if not exists(old_value) or needs_init:
self.register_buffer(buffer_name, new_value.detach())
return
value = old_value * decay + new_value.detach() * (1 - decay)
self.register_buffer(buffer_name, value)
@torch.jit.ignore
def update_affine(self, data, embed, mask = None):
assert self.affine_param
var_fn = partial(torch.var, unbiased = False)
# calculate codebook mean and variance
embed = rearrange(embed, 'h ... d -> h (...) d')
if self.training:
self.update_with_decay('codebook_mean', reduce(embed, 'h n d -> h 1 d', 'mean'), self.affine_param_codebook_decay)
self.update_with_decay('codebook_variance', reduce(embed, 'h n d -> h 1 d', var_fn), self.affine_param_codebook_decay)
# prepare batch data, which depends on whether it has masking
data = rearrange(data, 'h ... d -> h (...) d')
if exists(mask):
c = data.shape[0]
data = rearrange(data[mask], '(c n) d -> c n d', c = c)
# calculate batch mean and variance
if not self.sync_affine_param:
self.update_with_decay('batch_mean', reduce(data, 'h n d -> h 1 d', 'mean'), self.affine_param_batch_decay)
self.update_with_decay('batch_variance', reduce(data, 'h n d -> h 1 d', var_fn), self.affine_param_batch_decay)
return
num_vectors, device, dtype = data.shape[-2], data.device, data.dtype
# number of vectors, for denominator
num_vectors = torch.tensor([num_vectors], device = device, dtype = dtype)
distributed.all_reduce(num_vectors)
# calculate distributed mean
batch_sum = reduce(data, 'h n d -> h 1 d', 'sum')
distributed.all_reduce(batch_sum)
batch_mean = batch_sum / num_vectors
self.update_with_decay('batch_mean', batch_mean, self.affine_param_batch_decay)
# calculate distributed variance
variance_numer = reduce((data - batch_mean) ** 2, 'h n d -> h 1 d', 'sum')
distributed.all_reduce(variance_numer)
batch_variance = variance_numer / num_vectors
self.update_with_decay('batch_variance', batch_variance, self.affine_param_batch_decay)
def replace(self, batch_samples, batch_mask):
for ind, (samples, mask) in enumerate(zip(batch_samples.unbind(dim = 0), batch_mask.unbind(dim = 0))):
if not torch.any(mask):
continue
sampled = self.sample_fn(rearrange(samples, '... -> 1 ...'), mask.sum().item())
sampled = rearrange(sampled, '1 ... -> ...')
self.embed.data[ind][mask] = sampled
self.cluster_size.data[ind][mask] = self.reset_cluster_size
self.embed_avg.data[ind][mask] = sampled * self.reset_cluster_size
def expire_codes_(self, batch_samples):
if self.threshold_ema_dead_code == 0:
return
expired_codes = self.cluster_size < self.threshold_ema_dead_code
if not torch.any(expired_codes):
return
batch_samples = rearrange(batch_samples, 'h ... d -> h (...) d')
self.replace(batch_samples, batch_mask = expired_codes)
@autocast(enabled = False)
def forward(
self,
x,
sample_codebook_temp = None,
mask = None,
freeze_codebook = False
):
needs_codebook_dim = x.ndim < 4
sample_codebook_temp = default(sample_codebook_temp, self.sample_codebook_temp)
x = x.float()
if needs_codebook_dim:
x = rearrange(x, '... -> 1 ...')
dtype = x.dtype
flatten, ps = pack_one(x, 'h * d')
if exists(mask):
mask = repeat(mask, 'b n -> c (b h n)', c = flatten.shape[0], h = flatten.shape[-2] // (mask.shape[0] * mask.shape[1]))
self.init_embed_(flatten, mask = mask)
if self.affine_param:
self.update_affine(flatten, self.embed, mask = mask)
embed = self.embed if self.learnable_codebook else self.embed.detach()
if self.affine_param:
codebook_std = self.codebook_variance.clamp(min = 1e-5).sqrt()
batch_std = self.batch_variance.clamp(min = 1e-5).sqrt()
embed = (embed - self.codebook_mean) * (batch_std / codebook_std) + self.batch_mean
dist = -cdist(flatten, embed)
embed_ind, embed_onehot = self.gumbel_sample(dist, dim = -1, temperature = sample_codebook_temp, training = self.training)
embed_ind = unpack_one(embed_ind, ps, 'h *')
if self.training:
unpacked_onehot = unpack_one(embed_onehot, ps, 'h * c')
quantize = einsum('h b n c, h c d -> h b n d', unpacked_onehot, embed)
else:
quantize = batched_embedding(embed_ind, embed)
if self.training and self.ema_update and not freeze_codebook:
if self.affine_param:
flatten = (flatten - self.batch_mean) * (codebook_std / batch_std) + self.codebook_mean
if exists(mask):
embed_onehot[~mask] = 0.
cluster_size = embed_onehot.sum(dim = 1)
self.all_reduce_fn(cluster_size)
ema_inplace(self.cluster_size.data, cluster_size, self.decay)
embed_sum = einsum('h n d, h n c -> h c d', flatten, embed_onehot)
self.all_reduce_fn(embed_sum.contiguous())
ema_inplace(self.embed_avg.data, embed_sum, self.decay)
cluster_size = laplace_smoothing(self.cluster_size, self.codebook_size, self.eps) * self.cluster_size.sum(dim = -1, keepdim = True)
embed_normalized = self.embed_avg / rearrange(cluster_size, '... -> ... 1')
self.embed.data.copy_(embed_normalized)
self.expire_codes_(x)
if needs_codebook_dim:
quantize, embed_ind = map(lambda t: rearrange(t, '1 ... -> ...'), (quantize, embed_ind))
dist = unpack_one(dist, ps, 'h * d')
return quantize, embed_ind, dist
class CosineSimCodebook(nn.Module):
def __init__(
self,
dim,
codebook_size,
num_codebooks = 1,
kmeans_init = False,
kmeans_iters = 10,
sync_kmeans = True,
decay = 0.8,
eps = 1e-5,
threshold_ema_dead_code = 2,
reset_cluster_size = None,
use_ddp = False,
learnable_codebook = False,
gumbel_sample = gumbel_sample,
sample_codebook_temp = 1.,
ema_update = True
):
super().__init__()
self.transform_input = l2norm
self.ema_update = ema_update
self.decay = decay
if not kmeans_init:
embed = l2norm(uniform_init(num_codebooks, codebook_size, dim))
else:
embed = torch.zeros(num_codebooks, codebook_size, dim)
self.codebook_size = codebook_size
self.num_codebooks = num_codebooks
self.kmeans_iters = kmeans_iters
self.eps = eps
self.threshold_ema_dead_code = threshold_ema_dead_code
self.reset_cluster_size = default(reset_cluster_size, threshold_ema_dead_code)
assert callable(gumbel_sample)
self.gumbel_sample = gumbel_sample
self.sample_codebook_temp = sample_codebook_temp
self.sample_fn = sample_vectors_distributed if use_ddp and sync_kmeans else batched_sample_vectors
self.kmeans_all_reduce_fn = distributed.all_reduce if use_ddp and sync_kmeans else noop
self.all_reduce_fn = distributed.all_reduce if use_ddp else noop
self.register_buffer('initted', torch.Tensor([not kmeans_init]))
self.register_buffer('cluster_size', torch.zeros(num_codebooks, codebook_size))
self.register_buffer('embed_avg', embed.clone())
self.learnable_codebook = learnable_codebook
if learnable_codebook:
self.embed = nn.Parameter(embed)
else:
self.register_buffer('embed', embed)
@torch.jit.ignore
def init_embed_(self, data, mask = None):
if self.initted:
return
if exists(mask):
c = data.shape[0]
data = rearrange(data[mask], '(c n) d -> c n d', c = c)
embed, cluster_size = kmeans(
data,
self.codebook_size,
self.kmeans_iters,
use_cosine_sim = True,
sample_fn = self.sample_fn,
all_reduce_fn = self.kmeans_all_reduce_fn
)
embed_sum = embed * rearrange(cluster_size, '... -> ... 1')
self.embed.data.copy_(embed)
self.embed_avg.data.copy_(embed_sum)
self.cluster_size.data.copy_(cluster_size)
self.initted.data.copy_(torch.Tensor([True]))
def replace(self, batch_samples, batch_mask):
batch_samples = l2norm(batch_samples)
for ind, (samples, mask) in enumerate(zip(batch_samples.unbind(dim = 0), batch_mask.unbind(dim = 0))):
if not torch.any(mask):
continue
sampled = self.sample_fn(rearrange(samples, '... -> 1 ...'), mask.sum().item())
sampled = rearrange(sampled, '1 ... -> ...')
self.embed.data[ind][mask] = sampled
self.embed_avg.data[ind][mask] = sampled * self.reset_cluster_size
self.cluster_size.data[ind][mask] = self.reset_cluster_size
def expire_codes_(self, batch_samples):
if self.threshold_ema_dead_code == 0:
return
expired_codes = self.cluster_size < self.threshold_ema_dead_code
if not torch.any(expired_codes):
return
batch_samples = rearrange(batch_samples, 'h ... d -> h (...) d')
self.replace(batch_samples, batch_mask = expired_codes)
@autocast(enabled = False)
def forward(
self,
x,
sample_codebook_temp = None,
mask = None,
freeze_codebook = False
):
needs_codebook_dim = x.ndim < 4
sample_codebook_temp = default(sample_codebook_temp, self.sample_codebook_temp)
x = x.float()
if needs_codebook_dim:
x = rearrange(x, '... -> 1 ...')
dtype = x.dtype
flatten, ps = pack_one(x, 'h * d')
if exists(mask):
mask = repeat(mask, 'b n -> c (b h n)', c = flatten.shape[0], h = flatten.shape[-2] // (mask.shape[0] * mask.shape[1]))
self.init_embed_(flatten, mask = mask)
embed = self.embed if self.learnable_codebook else self.embed.detach()
dist = einsum('h n d, h c d -> h n c', flatten, embed)
embed_ind, embed_onehot = self.gumbel_sample(dist, dim = -1, temperature = sample_codebook_temp, training = self.training)
embed_ind = unpack_one(embed_ind, ps, 'h *')
if self.training:
unpacked_onehot = unpack_one(embed_onehot, ps, 'h * c')
quantize = einsum('h b n c, h c d -> h b n d', unpacked_onehot, embed)
else:
quantize = batched_embedding(embed_ind, embed)
if self.training and self.ema_update and not freeze_codebook:
if exists(mask):
embed_onehot[~mask] = 0.
bins = embed_onehot.sum(dim = 1)
self.all_reduce_fn(bins)
ema_inplace(self.cluster_size.data, bins, self.decay)
embed_sum = einsum('h n d, h n c -> h c d', flatten, embed_onehot)
self.all_reduce_fn(embed_sum.contiguous())
ema_inplace(self.embed_avg.data, embed_sum, self.decay)
cluster_size = laplace_smoothing(self.cluster_size, self.codebook_size, self.eps) * self.cluster_size.sum(dim = -1, keepdim = True)
embed_normalized = self.embed_avg / rearrange(cluster_size, '... -> ... 1')
            embed_normalized = l2norm(embed_normalized)
            self.embed.data.copy_(embed_normalized)
self.expire_codes_(x)
if needs_codebook_dim:
quantize, embed_ind = map(lambda t: rearrange(t, '1 ... -> ...'), (quantize, embed_ind))
dist = unpack_one(dist, ps, 'h * d')
return quantize, embed_ind, dist
# main class
class VectorQuantize(nn.Module):
def __init__(
self,
dim,
codebook_size,
codebook_dim = None,
heads = 1,
separate_codebook_per_head = False,
decay = 0.8,
eps = 1e-5,
freeze_codebook = False,
kmeans_init = False,
kmeans_iters = 10,
sync_kmeans = True,
use_cosine_sim = False,
threshold_ema_dead_code = 0,
channel_last = True,
accept_image_fmap = False,
commitment_weight = 1.,
commitment_use_cross_entropy_loss = False,
orthogonal_reg_weight = 0.,
orthogonal_reg_active_codes_only = False,
orthogonal_reg_max_codes = None,
stochastic_sample_codes = False,
sample_codebook_temp = 1.,
straight_through = False,
reinmax = False, # using reinmax for improved straight-through, assuming straight through helps at all
sync_codebook = None,
sync_affine_param = False,
ema_update = True,
learnable_codebook = False,
in_place_codebook_optimizer: Callable[..., Optimizer] = None, # Optimizer used to update the codebook embedding if using learnable_codebook
affine_param = False,
affine_param_batch_decay = 0.99,
affine_param_codebook_decay = 0.9,
sync_update_v = 0. # the v that controls optimistic vs pessimistic update for synchronous update rule (21) https://minyoungg.github.io/vqtorch/assets/draft_050523.pdf
):
super().__init__()
self.dim = dim
self.heads = heads
self.separate_codebook_per_head = separate_codebook_per_head
codebook_dim = default(codebook_dim, dim)
codebook_input_dim = codebook_dim * heads
requires_projection = codebook_input_dim != dim
self.project_in = nn.Linear(dim, codebook_input_dim) if requires_projection else nn.Identity()
self.project_out = nn.Linear(codebook_input_dim, dim) if requires_projection else nn.Identity()
self.eps = eps
self.commitment_weight = commitment_weight
self.commitment_use_cross_entropy_loss = commitment_use_cross_entropy_loss # whether to use cross entropy loss to codebook as commitment loss
self.learnable_codebook = learnable_codebook
has_codebook_orthogonal_loss = orthogonal_reg_weight > 0
self.has_codebook_orthogonal_loss = has_codebook_orthogonal_loss
self.orthogonal_reg_weight = orthogonal_reg_weight
self.orthogonal_reg_active_codes_only = orthogonal_reg_active_codes_only
self.orthogonal_reg_max_codes = orthogonal_reg_max_codes
assert not (ema_update and learnable_codebook), 'learnable codebook not compatible with EMA update'
assert 0 <= sync_update_v <= 1.
assert not (sync_update_v > 0. and not learnable_codebook), 'learnable codebook must be turned on'
self.sync_update_v = sync_update_v
codebook_class = EuclideanCodebook if not use_cosine_sim else CosineSimCodebook
gumbel_sample_fn = partial(
gumbel_sample,
stochastic = stochastic_sample_codes,
reinmax = reinmax,
straight_through = straight_through
)
if not exists(sync_codebook):
sync_codebook = distributed.is_initialized() and distributed.get_world_size() > 1
codebook_kwargs = dict(
dim = codebook_dim,
num_codebooks = heads if separate_codebook_per_head else 1,
codebook_size = codebook_size,
kmeans_init = kmeans_init,
kmeans_iters = kmeans_iters,
sync_kmeans = sync_kmeans,
decay = decay,
eps = eps,
threshold_ema_dead_code = threshold_ema_dead_code,
use_ddp = sync_codebook,
learnable_codebook = has_codebook_orthogonal_loss or learnable_codebook,
sample_codebook_temp = sample_codebook_temp,
gumbel_sample = gumbel_sample_fn,
ema_update = ema_update
)
if affine_param:
assert not use_cosine_sim, 'affine param is only compatible with euclidean codebook'
codebook_kwargs = dict(
**codebook_kwargs,
affine_param = True,
sync_affine_param = sync_affine_param,
affine_param_batch_decay = affine_param_batch_decay,
affine_param_codebook_decay = affine_param_codebook_decay,
)
self._codebook = codebook_class(**codebook_kwargs)
self.in_place_codebook_optimizer = in_place_codebook_optimizer(self._codebook.parameters()) if exists(in_place_codebook_optimizer) else None
self.codebook_size = codebook_size
self.accept_image_fmap = accept_image_fmap
self.channel_last = channel_last
@property
def codebook(self):
codebook = self._codebook.embed
if self.separate_codebook_per_head:
return codebook
return rearrange(codebook, '1 ... -> ...')
@codebook.setter
def codebook(self, codes):
if not self.separate_codebook_per_head:
codes = rearrange(codes, '... -> 1 ...')
self._codebook.embed.copy_(codes)
def get_codes_from_indices(self, indices):
codebook = self.codebook
is_multiheaded = codebook.ndim > 2
if not is_multiheaded:
codes = codebook[indices]
return rearrange(codes, '... h d -> ... (h d)')
indices, ps = pack_one(indices, 'b * h')
indices = rearrange(indices, 'b n h -> b h n')
indices = repeat(indices, 'b h n -> b h n d', d = codebook.shape[-1])
codebook = repeat(codebook, 'h n d -> b h n d', b = indices.shape[0])
codes = codebook.gather(2, indices)
codes = rearrange(codes, 'b h n d -> b n (h d)')
codes = unpack_one(codes, ps, 'b * d')
return codes
def forward(
self,
x,
indices = None,
mask = None,
sample_codebook_temp = None,
freeze_codebook = False
):
orig_input = x
only_one = x.ndim == 2
if only_one:
assert not exists(mask)
x = rearrange(x, 'b d -> b 1 d')
shape, device, heads, is_multiheaded, codebook_size, return_loss = x.shape, x.device, self.heads, self.heads > 1, self.codebook_size, exists(indices)
need_transpose = not self.channel_last and not self.accept_image_fmap
should_inplace_optimize = exists(self.in_place_codebook_optimizer)
# rearrange inputs
if self.accept_image_fmap:
height, width = x.shape[-2:]
x = rearrange(x, 'b c h w -> b (h w) c')
if need_transpose:
x = rearrange(x, 'b d n -> b n d')
# project input
x = self.project_in(x)
# handle multi-headed separate codebooks
if is_multiheaded:
ein_rhs_eq = 'h b n d' if self.separate_codebook_per_head else '1 (b h) n d'
x = rearrange(x, f'b n (h d) -> {ein_rhs_eq}', h = heads)
# l2norm for cosine sim, otherwise identity
x = self._codebook.transform_input(x)
# codebook forward kwargs
codebook_forward_kwargs = dict(
sample_codebook_temp = sample_codebook_temp,
mask = mask,
freeze_codebook = freeze_codebook
)
# quantize
quantize, embed_ind, distances = self._codebook(x, **codebook_forward_kwargs)
# one step in-place update
if should_inplace_optimize and self.training and not freeze_codebook:
if exists(mask):
loss = F.mse_loss(quantize, x.detach(), reduction = 'none')
loss_mask = mask
if is_multiheaded:
loss_mask = repeat(mask, 'b n -> c (b h) n', c = loss.shape[0], h = loss.shape[1] // mask.shape[0])
loss = loss[loss_mask].mean()
else:
loss = F.mse_loss(quantize, x.detach())
loss.backward()
self.in_place_codebook_optimizer.step()
self.in_place_codebook_optimizer.zero_grad()
# quantize again
quantize, embed_ind, distances = self._codebook(x, **codebook_forward_kwargs)
if self.training:
# determine code to use for commitment loss
maybe_detach = torch.detach if not self.learnable_codebook or freeze_codebook else identity
commit_quantize = maybe_detach(quantize)
# straight through
quantize = x + (quantize - x).detach()
if self.sync_update_v > 0.:
# (21) in https://minyoungg.github.io/vqtorch/assets/draft_050523.pdf
quantize = quantize + self.sync_update_v * (quantize - quantize.detach())
# function for calculating cross entropy loss to distance matrix
# used for (1) naturalspeech2 training residual vq latents to be close to the correct codes and (2) cross-entropy based commitment loss
def calculate_ce_loss(codes):
if not is_multiheaded:
dist_einops_eq = '1 b n l -> b l n'
elif self.separate_codebook_per_head:
dist_einops_eq = 'c b n l -> b l n c'
else:
dist_einops_eq = '1 (b h) n l -> b l n h'
ce_loss = F.cross_entropy(
rearrange(distances, dist_einops_eq, b = shape[0]),
codes,
ignore_index = -1
)
return ce_loss
# if returning cross entropy loss on codes that were passed in
if return_loss:
return quantize, calculate_ce_loss(indices)
# transform embedding indices
if is_multiheaded:
if self.separate_codebook_per_head:
embed_ind = rearrange(embed_ind, 'h b n -> b n h', h = heads)
else:
embed_ind = rearrange(embed_ind, '1 (b h) n -> b n h', h = heads)
if self.accept_image_fmap:
embed_ind = rearrange(embed_ind, 'b (h w) ... -> b h w ...', h = height, w = width)
if only_one:
embed_ind = rearrange(embed_ind, 'b 1 -> b')
# aggregate loss
loss = torch.tensor([0.], device = device, requires_grad = self.training)
if self.training:
if self.commitment_weight > 0:
if self.commitment_use_cross_entropy_loss:
if exists(mask):
ce_loss_mask = mask
if is_multiheaded:
ce_loss_mask = repeat(ce_loss_mask, 'b n -> b n h', h = heads)
embed_ind.masked_fill_(~ce_loss_mask, -1)
commit_loss = calculate_ce_loss(embed_ind)
else:
if exists(mask):
# with variable lengthed sequences
commit_loss = F.mse_loss(commit_quantize, x, reduction = 'none')
loss_mask = mask
if is_multiheaded:
loss_mask = repeat(loss_mask, 'b n -> c (b h) n', c = commit_loss.shape[0], h = commit_loss.shape[1] // mask.shape[0])
commit_loss = commit_loss[loss_mask].mean()
else:
commit_loss = F.mse_loss(commit_quantize, x)
loss = loss + commit_loss * self.commitment_weight
if self.has_codebook_orthogonal_loss:
codebook = self._codebook.embed
# only calculate orthogonal loss for the activated codes for this batch
if self.orthogonal_reg_active_codes_only:
assert not (is_multiheaded and self.separate_codebook_per_head), 'orthogonal regularization for only active codes not compatible with multi-headed with separate codebooks yet'
unique_code_ids = torch.unique(embed_ind)
codebook = codebook[:, unique_code_ids]
num_codes = codebook.shape[-2]
if exists(self.orthogonal_reg_max_codes) and num_codes > self.orthogonal_reg_max_codes:
rand_ids = torch.randperm(num_codes, device = device)[:self.orthogonal_reg_max_codes]
codebook = codebook[:, rand_ids]
orthogonal_reg_loss = orthogonal_loss_fn(codebook)
loss = loss + orthogonal_reg_loss * self.orthogonal_reg_weight
# handle multi-headed quantized embeddings
if is_multiheaded:
if self.separate_codebook_per_head:
quantize = rearrange(quantize, 'h b n d -> b n (h d)', h = heads)
else:
quantize = rearrange(quantize, '1 (b h) n d -> b n (h d)', h = heads)
# project out
quantize = self.project_out(quantize)
# rearrange quantized embeddings
if need_transpose:
quantize = rearrange(quantize, 'b n d -> b d n')
if self.accept_image_fmap:
quantize = rearrange(quantize, 'b (h w) c -> b c h w', h = height, w = width)
if only_one:
quantize = rearrange(quantize, 'b 1 d -> b d')
# if masking, only return quantized for where mask has True
if exists(mask):
quantize = torch.where(
rearrange(mask, '... -> ... 1'),
quantize,
orig_input
)
return quantize, embed_ind, loss
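# --- illustrative sketch (not part of the original file) ---
# a minimal usage example of the VectorQuantize module defined above, assuming the
# default channel-last layout; forward returns the quantized tensor, the codebook
# indices and the auxiliary (commitment) loss.
if __name__ == '__main__':
    vq = VectorQuantize(dim = 256, codebook_size = 512, decay = 0.8, commitment_weight = 1.)
    x = torch.randn(1, 1024, 256)
    quantized, indices, loss = vq(x)   # shapes: (1, 1024, 256), (1, 1024), (1,)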
|
vector-quantize-pytorch-master
|
vector_quantize_pytorch/vector_quantize_pytorch.py
|
from math import ceil
from functools import partial
from itertools import zip_longest
from random import randrange
import torch
from torch import nn
import torch.nn.functional as F
from vector_quantize_pytorch.vector_quantize_pytorch import VectorQuantize
from einops import rearrange, repeat, pack, unpack
# helper functions
def exists(val):
return val is not None
def default(val, d):
return val if exists(val) else d
def round_up_multiple(num, mult):
return ceil(num / mult) * mult
# main class
class ResidualVQ(nn.Module):
""" Follows Algorithm 1. in https://arxiv.org/pdf/2107.03312.pdf """
def __init__(
self,
*,
dim,
num_quantizers,
codebook_dim = None,
shared_codebook = False,
heads = 1,
quantize_dropout = False,
quantize_dropout_cutoff_index = 0,
quantize_dropout_multiple_of = 1,
accept_image_fmap = False,
**kwargs
):
super().__init__()
assert heads == 1, 'residual vq is not compatible with multi-headed codes'
codebook_dim = default(codebook_dim, dim)
codebook_input_dim = codebook_dim * heads
requires_projection = codebook_input_dim != dim
self.project_in = nn.Linear(dim, codebook_input_dim) if requires_projection else nn.Identity()
self.project_out = nn.Linear(codebook_input_dim, dim) if requires_projection else nn.Identity()
self.num_quantizers = num_quantizers
self.accept_image_fmap = accept_image_fmap
self.layers = nn.ModuleList([VectorQuantize(dim = codebook_dim, codebook_dim = codebook_dim, accept_image_fmap = accept_image_fmap, **kwargs) for _ in range(num_quantizers)])
self.quantize_dropout = quantize_dropout and num_quantizers > 1
assert quantize_dropout_cutoff_index >= 0
self.quantize_dropout_cutoff_index = quantize_dropout_cutoff_index
self.quantize_dropout_multiple_of = quantize_dropout_multiple_of # encodec paper proposes structured dropout, believe this was set to 4
if not shared_codebook:
return
first_vq, *rest_vq = self.layers
codebook = first_vq._codebook
for vq in rest_vq:
vq._codebook = codebook
@property
def codebooks(self):
codebooks = [layer._codebook.embed for layer in self.layers]
codebooks = torch.stack(codebooks, dim = 0)
codebooks = rearrange(codebooks, 'q 1 c d -> q c d')
return codebooks
def get_codes_from_indices(self, indices):
batch, quantize_dim = indices.shape[0], indices.shape[-1]
# may also receive indices in the shape of 'b h w q' (accept_image_fmap)
indices, ps = pack([indices], 'b * q')
# because of quantize dropout, one can pass in indices that are coarse
# and the network should be able to reconstruct
if quantize_dim < self.num_quantizers:
assert self.quantize_dropout > 0., 'quantize dropout must be greater than 0 if you wish to reconstruct from a signal with less fine quantizations'
indices = F.pad(indices, (0, self.num_quantizers - quantize_dim), value = -1)
# get ready for gathering
codebooks = repeat(self.codebooks, 'q c d -> q b c d', b = batch)
gather_indices = repeat(indices, 'b n q -> q b n d', d = codebooks.shape[-1])
# take care of quantizer dropout
mask = gather_indices == -1.
gather_indices = gather_indices.masked_fill(mask, 0) # have it fetch a dummy code to be masked out later
all_codes = codebooks.gather(2, gather_indices) # gather all codes
# mask out any codes that were dropout-ed
all_codes = all_codes.masked_fill(mask, 0.)
# if (accept_image_fmap = True) then return shape (quantize, batch, height, width, dimension)
all_codes, = unpack(all_codes, ps, 'q b * d')
return all_codes
def forward(
self,
x,
indices = None,
return_all_codes = False,
sample_codebook_temp = None
):
num_quant, quant_dropout_multiple_of, return_loss, device = self.num_quantizers, self.quantize_dropout_multiple_of, exists(indices), x.device
x = self.project_in(x)
assert not (self.accept_image_fmap and exists(indices))
quantized_out = 0.
residual = x
all_losses = []
all_indices = []
if return_loss:
assert not torch.any(indices == -1), 'some of the residual vq indices were dropped out. please use indices derived when the module is in eval mode to derive cross entropy loss'
ce_losses = []
should_quantize_dropout = self.training and self.quantize_dropout and not return_loss
# sample a layer index at which to dropout further residual quantization
# also prepare null indices and loss
if should_quantize_dropout:
rand_quantize_dropout_index = randrange(self.quantize_dropout_cutoff_index, num_quant)
if quant_dropout_multiple_of != 1:
rand_quantize_dropout_index = round_up_multiple(rand_quantize_dropout_index + 1, quant_dropout_multiple_of) - 1
null_indices_shape = (x.shape[0], *x.shape[-2:]) if self.accept_image_fmap else tuple(x.shape[:2])
null_indices = torch.full(null_indices_shape, -1., device = device, dtype = torch.long)
null_loss = torch.full((1,), 0., device = device, dtype = x.dtype)
# go through the layers
for quantizer_index, layer in enumerate(self.layers):
if should_quantize_dropout and quantizer_index > rand_quantize_dropout_index:
all_indices.append(null_indices)
all_losses.append(null_loss)
continue
layer_indices = None
if return_loss:
layer_indices = indices[..., quantizer_index]
quantized, *rest = layer(residual, indices = layer_indices, sample_codebook_temp = sample_codebook_temp)
residual = residual - quantized.detach()
quantized_out = quantized_out + quantized
if return_loss:
ce_loss = rest[0]
ce_losses.append(ce_loss)
continue
embed_indices, loss = rest
all_indices.append(embed_indices)
all_losses.append(loss)
# project out, if needed
quantized_out = self.project_out(quantized_out)
# whether to early return the cross entropy loss
if return_loss:
return quantized_out, sum(ce_losses)
# stack all losses and indices
all_losses, all_indices = map(partial(torch.stack, dim = -1), (all_losses, all_indices))
ret = (quantized_out, all_indices, all_losses)
if return_all_codes:
# whether to return all codes from all codebooks across layers
all_codes = self.get_codes_from_indices(all_indices)
# will return all codes in shape (quantizer, batch, sequence length, codebook dimension)
ret = (*ret, all_codes)
return ret
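# --- illustrative sketch (not part of the original file) ---
# a minimal usage example of ResidualVQ as defined above: each of the `num_quantizers`
# layers quantizes the residual left by the previous one, so the returned indices and
# losses gain a trailing quantizer dimension.
def _residual_vq_demo():
    rvq = ResidualVQ(dim = 256, num_quantizers = 8, codebook_size = 1024)
    x = torch.randn(1, 1024, 256)
    quantized, indices, losses = rvq(x)          # (1, 1024, 256), (1, 1024, 8), (1, 8)
    codes = rvq.get_codes_from_indices(indices)  # (8, 1, 1024, 256)
    return quantized, codes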
# grouped residual vq
class GroupedResidualVQ(nn.Module):
def __init__(
self,
*,
dim,
groups = 1,
accept_image_fmap = False,
**kwargs
):
super().__init__()
self.dim = dim
self.groups = groups
assert (dim % groups) == 0
dim_per_group = dim // groups
self.accept_image_fmap = accept_image_fmap
self.rvqs = nn.ModuleList([])
for _ in range(groups):
self.rvqs.append(ResidualVQ(
dim = dim_per_group,
accept_image_fmap = accept_image_fmap,
**kwargs
))
@property
def codebooks(self):
return torch.stack(tuple(rvq.codebooks for rvq in self.rvqs))
def get_codes_from_indices(self, indices):
codes = tuple(rvq.get_codes_from_indices(chunk_indices) for rvq, chunk_indices in zip(self.rvqs, indices))
return torch.stack(codes)
def forward(
self,
x,
indices = None,
return_all_codes = False,
sample_codebook_temp = None
):
shape = x.shape
split_dim = 1 if self.accept_image_fmap else -1
assert shape[split_dim] == self.dim
# split the feature dimension into groups
x = x.chunk(self.groups, dim = split_dim)
indices = default(indices, tuple())
return_ce_loss = len(indices) > 0
assert len(indices) == 0 or len(indices) == self.groups
forward_kwargs = dict(
return_all_codes = return_all_codes,
sample_codebook_temp = sample_codebook_temp
)
# invoke residual vq on each group
out = tuple(rvq(chunk, indices = chunk_indices, **forward_kwargs) for rvq, chunk, chunk_indices in zip_longest(self.rvqs, x, indices))
out = tuple(zip(*out))
# if returning cross entropy loss to rvq codebooks
if return_ce_loss:
quantized, ce_losses = out
return torch.cat(quantized, dim = split_dim), sum(ce_losses)
# otherwise, get all the zipped outputs and combine them
quantized, all_indices, commit_losses, *maybe_all_codes = out
quantized = torch.cat(quantized, dim = split_dim)
all_indices = torch.stack(all_indices)
commit_losses = torch.stack(commit_losses)
ret = (quantized, all_indices, commit_losses, *maybe_all_codes)
return ret
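# --- illustrative sketch (not part of the original file) ---
# a minimal usage example of GroupedResidualVQ: the feature dimension is chunked into
# `groups`, each chunk gets its own ResidualVQ, and the outputs are concatenated back.
if __name__ == '__main__':
    grouped_rvq = GroupedResidualVQ(dim = 256, groups = 2, num_quantizers = 4, codebook_size = 1024)
    x = torch.randn(1, 1024, 256)
    quantized, indices, commit_losses = grouped_rvq(x)   # (1, 1024, 256), (2, 1, 1024, 4), (2, 1, 4)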
|
vector-quantize-pytorch-master
|
vector_quantize_pytorch/residual_vq.py
|
# FashionMnist VQ experiment with various settings.
# From https://github.com/minyoungg/vqtorch/blob/main/examples/autoencoder.py
from tqdm.auto import trange
import torch
import torch.nn as nn
from torchvision import datasets, transforms
from torch.utils.data import DataLoader
from vector_quantize_pytorch import VectorQuantize
lr = 3e-4
train_iter = 1000
num_codes = 256
seed = 1234
device = "cuda" if torch.cuda.is_available() else "cpu"
class SimpleVQAutoEncoder(nn.Module):
def __init__(self, **vq_kwargs):
super().__init__()
self.layers = nn.ModuleList(
[
nn.Conv2d(1, 16, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=2, stride=2),
nn.GELU(),
nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1),
nn.MaxPool2d(kernel_size=2, stride=2),
VectorQuantize(dim=32, **vq_kwargs),
nn.Upsample(scale_factor=2, mode="nearest"),
nn.Conv2d(32, 16, kernel_size=3, stride=1, padding=1),
nn.GELU(),
nn.Upsample(scale_factor=2, mode="nearest"),
nn.Conv2d(16, 1, kernel_size=3, stride=1, padding=1),
]
)
return
    def forward(self, x):
        for layer in self.layers:
            if isinstance(layer, VectorQuantize):
                # flatten spatial dims and move channels last so each pixel is one token of dim 32
                b, c, h, w = x.shape
                x_flat = x.permute(0, 2, 3, 1).reshape(b, h * w, c)
                x_flat, indices, commit_loss = layer(x_flat)
                x = x_flat.reshape(b, h, w, c).permute(0, 3, 1, 2)
            else:
                x = layer(x)
        return x.clamp(-1, 1), indices, commit_loss
def train(model, train_loader, train_iterations=1000, alpha=10):
def iterate_dataset(data_loader):
data_iter = iter(data_loader)
while True:
try:
x, y = next(data_iter)
except StopIteration:
data_iter = iter(data_loader)
x, y = next(data_iter)
yield x.to(device), y.to(device)
for _ in (pbar := trange(train_iterations)):
opt.zero_grad()
x, _ = next(iterate_dataset(train_loader))
out, indices, cmt_loss = model(x)
rec_loss = (out - x).abs().mean()
(rec_loss + alpha * cmt_loss).backward()
opt.step()
pbar.set_description(
f"rec loss: {rec_loss.item():.3f} | "
+ f"cmt loss: {cmt_loss.item():.3f} | "
+ f"active %: {indices.unique().numel() / num_codes * 100:.3f}"
)
return
transform = transforms.Compose(
[transforms.ToTensor(), transforms.Normalize((0.5,), (0.5,))]
)
train_dataset = DataLoader(
datasets.FashionMNIST(
root="~/data/fashion_mnist", train=True, download=True, transform=transform
),
batch_size=256,
shuffle=True,
)
print("baseline")
torch.random.manual_seed(seed)
model = SimpleVQAutoEncoder(codebook_size=num_codes).to(device)
opt = torch.optim.AdamW(model.parameters(), lr=lr)
train(model, train_dataset, train_iterations=train_iter)
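# --- illustrative sketch (not part of the original script) ---
# after training, the autoencoder can be run in eval mode to reconstruct a batch and
# report the reconstruction error; `model` and `train_dataset` are the objects above.
model.eval()
with torch.no_grad():
    x_eval, _ = next(iter(train_dataset))
    x_eval = x_eval.to(device)
    recon, _, _ = model(x_eval)
    print(f"eval rec loss: {(recon - x_eval).abs().mean().item():.3f}")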
|
vector-quantize-pytorch-master
|
examples/autoencoder.py
|
from setuptools import setup, find_packages
setup(
name = 'coco-lm-pytorch',
packages = find_packages(),
version = '0.0.2',
license='MIT',
description = 'COCO - Pytorch',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/coco-lm-pytorch',
keywords = [
'transformers',
'artificial intelligence',
'deep learning',
'pretraining'
],
install_requires=[
'torch>=1.6.0',
'einops',
'x-transformers'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.7',
],
)
|
coco-lm-pytorch-main
|
setup.py
|
from coco_lm_pytorch.coco_lm_pytorch import COCO
|
coco-lm-pytorch-main
|
coco_lm_pytorch/__init__.py
|
import math
from functools import reduce
import torch
from torch import nn, einsum
import torch.nn.functional as F
# helpers
def log(t, eps=1e-9):
return torch.log(t + eps)
def norm(t):
return F.normalize(t, p = 2, dim = -1)
def gumbel_noise(t):
noise = torch.zeros_like(t).uniform_(0, 1)
return -log(-log(noise))
def gumbel_sample(t, temperature = 1.):
return ((t / temperature) + gumbel_noise(t)).argmax(dim=-1)
def prob_mask_like(t, prob):
return torch.zeros_like(t).float().uniform_(0, 1) < prob
def mask_with_tokens(t, token_ids):
init_no_mask = torch.full_like(t, False, dtype=torch.bool)
mask = reduce(lambda acc, el: acc | (t == el), token_ids, init_no_mask)
return mask
def get_mask_subset_with_prob(mask, prob):
batch, seq_len, device = *mask.shape, mask.device
max_masked = math.ceil(prob * seq_len)
num_tokens = mask.sum(dim=-1, keepdim=True)
mask_excess = (mask.cumsum(dim=-1) > (num_tokens * prob).ceil())
mask_excess = mask_excess[:, :max_masked]
rand = torch.rand((batch, seq_len), device=device).masked_fill(~mask, -1e9)
_, sampled_indices = rand.topk(max_masked, dim=-1)
sampled_indices = (sampled_indices + 1).masked_fill_(mask_excess, 0)
new_mask = torch.zeros((batch, seq_len + 1), device=device)
new_mask.scatter_(-1, sampled_indices, 1)
return new_mask[:, 1:].bool()
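# --- illustrative sketch (not part of the original file) ---
# a small demonstration of the masking helpers above, assuming token id 0 is padding and
# id 1 is [cls]: special positions are excluded via mask_with_tokens, then roughly `prob`
# of the remaining positions are selected by get_mask_subset_with_prob.
def _masking_demo():
    tokens = torch.tensor([[1, 5, 6, 7, 0, 0], [1, 8, 9, 10, 11, 0]])
    no_mask = mask_with_tokens(tokens, [0, 1])           # True at pad / cls positions
    mlm_mask = get_mask_subset_with_prob(~no_mask, 0.5)  # subset of the maskable positions
    return mlm_mask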
# hidden layer extractor class, for magically adding adapter to language model to be pretrained
class HiddenLayerExtractor(nn.Module):
def __init__(self, net, layer = -2):
super().__init__()
self.net = net
self.layer = layer
self.hidden = None
self.hook_registered = False
def _find_layer(self):
if type(self.layer) == str:
modules = dict([*self.net.named_modules()])
return modules.get(self.layer, None)
elif type(self.layer) == int:
children = [*self.net.children()]
return children[self.layer]
return None
def _hook(self, _, __, output):
self.hidden = output
def _register_hook(self):
layer = self._find_layer()
assert layer is not None, f'hidden layer ({self.layer}) not found'
handle = layer.register_forward_hook(self._hook)
self.hook_registered = True
def forward(self, x):
if self.layer == -1:
return self.net(x)
if not self.hook_registered:
self._register_hook()
_ = self.net(x)
hidden = self.hidden
self.hidden = None
assert hidden is not None, f'hidden layer {self.layer} never emitted an output'
return hidden
# main electra class
class COCO(nn.Module):
def __init__(
self,
generator,
discriminator,
*,
discr_dim,
num_tokens = None,
discr_layer = -1,
mask_prob = 0.15,
replace_prob = 0.85,
random_token_prob = 0.,
pad_token_id = 0,
cls_token_id = 1,
mask_token_id = 2,
mask_ignore_token_ids = [],
disc_weight = 50.,
gen_weight = 1.,
cl_weight = 1.,
temperature = 1.,
crop_percentage = 0.5
):
super().__init__()
self.generator = generator
self.discriminator = discriminator
self.discriminator = HiddenLayerExtractor(discriminator, layer = discr_layer)
self.to_correction_logits = nn.Linear(discr_dim, 1)
# mlm related probabilities
self.mask_prob = mask_prob
self.replace_prob = replace_prob
self.num_tokens = num_tokens
self.random_token_prob = random_token_prob
# token ids
self.cls_token_id = cls_token_id
self.pad_token_id = pad_token_id
self.mask_token_id = mask_token_id
self.mask_ignore_token_ids = set([*mask_ignore_token_ids, pad_token_id, cls_token_id])
# sampling temperature
self.temperature = temperature
# loss weights
self.disc_weight = disc_weight
self.gen_weight = gen_weight
self.cl_weight = cl_weight
self.cl_temperature = nn.Parameter(torch.tensor(1.))
self.crop_percentage = crop_percentage
def forward(self, input, **kwargs):
b, t, device = *input.shape, input.device
        assert b > 1, 'batch size needs to be greater than 1 for contrastive learning'
        cls_tokens = torch.empty(b, 1, dtype = torch.long, device = device).fill_(self.cls_token_id)
input = torch.cat((cls_tokens, input), dim = 1)
input = input[:, :-1]
replace_prob = prob_mask_like(input, self.replace_prob)
# do not mask [pad] tokens, or any other tokens in the tokens designated to be excluded ([cls], [sep])
# also do not include these special tokens in the tokens chosen at random
no_mask = mask_with_tokens(input, self.mask_ignore_token_ids)
mask = get_mask_subset_with_prob(~no_mask, self.mask_prob)
# get random cropped input for contrastive learning
random_crop = get_mask_subset_with_prob(~no_mask, self.crop_percentage)
crop_length = int(t * self.crop_percentage)
cropped_input = input.masked_select(random_crop).reshape(b, crop_length)
cropped_input = torch.cat((cls_tokens, cropped_input), dim = 1)
cropped_input = F.pad(cropped_input, (0, t - crop_length - 1), value = self.pad_token_id)
# get mask indices
mask_indices = torch.nonzero(mask, as_tuple=True)
# mask input with mask tokens with probability of `replace_prob` (keep tokens the same with probability 1 - replace_prob)
masked_input = input.clone().detach()
# if random token probability > 0 for mlm
if self.random_token_prob > 0:
            assert self.num_tokens is not None, 'Number of tokens (num_tokens) must be passed to COCO for randomizing tokens during masked language modeling'
random_token_prob = prob_mask_like(input, self.random_token_prob)
random_tokens = torch.randint(0, self.num_tokens, input.shape, device=input.device)
random_no_mask = mask_with_tokens(random_tokens, self.mask_ignore_token_ids)
random_token_prob &= ~random_no_mask
random_indices = torch.nonzero(random_token_prob, as_tuple=True)
masked_input[random_indices] = random_tokens[random_indices]
# [mask] input
masked_input = masked_input.masked_fill(mask * replace_prob, self.mask_token_id)
# set inverse of mask to padding tokens for labels
gen_labels = input.masked_fill(~mask, self.pad_token_id)
# get generator output and get mlm loss
logits = self.generator(masked_input, **kwargs)
mlm_loss = F.cross_entropy(
logits.transpose(1, 2),
gen_labels,
ignore_index = self.pad_token_id
)
# use mask from before to select logits that need sampling
sample_logits = logits[mask_indices]
# sample
sampled = gumbel_sample(sample_logits, temperature = self.temperature)
# scatter the sampled values back to the input
disc_input = input.clone()
disc_input[mask_indices] = sampled.detach()
# generate discriminator labels, with replaced as True and original as False
disc_labels = (input != disc_input).float().detach()
# get discriminator predictions of replaced / original
non_padded_indices = torch.nonzero(input != self.pad_token_id, as_tuple=True)
# get discriminator output and binary cross entropy loss
disc_embeddings_correction = self.discriminator(disc_input, **kwargs)
correction_logits = self.to_correction_logits(disc_embeddings_correction)
disc_logits = correction_logits.reshape_as(disc_labels)
disc_loss = F.binary_cross_entropy_with_logits(
disc_logits[non_padded_indices],
disc_labels[non_padded_indices]
)
# contrastive loss
disc_embeddings_cropped = self.discriminator(cropped_input, **kwargs)
cls_tokens_corrected, cls_tokens_cropped = disc_embeddings_correction[:, 0], disc_embeddings_cropped[:, 0]
cls_tokens_corrected, cls_tokens_cropped = map(norm, (cls_tokens_corrected, cls_tokens_cropped))
cl_temperature = self.cl_temperature.exp()
sim = einsum('i d, j d -> i j', cls_tokens_corrected, cls_tokens_cropped) * cl_temperature
labels = torch.arange(b, device = device)
cl_loss = (F.cross_entropy(sim, labels) + F.cross_entropy(sim.t(), labels)) * 0.5
# weight all losses
weighted_loss = self.cl_weight * cl_loss + self.gen_weight * mlm_loss + self.disc_weight * disc_loss
return weighted_loss
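# --- illustrative sketch (not part of the original file) ---
# a minimal usage example with hypothetical stand-in networks (a real setup would use
# transformer encoders): the generator must return token logits of shape (b, n, num_tokens)
# and the discriminator, with discr_layer = -1, must return embeddings of shape (b, n, discr_dim).
class _ToyLM(nn.Module):
    def __init__(self, num_tokens, dim, out_dim):
        super().__init__()
        self.embed = nn.Embedding(num_tokens, dim)
        self.proj = nn.Linear(dim, out_dim)
    def forward(self, x):
        return self.proj(self.embed(x))

if __name__ == '__main__':
    num_tokens = 1000
    trainer = COCO(
        _ToyLM(num_tokens, 64, num_tokens),   # generator -> mlm logits
        _ToyLM(num_tokens, 64, 256),          # discriminator -> hidden embeddings
        discr_dim = 256,
        discr_layer = -1,
        num_tokens = num_tokens,
        random_token_prob = 0.1
    )
    data = torch.randint(3, num_tokens, (4, 128))  # ids 0 - 2 reserved for pad / cls / mask
    loss = trainer(data)
    loss.backward()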
|
coco-lm-pytorch-main
|
coco_lm_pytorch/coco_lm_pytorch.py
|
from setuptools import setup, find_packages
setup(
name = 'siren-pytorch',
packages = find_packages(),
version = '0.1.7',
license='MIT',
description = 'Implicit Neural Representations with Periodic Activation Functions',
long_description_content_type = 'text/markdown',
author = 'Phil Wang',
author_email = 'lucidrains@gmail.com',
url = 'https://github.com/lucidrains/siren-pytorch',
keywords = ['artificial intelligence', 'deep learning'],
install_requires=[
'einops',
'torch'
],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.6',
],
)
|
siren-pytorch-master
|
setup.py
|
from siren_pytorch.siren_pytorch import Sine, Siren, SirenNet, SirenWrapper
|
siren-pytorch-master
|
siren_pytorch/__init__.py
|
import math
import torch
from torch import nn
import torch.nn.functional as F
from einops import rearrange
# helpers
def exists(val):
return val is not None
def cast_tuple(val, repeat = 1):
return val if isinstance(val, tuple) else ((val,) * repeat)
# sin activation
class Sine(nn.Module):
def __init__(self, w0 = 1.):
super().__init__()
self.w0 = w0
def forward(self, x):
return torch.sin(self.w0 * x)
# siren layer
class Siren(nn.Module):
def __init__(
self,
dim_in,
dim_out,
w0 = 1.,
c = 6.,
is_first = False,
use_bias = True,
activation = None,
dropout = 0.
):
super().__init__()
self.dim_in = dim_in
self.is_first = is_first
weight = torch.zeros(dim_out, dim_in)
bias = torch.zeros(dim_out) if use_bias else None
self.init_(weight, bias, c = c, w0 = w0)
self.weight = nn.Parameter(weight)
self.bias = nn.Parameter(bias) if use_bias else None
self.activation = Sine(w0) if activation is None else activation
self.dropout = nn.Dropout(dropout)
def init_(self, weight, bias, c, w0):
dim = self.dim_in
w_std = (1 / dim) if self.is_first else (math.sqrt(c / dim) / w0)
weight.uniform_(-w_std, w_std)
if exists(bias):
bias.uniform_(-w_std, w_std)
def forward(self, x):
out = F.linear(x, self.weight, self.bias)
out = self.activation(out)
out = self.dropout(out)
return out
# siren network
class SirenNet(nn.Module):
def __init__(
self,
dim_in,
dim_hidden,
dim_out,
num_layers,
w0 = 1.,
w0_initial = 30.,
use_bias = True,
final_activation = None,
dropout = 0.
):
super().__init__()
self.num_layers = num_layers
self.dim_hidden = dim_hidden
self.layers = nn.ModuleList([])
for ind in range(num_layers):
is_first = ind == 0
layer_w0 = w0_initial if is_first else w0
layer_dim_in = dim_in if is_first else dim_hidden
layer = Siren(
dim_in = layer_dim_in,
dim_out = dim_hidden,
w0 = layer_w0,
use_bias = use_bias,
is_first = is_first,
dropout = dropout
)
self.layers.append(layer)
final_activation = nn.Identity() if not exists(final_activation) else final_activation
self.last_layer = Siren(dim_in = dim_hidden, dim_out = dim_out, w0 = w0, use_bias = use_bias, activation = final_activation)
def forward(self, x, mods = None):
mods = cast_tuple(mods, self.num_layers)
for layer, mod in zip(self.layers, mods):
x = layer(x)
if exists(mod):
x *= rearrange(mod, 'd -> () d')
return self.last_layer(x)
# modulatory feed forward
class Modulator(nn.Module):
def __init__(self, dim_in, dim_hidden, num_layers):
super().__init__()
self.layers = nn.ModuleList([])
for ind in range(num_layers):
is_first = ind == 0
dim = dim_in if is_first else (dim_hidden + dim_in)
self.layers.append(nn.Sequential(
nn.Linear(dim, dim_hidden),
nn.ReLU()
))
def forward(self, z):
x = z
hiddens = []
for layer in self.layers:
x = layer(x)
hiddens.append(x)
x = torch.cat((x, z))
return tuple(hiddens)
# wrapper
class SirenWrapper(nn.Module):
def __init__(self, net, image_width, image_height, latent_dim = None):
super().__init__()
assert isinstance(net, SirenNet), 'SirenWrapper must receive a Siren network'
self.net = net
self.image_width = image_width
self.image_height = image_height
self.modulator = None
if exists(latent_dim):
self.modulator = Modulator(
dim_in = latent_dim,
dim_hidden = net.dim_hidden,
num_layers = net.num_layers
)
tensors = [torch.linspace(-1, 1, steps = image_height), torch.linspace(-1, 1, steps = image_width)]
mgrid = torch.stack(torch.meshgrid(*tensors, indexing = 'ij'), dim=-1)
mgrid = rearrange(mgrid, 'h w c -> (h w) c')
self.register_buffer('grid', mgrid)
def forward(self, img = None, *, latent = None):
modulate = exists(self.modulator)
assert not (modulate ^ exists(latent)), 'latent vector must be only supplied if `latent_dim` was passed in on instantiation'
mods = self.modulator(latent) if modulate else None
coords = self.grid.clone().detach().requires_grad_()
out = self.net(coords, mods)
out = rearrange(out, '(h w) c -> () c h w', h = self.image_height, w = self.image_width)
if exists(img):
return F.mse_loss(img, out)
return out
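# --- illustrative sketch (not part of the original file) ---
# minimal usage examples: SirenNet maps coordinates to values, while SirenWrapper fits
# an implicit image by evaluating the network on a fixed coordinate grid; the image
# below is random data purely for illustration.
if __name__ == '__main__':
    net = SirenNet(
        dim_in = 2,                       # 2d coordinate in
        dim_hidden = 256,
        dim_out = 3,                      # rgb value out
        num_layers = 5,
        w0_initial = 30.,
        final_activation = nn.Sigmoid()
    )
    coords = torch.randn(128, 2)
    values = net(coords)                  # (128, 3)

    wrapper = SirenWrapper(net, image_width = 64, image_height = 64)
    img = torch.rand(1, 3, 64, 64)
    loss = wrapper(img)                   # mse between img and the implicit image
    loss.backward()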
|
siren-pytorch-master
|
siren_pytorch/siren_pytorch.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
global __version__
__version__ = None
with open('jax/version.py') as f:
exec(f.read(), globals())
setup(
name='jax',
version=__version__,
description='Differentiate, compile, and transform Numpy code.',
author='JAX team',
author_email='jax-dev@google.com',
packages=find_packages(exclude=["examples"]),
install_requires=[
'numpy>=1.12', 'six', 'absl-py', 'opt_einsum', 'fastcache'
],
url='https://github.com/google/jax',
license='Apache-2.0',
)
|
jax-master
|
setup.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import scipy.linalg
import scipy.special
import scipy.stats
from jax import api
from jax import lax
from jax import numpy as np
from jax import random
from jax import test_util as jtu
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class LaxRandomTest(jtu.JaxTestCase):
def _CheckCollisions(self, samples, nbits):
fail_prob = 0.01 # conservative bound on statistical fail prob by Chebyshev
nitems = len(samples)
nbins = 2 ** nbits
nexpected = nbins * (1 - ((nbins - 1) / nbins) ** nitems)
ncollisions = len(onp.unique(samples))
sq_percent_deviation = ((ncollisions - nexpected) / nexpected) ** 2
self.assertLess(sq_percent_deviation, 1 / onp.sqrt(nexpected * fail_prob))
def _CheckKolmogorovSmirnovCDF(self, samples, cdf):
fail_prob = 0.01 # conservative bound on statistical fail prob by Kolmo CDF
self.assertGreater(scipy.stats.kstest(samples, cdf).pvalue, fail_prob)
  def _CheckChiSquared(self, samples, pmf):
    alpha = 0.01  # significance level, threshold for p-value
    values, actual_freq = onp.unique(samples, return_counts=True)
    expected_freq = pmf(values) * len(samples)
    _, p_value = scipy.stats.chisquare(actual_freq, expected_freq)
    self.assertGreater(p_value, alpha)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.float32, onp.float64]))
def testNumpyAndXLAAgreeOnFloatEndianness(self, dtype):
if not FLAGS.jax_enable_x64 and np.issubdtype(dtype, onp.float64):
raise SkipTest("can't test float64 agreement")
bits_dtype = onp.uint32 if np.finfo(dtype).bits == 32 else onp.uint64
numpy_bits = onp.array(1., dtype).view(bits_dtype)
xla_bits = api.jit(
lambda: lax.bitcast_convert_type(onp.array(1., dtype), bits_dtype))()
self.assertEqual(numpy_bits, xla_bits)
def testThreefry2x32(self):
# We test the hash by comparing to known values provided in the test code of
# the original reference implementation of Threefry. For the values, see
# https://github.com/DEShawResearch/Random123-Boost/blob/65e3d874b67aa7b3e02d5ad8306462f52d2079c0/libs/random/test/test_threefry.cpp#L30-L32
def result_to_hex(result):
return tuple([hex(x.copy()).rstrip("L") for x in result])
expected = ("0x6b200159", "0x99ba4efe")
result = random.threefry_2x32(onp.uint32([0, 0]), onp.uint32([0, 0]))
self.assertEqual(expected, result_to_hex(result))
expected = ("0x1cb996fc", "0xbb002be7")
result = random.threefry_2x32(onp.uint32([-1, -1]), onp.uint32([-1, -1]))
self.assertEqual(expected, result_to_hex(result))
expected = ("0xc4923a9c", "0x483df7a0")
result = random.threefry_2x32(
onp.uint32([0x13198a2e, 0x03707344]),
onp.uint32([0x243f6a88, 0x85a308d3]))
self.assertEqual(expected, result_to_hex(result))
def testThreefry2x32Large(self):
n = 10000000
result = random.threefry_2x32(
(onp.uint32(0x13198a2e), onp.uint32(0x03707344)),
np.concatenate([
np.full((n,), 0x243f6a88, np.uint32),
np.full((n,), 0x85a308d3, np.uint32)
]))
onp.testing.assert_equal(result[:n], onp.full((n,), 0xc4923a9c, dtype=onp.uint32))
onp.testing.assert_equal(result[n:], onp.full((n,), 0x483df7a0, dtype=onp.uint32))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.float32, onp.float64]))
def testRngUniform(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.uniform(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckCollisions(samples, np.finfo(dtype).nmant)
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.uniform().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.int32, onp.int64]))
def testRngRandint(self, dtype):
lo = 5
hi = 10
key = random.PRNGKey(0)
rand = lambda key: random.randint(key, (10000,), lo, hi, dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self.assertTrue(onp.all(lo <= samples))
self.assertTrue(onp.all(samples < hi))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.float32, onp.float64]))
def testNormal(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.normal(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.norm().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.float32, onp.float64, onp.int32, onp.int64]))
def testShuffle(self, dtype):
key = random.PRNGKey(0)
x = onp.arange(100).astype(dtype)
rand = lambda key: random.shuffle(key, x)
crand = api.jit(rand)
perm1 = rand(key)
perm2 = crand(key)
self.assertTrue(onp.all(perm1 == perm2))
self.assertTrue(onp.all(perm1.dtype == perm2.dtype))
self.assertFalse(onp.all(perm1 == x)) # seems unlikely!
self.assertTrue(onp.all(onp.sort(perm1) == x))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_p={}_{}".format(p, dtype),
"p": p, "dtype": onp.dtype(dtype).name}
for p in [0.1, 0.5, 0.9]
for dtype in [onp.float32, onp.float64]))
def testBernoulli(self, p, dtype):
key = random.PRNGKey(0)
p = onp.array(p, dtype=dtype)
rand = lambda key, p: random.bernoulli(key, p, (10000,))
crand = api.jit(rand)
uncompiled_samples = rand(key, p)
compiled_samples = crand(key, p)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckChiSquared(samples, scipy.stats.bernoulli(p).pmf)
def testBernoulliShape(self):
key = random.PRNGKey(0)
x = random.bernoulli(key, onp.array([0.2, 0.3]), shape=(3, 2))
assert x.shape == (3, 2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}_b={}_{}".format(a, b, dtype),
"a": a, "b": b, "dtype": onp.dtype(dtype).name}
for a in [0.2, 5.]
for b in [0.2, 5.]
for dtype in [onp.float32, onp.float64]))
# TODO(phawkins): slow compilation times on cpu and tpu.
# TODO(mattjj): test fails after https://github.com/google/jax/pull/1123
@jtu.skip_on_devices("cpu", "gpu", "tpu")
def testBeta(self, a, b, dtype):
key = random.PRNGKey(0)
rand = lambda key, a, b: random.beta(key, a, b, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, a, b)
compiled_samples = crand(key, a, b)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.beta(a, b).cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.float32, onp.float64]))
def testCauchy(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.cauchy(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.cauchy().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_alpha={}_{}".format(alpha, dtype),
"alpha": alpha, "dtype": onp.dtype(dtype).name}
for alpha in [
onp.array([0.2, 1., 5.]),
]
for dtype in [onp.float32, onp.float64]))
def testDirichlet(self, alpha, dtype):
key = random.PRNGKey(0)
rand = lambda key, alpha: random.dirichlet(key, alpha, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, alpha)
compiled_samples = crand(key, alpha)
for samples in [uncompiled_samples, compiled_samples]:
self.assertAllClose(samples.sum(-1), onp.ones(10000, dtype=dtype), check_dtypes=True)
alpha_sum = sum(alpha)
for i, a in enumerate(alpha):
self._CheckKolmogorovSmirnovCDF(samples[..., i], scipy.stats.beta(a, alpha_sum - a).cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.float32, onp.float64]))
def testExponential(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.exponential(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.expon().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}_{}".format(a, dtype),
"a": a, "dtype": onp.dtype(dtype).name}
for a in [0.1, 1., 10.]
for dtype in [onp.float32, onp.float64]))
def testGamma(self, a, dtype):
key = random.PRNGKey(0)
rand = lambda key, a: random.gamma(key, a, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, a)
compiled_samples = crand(key, a)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.gamma(a).cdf)
def testGammaShape(self):
key = random.PRNGKey(0)
x = random.gamma(key, onp.array([0.2, 0.3]), shape=(3, 2))
assert x.shape == (3, 2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_a={}".format(alpha), "alpha": alpha}
for alpha in [1e-4, 1e-3, 1e-2, 1e-1, 1e0, 1e1, 1e2, 1e3, 1e4]))
def testGammaGrad(self, alpha):
rng = random.PRNGKey(0)
alphas = onp.full((100,), alpha)
z = random.gamma(rng, alphas)
actual_grad = api.grad(lambda x: random.gamma(rng, x).sum())(alphas)
eps = 0.01 * alpha / (1.0 + onp.sqrt(alpha))
cdf_dot = (scipy.stats.gamma.cdf(z, alpha + eps)
- scipy.stats.gamma.cdf(z, alpha - eps)) / (2 * eps)
pdf = scipy.stats.gamma.pdf(z, alpha)
expected_grad = -cdf_dot / pdf
self.assertAllClose(actual_grad, expected_grad, check_dtypes=True,
rtol=2e-2 if jtu.device_under_test() == "tpu" else 5e-4)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.float32, onp.float64]))
def testGumbel(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.gumbel(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.gumbel_r().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.float32, onp.float64]))
def testLaplace(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.laplace(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.laplace().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(dtype), "dtype": onp.dtype(dtype).name}
for dtype in [onp.float32, onp.float64]))
def testLogistic(self, dtype):
key = random.PRNGKey(0)
rand = lambda key: random.logistic(key, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key)
compiled_samples = crand(key)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.logistic().cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_b={}_{}".format(b, dtype),
"b": b, "dtype": onp.dtype(dtype).name}
for b in [0.1, 1., 10.]
for dtype in [onp.float32, onp.float64]))
def testPareto(self, b, dtype):
key = random.PRNGKey(0)
rand = lambda key, b: random.pareto(key, b, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, b)
compiled_samples = crand(key, b)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.pareto(b).cdf)
def testParetoShape(self):
key = random.PRNGKey(0)
x = random.pareto(key, onp.array([0.2, 0.3]), shape=(3, 2))
assert x.shape == (3, 2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_df={}_{}".format(df, dtype),
"df": df, "dtype": onp.dtype(dtype).name}
for df in [0.1, 1., 10.]
for dtype in [onp.float32, onp.float64]))
@jtu.skip_on_devices("cpu", "tpu") # TODO(phawkins): slow compilation times
def testT(self, df, dtype):
key = random.PRNGKey(0)
rand = lambda key, df: random.t(key, df, (10000,), dtype)
crand = api.jit(rand)
uncompiled_samples = rand(key, df)
compiled_samples = crand(key, df)
for samples in [uncompiled_samples, compiled_samples]:
self._CheckKolmogorovSmirnovCDF(samples, scipy.stats.t(df).cdf)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}D_{}".format(dim, onp.dtype(dtype).name),
"dim": dim, "dtype": dtype}
for dim in [1, 3, 5]
for dtype in [onp.float32, onp.float64]))
def testMultivariateNormal(self, dim, dtype):
r = onp.random.RandomState(dim)
mean = r.randn(dim)
cov_factor = r.randn(dim, dim)
cov = onp.dot(cov_factor, cov_factor.T) + dim * onp.eye(dim)
key = random.PRNGKey(0)
rand = partial(random.multivariate_normal, mean=mean, cov=cov,
shape=(10000,))
crand = api.jit(rand)
uncompiled_samples = onp.asarray(rand(key), onp.float64)
compiled_samples = onp.asarray(crand(key), onp.float64)
inv_scale = scipy.linalg.lapack.dtrtri(onp.linalg.cholesky(cov), lower=True)[0]
for samples in [uncompiled_samples, compiled_samples]:
centered = samples - mean
whitened = onp.einsum('nj,ij->ni', centered, inv_scale)
# This is a quick-and-dirty multivariate normality check that tests that a
# uniform mixture of the marginals along the covariance matrix's
# eigenvectors follow a standard normal distribution.
self._CheckKolmogorovSmirnovCDF(whitened.ravel(), scipy.stats.norm().cdf)
def testMultivariateNormalCovariance(self):
# test code based on https://github.com/google/jax/issues/1869
N = 100000
cov = np.array([[ 0.19, 0.00, -0.13, 0.00],
[ 0.00, 0.29, 0.00, -0.23],
[ -0.13, 0.00, 0.39, 0.00],
[ 0.00, -0.23, 0.00, 0.49]])
mean = np.zeros(4)
out_onp = onp.random.RandomState(0).multivariate_normal(mean, cov, N)
key = random.PRNGKey(0)
out_jnp = random.multivariate_normal(key, mean=mean, cov=cov, shape=(N,))
var_onp = out_onp.var(axis=0)
var_jnp = out_jnp.var(axis=0)
self.assertAllClose(var_onp, var_jnp, rtol=1e-2, atol=1e-2,
check_dtypes=False)
var_onp = onp.cov(out_onp, rowvar=False)
var_jnp = onp.cov(out_jnp, rowvar=False)
self.assertAllClose(var_onp, var_jnp, rtol=1e-2, atol=1e-2,
check_dtypes=False)
def testIssue222(self):
x = random.randint(random.PRNGKey(10003), (), 0, 0)
assert x == 0
def testFoldIn(self):
key = random.PRNGKey(0)
keys = [random.fold_in(key, i) for i in range(10)]
assert onp.unique(onp.ravel(keys)).shape == (20,)
def testStaticShapeErrors(self):
if config.read("jax_disable_jit"):
raise SkipTest("test only relevant when jit enabled")
@api.jit
def feature_map(n, d, sigma=1.0, seed=123):
key = random.PRNGKey(seed)
W = random.normal(key, (d, n)) / sigma
w = random.normal(key, (d, )) / sigma
b = 2 * np.pi * random.uniform(key, (d, ))
phi = lambda x, t: np.sqrt(2.0 / d) * np.cos(np.matmul(W, x) + w*t + b)
return phi
self.assertRaisesRegex(ValueError, '.*requires a concrete.*',
lambda: feature_map(5, 3))
def testIssue756(self):
key = random.PRNGKey(0)
w = random.normal(key, ())
if FLAGS.jax_enable_x64:
self.assertEqual(onp.result_type(w), onp.float64)
else:
self.assertEqual(onp.result_type(w), onp.float32)
def testNoOpByOpUnderHash(self):
def fail(*args, **kwargs): assert False
apply_primitive, xla.apply_primitive = xla.apply_primitive, fail
try:
out = random.threefry_2x32(onp.zeros(2, onp.uint32), onp.arange(10, dtype=onp.uint32))
finally:
xla.apply_primitive = apply_primitive
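# --- illustrative sketch (not part of the original test file) ---
# the functional PRNG pattern exercised by the tests above: keys are explicit values and
# independent streams come from splitting a key, rather than from mutating global state.
def _prng_pattern_demo():
  key = random.PRNGKey(0)
  key, subkey = random.split(key)
  samples = random.normal(subkey, (3,))  # later draws should split `key` again
  return samples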
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/random_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import unittest
from unittest import SkipTest
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as np
from jax import test_util as jtu
from jax import lax
from jax.api import _papply, _parallelize, soft_pmap, jit, make_jaxpr
from jax.linear_util import wrap_init
from jax.util import prod
from jax.config import config
config.parse_flags_with_absl()
class PapplyTest(jtu.JaxTestCase):
def testIdentity(self):
pfun, axis_name = _papply(lambda x: x)
ans = pfun(onp.arange(3))
expected = onp.arange(3)
self.assertAllClose(ans, expected, check_dtypes=False)
def testMap(self):
pfun, axis_name = _papply(np.sin)
ans = pfun(onp.arange(3.))
expected = onp.sin(onp.arange(3.))
self.assertAllClose(ans, expected, check_dtypes=False)
def testSum(self):
pfun, axis_name = _papply(lambda x: np.sum(x, axis=0))
jaxpr = make_jaxpr(pfun)(onp.ones(3))
expected_jaxpr = make_jaxpr(
lambda x: lax.psum(x, axis_name))(onp.zeros((5, 3)))
assert repr(jaxpr) == repr(expected_jaxpr)
arg = onp.arange(15.).reshape((5, 3))
ans = soft_pmap(pfun, axis_name)(arg)[0]
expected = onp.sum(arg, axis=0)
self.assertAllClose(ans, expected, check_dtypes=False)
def testMax(self):
pfun, axis_name = _papply(lambda x: np.max(x, axis=0))
jaxpr = make_jaxpr(pfun)(onp.ones(3))
expected_jaxpr = make_jaxpr(
lambda x: lax.pmax(x, axis_name))(onp.zeros((5, 3)))
assert repr(jaxpr) == repr(expected_jaxpr)
arg = onp.arange(15.).reshape((5, 3))
ans = soft_pmap(pfun, axis_name)(arg)[0]
expected = onp.max(arg, axis=0)
self.assertAllClose(ans, expected, check_dtypes=False)
def testSelect(self):
p = onp.arange(15).reshape((5, 3)) % 4 == 1
f = onp.zeros((5, 3))
def fun(t):
return lax.select(p, t, f)
t = onp.ones((5, 3))
ans = soft_pmap(*_papply(fun))(t)
expected = fun(t)
self.assertAllClose(ans, expected, check_dtypes=True)
def testLogSoftmax(self):
raise SkipTest("test doesn't pass yet") # TODO(frostig)
def fun(x):
return x - np.log(np.sum(np.exp(x)))
pfun, axis_name = _papply(fun)
jaxpr = make_jaxpr(pfun)(onp.zeros(5))
expected_jaxpr = make_jaxpr(
lambda x: x - np.log(lax.psum(np.exp(x), axis_name)))(onp.zeros(5))
assert repr(jaxpr) == repr(expected_jaxpr)
ans = soft_pmap(pfun, axis_name)(onp.arange(1., 5.))
expected = fun(onp.arange(1., 5.))
self.assertAllClose(ans, expected, check_dtypes=False)
def testAdd(self):
x = onp.array([[1, 2, 3], [4, 5, 6]])
expected = x + x
pfun, axis_name = _papply(np.add)
ans = soft_pmap(pfun, axis_name)(x, x)
self.assertAllClose(ans, expected, check_dtypes=True)
def testAddBroadcasting(self):
raise SkipTest("test doesn't pass yet") # TODO(frostig)
def fun(x):
return x + 3
x = onp.array([[1, 2], [3, 4]])
expected = x + 3
pfun, axis_name = _papply(fun)
ans = soft_pmap(pfun, axis_name)(x)
self.assertAllClose(ans, expected, check_dtypes=True)
def testMakeJaxprPapplyComposition(self):
raise SkipTest( # TODO(mattjj)
"fails because select's papply rule calls an SPMD primitive")
x = b = onp.ones(3)
pfun, axis_name = _papply(lambda a: np.where(x, a, b))
make_jaxpr(pfun)(onp.ones(3)) # doesn't crash
class ParallelizeTest(jtu.JaxTestCase):
def dedup(self, arr, expected_rank):
if arr.ndim == expected_rank + 1:
for i in range(arr.shape[0] - 1):
self.assertAllClose(arr[i], arr[i + 1], check_dtypes=True)
return arr[0]
else:
assert arr.ndim == expected_rank
return arr
def testNormalize(self):
def f(x):
return x / x.sum(0)
x = onp.arange(4.)
expected = f(x)
ans = _parallelize(f)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
jaxpr = make_jaxpr(_parallelize(f))(x)
self.assertIn('psum', repr(jaxpr))
def testAdd(self):
x = onp.arange(10)
y = 2 * onp.arange(10)
def f(x): return x + y
expected = f(x)
ans = _parallelize(f)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testAdd2(self):
x = onp.arange(10)
y = 2 * onp.arange(10)
def f(y): return x + y
expected = f(y)
ans = _parallelize(f)(y)
self.assertAllClose(ans, expected, check_dtypes=False)
def testAdd3(self):
x = onp.arange(10)
y = 2 * onp.arange(10)
def f(x, y):
return x + y
expected = f(x, y)
ans = _parallelize(f)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
@unittest.skip("Missing cases in gather papply rule")
def testOuter(self):
x = onp.arange(10)
y = 2 * onp.arange(10)
def f(x): return x[:, None] * y
expected = f(x)
ans = _parallelize(f)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testOuter2(self):
x = onp.arange(10)
y = 2 * onp.arange(10)
def f(y): return x[:, None] * y
expected = f(y)
ans = _parallelize(f)(y)
self.assertAllClose(ans, expected, check_dtypes=False)
@unittest.skip("Missing cases in gather papply rule")
def testOuter3(self):
x = onp.arange(10)
y = 2 * onp.arange(10)
def f(x, y): return x[:, None] * y
expected = f(x, y)
ans = _parallelize(f)(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "testTranspose_shape={}_perm={}"
.format(shape, perm),
"shape": shape, "perm": perm}
for shape in [
(2, 2),
(3, 3),
(2, 2, 2),
(2, 3, 4),
(2, 3, 2)
]
for perm in itertools.permutations(list(range(len(shape))))
))
def testTranspose(self, shape, perm):
def fun(x):
return lax.transpose(x, perm)
x = onp.arange(prod(shape)).reshape(shape)
expected = fun(x)
ans = _parallelize(fun)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testTransposeAndAddRank2(self):
def fun(x):
return x + x.T
x = onp.reshape(onp.arange(4., dtype=onp.float32), (2, 2))
expected = fun(x)
ans = _parallelize(fun)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testTransposeAndAddRank3(self):
def fun(x):
return x + x.T
x = onp.reshape(onp.arange(8., dtype=onp.float32), (2, 2, 2))
expected = fun(x)
ans = _parallelize(fun)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testDot(self):
raise SkipTest("known failure") # TODO(frostig)
x = onp.reshape(onp.arange(4., dtype=onp.float32), (2, 2))
def fun(x, y):
return lax.dot(x, y)
expected = fun(x, x)
pfun, axis_name = _papply(fun)
ans = soft_pmap(pfun, axis_name)(x, x)
ans = self.dedup(ans, expected.ndim)
self.assertAllClose(ans, expected, check_dtypes=False)
# Test lax.dot_general on two rank-3 arguments, generating a test method call
# for every matching of dimensions, and each matched pair of dimensions being
# {batch, contracting, neither}. In combination with that, split the first
# dimension of the LHS, that of the RHS, and that of both.
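# For example (one illustrative case): matching=(0, 1, 2) with
# coloring=(CONTRACT, BATCH, neither) gives cdims=[(0, 0)] and bdims=[(1, 1)],
# i.e. dimension_numbers = [[(0,), (0,)], [(1,), (1,)]]: contract LHS axis 0
# with RHS axis 0, batch LHS axis 1 with RHS axis 1, and leave axis 2 free.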
@parameterized.named_parameters(
{"testcase_name": "_dimMatch={}_matchTypes={}_split={}".format(
matching, coloring, split),
"matching": matching, "coloring": coloring, "split": split}
for matching in itertools.permutations(range(3))
for coloring in itertools.product(range(3), range(3), range(3))
for split in range(3))
def testDotGeneral(self, matching, coloring, split):
BATCH, CONTRACT, _ = range(3)
SPLIT_LHS, SPLIT_RHS, SPLIT_BOTH = range(3)
x = onp.reshape(onp.arange(8.), (2, 2, 2))
y = onp.reshape(onp.arange(8.), (2, 2, 2)) + 4.
cdims = [(i, matching[i]) for i in range(3) if coloring[i] == CONTRACT]
bdims = [(i, matching[i]) for i in range(3) if coloring[i] == BATCH]
dimension_numbers = [
list(zip(*cdims)) or [(), ()],
list(zip(*bdims)) or [(), ()]
]
def f(x, y):
return lax.dot_general(x, y, dimension_numbers)
if split == SPLIT_LHS:
fun = lambda x: f(x, y)
elif split == SPLIT_RHS:
fun = lambda y: f(x, y)
else:
fun = f
try:
if split != SPLIT_BOTH:
expected = fun(x)
pfun, axis_name = _papply(fun)
ans = soft_pmap(pfun, axis_name)(x)
else:
expected = fun(x, y)
pfun, axis_name = _papply(fun)
ans = soft_pmap(pfun, axis_name)(x, y)
except (NotImplementedError, TypeError) as e:
raise SkipTest(e)
ans = self.dedup(ans, expected.ndim)
self.assertAllClose(ans, expected, check_dtypes=False)
def testCall(self):
@jit
def fun(x):
return x
x = onp.reshape(onp.arange(8., dtype=onp.float32), (2, 2, 2))
expected = fun(x)
ans = _parallelize(fun)(x)
self.assertAllClose(ans, expected, check_dtypes=False)
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/parallel_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import unittest
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
from jax import numpy as np
from jax import test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
float_dtypes = [onp.float32, onp.float64]
# TODO(b/144573940): onp.complex128 isn't supported by XLA, and the JAX
# implementation casts to complex64.
complex_dtypes = [onp.complex64]
inexact_dtypes = float_dtypes + complex_dtypes
int_dtypes = [onp.int32, onp.int64]
bool_dtypes = [onp.bool_]
all_dtypes = float_dtypes + complex_dtypes + int_dtypes + bool_dtypes
def _get_fftn_test_axes(shape):
axes = [[]]
ndims = len(shape)
# XLA's FFT op only supports up to 3 innermost dimensions.
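# For inputs of rank > 3, axes=None (transform over every axis) would exceed
# that limit, so it is only added when ndims <= 3; explicit axis tuples of
# length 1 to min(ndims, 3) are still generated below.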
if ndims <= 3:
axes.append(None)
for naxes in range(1, min(ndims, 3) + 1):
axes.extend(itertools.combinations(range(ndims), naxes))
for index in range(1, ndims + 1):
axes.append((-index,))
return axes
class FftTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inverse={}_shape={}_axes={}".format(
inverse, jtu.format_shape_dtype_string(shape, dtype), axes),
"axes": axes, "shape": shape, "dtype": dtype, "rng_factory": rng_factory,
"inverse": inverse}
for inverse in [False, True]
for rng_factory in [jtu.rand_default]
for dtype in all_dtypes
for shape in [(10,), (10, 10), (2, 3, 4), (2, 3, 4, 5)]
for axes in _get_fftn_test_axes(shape)))
def testFftn(self, inverse, shape, dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: (rng(shape, dtype),)
np_op = np.fft.ifftn if inverse else np.fft.fftn
onp_op = onp.fft.ifftn if inverse else onp.fft.fftn
np_fn = lambda a: np_op(a, axes=axes)
onp_fn = lambda a: onp_op(a, axes=axes)
# Numpy promotes to complex128 aggressively.
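# Hence check_dtypes=False here: onp.fft typically returns complex128 where
# the JAX implementation may stay in complex64.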
self._CheckAgainstNumpy(onp_fn, np_fn, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(np_fn, args_maker, check_dtypes=True)
# Test gradient for differentiable types.
if dtype in inexact_dtypes:
tol = 0.15 # TODO(skye): can we be more precise?
jtu.check_grads(np_fn, args_maker(), order=1, atol=tol, rtol=tol)
jtu.check_grads(np_fn, args_maker(), order=2, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inverse={}".format(inverse),
"inverse": inverse}
for inverse in [False, True]))
def testFftnErrors(self, inverse):
rng = jtu.rand_default()
name = 'ifftn' if inverse else 'fftn'
func = np.fft.ifftn if inverse else np.fft.fftn
self.assertRaisesRegex(
ValueError,
"jax.np.fft.{} only supports 1D, 2D, and 3D FFTs. "
"Got axes None with input rank 4.".format(name),
lambda: func(rng([2, 3, 4, 5], dtype=onp.float64), axes=None))
self.assertRaisesRegex(
ValueError,
"jax.np.fft.{} does not support repeated axes. Got axes \\[1, 1\\].".format(name),
lambda: func(rng([2, 3], dtype=onp.float64), axes=[1, 1]))
self.assertRaises(
ValueError, lambda: func(rng([2, 3], dtype=onp.float64), axes=[2]))
self.assertRaises(
ValueError, lambda: func(rng([2, 3], dtype=onp.float64), axes=[-3]))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inverse={}_size={}_axis={}".format(
inverse, jtu.format_shape_dtype_string([size], dtype), axis),
"axis": axis, "size": size, "dtype": dtype, "rng_factory": rng_factory,
"inverse": inverse}
for inverse in [False, True]
for rng_factory in [jtu.rand_default]
for dtype in all_dtypes
for size in [10]
for axis in [-1, 0]))
def testFft(self, inverse, size, dtype, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: (rng([size], dtype),)
np_op = np.fft.ifft if inverse else np.fft.fft
onp_op = onp.fft.ifft if inverse else onp.fft.fft
np_fn = lambda a: np_op(a, axis=axis)
onp_fn = lambda a: onp_op(a, axis=axis)
# Numpy promotes to complex128 aggressively.
self._CheckAgainstNumpy(onp_fn, np_fn, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(np_fn, args_maker, check_dtypes=True)
# Test gradient for differentiable types.
if dtype in inexact_dtypes:
tol = 0.15 # TODO(skye): can we be more precise?
jtu.check_grads(np_fn, args_maker(), order=1, atol=tol, rtol=tol)
jtu.check_grads(np_fn, args_maker(), order=2, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inverse={}".format(inverse),
"inverse": inverse}
for inverse in [False, True]))
def testFftErrors(self, inverse):
rng = jtu.rand_default()
name = 'ifft' if inverse else 'fft'
func = np.fft.ifft if inverse else np.fft.fft
self.assertRaisesRegex(
ValueError,
"jax.np.fft.{} does not support multiple axes. "
"Please use jax.np.fft.{}n. "
"Got axis \\[1, 1\\].".format(name, name),
lambda: func(rng([2, 3], dtype=onp.float64), axis=[1, 1])
)
self.assertRaisesRegex(
ValueError,
"jax.np.fft.{} does not support multiple axes. "
"Please use jax.np.fft.{}n. "
"Got axis \\[1, 1\\].".format(name, name),
lambda: func(rng([2, 3], dtype=onp.float64), axis=(1, 1))
)
self.assertRaises(
ValueError, lambda: func(rng([2, 3], dtype=onp.float64), axis=[2]))
self.assertRaises(
ValueError, lambda: func(rng([2, 3], dtype=onp.float64), axis=[-3]))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inverse={}_shape={}_axis={}".format(
inverse, jtu.format_shape_dtype_string(shape, dtype), axes),
"axes": axes, "shape": shape, "dtype": dtype, "rng_factory": rng_factory,
"inverse": inverse}
for inverse in [False, True]
for rng_factory in [jtu.rand_default]
for dtype in all_dtypes
for shape in [(16, 8, 4, 8), (16, 8, 4, 8, 4)]
for axes in [(-2, -1), (0, 1), (1, 3), (-1, 2)]))
def testFft2(self, inverse, shape, dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: (rng(shape, dtype),)
np_op = np.fft.ifft2 if inverse else np.fft.fft2
onp_op = onp.fft.ifft2 if inverse else onp.fft.fft2
np_fn = lambda a: np_op(a, axes=axes)
onp_fn = lambda a: onp_op(a, axes=axes)
# Numpy promotes to complex128 aggressively.
self._CheckAgainstNumpy(onp_fn, np_fn, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(np_fn, args_maker, check_dtypes=True)
# Test gradient for differentiable types.
if dtype in inexact_dtypes:
tol = 0.15 # TODO(skye): can we be more precise?
jtu.check_grads(np_fn, args_maker(), order=1, atol=tol, rtol=tol)
jtu.check_grads(np_fn, args_maker(), order=2, atol=tol, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inverse={}".format(inverse),
"inverse": inverse}
for inverse in [False, True]))
def testFft2Errors(self, inverse):
rng = jtu.rand_default()
name = 'ifft2' if inverse else 'fft2'
func = np.fft.ifft2 if inverse else np.fft.fft2
self.assertRaisesRegex(
ValueError,
"jax.np.fft.{} only supports 2 axes. "
"Got axes = \\[0\\].".format(name, name),
lambda: func(rng([2, 3], dtype=onp.float64), axes=[0])
)
self.assertRaisesRegex(
ValueError,
"jax.np.fft.{} only supports 2 axes. "
"Got axes = \\[0, 1, 2\\].".format(name, name),
lambda: func(rng([2, 3, 3], dtype=onp.float64), axes=(0, 1, 2))
)
self.assertRaises(
ValueError, lambda: func(rng([2, 3], dtype=onp.float64), axes=[2, 3]))
self.assertRaises(
ValueError, lambda: func(rng([2, 3], dtype=onp.float64), axes=[-3, -4]))
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/fft_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for --debug_nans."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import jax
from jax import test_util as jtu
from jax.test_util import check_grads
from jax import numpy as np
from jax import random
from jax.config import config
config.parse_flags_with_absl()
class DebugNaNsTest(jtu.JaxTestCase):
def setUp(self):
self.cfg = config.read("jax_debug_nans")
config.update("jax_debug_nans", True)
def tearDown(self):
config.update("jax_debug_nans", self.cfg)
def testSingleResultPrimitiveNoNaN(self):
A = np.array([[1., 2.], [2., 3.]])
B = np.tanh(A)
def testMultipleResultPrimitiveNoNaN(self):
A = np.array([[1., 2.], [2., 3.]])
D, V = np.linalg.eig(A)
def testJitComputationNoNaN(self):
A = np.array([[1., 2.], [2., 3.]])
B = jax.jit(np.tanh)(A)
def testSingleResultPrimitiveNaN(self):
A = np.array(0.)
with self.assertRaises(FloatingPointError):
B = 0. / A
|
jax-master
|
tests/debug_nans_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from functools import partial
import itertools
import re
from unittest import SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import numpy.random as npr
from jax import api
from jax import core
from jax import lax
from jax import random
from jax import test_util as jtu
from jax.util import unzip2
from jax.lib import xla_bridge
import jax.numpy as np # scan tests use numpy
import jax.scipy as jsp
from jax.config import config
config.parse_flags_with_absl()
def scan_reference(f, init, xs):
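# Pure-Python reference semantics for lax.scan: thread the carry through f for
# each leading-axis slice of xs and stack the per-step outputs along axis 0.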
carry = init
ys = []
for x in xs:
(carry, y) = f(carry, x)
ys.append(lax.reshape(y, (1,) + onp.shape(y)))
ys = lax.concatenate(ys, 0)
return carry, ys
def high_precision_dot(a, b):
return lax.dot(a, b, precision=lax.Precision.HIGHEST)
class LaxControlFlowTest(jtu.JaxTestCase):
def testWhileWithTuple(self):
limit = 10
def loop_cond(state):
pos, _ = state
return lax.lt(pos, limit)
def loop_body(state):
pos, count = state
return (lax.add(pos, 1), lax.add(count, 1))
def loop(init):
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
self.assertEqual(loop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(2), limit - 2)
self.assertEqual(cloop(3), limit - 3)
def testWhileWithManyArgs(self):
nargs = 256
def loop_cond(state):
return lax.lt(state[0], 2)
def loop_body(state):
return tuple(lax.add(s, 1) for s in state)
_ = lax.while_loop(loop_cond, loop_body, (0,) * nargs)
def testNestedWhile(self):
def outer_loop(num): # pylint: disable=missing-docstring
def cond_fun(state):
num, i, _ = state
return lax.lt(i, num)
def body_fun(state):
num, i, count = state
return (num, lax.add(i, 1), inner_loop(i, count))
init_val = (num, 0, 0)
_, i, count = lax.while_loop(cond_fun, body_fun, init_val)
return (i, count)
def inner_loop(i, count): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, count = state
return (i, lax.add(j, 1), lax.add(count, 1))
init_val = (i, 0, count)
_, _, count = lax.while_loop(cond_fun, body_fun, init_val)
return count
cloop = api.jit(outer_loop)
self.assertEqual(outer_loop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(3), (3, 6))
self.assertEqual(cloop(2), (2, 3))
self.assertEqual(cloop(4), (4, 10))
def testWhileWithClosure(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
return (lax.add(pos, 1), lax.add(count, inc))
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileWithClosureJit(self):
def loop(init, local_limit, inc):
def loop_cond(state):
pos, _ = state
return lax.lt(pos, local_limit)
def loop_body(state):
effect[0] = True
pos, count = state
f = lambda pos, inc: (lax.add(pos, 1), lax.add(count, inc))
return api.jit(f)(pos, inc)
result = lax.while_loop(loop_cond, loop_body, (init, 0))
_, count = result
return count
cloop = api.jit(loop)
limit = 10
effect = [False]
self.assertEqual(loop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
assert effect[0]
effect[0] = False
self.assertEqual(cloop(2, limit, 1), limit - 2)
self.assertEqual(cloop(3, limit, 1), limit - 3)
assert not effect[0]
def testWhileTypeErrors(self):
"""Test typing error messages for while."""
with self.assertRaisesRegex(TypeError,
re.escape("cond_fun must return a boolean scalar, but got pytree PyTreeDef(tuple, [*,*]).")):
lax.while_loop(lambda c: (1., 1.), lambda c: c, 0.)
with self.assertRaisesRegex(TypeError,
re.escape("cond_fun must return a boolean scalar, but got output type(s) [ShapedArray(float32[])].")):
lax.while_loop(lambda c: np.float32(1.), lambda c: c, np.float32(0.))
with self.assertRaisesRegex(TypeError,
re.escape("body_fun output and input must have same type structure, got PyTreeDef(tuple, [*,*]) and *.")):
lax.while_loop(lambda c: True, lambda c: (1., 1.), 0.)
with self.assertRaisesWithLiteralMatch(
TypeError,
"body_fun output and input must have identical types, got\n"
"ShapedArray(bool[])\n"
"and\n"
"ShapedArray(float32[])."):
lax.while_loop(lambda c: True, lambda c: True, np.float32(0.))
def testNestedWhileWithDynamicUpdateSlice(self):
num = 5
def update_entry(arr, val, i, j):
val = lax.reshape(val, [1, 1])
return lax.dynamic_update_slice(arr, val, (i, j))
def outer_loop(arr): # pylint: disable=missing-docstring
def cond_fun(state):
i, num, _, _ = state
return lax.lt(i, num)
def body_fun(state):
i, num, arr, out = state
return (lax.add(i, 1), num, arr, inner_loop(i, arr, out))
out = onp.zeros(arr.shape, dtype=arr.dtype)
init_val = (0, num, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
def inner_loop(i, arr, out): # pylint: disable=missing-docstring
def cond_fun(state):
i, j, _, _ = state
return lax.le(j, i)
def body_fun(state):
i, j, arr, out = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
arr_i_j = lax.dynamic_index_in_dim(arr_i, j, 0, False)
out = update_entry(out, arr_i_j, i, j)
return (i, lax.add(j, 1), arr, out)
init_val = (i, 0, arr, out)
_, _, _, out = lax.while_loop(cond_fun, body_fun, init_val)
return out
cloop = api.jit(outer_loop)
arr = npr.RandomState(0).randn(5, 5)
self.assertAllClose(outer_loop(arr), onp.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), onp.tril(arr), check_dtypes=False)
self.assertAllClose(cloop(arr), onp.tril(arr), check_dtypes=False)
def testLoopWithConjunctionCondition(self):
def sum_first_n(arr, num): # pylint: disable=missing-docstring
def cond_fun(state):
arr, num, i, _ = state
return lax.bitwise_and(lax.lt(i, num), lax.lt(i, arr.shape[0]))
def body_fun(state):
arr, num, i, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, num, lax.add(i, 1), lax.add(total, arr_i))
init_val = (arr, num, 0, 0.)
_, _, _, total = lax.while_loop(cond_fun, body_fun, init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(np.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)
def testWhileLoopBatched(self):
def fun(x):
return lax.while_loop(lambda x: x < 3, lambda x: x + 2, x)
ans = api.vmap(fun)(onp.array([0, 1, 2, 3]))
expected = onp.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
fun = api.jit(fun)
ans = api.vmap(fun)(onp.array([0, 1, 2, 3]))
expected = onp.array([4, 3, 4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopCondConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < y, lambda x: x + 2, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, onp.array([2, 3]))
expected = onp.array([2, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopBodyConstsBatched(self):
def fun(x, y):
return lax.while_loop(lambda x: x < 3, lambda x: x + y, x)
ans = api.vmap(fun, in_axes=(None, 0))(0, np.array([2, 3]))
expected = onp.array([4, 3])
self.assertAllClose(ans, expected, check_dtypes=False)
def testWhileLoopTupleBatched(self):
def cond_fun(loop_carry):
x, y = loop_carry
return x + y < 5
def body_fun(loop_carry):
x, y = loop_carry
x = x + 1
return x, y
def fun(x, y):
return lax.while_loop(cond_fun, body_fun, (x, y))
ans = api.vmap(fun)(onp.array([0, 0]), onp.array([1, 2]))
expected = (onp.array([4, 3]), onp.array([1, 2]))
self.assertAllClose(ans, expected, check_dtypes=False)
def testForiLoopErrors(self):
"""Test typing error messages for while."""
with self.assertRaisesRegex(
TypeError, "arguments to fori_loop must have equal types"):
lax.fori_loop(onp.int16(0), np.int32(10), (lambda i, c: c), np.float32(7))
def testForiLoopBatched(self):
def body_fun(i, loop_carry):
x, y = loop_carry
x = x + 1
y = y + 2
return x, y
def fun(x):
return lax.fori_loop(0, 10, body_fun, (x, 0))
ans = api.vmap(fun)(onp.array([0, 1]))
expected = (onp.array([10, 11]), onp.array([20, 20]))
self.assertAllClose(ans, expected, check_dtypes=False)
def testForiLoopBatchedIssue1190(self):
f = lambda x: lax.fori_loop(0, 4, lambda _, x: x + 1, x)
jaxpr = api.make_jaxpr(api.vmap(f))(np.arange(3))
eqn = jaxpr.jaxpr.eqns[0]
self.assertIs(eqn.primitive, lax.while_p)
self.assertEqual(eqn.params['cond_jaxpr'].in_avals[0].shape, ())
def testForiLoopBasic(self):
def body_fun(i, tot):
return lax.add(tot, i)
def count(num):
return lax.fori_loop(0, num, body_fun, 0)
self.assertEqual(count(2), 1)
self.assertEqual(count(3), 3)
self.assertEqual(count(4), 6)
for args_maker in [lambda: [2], lambda: [3], lambda: [4]]:
self._CompileAndCheck(count, args_maker, True)
def testForiLoopClosure(self):
def count(num):
def body_fun(i, tot):
return lax.add(num, lax.add(tot, i))
return lax.fori_loop(0, num, body_fun, 0)
cfun = api.jit(count)
self.assertEqual(count(2), 1 + 2**2)
self.assertEqual(count(2), cfun(2))
self.assertEqual(count(3), 3 + 3**2)
self.assertEqual(count(3), cfun(3))
self.assertEqual(count(4), 6 + 4**2)
self.assertEqual(count(4), cfun(4))
def testForiLoopTupleState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i))
init_val = (arr, 0.)
_, total = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun,
init_val)
return total
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(np.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)
def testForiLoopDictState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total = state['arr'], state['total']
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return {'arr': arr, 'total': lax.add(total, arr_i)}
init_val = {'arr': arr, 'total': 0.}
out_val = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return out_val['total']
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(np.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)
def testForiLoopEmptyTupleInState(self):
def sum_first_n(arr, num):
def body_fun(i, state):
arr, total, _ = state
arr_i = lax.dynamic_index_in_dim(arr, i, 0, False)
return (arr, lax.add(total, arr_i), ())
init_val = (arr, 0., ())
_, tot, _ = lax.fori_loop(0, lax.min(arr.shape[0], num), body_fun, init_val)
return tot
cfun = api.jit(sum_first_n)
x = npr.RandomState(0).randn(10).astype(np.float_)
for num in [0, 5, 10, 15]:
self.assertAllClose(sum_first_n(x, num), onp.sum(x[:num]),
check_dtypes=False)
self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)
self.assertAllClose(cfun(x, num), onp.sum(x[:num]), check_dtypes=False)
def testCond(self):
def fun(x):
if x < 3:
return (x, x)
else:
y = lax.mul(2, x)
return y, lax.mul(2, y)
@api.jit
def cfun(x):
def false_fun(x):
y = lax.mul(2, x)
return y, lax.mul(2, y)
return lax.cond(lax.lt(x, 3), x, lambda x: (x, x), x, false_fun)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(0), (0, 0))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(1), (1, 1))
self.assertEqual(fun(2), cfun(2))
self.assertEqual(fun(2), (2, 2))
self.assertEqual(fun(3), cfun(3))
self.assertEqual(fun(3), (6, 12))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(fun(4), (8, 16))
def testIssue1379(self):
def fun(pred):
return lax.cond(pred, pred, lambda x: (True, x), pred, lambda x: (False, x))
@api.jit
def cfun(pred):
return fun(pred)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(fun(0), (False, 0))
self.assertEqual(fun(0.), cfun(0.))
self.assertEqual(fun(0.), (False, 0.))
self.assertEqual(fun(1), cfun(1))
self.assertEqual(fun(1), (True, 1))
self.assertEqual(fun(1.), cfun(1.))
self.assertEqual(fun(1.), (True, 1.))
# test that proper errors are raised for wrong types
for pred in ["abc", [], [1,2]]:
for f in [fun, cfun]:
self.assertRaises(TypeError, f, pred)
def testNestedCond(self):
def fun(x):
if x < 2:
return lax.mul(2, x)
else:
if x < 5:
return lax.mul(3, x)
else:
return lax.mul(4, x)
@api.jit
def cfun(x):
return lax.cond(
lax.lt(x, 2),
x, lambda x: lax.mul(2, x),
x, lambda x: lax.cond(lax.lt(x, 5),
x, lambda x: lax.mul(3, x),
4, lambda y: lax.mul(y, x)))
self.assertEqual(cfun(1), 2)
self.assertEqual(cfun(3), 9)
self.assertEqual(cfun(6), 24)
self.assertEqual(cfun(1), fun(1))
self.assertEqual(cfun(3), fun(3))
self.assertEqual(cfun(6), fun(6))
def testCondTypeErrors(self):
"""Test typing error messages for cond."""
with self.assertRaisesRegex(TypeError,
re.escape("Pred type must be either boolean or number, got <function")):
lax.cond(lambda x: True,
1., lambda top: 1., 2., lambda fop: 2.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred type must be either boolean or number, got foo.")):
lax.cond("foo",
1., lambda top: 1., 2., lambda fop: 2.)
with self.assertRaisesRegex(TypeError,
re.escape("Pred must be a scalar, got (1.0, 1.0) of shape (2,).")):
lax.cond((1., 1.),
1., lambda top: 1., 2., lambda fop: 2.)
with self.assertRaisesRegex(TypeError,
re.escape("true_fun and false_fun output must have same type structure, got * and PyTreeDef(tuple, [*,*]).")):
lax.cond(True,
1., lambda top: 1., 2., lambda fop: (2., 2.))
with self.assertRaisesWithLiteralMatch(
TypeError,
"true_fun and false_fun output must have identical types, got\n"
"ShapedArray(float32[1])\n"
"and\n"
"ShapedArray(float32[])."):
lax.cond(True,
1., lambda top: np.array([1.], np.float32),
2., lambda fop: np.float32(1.))
def testCondOneBranchConstant(self):
def fun(x):
if x < 3:
return 5.
else:
return x
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3), x, lambda x: 5, x, lambda x: x)
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), 5)
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), 4)
def testCondOneBranchConstantTuple(self):
def fun(x):
if x < 3:
return (1., 2., 3.)
else:
return (x, 2., 4.)
@api.jit
def cfun(x):
return lax.cond(lax.lt(x, 3),
x, lambda x: (1, 2., 3.),
x, lambda x: (x, 2., 4.))
self.assertEqual(fun(0), cfun(0))
self.assertEqual(cfun(0), (1, 2., 3.))
self.assertEqual(fun(4), cfun(4))
self.assertEqual(cfun(4), (4, 2., 4.))
def testCondBatched(self):
def fun(x, y, z):
pred = lax.lt(x, 3)
true_fun = lambda y: y
false_fun = lambda z: lax.neg(z)
return lax.cond(pred, y, true_fun, z, false_fun)
# these cases stay as cond
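# (x, and therefore the predicate, is mapped with in_axes=None, so every batch
# element takes the same branch and the batching rule can keep lax.cond.)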
x = np.array(2)
y = np.array([1, 2])
z = np.array([3, 4])
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = onp.array([1, 2])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
x = np.array(4)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, 0)))(x, y, z)
expected = onp.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
fun = api.jit(fun)
ans = api.vmap(fun, (None, 0, 0))(x, y, z)
expected = onp.array([-3, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
z = np.array(5)
ans = api.vmap(fun, (None, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (None, 0, None)))(x, y, z)
expected = onp.array([-5, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" not in str(jaxpr)
# these cases become select
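# (here the predicate varies across the batch, so the batching rule evaluates
# both branches and combines their outputs with lax.select, as the jaxpr
# checks below confirm.)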
x = np.array([2, 4])
ans = api.vmap(fun, (0, 0, None))(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun, (0, 0, None)))(x, y, z)
expected = onp.array([1, -5])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
z = np.array([3, 4])
ans = api.vmap(fun)(x, y, z)
jaxpr = api.make_jaxpr(api.vmap(fun))(x, y, z)
expected = onp.array([1, -4])
self.assertAllClose(ans, expected, check_dtypes=False)
assert "select" in str(jaxpr)
def testIssue1263(self):
def f(rng, x):
cond = random.bernoulli(rng)
return lax.cond(cond, x, lambda x: x, np.abs(x) - 1., lambda x: x)
def body_fn(i, state):
rng, x = state
key, subkey = random.split(rng)
return key, f(subkey, x)
def g(rng, x):
return lax.fori_loop(0, 10, body_fn, (rng, x))
api.vmap(g)(random.split(random.PRNGKey(0), 3), np.ones((3, 4)))
def testIssue514(self):
# just check this doesn't crash
lax.cond(True,
(0, 0), lambda x: (x[0], 0),
(1, 1), lambda x: x)
def testIssue649(self):
from jax import lax
def body(x):
a, b = x
return (7, b + 1)
def cond(x):
a, b = x
return b < 10
out = lax.while_loop(cond, body, (33, 4))
self.assertEqual(out, (7, 10))
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}".format(jit_scan, jit_f),
"jit_scan": jit_scan, "jit_f": jit_f}
for jit_scan in [False, True]
for jit_f in [False, True])
def testScanImpl(self, jit_scan, jit_f):
rng = onp.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = np.cos(np.sum(np.sin(a)) + np.sum(np.cos(c)) + np.sum(np.tan(d)))
c = np.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(lax.scan, (0,))
else:
scan = lax.scan
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = scan(f, c, as_)
expected = scan_reference(f, c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}".format(jit_scan, jit_f),
"jit_scan": jit_scan, "jit_f": jit_f}
for jit_scan in [False, True]
for jit_f in [False, True])
def testScanJVP(self, jit_scan, jit_f):
rng = onp.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = np.cos(np.sum(np.sin(a)) + np.sum(np.cos(c)) + np.sum(np.tan(d)))
c = np.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(lax.scan, (0,))
else:
scan = lax.scan
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.jvp(lambda c, as_: scan(f, c, as_), (c, as_), (c, as_))
expected = api.jvp(lambda c, as_: scan_reference(f, c, as_), (c, as_), (c, as_))
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={onp.float64: 1e-14})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["fwd"])
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}".format(jit_scan, jit_f),
"jit_scan": jit_scan, "jit_f": jit_f}
for jit_scan in [False, True]
for jit_f in [False, True])
def testScanLinearize(self, jit_scan, jit_f):
rng = onp.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = np.cos(np.sum(np.sin(a)) + np.sum(np.cos(c)) + np.sum(np.tan(d)))
c = np.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(lax.scan, (0,))
else:
scan = lax.scan
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.linearize(lambda c, as_: scan(f, c, as_), c, as_)[1](c, as_)
expected = api.linearize(lambda c, as_: scan_reference(f, c, as_), c, as_)[1](c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={onp.float64: 1e-14})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}".format(jit_scan, jit_f),
"jit_scan": jit_scan, "jit_f": jit_f}
for jit_scan in [False, True]
for jit_f in [False, True])
def testScanGrad(self, jit_scan, jit_f):
rng = onp.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = np.sum(np.sin(a)) + np.sum(np.sin(c)) + np.sum(np.sin(d))
c = np.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(lax.scan, static_argnums=(0,))
else:
scan = lax.scan
as_ = rng.randn(5, 3)
c = rng.randn(4)
ans = api.grad(lambda c, as_: list(scan(f, c, as_))[0].sum())(c, as_)
expected = api.grad(lambda c, as_: list(scan_reference(f, c, as_))[0].sum())(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False,
rtol={onp.float32: 2e-5, onp.float64: 1e-13})
jtu.check_grads(partial(scan, f), (c, as_), order=2, modes=["rev"],
atol=1e-3, rtol=2e-3)
def testScanRnn(self):
r = npr.RandomState(0)
n_in = 4
n_hid = 2
n_out = 1
length = 3
W_trans = r.randn(n_hid, n_hid + n_in).astype(np.float_)
W_out = r.randn(n_out, n_hid + n_in).astype(np.float_)
params = W_trans, W_out
inputs = r.randn(length, n_in).astype(np.float_)
targets = r.randn(length, n_out).astype(np.float_)
def step(params, state, input):
W_trans, W_out = params
stacked = np.concatenate([state, input])
output = np.tanh(np.dot(W_out, stacked))
next_state = np.tanh(np.dot(W_trans, stacked))
return next_state, output
def rnn(params, inputs):
init_state = np.zeros(n_hid)
_, outputs = lax.scan(partial(step, params), init_state, inputs)
return outputs
def loss(params, inputs, targets):
predictions = rnn(params, inputs)
return np.sum((predictions - targets)**2)
# evaluation doesn't crash
loss(params, inputs, targets)
# jvp evaluation doesn't crash
api.jvp(lambda params: loss(params, inputs, targets), (params,), (params,))
# jvp numerical check passes
jtu.check_grads(loss, (params, inputs, targets), order=2, modes=["fwd"],
rtol={onp.float32: 2e-2, onp.float64: 1e-6})
# linearize works
_, expected = api.jvp(loss, (params, inputs, targets),
(params, inputs, targets))
_, linfun = api.linearize(loss, params, inputs, targets)
ans = linfun(params, inputs, targets)
self.assertAllClose(ans, expected, check_dtypes=False)
# gradient evaluation doesn't crash
api.grad(loss)(params, inputs, targets)
# gradient check passes
jtu.check_grads(loss, (params, inputs, targets), order=2, rtol=2e-2)
# we can vmap to batch things
batch_size = 7
batched_inputs = r.randn(batch_size, length, n_in).astype(np.float_)
batched_targets = r.randn(batch_size, length, n_out).astype(np.float_)
batched_loss = api.vmap(lambda x, y: loss(params, x, y))
losses = batched_loss(batched_inputs, batched_targets)
expected = onp.stack(list(map(lambda x, y: loss(params, x, y),
batched_inputs, batched_targets)))
self.assertAllClose(losses, expected, check_dtypes=False, rtol=1e-2)
def testIssue711(self):
# Tests reverse-mode differentiation through a scan for which the scanned
# function also involves reverse-mode differentiation.
# See https://github.com/google/jax/issues/711
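# Concretely: apply_carry below takes a gradient step via api.grad(energy_fn)
# inside lax.scan, and loss() is then differentiated through that scan, so the
# scan transpose must handle a body that itself came from reverse-mode AD.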
def harmonic_bond(conf, params):
return np.sum(conf * params)
def minimize_structure(test_params):
energy_fn = partial(harmonic_bond, params=test_params)
grad_fn = api.grad(energy_fn)
def apply_carry(carry, _):
i, x = carry
new_x = x - 0.1 * api.grad(energy_fn)(x)
new_carry = (i+1, new_x)
return new_carry, _
x0 = np.array([1., 2., 3.])
carry_final, _ = lax.scan(apply_carry, (0, x0), np.zeros((75, 0)))
_, x_final = carry_final
return x_final
initial_params = 0.5
minimize_structure(initial_params) # doesn't crash
def loss(test_params):
x_final = minimize_structure(test_params)
return np.sum(np.sin(1.0 - x_final))
api.grad(loss)(0.25) # doesn't crash
def testIssue744(self):
Point = collections.namedtuple('Point', ['x', 'y'])
p0 = Point(x=np.array(1), y=np.array(2))
def plus_one(p, iter_idx):
return Point(p.x+1, p.y+1), iter_idx
self.assertRaisesRegex(
ValueError,
'scan got value with no leading axis to scan over.*',
lambda: lax.scan(plus_one, p0, list(range(5))))
def testScanTypeErrors(self):
"""Test typing error messages for scan."""
a = np.arange(5)
# Body output not a tuple
with self.assertRaisesRegex(TypeError,
re.escape("scan body output must be a pair, got ShapedArray(float32[]).")):
lax.scan(lambda c, x: np.float32(0.), 0, a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, "
"got PyTreeDef(tuple, [*,*,*]) and PyTreeDef(tuple, [*,PyTreeDef(tuple, [*,*])])")):
lax.scan(lambda c, x: ((0, 0, 0), x), (1, (2, 3)), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, got * and PyTreeDef(None, []).")):
lax.scan(lambda c, x: (0, x), None, a)
with self.assertRaisesWithLiteralMatch(
TypeError,
"scan carry output and input must have identical types, got\n"
"ShapedArray(int32[])\n"
"and\n"
"ShapedArray(float32[])."):
lax.scan(lambda c, x: (np.int32(0), x), np.float32(1.0), a)
with self.assertRaisesRegex(TypeError,
re.escape("scan carry output and input must have same type structure, got * and PyTreeDef(tuple, [*,*]).")):
lax.scan(lambda c, x: (0, x), (1, 2), np.arange(5))
def testScanHigherOrderDifferentiation(self):
d = 0.75
def f(c, a):
b = np.sin(c * np.sum(np.cos(d * a)))
c = 0.9 * np.cos(d * np.sum(np.sin(c * a)))
return c, b
as_ = np.arange(6.).reshape((3, 2))
c = 1.
jtu.check_grads(lambda c, as_: lax.scan(f, c, as_), (c, as_),
modes=["rev"], order=2, rtol={onp.float32: 6e-3})
@parameterized.named_parameters(
{"testcase_name": "_jit_scan={}_jit_f={}_in_axes={}".format(
jit_scan, jit_f, in_axes),
"jit_scan": jit_scan, "jit_f": jit_f, "in_axes": in_axes}
for jit_scan in [False, True]
for jit_f in [False, True]
for in_axes in itertools.product([None, 0, 1], [None, 0, 1, 2])
if in_axes != (None, None))
def testScanVmap(self, jit_scan, jit_f, in_axes):
rng = onp.random.RandomState(0)
d = rng.randn(2)
def f(c, a):
assert a.shape == (3,)
assert c.shape == (4,)
b = np.cos(np.sum(np.sin(a)) + np.sum(np.cos(c)) + np.sum(np.tan(d)))
c = np.sin(c * b)
assert b.shape == ()
return c, b
if jit_f:
f = api.jit(f)
if jit_scan:
scan = api.jit(lax.scan, (0,))
else:
scan = lax.scan
as_shape = [5, 3]
c_shape = [4]
c_bdim, as_bdim = in_axes
if c_bdim is not None:
c_shape.insert(c_bdim, 7)
if as_bdim is not None:
as_shape.insert(as_bdim, 7)
as_ = rng.randn(*as_shape)
c = rng.randn(*c_shape)
ans = api.vmap(lambda c, as_: scan(f, c, as_), in_axes)(c, as_)
expected = api.vmap(lambda c, as_: scan_reference(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
def testScanVmapTuples(self):
def f(c, a):
a1, a2 = a
c1, c2 = c
b = np.sum(np.cos(a1)) * np.sum(np.tan(c2 * a2))
c = c1 * np.sin(np.sum(a1 * a2)), c2 * np.cos(np.sum(a1))
return c, b
in_axes = (0, (1, 2))
r = onp.random.RandomState(0)
as_ = (r.randn(3, 7), r.randn(3, 4, 7))
c = (r.randn(7, 2), r.randn(7))
expected_c_out, expected_bs = [], []
for i in range(7):
c_out, bs = lax.scan(f, (c[0][i], c[1][i]), (as_[0][:,i], as_[1][:,:,i]))
expected_c_out.append(c_out)
expected_bs.append(bs)
expected_c_out_0, expected_c_out_1 = unzip2(expected_c_out)
expected_c_out = (np.stack(expected_c_out_0), np.stack(expected_c_out_1))
expected_bs = np.stack(expected_bs)
expected = expected_c_out, expected_bs
ans = api.vmap(lambda c, as_: lax.scan(f, c, as_), in_axes)(c, as_)
self.assertAllClose(ans, expected, check_dtypes=False)
def testScanVmapFixpoint(self):
def f(carry_init):
def scan_body(c, x):
# The carry is a 4-tuple, the last element starts batched,
# and the carry is shifted left at each iteration.
return ((c[1], c[2], c[3], 0.), None)
return lax.scan(scan_body, (0., 1., 2., carry_init), np.zeros(2))
carry_init = np.array([3., 4., 5.])
carry_out, _ = api.vmap(f)(carry_init)
self.assertAllClose(carry_out[3], np.array([0., 0., 0.]), check_dtypes=False)
self.assertAllClose(carry_out[2], np.array([0., 0., 0.]), check_dtypes=False)
# After two shifts, we get the carry_init
self.assertAllClose(carry_out[1], carry_init, check_dtypes=False)
self.assertAllClose(carry_out[0], np.array([2., 2., 2.]), check_dtypes=False)
# TODO(mattjj, dougalm): fix this test when skip_checks is False
def testIssue757(self):
# code from https://github.com/google/jax/issues/757
def fn(a):
return np.cos(a)
def loop(val):
iterations = 10
def apply_carry(x, i):
return api.grad(fn, argnums=(0,))(x)[0], i
final_val, _ = lax.scan(
apply_carry,
val,
np.arange(iterations)
)
return final_val
arg = 0.5
api.jit(api.jacfwd(loop, argnums=(0,)))(arg) # doesn't crash
# TODO(mattjj): add a test for "the David Sussillo bug"
def testIssue804(self):
num_devices = xla_bridge.device_count()
f = partial(lax.scan, lambda c, x: (c + lax.psum(x, "i") , c), 0.)
api.pmap(f, axis_name="i")(np.ones((num_devices, 4))) # doesn't crash
def testMap(self):
f = lambda x: x ** 2
xs = np.arange(10)
expected = xs ** 2
actual = lax.map(f, xs)
self.assertAllClose(actual, expected, check_dtypes=True)
def testCaching(self):
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = True
lax.while_loop(cond, body, 0)
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testCaching2(self):
# This second caching test shows a different kind of caching that we haven't
# implemented (but could!), namely that Python functions that are distinct
# objects but are equivalent functions trigger cache hits. This kind of
# caching could be salient when using lambda functions with control flow:
#
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
# lax.while_loop(lambda x: x < 5, lambda x: x + 2, 0)
#
# To get a cache hit on the second line we'd need to form a jaxpr and
# compare them for equality (including the literals on identity). We could
# implement that by adding a __hash__/__eq__ to core.Jaxpr and
# core.TypedJaxpr (see #1221).
raise SkipTest("not implemented")
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = True
lax.while_loop(cond, body, 0)
def cond(x):
assert python_should_be_executing
return x < 5
def body(x):
assert python_should_be_executing
return x + 2
python_should_be_executing = False
lax.while_loop(cond, body, 0)
def testWhileCondConstant(self):
out = lax.while_loop(lambda _: False, lambda _: (), ()) # doesn't crash
self.assertEqual(out, ())
def testIssue1316(self):
def f(carry, _):
c, key = carry
key, _ = random.split(key)
return (c, key), ()
key = random.PRNGKey(0)
api.grad(lambda c: lax.scan(f, (c, key), onp.ones(3))[0][0])(0.) # doesn't crash
def testIssue1361(self):
@api.jit
def jit_run_scan(x):
def fun(carry, _):
x, _ = carry
return (2 * x, 0.), None
(x, _), _ = lax.scan(fun, (x, 0.), np.arange(3))
return x
api.grad(lambda x: jit_run_scan(x))(0.) # doesn't crash
def test_custom_root_scalar(self):
# TODO(shoyer): Figure out why this fails and re-enable it, if possible. My
# best guess is that TPUs use less stable numerics for pow().
if jtu.device_under_test() == "tpu":
raise SkipTest("Test fails on TPU")
def scalar_solve(f, y):
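# custom_root's tangent_solve hook receives a linear (homogeneous) scalar map
# here, so f(x) = f(1.0) * x and f(x) = y is solved by x = y / f(1.0).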
return y / f(1.0)
def binary_search(func, x0, low=0.0, high=100.0, tolerance=1e-6):
del x0 # unused
def cond(state):
low, high = state
return high - low > tolerance
def body(state):
low, high = state
midpoint = 0.5 * (low + high)
update_upper = func(midpoint) > 0
low = np.where(update_upper, low, midpoint)
high = np.where(update_upper, midpoint, high)
return (low, high)
solution, _ = lax.while_loop(cond, body, (low, high))
return solution
def sqrt_cubed(x, tangent_solve=scalar_solve):
f = lambda y: y ** 2. - np.array(x) ** 3.
return lax.custom_root(f, 0.0, binary_search, tangent_solve)
value, grad = api.value_and_grad(sqrt_cubed)(5.0)
self.assertAllClose(value, 5 ** 1.5, check_dtypes=False, rtol=1e-6)
self.assertAllClose(grad, api.grad(pow)(5.0, 1.5), check_dtypes=False,
rtol=1e-7)
jtu.check_grads(sqrt_cubed, (5.0,), order=2, rtol=1e-3)
# TODO(shoyer): reenable when batching works
# inputs = np.array([4.0, 5.0])
# results = api.vmap(sqrt_cubed)(inputs)
# self.assertAllClose(results, inputs ** 1.5, check_dtypes=False)
results = api.jit(sqrt_cubed)(5.0)
self.assertAllClose(results, 5.0 ** 1.5, check_dtypes=False,
rtol={onp.float64:1e-7})
def test_custom_root_vector_with_solve_closure(self):
def vector_solve(f, y):
return np.linalg.solve(api.jacobian(f)(y), y)
def linear_solve(a, b):
f = lambda y: high_precision_dot(a, y) - b
x0 = np.zeros_like(b)
solution = np.linalg.solve(a, b)
oracle = lambda func, x0: solution
return lax.custom_root(f, x0, oracle, vector_solve)
rng = onp.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(linear_solve, (a, b), order=2,
atol={onp.float32: 1e-2, onp.float64: 1e-11})
actual = api.jit(linear_solve)(a, b)
expected = np.linalg.solve(a, b)
self.assertAllClose(expected, actual, check_dtypes=True)
def test_custom_root_with_custom_linear_solve(self):
# TODO(shoyer): Figure out why this fails and re-enable it.
if jtu.device_under_test() == "tpu":
raise SkipTest("Test fails on TPU")
def linear_solve(a, b):
f = lambda x: np.dot(a, x) - b
factors = jsp.linalg.cho_factor(a)
cho_solve = lambda f, b: jsp.linalg.cho_solve(factors, b)
def pos_def_solve(g, b):
return lax.custom_linear_solve(g, b, cho_solve, symmetric=True)
return lax.custom_root(f, b, cho_solve, pos_def_solve)
rng = onp.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
actual = linear_solve(np.dot(a, a.T), b)
expected = np.linalg.solve(np.dot(a, a.T), b)
self.assertAllClose(expected, actual, check_dtypes=True)
actual = api.jit(linear_solve)(np.dot(a, a.T), b)
expected = np.linalg.solve(np.dot(a, a.T), b)
self.assertAllClose(expected, actual, check_dtypes=True)
jtu.check_grads(lambda x, y: linear_solve(np.dot(x, x.T), y),
(a, b), order=2)
def test_custom_root_errors(self):
with self.assertRaisesRegex(TypeError, re.escape("f() output pytree")):
lax.custom_root(lambda x: (x, x), 0.0, lambda f, x: x, lambda f, x: x)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_root(lambda x: x, 0.0, lambda f, x: (x, x), lambda f, x: x)
def dummy_root_usage(x):
f = lambda y: x - y
return lax.custom_root(f, 0.0, lambda f, x: x, lambda f, x: (x, x))
with self.assertRaisesRegex(
TypeError, re.escape("tangent_solve() output pytree")):
api.jvp(dummy_root_usage, (0.0,), (0.0,))
@parameterized.named_parameters(
{"testcase_name": "nonsymmetric", "symmetric": False},
{"testcase_name": "symmetric", "symmetric": True},
)
def test_custom_linear_solve(self, symmetric):
def explicit_jacobian_solve(matvec, b):
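# matvec is linear, so api.jacobian(matvec)(b) materializes its matrix for a
# dense solve; stop_gradient ensures derivatives come solely from the
# custom_linear_solve rule rather than from differentiating this solver.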
return lax.stop_gradient(np.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(
matvec, b, explicit_jacobian_solve, explicit_jacobian_solve,
symmetric=symmetric)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = onp.random.RandomState(0)
a = rng.randn(3, 3)
if symmetric:
a = a + a.T
b = rng.randn(3)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
expected = np.linalg.solve(a, b)
actual = api.jit(linear_solve)(a, b)
self.assertAllClose(expected, actual, check_dtypes=True)
# TODO(shoyer): reenable when batching works
# c = rng.randn(3, 2)
# expected = np.linalg.solve(a, c)
# actual = api.vmap(linear_solve, (None, 1), 1)(a, c)
# self.assertAllClose(expected, actual, check_dtypes=True)
def test_custom_linear_solve_zeros(self):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(np.linalg.solve(api.jacobian(matvec)(b), b))
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, explicit_jacobian_solve,
explicit_jacobian_solve)
def linear_solve(a, b):
return matrix_free_solve(partial(high_precision_dot, a), b)
rng = onp.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
jtu.check_grads(lambda x: linear_solve(x, b), (a,), order=2,
rtol={onp.float32: 5e-3})
jtu.check_grads(lambda x: linear_solve(a, x), (b,), order=2,
rtol={onp.float32: 5e-3})
def test_custom_linear_solve_iterative(self):
def richardson_iteration(matvec, b, omega=0.1, tolerance=1e-6):
# Equivalent to vanilla gradient descent:
# https://en.wikipedia.org/wiki/Modified_Richardson_iteration
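# The update below is x_{k+1} = x_k + omega * (b - matvec(x_k)), iterated
# until the residual norm ||matvec(x_k) - b|| drops below `tolerance`.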
def cond(x):
return np.linalg.norm(matvec(x) - b) > tolerance
def body(x):
return x + omega * (b - matvec(x))
return lax.while_loop(cond, body, b)
def matrix_free_solve(matvec, b):
return lax.custom_linear_solve(matvec, b, richardson_iteration,
richardson_iteration)
def build_and_solve(a, b):
# intentionally non-linear in a and b
matvec = partial(high_precision_dot, np.exp(a))
return matrix_free_solve(matvec, np.cos(b))
rng = onp.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = np.linalg.solve(np.exp(a), np.cos(b))
actual = build_and_solve(a, b)
self.assertAllClose(expected, actual, atol=1e-5, check_dtypes=True)
jtu.check_grads(build_and_solve, (a, b), atol=1e-5, order=2, rtol=2e-3)
# TODO(shoyer): reenable when batching works
# a2 = rng.randn(1, 2, 2)
# b2 = rng.randn(1, 2, 2)
# jtu.check_grads(api.vmap(build_and_solve), (a2, b2), atol=1e-5, order=2)
def test_custom_linear_solve_cholesky(self):
def positive_definite_solve(a, b):
factors = jsp.linalg.cho_factor(a)
def solve(matvec, x):
return jsp.linalg.cho_solve(factors, x)
matvec = partial(high_precision_dot, a)
return lax.custom_linear_solve(matvec, b, solve, symmetric=True)
rng = onp.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
expected = np.linalg.solve(high_precision_dot(a, a.T), b)
actual = positive_definite_solve(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual, check_dtypes=True)
actual = api.jit(positive_definite_solve)(high_precision_dot(a, a.T), b)
self.assertAllClose(expected, actual, check_dtypes=True)
# numerical gradients are only well defined if ``a`` is guaranteed to be
# positive definite.
jtu.check_grads(
lambda x, y: positive_definite_solve(high_precision_dot(x, x.T), y),
(a, b), order=2, rtol=1e-2)
def test_custom_linear_solve_lu(self):
# TODO(b/143528110): re-enable when underlying XLA TPU issue is fixed
if jtu.device_under_test() == "tpu":
raise SkipTest("Test fails on TPU")
def linear_solve(a, b):
a_factors = jsp.linalg.lu_factor(a)
at_factors = jsp.linalg.lu_factor(a.T)
def solve(matvec, x):
return jsp.linalg.lu_solve(a_factors, x)
def transpose_solve(vecmat, x):
return jsp.linalg.lu_solve(at_factors, x)
return lax.custom_linear_solve(
partial(high_precision_dot, a), b, solve, transpose_solve)
rng = onp.random.RandomState(0)
a = rng.randn(3, 3)
b = rng.randn(3)
expected = np.linalg.solve(a, b)
actual = linear_solve(a, b)
self.assertAllClose(expected, actual, check_dtypes=True)
jtu.check_grads(linear_solve, (a, b), order=2, rtol=2e-3)
# regression test for https://github.com/google/jax/issues/1536
jtu.check_grads(api.jit(linear_solve), (a, b), order=2,
rtol={onp.float32: 2e-3})
def test_custom_linear_solve_without_transpose_solve(self):
def explicit_jacobian_solve(matvec, b):
return lax.stop_gradient(np.linalg.solve(api.jacobian(matvec)(b), b))
def loss(a, b):
matvec = partial(high_precision_dot, a)
x = lax.custom_linear_solve(matvec, b, explicit_jacobian_solve)
return np.sum(x)
rng = onp.random.RandomState(0)
a = rng.randn(2, 2)
b = rng.randn(2)
jtu.check_grads(loss, (a, b), order=2, modes=['fwd'],
atol={onp.float32: 2e-3, onp.float64: 1e-11})
with self.assertRaisesRegex(TypeError, "transpose_solve required"):
api.grad(loss)(a, b)
def test_custom_linear_solve_errors(self):
solve = lambda f, x: x
with self.assertRaisesRegex(TypeError, re.escape("matvec() output pytree")):
lax.custom_linear_solve(lambda x: [x], 1.0, solve, solve)
with self.assertRaisesRegex(TypeError, re.escape("solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: [x], solve)
with self.assertRaisesRegex(
TypeError, re.escape("transpose_solve() output pytree")):
lax.custom_linear_solve(lambda x: x, 1.0, solve, lambda f, x: [x])
with self.assertRaisesRegex(ValueError, re.escape("solve() output shapes")):
lax.custom_linear_solve(lambda x: x, 1.0, lambda f, x: np.ones(2), solve)
def bad_matvec_usage(a):
return lax.custom_linear_solve(
lambda x: a * np.ones(2), 1.0, solve, solve)
with self.assertRaisesRegex(ValueError, re.escape("matvec() output shapes")):
api.jvp(bad_matvec_usage, (1.0,), (1.0,))
def testIssue810(self):
def loss(A):
def step(x, i):
return np.matmul(A, x), None
init_x = np.zeros(A.shape[-1:])
last_x, _ = lax.scan(step, init_x, np.arange(10))
return np.sum(last_x)
A = np.zeros((3, 3))
# The second DUS was unnecessarily replicating A across time.
# We check XLA because _scan_impl is "underneath" the jaxpr language.
s = str(api.xla_computation(api.grad(loss))(A).GetHloText())
assert s.count("dynamic-update-slice(") < 2
def testScanLengthArg(self):
def arange(n):
return lax.scan(lambda c, _: (c + 1, c), 0, None, length=n)[1]
ans = arange(10)
expected = onp.arange(10)
self.assertAllClose(ans, expected, check_dtypes=False)
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/lax_control_flow_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import jax.numpy as np
from jax.tools.jax_to_hlo import jax_to_hlo
from jax.lib import xla_client
class JaxToHloTest(absltest.TestCase):
def test_convert_axpy(self):
def axpy(a, x, y):
return a * x + y[:,np.newaxis]
hlo_proto, hlo_text = jax_to_hlo(
axpy, [
('y', xla_client.Shape('f32[128]')),
('a', xla_client.Shape('f32[]')),
('x', xla_client.Shape('f32[128,2]')),
])
# Check that hlo_text contains a broadcast, add, and multiply.
self.assertIn('broadcast', hlo_text)
self.assertIn('add', hlo_text)
self.assertIn('multiply', hlo_text)
# Check that the HLO parameters are in the order we specified in the
# jax_to_hlo call.
self.assertIn('f32[128]{0} parameter(0)', hlo_text)
self.assertIn('f32[] parameter(1)', hlo_text)
self.assertIn('f32[128,2]{1,0} parameter(2)', hlo_text)
# Check that the parameters are in the expected order.
# TODO(jlebar): Ideally we'd check that hlo_proto can be deserialized to a
# valid HLO proto, but we don't seem to have access to hlo_pb2 at the
# moment, so the best we seem to be able to do is check that it's nonempty.
assert hlo_proto
def test_convert_with_constants(self):
def fn(a, b, x, y):
return a / b * x + y
_, hlo_text = jax_to_hlo(
fn,
input_shapes=[
('x', xla_client.Shape('f32[128]')),
('y', xla_client.Shape('f32[128]')),
],
constants={
'a': 123456,
'b': 4,
})
# Because we passed `a` and `b` as constants, they get constant-folded away
# by Python/JAX to a/b = 30864.
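# (123456 / 4 = 30864.0, so only the folded scalar shows up in the HLO text.)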
self.assertIn('constant(30864)', hlo_text)
self.assertNotIn('123456', hlo_text)
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/jax_to_hlo_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from functools import partial
import itertools
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import scipy.special as osp_special
import scipy.stats as osp_stats
from jax import api
from jax import test_util as jtu
from jax.scipy import special as lsp_special
from jax.scipy import stats as lsp_stats
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
all_shapes = [(), (4,), (3, 4), (3, 1), (1, 4), (2, 1, 4)]
float_dtypes = [onp.float32, onp.float64]
complex_dtypes = [onp.complex64]
int_dtypes = [onp.int32, onp.int64]
bool_dtypes = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
numeric_dtypes = float_dtypes + complex_dtypes + int_dtypes
OpRecord = collections.namedtuple(
"OpRecord",
["name", "nargs", "dtypes", "rng_factory", "test_autodiff", "test_name"])
def op_record(name, nargs, dtypes, rng_factory, test_grad, test_name=None):
test_name = test_name or name
return OpRecord(name, nargs, dtypes, rng_factory, test_grad, test_name)
JAX_SPECIAL_FUNCTION_RECORDS = [
# TODO: digamma has no JVP implemented.
op_record("betaln", 2, float_dtypes, jtu.rand_positive, False),
op_record("digamma", 1, float_dtypes, jtu.rand_positive, False),
op_record("erf", 1, float_dtypes, jtu.rand_small_positive, True),
op_record("erfc", 1, float_dtypes, jtu.rand_small_positive, True),
op_record("erfinv", 1, float_dtypes, jtu.rand_small_positive, True),
op_record("expit", 1, float_dtypes, jtu.rand_small_positive, True),
# TODO: gammaln has slightly high error.
op_record("gammaln", 1, float_dtypes, jtu.rand_positive, False),
op_record("logit", 1, float_dtypes, jtu.rand_small_positive, False),
op_record("log_ndtr", 1, float_dtypes, jtu.rand_default, True),
op_record("ndtri", 1, float_dtypes, partial(jtu.rand_uniform, 0.05, 0.95),
True),
op_record("ndtr", 1, float_dtypes, jtu.rand_default, True),
# TODO(phawkins): gradient of entr yields NaNs.
op_record("entr", 1, float_dtypes, jtu.rand_default, False),
]
CombosWithReplacement = itertools.combinations_with_replacement
class LaxBackedScipyTests(jtu.JaxTestCase):
"""Tests for LAX-backed Scipy implementation."""
def _GetArgsMaker(self, rng, shapes, dtypes):
return lambda: [rng(shape, dtype) for shape, dtype in zip(shapes, dtypes)]
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}_keepdims={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
# TODO(b/133842870): re-enable when exp(nan) returns NaN on CPU.
"rng_factory": jtu.rand_some_inf_and_nan
if jtu.device_under_test() != "cpu"
else jtu.rand_default,
"shape": shape, "dtype": dtype,
"axis": axis, "keepdims": keepdims}
for shape in all_shapes for dtype in float_dtypes
for axis in range(-len(shape), len(shape))
for keepdims in [False, True]))
@jtu.skip_on_flag("jax_xla_backend", "xrt")
def testLogSumExp(self, rng_factory, shape, dtype, axis, keepdims):
rng = rng_factory()
# TODO(mattjj): test autodiff
def scipy_fun(array_to_reduce):
return osp_special.logsumexp(array_to_reduce, axis, keepdims=keepdims)
def lax_fun(array_to_reduce):
return lsp_special.logsumexp(array_to_reduce, axis, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
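# Illustrative sketch (assumption, not part of the original test): the identity
# under test written out directly for an integer `axis` -- shift by the max so the
# exponentials cannot overflow, then add the max back in log space.
def _naive_logsumexp(x, axis, keepdims=False):
    m = onp.max(x, axis=axis, keepdims=True)
    out = onp.log(onp.sum(onp.exp(x - m), axis=axis, keepdims=True)) + m
    return out if keepdims else onp.squeeze(out, axis=axis)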
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.test_name, shapes, dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"test_autodiff": rec.test_autodiff,
"scipy_op": getattr(osp_special, rec.name),
"lax_op": getattr(lsp_special, rec.name)}
for shapes in CombosWithReplacement(all_shapes, rec.nargs)
for dtypes in CombosWithReplacement(rec.dtypes, rec.nargs))
for rec in JAX_SPECIAL_FUNCTION_RECORDS))
def testScipySpecialFun(self, scipy_op, lax_op, rng_factory, shapes, dtypes,
test_autodiff):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
args = args_maker()
self.assertAllClose(scipy_op(*args), lax_op(*args), atol=1e-3, rtol=1e-3,
check_dtypes=False)
self._CompileAndCheck(lax_op, args_maker, check_dtypes=True, rtol=1e-5)
if test_autodiff:
jtu.check_grads(lax_op, args, order=1, atol=1e-3, rtol=3e-3, eps=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_d={}".format(
jtu.format_shape_dtype_string(shape, dtype), d),
"rng_factory": jtu.rand_positive, "shape": shape, "dtype": dtype,
"d": d}
for shape in all_shapes
for dtype in float_dtypes
for d in [1, 2, 5]))
def testMultigammaln(self, rng_factory, shape, dtype, d):
def scipy_fun(a):
return osp_special.multigammaln(a, d)
def lax_fun(a):
return lsp_special.multigammaln(a, d)
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype) + (d - 1) / 2.]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True,
tol={onp.float32: 1e-3, onp.float64: 1e-14})
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
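# Illustrative sketch (assumption, not from the original test): the function being
# compared is multigammaln(a, d) = d*(d-1)/4 * log(pi) + sum_j gammaln(a + (1-j)/2)
# for j = 1..d, which is also why args_maker shifts by (d - 1)/2: it keeps every
# gammaln argument strictly positive.
def _naive_multigammaln(a, d):
    return (d * (d - 1) / 4.0) * onp.log(onp.pi) + sum(
        osp_special.gammaln(a + (1.0 - j) / 2.0) for j in range(1, d + 1))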
def testIssue980(self):
x = onp.full((4,), -1e20, dtype=onp.float32)
self.assertAllClose(onp.zeros((4,), dtype=onp.float32),
lsp_special.expit(x), check_dtypes=True)
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/lax_scipy_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from unittest import SkipTest
from absl.testing import absltest
import numpy as onp
import jax
import jax.numpy as np
from jax import lax
from jax import test_util as jtu
from jax.lib import xla_bridge
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
prev_xla_flags = None
# Run all tests with 8 CPU devices.
def setUpModule():
global prev_xla_flags
prev_xla_flags = os.getenv("XLA_FLAGS")
flags_str = prev_xla_flags or ""
# Don't override user-specified device count, or other XLA flags.
if "xla_force_host_platform_device_count" not in flags_str:
os.environ["XLA_FLAGS"] = (flags_str +
" --xla_force_host_platform_device_count=8")
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
# Reset to previous configuration in case other test modules will be run.
def tearDownModule():
if prev_xla_flags is None:
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = prev_xla_flags
xla_bridge.get_backend.cache_clear()
class MultiDeviceTest(jtu.JaxTestCase):
def test_computation_follows_data(self):
if len(jax.devices()) < 2:
raise SkipTest("test requires multiple devices")
# computation follows data explicitly placed on device 1
x = jax.device_put(1, jax.devices()[1])
y = x.reshape((1, 1))
self.assertEqual(y.device_buffer.device(), jax.devices()[1])
z = y.reshape((1, 1))
self.assertEqual(z.device_buffer.device(), jax.devices()[1])
# multiple arguments explicitly placed on device 0 are compatible
x = jax.device_put(1, jax.devices()[0])
y = jax.device_put(2, jax.devices()[0])
z = x + y
self.assertEqual(z, 3)
self.assertEqual(z.device_buffer.device(), jax.devices()[0])
w = z + x
self.assertEqual(w.device_buffer.device(), jax.devices()[0])
f = jax.jit(lambda x: x + 1, device=jax.devices()[0])
z = f(1) + f(2)
self.assertEqual(z, 5)
self.assertEqual(z.device_buffer.device(), jax.devices()[0])
w = z + z
self.assertEqual(w.device_buffer.device(), jax.devices()[0])
# multiple arguments explicitly placed on device 1 are compatible
x = jax.device_put(1, jax.devices()[1])
y = jax.device_put(2, jax.devices()[1])
z = x + y
self.assertEqual(z, 3)
self.assertEqual(z.device_buffer.device(), jax.devices()[1])
w = z + x
self.assertEqual(w.device_buffer.device(), jax.devices()[1])
f = jax.jit(lambda x: x + 1, device=jax.devices()[1])
z = f(1) + f(2)
self.assertEqual(z, 5)
self.assertEqual(z.device_buffer.device(), jax.devices()[1])
w = z + z
self.assertEqual(w.device_buffer.device(), jax.devices()[1])
# an argument explicitly placed on one device still works with values that
# aren't device-committed (and computation follows device-committed values)
z = jax.device_put(1., jax.devices()[1]) + 4
self.assertEqual(z, 5.)
self.assertEqual(z.device_buffer.device(), jax.devices()[1])
w = z + 3
self.assertEqual(w, 8.)
self.assertEqual(w.device_buffer.device(), jax.devices()[1])
z = jax.device_put(1., jax.devices()[1]) + np.ones(3)
self.assertAllClose(z, 1 + onp.ones(3), check_dtypes=False)
self.assertEqual(z.device_buffer.device(), jax.devices()[1])
w = z - 3
self.assertAllClose(w, 1 + onp.ones(3) - 3, check_dtypes=False)
self.assertEqual(w.device_buffer.device(), jax.devices()[1])
z = jax.device_put(1., jax.devices()[1]) + np.array([1, 2])
self.assertAllClose(z, 1 + onp.array([1, 2]), check_dtypes=False)
self.assertEqual(z.device_buffer.device(), jax.devices()[1])
w = z * 2
self.assertAllClose(w, (1 + onp.array([1, 2])) * 2, check_dtypes=False)
self.assertEqual(w.device_buffer.device(), jax.devices()[1])
z = jax.device_put(1., jax.devices()[1]) + jax.device_put(2)
self.assertAllClose(z, 3., check_dtypes=False)
self.assertEqual(z.device_buffer.device(), jax.devices()[1])
z = jax.device_put(1., jax.devices()[1]) + jax.jit(lambda x: x + 1)(3)
self.assertAllClose(z, 5., check_dtypes=False)
self.assertEqual(z.device_buffer.device(), jax.devices()[1])
# multiple arguments explicitly placed on distinct devices cause errors
x = jax.device_put(1, jax.devices()[0])
y = jax.device_put(2, jax.devices()[1])
self.assertRaisesRegex(
ValueError,
"primitive arguments must be colocated on the same device",
lambda: x + y)
f = jax.jit(lambda x: x + 1, device=jax.devices()[0])
g = jax.jit(lambda x: x + 1, device=jax.devices()[1])
self.assertRaisesRegex(
ValueError,
"primitive arguments must be colocated on the same device",
lambda: f(1) + g(2))
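# Illustrative sketch (assumption, not part of the original test): a cross-device
# conflict like the ones above can be resolved by explicitly re-committing one
# operand before mixing; here jax.device_put on an existing value is assumed to
# copy it onto the requested device.
def _colocate_and_add():
    x0 = jax.device_put(1, jax.devices()[0])
    x1 = jax.device_put(2, jax.devices()[1])
    return x0 + jax.device_put(x1, jax.devices()[0])  # both operands on device 0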
def test_primitive_compilation_cache(self):
if len(jax.devices()) < 2:
raise SkipTest("test requires multiple devices")
x = jax.device_put(1, jax.devices()[1])
with jtu.count_primitive_compiles() as count:
y = lax.add(x, x)
z = lax.add(y, y)
self.assertEqual(count[0], 1)
self.assertEqual(y.device_buffer.device(), jax.devices()[1])
self.assertEqual(z.device_buffer.device(), jax.devices()[1])
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/multi_device_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
import jax.numpy as np
from jax import test_util as jtu
from jax.abstract_arrays import ShapedArray
from jax import lax
from jax import lax_linalg
from jax import random
from jax.api import jit, grad, jvp, vjp, make_jaxpr, jacfwd, jacrev, hessian
from jax.api import vmap
from jax.core import unit
from jax.interpreters import partial_eval as pe
from jax.util import partial, curry
import jax.ops
from jax.config import config
config.parse_flags_with_absl()
# These are 'manual' tests for batching (vmap). The more exhaustive, more
# systematic tests are in lax_test.py's LaxVmapTest class.
class BatchingTest(jtu.JaxTestCase):
def testConstantFunction(self):
ans = vmap(lambda x: 3)(onp.ones(4))
expected = 3 * onp.ones(4)
self.assertAllClose(ans, expected, check_dtypes=False)
def testNestedBatchingMatMat(self):
matvec = vmap(np.vdot, in_axes=(0, None))
matmat = vmap(matvec, in_axes=(None, 1), out_axes=1)
R = onp.random.RandomState(0).randn
A = R(4, 3)
B = R(3, 2)
ans = matmat(A, B)
expected = onp.dot(A, B)
self.assertAllClose(
ans, expected, check_dtypes=False,
rtol={onp.float32:1e-2} if jtu.device_under_test() == "tpu" else None)
jaxpr = make_jaxpr(matmat)(A, B)
self.assertEqual(len(jaxpr.jaxpr.eqns), 1)
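# Illustrative sketch (not from the original test): the nested vmap above is the
# gufunc-style build-up of matmul from vdot; the same contraction spelled with
# einsum, for reference.
def _matmat_reference(A, B):
    return onp.einsum('ij,jk->ik', A, B)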
def testPerExampleGradients(self):
def predict(params, inputs):
for W, b in params:
outputs = np.dot(W, inputs) + b
inputs = np.tanh(outputs)
return outputs
def loss(params, data):
inputs, targets = data
predictions = predict(params, inputs)
return np.sum((predictions - targets)**2)
batch_size = 5
layer_sizes = [3, 2, 4]
R = onp.random.RandomState(0).randn
params = [(R(m, n), R(m))
for m, n in zip(layer_sizes[1:], layer_sizes[:-1])]
input_vec = R(3)
target_vec = R(4)
datum = (input_vec, target_vec)
input_batch = R(5, 3)
target_batch = R(5, 4)
batch = (input_batch, target_batch)
ans = vmap(partial(grad(loss), params))(batch)
for ans_pair, param_pair in zip(ans, params):
dW, db = ans_pair
W, b = param_pair
self.assertEqual(dW.shape, (batch_size,) + W.shape)
self.assertEqual(db.shape, (batch_size,) + b.shape)
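# Illustrative sketch (not part of the original test): the per-example-gradient
# recipe used above -- hold params fixed with partial, then vmap grad(loss) over
# the leading batch axis of the data.
def _per_example_grads(loss_fn, params, batch):
    # in_axes defaults to 0, so every (input, target) pair gets its own gradient
    return vmap(partial(grad(loss_fn), params))(batch)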
def testJacobians(self):
def jacbwd(f, x):
y, pullback = vjp(f, x)
std_basis = onp.eye(onp.size(y)).reshape((-1,) + onp.shape(y))
jac_flat, = vmap(pullback, out_axes=onp.ndim(y))(std_basis)
return jac_flat.reshape(onp.shape(y) + onp.shape(x))
def jacfwd(f, x):
pushfwd = lambda v: jvp(f, (x,), (v,))
std_basis = onp.eye(onp.size(x)).reshape((-1,) + onp.shape(x))
y, jac_flat = vmap(pushfwd, out_axes=(None, 0))(std_basis)
return jac_flat.reshape(onp.shape(y) + onp.shape(x))
R = onp.random.RandomState(0).randn
A = R(4, 3)
b = R(4)
f = lambda x: np.tanh(np.dot(A, x) + b)
x = R(3)
self.assertAllClose(jacfwd(f, x), jacbwd(f, x), check_dtypes=False)
def testBatchOfCompile(self):
side = []
@jit
def f(x):
side.append(None)
return x + x
g = jit(vmap(f))
self.assertAllClose(g(onp.ones(2)), 2 * onp.ones(2), check_dtypes=False)
self.assertEqual(len(side), 1)
self.assertAllClose(g(2 * onp.ones(2)), 4 * onp.ones(2),
check_dtypes=False)
self.assertEqual(len(side), 1)
def testSliceLax(self):
fun = lambda x: lax.slice(x, (2,), (4,))
R = onp.random.RandomState(0).randn
x = R(5, 10)
ans = vmap(fun)(x)
expected_ans = x[:, 2:4]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testSliceNumpy(self):
fun = lambda x: x[:, 2]
R = onp.random.RandomState(0).randn
x = R(10, 5, 3, 7)
ans = vmap(fun)(x)
expected_ans = x[:, :, 2]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testRevLax(self):
fun = lambda x: lax.rev(x, [0])
R = onp.random.RandomState(0).randn
x = R(2, 3)
ans = vmap(fun)(x)
expected_ans = x[:, ::-1]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
ans = vmap(fun, (1,), 1)(x)
expected_ans = x[::-1, :]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testRevNumpy(self):
fun = lambda x: x[:, ::-1]
R = onp.random.RandomState(0).randn
x = R(3, 2, 4)
ans = vmap(fun)(x)
expected_ans = x[:, :, ::-1]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
ans = vmap(fun, (1,), 1)(x)
expected_ans = x[:, :, ::-1]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
ans = vmap(fun, (2,), 2)(x)
expected_ans = x[:, ::-1, :]
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testNpMaximum(self):
fun = lambda x: np.maximum(x, 0.0)
R = onp.random.RandomState(0).randn
x = R(10, 5, 3, 7)
ans = vmap(fun)(x)
expected_ans = onp.maximum(x, 0.0)
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testNpGtrThan(self):
R = onp.random.RandomState(0).randn
x = R(10, 5, 3, 7)
ans = vmap(lambda x: x > 1.0)(x)
expected_ans = x > 1.0
self.assertAllClose(ans, expected_ans, check_dtypes=True)
def testNpMaximumPerExampleGrad(self):
R = onp.random.RandomState(0).randn
x = R(10, 5)
W = R(5, 5)
fun = lambda W, x: np.sum(np.maximum(np.dot(x, W), 0.0) ** 2)
ans = vmap(partial(grad(fun), W))(x)
W_t = np.transpose(W)
for i in range(10):
x_ex = x[i:i + 1]
expected_ans = 2.0 * np.dot(
np.maximum(np.dot(W_t, np.transpose(x_ex)), 0.0), x_ex)
expected_ans = np.transpose(expected_ans)
self.assertAllClose(
ans[i], expected_ans, check_dtypes=False,
atol={onp.float32:5e-2} if jtu.device_under_test() == "tpu" else None)
def testDotGeneral(self):
R = onp.random.RandomState(0).randn
x = R(10, 3, 4, 5)
y = R(10, 3, 5, 6)
fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))])
ans = vmap(fun)(x, y)
expected = lax.dot_general(x, y, [((3,), (2,)), ((0, 1), (0, 1))])
self.assertAllClose(ans, expected, check_dtypes=True)
x = R(3, 4, 10, 5)
y = R(3, 10, 5, 6)
fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))])
ans = vmap(fun, in_axes=(2, 1))(x, y)
expected = onp.stack([fun(x[..., i, :], y[:, i, ...]) for i in range(10)])
self.assertAllClose(ans, expected, check_dtypes=True)
x = R(3, 4, 5, 10)
y = R(3, 5, 6)
fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))])
ans = vmap(fun, in_axes=(3, None))(x, y)
expected = onp.stack([fun(x[..., i], y) for i in range(10)])
self.assertAllClose(ans, expected, check_dtypes=True)
x = R(3, 4, 5)
y = R(3, 5, 10, 6)
fun = lambda x, y: lax.dot_general(x, y, [((2,), (1,)), ((0,), (0,))])
ans = vmap(fun, in_axes=(None, 2))(x, y)
expected = onp.stack([fun(x, y[..., i, :]) for i in range(10)])
self.assertAllClose(ans, expected, check_dtypes=True)
x = R(4)
y = R(4, 10)
fun = lambda x, y: lax.dot_general(x, y, [((0,), (0,)), ((), ())])
ans = vmap(fun, in_axes=(None, 1))(x, y)
expected = onp.stack([fun(x, y[..., i]) for i in range(10)])
self.assertAllClose(ans, expected, check_dtypes=True)
def testDot(self):
# these tests are based on @shoyer's notebook studying gufuncs
def vecvec(a, b):
dot = np.dot
for ndim in range(1, max(a.ndim, b.ndim)):
a_ax = 0 if a.ndim > ndim else None
b_ax = 0 if b.ndim > ndim else None
dot = vmap(dot, in_axes=(a_ax, b_ax))
return dot(a, b)
assert vecvec(np.zeros((3,)), np.zeros((3,))).shape == ()
assert vecvec(np.zeros((2, 3)), np.zeros((3,))).shape == (2,)
assert vecvec(np.zeros((4, 2, 3)), np.zeros((3,))).shape == (4, 2)
def testDot2(self):
R = onp.random.RandomState(0).randn
xs = R(10, 3)
ys = R(10, 3)
ans = vmap(np.dot)(xs, ys)
expected = onp.einsum('ni,ni->n', xs, ys)
self.assertAllClose(ans, expected, check_dtypes=False)
def testDot3(self):
R = onp.random.RandomState(0).randn
xs = R(5, 8, 10)
ys = R(10, 1)
ans = vmap(np.dot, in_axes=(1, None))(xs, ys)
expected = onp.einsum('inj,jk->nik', xs, ys)
self.assertAllClose(ans, expected, check_dtypes=False)
def testDot4(self):
R = onp.random.RandomState(0).randn
xs = R(3, 2)
ys = R(3)
ans = vmap(np.dot, in_axes=(1, None))(xs, ys)
expected = onp.einsum('ij,i->j', xs, ys)
self.assertAllClose(ans, expected, check_dtypes=False)
def testDot5(self):
f = vmap(partial(np.einsum, 'ij,j->i'), (None, 0))
jaxpr = make_jaxpr(f)(np.zeros((1000, 1000)), np.zeros((1000, 1000)))
assert "broadcast" not in str(jaxpr)
def testPad(self):
R = onp.random.RandomState(0).randn
fun = lambda x: lax.pad(x, onp.float32(0), [(1, 2, 1)])
x = R(5, 10).astype(onp.float32)
ans = vmap(fun)(x)
expected_ans = np.stack(list(map(fun, x)))
self.assertAllClose(ans, expected_ans, check_dtypes=False)
fun = lambda x: lax.pad(x, onp.float32(0), [(1, 2, 1), (0, 1, 0)])
x = R(5, 10, 3).astype(onp.float32)
ans = vmap(fun)(x)
expected_ans = np.stack(list(map(fun, x)))
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testConcatenate(self):
R = lambda *shape: onp.random.RandomState(0).randn(*shape).astype(onp.float32)
fun = lambda *args: lax.concatenate(args, dimension=0)
x, y, z = R(10, 2, 3), R(1, 10, 3), R(4, 3)
ans = vmap(fun, in_axes=(0, 1, None))(x, y, z)
expected_ans = onp.concatenate([x, onp.swapaxes(y, 0, 1),
onp.broadcast_to(z, (10, 4, 3))], 1)
self.assertAllClose(ans, expected_ans, check_dtypes=False)
fun = lambda *args: lax.concatenate(args, dimension=1)
x, y, z = R(10, 2, 1), R(2, 3), R(2, 4, 10)
ans = vmap(fun, in_axes=(0, None, 2))(x, y, z)
expected_ans = onp.concatenate([x, onp.broadcast_to(y, (10, 2, 3)),
onp.moveaxis(z, 2, 0)], 2)
self.assertAllClose(ans, expected_ans, check_dtypes=False)
def testJacobianIssue54(self):
# test modeling the code in https://github.com/google/jax/issues/54
def func(xs):
return np.array([x for x in xs])
xs = np.ones((5, 1))
jacrev(func)(xs) # don't crash
jacfwd(func)(xs) # don't crash
def testAny(self):
# test modeling the code in https://github.com/google/jax/issues/108
ans = vmap(np.any)(np.array([[True, False], [False, False]]))
expected = np.array([True, False])
self.assertAllClose(ans, expected, check_dtypes=True)
@jtu.skip_on_devices("tpu")
def testHessian(self):
# test based on code from sindhwani@google
def fun(x, t):
return np.sum(np.power(np.maximum(x, 0.0), 2)) + t
x = onp.array([-1., -0.5, 0., 0.5, 1.0])
ans = hessian(lambda x: fun(x, 0.0))(x)
expected = onp.array([[0., 0., 0., 0., 0.],
[0., 0., 0., 0., 0.],
[0., 0.,0.5, 0., 0.],
[0., 0., 0., 2., 0.],
[0., 0., 0., 0., 2.]])
self.assertAllClose(ans, expected, check_dtypes=False)
def testDynamicSlice(self):
# test dynamic_slice via numpy indexing syntax
# see https://github.com/google/jax/issues/1613 for an explanation of why we
# need to use np rather than onp to create x and idx
x = np.arange(30).reshape((10, 3))
ans = vmap(lambda x, i: x[i], in_axes=(0, None))(x, 1)
expected = x[:, 1]
self.assertAllClose(ans, expected, check_dtypes=False)
idx = np.array([0, 1, 2, 1, 0] * 2)
ans = vmap(lambda x, i: x[i], in_axes=(0, 0))(x, idx)
expected = x[onp.arange(10), idx]
self.assertAllClose(ans, expected, check_dtypes=False)
x = np.arange(3)
idx = np.array([0, 1, 2, 1, 0] * 2)
ans = vmap(lambda x, i: x[i], in_axes=(None, 0))(x, idx)
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
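# Illustrative sketch (not part of the original test): batching the scalar index
# over both operands is the vmap spelling of NumPy advanced indexing along the
# leading axis.
def _gather_rows(x, idx):
    # equivalent to onp.asarray(x)[onp.arange(len(idx)), idx] for equal lengths
    return vmap(lambda row, i: row[i])(x, idx)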
def testDynamicUpdateSlice(self):
x = onp.random.randn(10, 3)
y = onp.random.randn(10)
ans = vmap(lambda x, y, i: lax.dynamic_update_index_in_dim(x, y, i, axis=0),
in_axes=(0, 0, None))(x, y, 1)
expected = x.copy()
expected[:, 1] = y
self.assertAllClose(ans, expected, check_dtypes=False)
x = onp.random.randn(3)
idx = onp.array([0, 1, 2, 1, 0] * 2)
ans = vmap(lambda x, y, i: lax.dynamic_update_index_in_dim(x, y, i, axis=0),
in_axes=(None, 0, 0))(x, y, idx)
expected = onp.broadcast_to(x, (10, 3)).copy()
expected[onp.arange(10), idx] = y
self.assertAllClose(ans, expected, check_dtypes=False)
def testRandom(self):
seeds = vmap(random.PRNGKey)(onp.arange(10))
ans = vmap(partial(random.normal, shape=(3, 2)))(seeds)
expected = onp.stack([random.normal(random.PRNGKey(seed), (3, 2))
for seed in onp.arange(10)])
self.assertAllClose(ans, expected, check_dtypes=False)
assert len(onp.unique(ans)) == 10 * 3 * 2
def testSort(self):
v = onp.arange(12)[::-1].reshape(3, 4)
sv = vmap(partial(lax.sort, dimension=0), (0,))(v)
self.assertAllClose(sv, v[:, ::-1], check_dtypes=True)
sv = vmap(partial(lax.sort, dimension=-1), (0,))(v)
self.assertAllClose(sv, v[:, ::-1], check_dtypes=True)
sv = vmap(partial(lax.sort, dimension=0), (1,))(v)
self.assertAllClose(sv, v[::-1, :].T, check_dtypes=True)
sv = vmap(partial(lax.sort, dimension=0), (1,), 1)(v)
self.assertAllClose(sv, v[::-1, :], check_dtypes=True)
def testSortKeyVal(self):
k = onp.arange(12)[::-1].reshape(3, 4)
v = onp.random.RandomState(0).permutation(12).reshape(3, 4)
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (0, 0))(k, v)
self.assertAllClose(sk, k[:, ::-1], check_dtypes=True)
self.assertAllClose(sv, v[:, ::-1], check_dtypes=True)
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (1, 1), 1)(k, v)
self.assertAllClose(sk, k[::-1, :], check_dtypes=True)
self.assertAllClose(sv, v[::-1, :], check_dtypes=True)
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (0, 1))(k, v.T)
self.assertAllClose(sk, k[:, ::-1], check_dtypes=True)
self.assertAllClose(sv, v[:, ::-1], check_dtypes=True)
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (1, 0))(k.T, v)
self.assertAllClose(sk, k[:, ::-1], check_dtypes=True)
self.assertAllClose(sv, v[:, ::-1], check_dtypes=True)
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (None, 0))(k[0], v)
self.assertAllClose(sk, onp.broadcast_to(k[0, ::-1], (3, 4)),
check_dtypes=True)
self.assertAllClose(sv, v[:, ::-1], check_dtypes=True)
sk, sv = vmap(partial(lax.sort_key_val, dimension=0), (1, None))(k.T, v[0])
self.assertAllClose(sk, k[:, ::-1], check_dtypes=True)
self.assertAllClose(sv, onp.broadcast_to(v[0, ::-1], (3, 4)),
check_dtypes=True)
def testConvGeneralDilated(self):
W = np.array(onp.random.randn(3, 3, 1, 5), dtype=onp.float32)
X = np.array(onp.random.randn(10, 5, 5, 1), dtype=onp.float32)
def f(params, x):
one = (1, 1)
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
y = lax.conv_general_dilated(
x, params, one, 'SAME', one, one, dimension_numbers)
return y
grad_loss = grad(lambda params, x: np.mean(f(params, x) ** 2))
# Test forward prop.
per_example = vmap(partial(f, W))(np.reshape(X, (10, 1, 5, 5, 1)))
per_example = np.reshape(per_example, (10, 5, 5, 5))
per_example_direct = f(W, X)
self.assertAllClose(per_example, per_example_direct, check_dtypes=True)
# Test gradients.
per_example = vmap(partial(grad_loss, W))(np.reshape(X, (10, 1, 5, 5, 1)))
per_example_direct = []
for i in range(10):
g = grad_loss(W, np.reshape(X[i], (1, 5, 5, 1)))
per_example_direct += [
np.reshape(g, (1,) + g.shape)]
per_example_direct = np.concatenate(per_example_direct, axis=0)
self.assertAllClose(per_example, per_example_direct, check_dtypes=True,
rtol=2e-2)
def testConvGeneralDilatedBatchNotMajor(self):
W = np.array(onp.random.randn(3, 3, 1, 4), dtype=onp.float32)
x = np.array(onp.random.randn(3, 5, 7, 5, 1), dtype=onp.float32)
def f(params, x):
one = (1, 1)
dimension_numbers = ('HNWC', 'HWIO', 'HWNC')
y = lax.conv_general_dilated(
x, params, one, 'SAME', one, one, dimension_numbers)
return y
per_example = vmap(partial(f, W))(x)
per_example = np.reshape(np.transpose(per_example, (1, 2, 0, 3, 4)),
(5, 5, 21, 4))
per_example_direct = f(W, np.reshape(np.transpose(x, (1, 0, 2, 3, 4)),
(5, 21, 5, 1)))
self.assertAllClose(per_example, per_example_direct, check_dtypes=True)
@parameterized.named_parameters(
{"testcase_name": "_op={}".format(name), "op": op, "unit": unit}
for name, op, unit in [("max", lax.max, -np.inf), ("min", lax.min, np.inf)])
def testMinMaxPool(self, op, unit):
W = np.array(onp.random.randn(3, 3, 1, 5), dtype=onp.float32)
X = np.array(onp.random.randn(10, 5, 5, 1), dtype=onp.float32)
def f(params, x):
one = (1, 1)
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
y = lax.conv_general_dilated(
x, params, one, 'SAME', one, one, dimension_numbers)
y = lax.reduce_window(
y, unit, op, (1, 2, 2, 1), (1, 1, 1, 1), 'SAME')
return y
grad_loss = grad(lambda params, x: np.mean(f(params, x) ** 2))
# Test forward prop.
per_example = vmap(partial(f, W))(np.reshape(X, (10, 1, 5, 5, 1)))
per_example = np.reshape(per_example, (10, 5, 5, 5))
per_example_direct = f(W, X)
self.assertAllClose(per_example, per_example_direct, check_dtypes=True)
# Test gradients.
per_example = vmap(partial(grad_loss, W))(np.reshape(X, (10, 1, 5, 5, 1)))
per_example_direct = []
for i in range(10):
g = grad_loss(W, np.reshape(X[i], (1, 5, 5, 1)))
per_example_direct += [
np.reshape(g, (1,) + g.shape)]
per_example_direct = np.concatenate(per_example_direct, axis=0)
self.assertAllClose(per_example, per_example_direct, check_dtypes=True,
rtol=5e-2)
def testSumPool(self):
W = np.array(onp.random.randn(3, 3, 1, 5), dtype=onp.float32)
X = np.array(onp.random.randn(10, 5, 5, 1), dtype=onp.float32)
def f(params, x):
one = (1, 1)
dimension_numbers = ('NHWC', 'HWIO', 'NHWC')
y = lax.conv_general_dilated(
x, params, one, 'SAME', one, one, dimension_numbers)
y = lax.reduce_window(
y, 0.0, lax.add, (1, 2, 2, 1), (1, 1, 1, 1), 'SAME')
return y
grad_loss = grad(lambda params, x: np.mean(f(params, x) ** 2))
# Test forward prop.
per_example = vmap(partial(f, W))(np.reshape(X, (10, 1, 5, 5, 1)))
per_example = np.reshape(per_example, (10, 5, 5, 5))
per_example_direct = f(W, X)
self.assertAllClose(per_example, per_example_direct, check_dtypes=True)
# Test gradients.
per_example = vmap(partial(grad_loss, W))(np.reshape(X, (10, 1, 5, 5, 1)))
per_example_direct = []
for i in range(10):
g = grad_loss(W, np.reshape(X[i], (1, 5, 5, 1)))
per_example_direct += [
np.reshape(g, (1,) + g.shape)]
per_example_direct = np.concatenate(per_example_direct, axis=0)
self.assertAllClose(per_example, per_example_direct, check_dtypes=True,
rtol=3e-2)
def testCumProd(self):
x = np.arange(9).reshape(3, 3) + 1
y = vmap(lambda x: np.cumprod(x, axis=-1))(x)
self.assertAllClose(onp.cumprod(x, axis=1, dtype=np.int_), y,
check_dtypes=True)
def testSelect(self):
pred = onp.array([True, False])
on_true = onp.array([0, 1])
on_false = onp.array([2, 3])
ans = vmap(lax.select)(pred, on_true, on_false)
expected = onp.array([0, 3])
self.assertAllClose(ans, expected, check_dtypes=True)
pred = onp.array([False, True])
on_true = onp.array([0, 1])
on_false = onp.array([2, 3])
ans = vmap(lax.select, (0, None, None))(pred, on_true, on_false)
expected = onp.array([[2, 3],
[0, 1]])
self.assertAllClose(ans, expected, check_dtypes=True)
pred = True
on_true = onp.array([0, 1], onp.float32)
on_false = onp.array(3, onp.float32)
ans = vmap(lax.select, (None, 0, None))(pred, on_true, on_false)
expected = onp.array([0, 1], onp.float32)
self.assertAllClose(ans, expected, check_dtypes=True)
pred = onp.array([False, True])
on_true = onp.array([0, 1], onp.float32)
on_false = onp.array(3, onp.float32)
ans = vmap(lax.select, (0, 0, None))(pred, on_true, on_false)
expected = onp.array([3, 1], onp.float32)
self.assertAllClose(ans, expected, check_dtypes=True)
pred = onp.array([False, True])
on_true = onp.array([2], onp.float32)
on_false = onp.array([[3, 4]], onp.float32)
ans = vmap(lax.select, (0, None, 1), 1)(pred, on_true, on_false)
expected = onp.array([[3, 2]], onp.float32)
self.assertAllClose(ans, expected, check_dtypes=True)
def testLaxLinalgCholesky(self):
a = onp.random.RandomState(0).randn(10, 5, 5).astype(onp.float32)
a = onp.matmul(a, onp.conj(onp.swapaxes(a, -1, -2)))
ans = vmap(lax_linalg.cholesky)(a)
expected = onp.linalg.cholesky(a)
self.assertAllClose(ans, expected, check_dtypes=False, rtol=1e-4)
b = onp.random.RandomState(0).randn(10, 5, 5).astype(onp.float32)
b = onp.matmul(b, onp.conj(onp.swapaxes(b, -1, -2)))
b_trans = onp.swapaxes(b, 0, 1) # shape is (5, 10, 5)
ans = vmap(lax_linalg.cholesky, in_axes=1, out_axes=0)(b_trans)
expected = onp.linalg.cholesky(b)
self.assertAllClose(ans, expected, check_dtypes=False, rtol=1e-4)
def testLaxLinalgTriangularSolve(self):
a = onp.random.RandomState(0).randn(4, 10, 4).astype(onp.float32)
a += onp.eye(4, dtype=np.float32)[:, None, :]
b = onp.random.RandomState(0).randn(5, 4, 10).astype(onp.float32)
ans = vmap(lax_linalg.triangular_solve, in_axes=(1, 2))(a, b)
expected = onp.stack(
[lax_linalg.triangular_solve(a[:, i], b[..., i]) for i in range(10)])
self.assertAllClose(ans, expected, check_dtypes=True)
ans = vmap(lax_linalg.triangular_solve, in_axes=(None, 2))(a[:, 0], b)
expected = onp.stack(
[lax_linalg.triangular_solve(a[:, 0], b[..., i]) for i in range(10)])
self.assertAllClose(ans, expected, check_dtypes=True)
ans = vmap(lax_linalg.triangular_solve, in_axes=(1, None))(a, b[..., 0])
expected = onp.stack(
[lax_linalg.triangular_solve(a[:, i], b[..., 0]) for i in range(10)])
self.assertAllClose(ans, expected, check_dtypes=True)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,
slice_sizes),
"axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes, "rng_factory": rng_factory,
"rng_idx_factory": rng_idx_factory}
for dtype in [onp.float32, onp.int32]
for axis, shape, idxs, dnums, slice_sizes in [
(0, (3, 5), onp.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
(1, (10, 3), onp.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
(1, (10, 3, 5), onp.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
(2, (10, 5, 3), onp.array([[0, 2], [1, 0]]),
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,),
start_index_map=(0, 1)),
(1, 3)),
]
for rng_idx_factory in [partial(jtu.rand_int, max(shape))]
for rng_factory in [jtu.rand_default])
def testGatherBatchedOperand(self, axis, shape, dtype, idxs, dnums,
slice_sizes, rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
operand = rng(shape, dtype)
ans = vmap(fun, (axis, None))(operand, idxs)
expected = onp.stack([fun(operand[(slice(None),) * axis + (i,)], idxs)
for i in range(operand.shape[axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,
slice_sizes),
"axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes, "rng_factory": rng_factory,
"rng_idx_factory": rng_idx_factory}
for dtype in [onp.float32, onp.float64]
for axis, shape, idxs, dnums, slice_sizes in [
(0, (3, 5), onp.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
(1, (10, 3), onp.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
(1, (10, 3, 5), onp.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
(2, (10, 5, 3), onp.array([[0, 2], [1, 0]]),
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,),
start_index_map=(0, 1)),
(1, 3)), ]
for rng_idx_factory in [partial(jtu.rand_int, max(shape))]
for rng_factory in [jtu.rand_default])
def testGatherGradBatchedOperand(self, axis, shape, dtype, idxs, dnums,
slice_sizes, rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
gfun = grad(lambda x, idx: np.sum(np.sin(fun(x, idx))))
operand = rng(shape, dtype)
ans = vmap(gfun, (axis, None))(operand, idxs)
expected = onp.stack([gfun(operand[(slice(None),) * axis + (i,)], idxs)
for i in range(operand.shape[axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,
slice_sizes),
"axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes, "rng_factory": rng_factory,
"rng_idx_factory": rng_idx_factory}
for dtype in [onp.float32, onp.int32]
for axis, shape, idxs, dnums, slice_sizes in [
(0, (5,), onp.array([[[0], [2]], [[1], [3]]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)), (1,)),
(1, (10,), onp.array([[0, 0, 0], [0, 2, 1]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)), (2,)),
(1, (10, 5), onp.array([[0, 2, 1], [0, 3, 3]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)), (1, 3)),
(0, (10, 5), onp.array([[[0, 1], [2, 0]],
[[1, 0], [2, 3]]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)), (1, 3)),
]
for rng_idx_factory in [partial(jtu.rand_int, max(shape))]
for rng_factory in [jtu.rand_default])
def testGatherBatchedIndices(self, axis, shape, dtype, idxs, dnums,
slice_sizes, rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
operand = rng(shape, dtype)
ans = vmap(fun, (None, axis))(operand, idxs)
expected = onp.stack([fun(operand, idxs[(slice(None),) * axis + (i,)])
for i in range(idxs.shape[axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, idxs, dnums,
slice_sizes),
"axis": axis, "shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes, "rng_factory": rng_factory,
"rng_idx_factory": rng_idx_factory}
for dtype in [onp.float32, onp.float64]
for axis, shape, idxs, dnums, slice_sizes in [
(0, (5,), onp.array([[[0], [2]], [[1], [3]]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)), (1,)),
(1, (10,), onp.array([[0, 0, 0], [0, 2, 1]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)), (2,)),
(1, (10, 5), onp.array([[0, 2, 1], [0, 3, 3]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)), (1, 3)),
(0, (10, 5), onp.array([[[0, 1], [2, 0]],
[[1, 0], [2, 3]]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)), (1, 3)),
]
for rng_idx_factory in [partial(jtu.rand_int, max(shape))]
for rng_factory in [jtu.rand_default])
def testGatherGradBatchedIndices(self, axis, shape, dtype, idxs, dnums,
slice_sizes, rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
gfun = grad(lambda x, idx: np.sum(np.sin(fun(x, idx))))
operand = rng(shape, dtype)
ans = vmap(gfun, (None, axis))(operand, idxs)
expected = onp.stack([gfun(operand, idxs[(slice(None),) * axis + (i,)])
for i in range(idxs.shape[axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs,
dnums, slice_sizes),
"op_axis": op_axis, "idxs_axis": idxs_axis, "shape": shape, "dtype":
dtype, "idxs": idxs, "dnums": dnums, "slice_sizes": slice_sizes,
"rng_factory": rng_factory, "rng_idx_factory": rng_idx_factory}
for dtype in [onp.float32, onp.int32]
for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [
(0, 0, (2, 5), onp.array([[[0], [2]], [[1], [3]]]),
lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
(1, 1, (10, 2), onp.array([[0, 0, 0], [0, 2, 1]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
(0, 1, (2, 10, 5,), onp.array([[[0, 2, 1], [0, 3, 3]]]).T,
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
(2, 0, (10, 5, 2), onp.array([[[0, 2], [1, 0]],
[[1, 0], [2, 0]]]),
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
]
for rng_idx_factory in [partial(jtu.rand_int, max(shape))]
for rng_factory in [jtu.rand_default])
def testGatherBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums,
slice_sizes, rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
operand = rng(shape, dtype)
assert operand.shape[op_axis] == idxs.shape[idxs_axis]
ans = vmap(fun, (op_axis, idxs_axis))(operand, idxs)
expected = onp.stack([fun(operand[(slice(None),) * op_axis + (i,)],
idxs[(slice(None),) * idxs_axis + (i,)])
for i in range(idxs.shape[idxs_axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_shape={}_op_axis={}_idxs_axis={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), op_axis, idxs_axis, idxs,
dnums, slice_sizes),
"op_axis": op_axis, "idxs_axis": idxs_axis, "shape": shape, "dtype":
dtype, "idxs": idxs, "dnums": dnums, "slice_sizes": slice_sizes,
"rng_factory": rng_factory, "rng_idx_factory": rng_idx_factory}
for dtype in [onp.float32]
for op_axis, idxs_axis, shape, idxs, dnums, slice_sizes in [
(0, 0, (2, 5), onp.array([[[0], [2]], [[1], [3]]]),
lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
(1, 1, (10, 2), onp.array([[0, 0, 0], [0, 2, 1]]).T[..., None],
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
(0, 1, (2, 10, 5,), onp.array([[[0, 2, 1], [0, 3, 3]]]).T,
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
(2, 0, (10, 5, 2), onp.array([[[0, 2], [1, 0]],
[[1, 0], [2, 0]]]),
lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
]
for rng_idx_factory in [partial(jtu.rand_int, max(shape))]
for rng_factory in [jtu.rand_default])
def testGatherGradBatchedBoth(self, op_axis, idxs_axis, shape, dtype, idxs, dnums,
slice_sizes, rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
gfun = grad(lambda x, idx: np.sum(np.sin(fun(x, idx))))
operand = rng(shape, dtype)
assert operand.shape[op_axis] == idxs.shape[idxs_axis]
ans = vmap(gfun, (op_axis, idxs_axis))(operand, idxs)
expected = onp.stack([gfun(operand[(slice(None),) * op_axis + (i,)],
idxs[(slice(None),) * idxs_axis + (i,)])
for i in range(idxs.shape[idxs_axis])])
self.assertAllClose(ans, expected, check_dtypes=False)
def testNumpyIndexing1(self):
a = np.arange(2 * 3 * 4).reshape((2, 3, 4))
ind = onp.array([[0, 1],
[2, 0]])
def f(a, ind):
return a[:, ind]
expected = onp.stack([f(a, ind[i, :]) for i in range(ind.shape[0])])
ans = vmap(f, (None, 0))(a, ind)
assert onp.all(ans == expected)
def testNumpyIndexing2(self):
a = np.arange(2 * 3 * 4).reshape((2, 3, 4))
def f(a):
inds = np.array([0, 2])
return a[:, inds]
ans = vmap(f)(a)
expected = onp.stack([f(a[:, i, :]) for i in range(a.shape[1])], axis=1)
assert onp.all(ans == expected)
def testTranspose(self):
x = onp.arange(4 * 3 * 3).reshape((4, 3, 3))
ans = vmap(lambda x: x + x.T)(x)
expected = x + onp.swapaxes(x, -1, -2)
self.assertAllClose(ans, expected, check_dtypes=False)
def testTransposePermutation(self):
x = onp.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))
ans = vmap(lambda x: np.transpose(x, (1, 0, 2)))(x)
expected = onp.transpose(x, (0, 2, 1, 3))
self.assertAllClose(ans, expected, check_dtypes=False)
x = onp.arange(6 * 3 * 4 * 5).reshape((6, 3, 4, 5))
ans = vmap(lambda x: np.transpose(x, (1, 2, 0)))(x)
expected = onp.transpose(x, (0, 2, 3, 1))
self.assertAllClose(ans, expected, check_dtypes=False)
x = onp.arange(6 * 3 * 4 * 5).reshape((3, 4, 6, 5))
ans = vmap(lambda x: np.transpose(x, (1, 2, 0)), in_axes=2)(x)
expected = onp.transpose(x, (2, 1, 3, 0))
self.assertAllClose(ans, expected, check_dtypes=False)
def testIssue354(self):
psd_mat = onp.random.randn(20, 10)
psd_mat = psd_mat.T.dot(psd_mat)
vec = onp.random.randn(10)
def f(scale):
scaled_mat = scale * psd_mat
chol = np.linalg.cholesky(scaled_mat)
return -0.5 * np.sum((np.einsum('ij,j->i', chol, vec))**2)
vmapped_f = vmap(f)
vmapped_f_grad = grad(lambda x: np.sum(vmapped_f(x)))
scales = onp.array([[0.1], [0.2], [0.3], [0.4], [0.5]])
ans = vmapped_f_grad(scales) # don't crash!
expected = onp.stack([grad(f)(scale) for scale in scales])
self.assertAllClose(ans, expected, check_dtypes=False,
rtol=jtu.default_gradient_tolerance)
def testIssue387(self):
# https://github.com/google/jax/issues/387
R = onp.random.RandomState(0).rand(100, 2)
def dist_sq(R):
dR = R[:, np.newaxis, :] - R[np.newaxis, :, :]
zero = np.zeros_like(dR)
dR = dR - np.where(np.abs(dR) < 0.5, zero, 0.5 * np.sign(dR))
return np.sum(dR ** 2, axis=2)
@jit
def f(R):
dr = dist_sq(R)
return np.sum(R ** 2)
H = hessian(f)(R) # don't crash on UnshapedArray
def testIssue489(self):
def f(key):
def body_fn(uk):
key = uk[1]
u = random.uniform(key, (), dtype=np.float64)
key, _ = random.split(key)
return u, key
u, _ = lax.while_loop(lambda uk: uk[0] > 0.5, body_fn,
(np.float64(1.), key))
return u
print(vmap(f)(random.split(random.PRNGKey(0), 2))) # no crash
def testEmptyTuples(self):
# Ensure there is no crash when a vectorized input contains empty tuples.
result = vmap(lambda x, _: x + 1)(onp.array([0, 1]), ())
self.assertAllClose(result, onp.array([1, 2]), check_dtypes=False)
# Ensure there is no crash when a vectorized output contains empty tuples.
result, empty_tuple = vmap(lambda x: (x + 1, ()))(onp.array([0, 1]))
self.assertAllClose(result, onp.array([1, 2]), check_dtypes=False)
self.assertEqual((), empty_tuple)
def testIndexAddBatchedIndexesOnly(self):
f = lambda x, idx, y: jax.ops.index_add(x, jax.ops.index[idx], y)
result = vmap(f, (None, 0, None))(onp.zeros((10,)), onp.arange(10,), 1.)
self.assertAllClose(result, onp.eye(10), check_dtypes=False)
def testIssue1170(self):
def f(index1, index2):
return np.arange(36).reshape(6, 6)[index1, index2]
g = jax.jit(jax.pmap(f))
ans = g(index1=onp.asarray([1]), index2=onp.asarray([2]))
expected = g(onp.asarray([1]), onp.asarray([2]))
self.assertAllClose(ans, expected, check_dtypes=True)
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/batching_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import defaultdict
import itertools
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
from jax import lax
import jax.numpy as np
import jax.test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
def rng():
return onp.random.RandomState(0)
class EinsumTest(jtu.JaxTestCase):
def _check(self, s, *ops):
a = onp.einsum(s, *ops)
b = np.einsum(s, *ops, precision=lax.Precision.HIGHEST)
self.assertAllClose(a, b, atol=1e-4, rtol=1e-4, check_dtypes=True)
def test_three_operands_1(self):
r = rng()
x = r.randn(3)
y = r.randn(4)
z = r.randn(5)
s = 'i,j,k->ijk'
self._check(s, x, y, z)
def test_three_operands_2(self):
r = rng()
x = r.randn(3)
y = r.randn(4)
z = r.randn(5)
s = 'i,j,k->ijk'
self._check(s, x, y, z)
def test_two_operands_1(self):
r = rng()
x = r.randn(3, 4)
y = r.randn(4)
s = 'ij,j->i'
self._check(s, x, y)
def test_two_operands_2(self):
r = rng()
x = r.randn(3, 4, 5)
y = r.randn(4)
s = 'ijk,j->i'
self._check(s, x, y)
def test_two_operands_3(self):
r = rng()
x = r.randn(3, 4, 3)
y = r.randn(3)
s = 'iji,i->j'
self._check(s, x, y)
def test_two_operands_4(self):
r = rng()
x = r.randn(3, 4)
y = r.randn(3, 4)
s = 'ij,ij->'
self._check(s, x, y)
def test_two_operands_5(self):
r = rng()
x = r.randn(10, 2, 3)
y = r.randn(3, 4)
s = 'nij,jk->nik'
self._check(s, x, y)
def test_two_operands_6(self):
# based on https://github.com/google/jax/issues/37#issuecomment-448572187
r = rng()
x = r.randn(2, 1)
y = r.randn(2, 3, 4)
s = 'sa,shb->shab'
self._check(s, x, y)
def test_one_operand_1(self):
r = rng()
x = r.randn(3, 4, 5)
s = 'ijk->j'
self._check(s, x)
def test_one_operand_2(self):
r = rng()
x = r.randn(3, 4, 5)
s = 'ijk->kij'
self._check(s, x)
def test_one_operand_3(self):
r = rng()
x = r.randn(3, 4, 5)
s = 'ijk->ki'
self._check(s, x)
def test_one_operand_4(self):
r = rng()
x = r.randn(3, 4, 5)
s = 'ijk->ki'
self._check(s, x)
def test_one_operand_5(self):
r = rng()
x = r.randn(2, 3, 4, 5)
s = '...ijk->...ki'
self._check(s, x)
def test_one_operand_6(self):
r = rng()
x = r.randn(3, 4, 5)
s = '...ijk->ki'
self._check(s, x)
def test_one_operand_7(self):
r = rng()
x = r.randn(3, 3)
s = 'ii->'
self._check(s, x)
def test_one_operand_8(self):
r = rng()
x = r.randn(3, 3)
s = 'ij->'
self._check(s, x)
def test_one_operand_9(self):
r = rng()
x = r.randn(3, 3, 3)
s = 'iii->'
self._check(s, x)
def test_one_operand_10(self):
r = rng()
x = r.randn(3, 3)
s = 'ii->i'
self._check(s, x)
def test_one_operand_11(self):
r = rng()
x = r.randn(3, 3, 4)
s = 'iij->i'
self._check(s, x)
def test_one_operand_12(self):
r = rng()
x = r.randn(3, 3, 3)
s = 'iii->i'
self._check(s, x)
def test_one_operand_13(self):
r = rng()
x = r.randn(3, 3, 5, 4, 4)
s = 'iijkk->i'
self._check(s, x)
def test_one_operand_14(self):
r = rng()
x = r.randn(3, 3, 5, 4, 4)
s = 'iijkk->ik'
self._check(s, x)
def test_one_operand_15(self):
r = rng()
x = r.randn(3, 3, 5, 4, 4)
s = 'iijkl->il'
self._check(s, x)
def test_one_operand_16(self):
r = rng()
x = r.randn(3, 3)
s = 'ij->ij'
self._check(s, x)
def test_tf_unsupported_1(self):
# from https://www.tensorflow.org/api_docs/python/tf/einsum
r = rng()
x = r.randn(2, 3, 5, 1)
y = r.randn(3, 4, 5, 1)
s = 'ij...,jk...->ik...'
self._check(s, x, y)
def test_tf_unsupported_2(self):
# from https://www.tensorflow.org/api_docs/python/tf/einsum
r = rng()
x = r.randn(2, 3, 3)
y = r.randn(4)
s = 'ijj,k->ik'
self._check(s, x, y)
def test_tf_unsupported_3(self):
# from https://www.tensorflow.org/api_docs/python/tf/einsum
r = rng()
x = r.randn(2, 3)
y = r.randn(2, 3)
z = r.randn(3, 4)
s = 'ij,ij,jk->ik'
self._check(s, x, y, z)
# these tests are based on https://github.com/dask/dask/pull/3412/files
@parameterized.named_parameters(
{"testcase_name": "_{}_dtype={}".format(einstr, dtype.__name__),
"einstr": einstr, "dtype": dtype}
for einstr in [
'abc,bad->abcd',
'abcdef,bcdfg->abcdeg',
'ea,fb,abcd,gc,hd->efgh',
'ab,b',
'aa',
'a,a->',
'a,a->a',
'a,a',
'a,b',
'a,b,c',
'a',
'ba,b',
'ba,b->',
'defab,fedbc->defac',
'ab...,bc...->ac...',
'a...a',
'abc...->cba...',
'...ab->...a',
'a...a->a...',
# Following 2 from https://stackoverflow.com/a/19203475/1611416
'...abc,...abcd->...d',
'ab...,b->ab...',
# https://github.com/dask/dask/pull/3412#discussion_r182413444
'aa->a',
'ab,ab,c->c',
'aab,bc->ac',
'aab,bcc->ac',
'fdf,cdd,ccd,afe->ae',
'fff,fae,bef,def->abd',
]
for dtype in [np.float32, np.int32, np.complex64, np.bool_])
def test_from_dask(self, einstr, dtype):
r = jtu.rand_default()
if '->' in einstr:
input_str, result_names = einstr.split('->')
else:
input_str = einstr
input_names = input_str.split(',')
dims = itertools.cycle([2, 3, 4])
shapes = defaultdict(lambda: next(dims))
input_shapes = [tuple(shapes[c] for c in names.replace('...', '01'))
for names in input_names]
operands = [r(shape, dtype) for shape in input_shapes]
self._check(einstr, *operands)
def test_ordered_front_batch_dim_case(self):
x = onp.ones((1,8,20,4))
y = onp.ones((1,8,20,4))
s = 'ijkl,ijml->ijkm'
self._check(s, x, y)
def test_einsum_path(self):
# just check examples from onp.einsum_path docstring
a = onp.random.rand(2, 2)
b = onp.random.rand(2, 5)
c = onp.random.rand(5, 2)
path_info = onp.einsum_path('ij,jk,kl->il', a, b, c, optimize='greedy')
self.assertEqual(str(path_info[0]), "['einsum_path', (1, 2), (0, 1)]")
self.assertEqual(path_info[1].split('\n')[0],
' Complete contraction: ij,jk,kl->il')
# check this doesn't crash
I = onp.random.rand(10, 10, 10, 10)
C = onp.random.rand(10, 10)
onp.einsum_path('ea,fb,abcd,gc,hd->efgh', C, C, I, C, C, optimize='greedy')
def test_einsum_kpmurphy_example(self):
# code from an email with @murphyk
N, C, D, K, T = 2, 3, 4, 5, 6
r = rng()
S = r.randn(N, T, K)
W = r.randn(K, D)
V = r.randn(D, C)
L = onp.zeros((N, C))
for n in range(N):
for c in range(C):
s = 0
for d in range(D):
for k in range(K):
for t in range(T):
s += S[n,t,k] * W[k,d] * V[d,c]
L[n,c] = s
path = np.einsum_path('ntk,kd,dc->nc', S, W, V, optimize='optimal')[0]
rtol = 1e-2 if jtu.device_under_test() == "tpu" else None
self.assertAllClose(L, np.einsum('ntk,kd,dc->nc', S, W, V, optimize=path),
check_dtypes=False, rtol=rtol)
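# Illustrative sketch (not from the original test): the quintuple loop above is
# the contraction L[n, c] = sum_{t,k,d} S[n,t,k] * W[k,d] * V[d,c]; the
# precomputed einsum path only changes the contraction order, not the result.
def _loop_free_reference(S, W, V):
    return onp.einsum('ntk,kd,dc->nc', S, W, V)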
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/lax_numpy_einsum_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import numpy.random as npr
import six
from unittest import SkipTest
from jax import api
from jax import test_util as jtu
from jax import numpy as np
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
npr.seed(0)
class MultiBackendTest(jtu.JaxTestCase):
"""Tests jit targeting to different backends."""
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_backend={}".format(backend),
"backend": backend,
}
for backend in ['cpu', 'gpu', 'tpu', None]
))
def testMultiBackend(self, backend):
if backend not in ('cpu', jtu.device_under_test(), None):
raise SkipTest()
@partial(api.jit, backend=backend)
def fun(x, y):
return np.matmul(x, y)
x = npr.uniform(size=(10,10))
y = npr.uniform(size=(10,10))
z_host = onp.matmul(x, y)
z = fun(x, y)
self.assertAllClose(z, z_host, check_dtypes=True, rtol=1e-2)
correct_platform = backend if backend else jtu.device_under_test()
self.assertEqual(z.device_buffer.platform(), correct_platform)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ordering={}".format(ordering),
"ordering": ordering,}
for ordering in [('cpu', None), ('gpu', None), ('tpu', None), (None, None)]))
def testMultiBackendNestedJit(self, ordering):
outer, inner = ordering
if outer not in ('cpu', jtu.device_under_test(), None):
raise SkipTest()
@partial(api.jit, backend=outer)
def fun(x, y):
@partial(api.jit, backend=inner)
def infun(x, y):
return np.matmul(x, y)
return infun(x, y) + np.ones_like(x)
x = npr.uniform(size=(10,10))
y = npr.uniform(size=(10,10))
z_host = onp.matmul(x, y) + onp.ones_like(x)
z = fun(x, y)
self.assertAllClose(z, z_host, check_dtypes=True, rtol=1e-2)
correct_platform = outer if outer else jtu.device_under_test()
self.assertEqual(z.device_buffer.platform(), correct_platform)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ordering={}".format(ordering),
"ordering": ordering,}
for ordering in [
('cpu', 'gpu'), ('gpu', 'cpu'),
('cpu', 'tpu'), ('tpu', 'cpu'),
(None, 'cpu'), (None, 'gpu'), (None, 'tpu'),
]))
def testMultiBackendNestedJitConflict(self, ordering):
outer, inner = ordering
if outer not in ('cpu', jtu.device_under_test(), None):
raise SkipTest()
if inner not in ('cpu', jtu.device_under_test(), None):
raise SkipTest()
@partial(api.jit, backend=outer)
def fun(x, y):
@partial(api.jit, backend=inner)
def infun(x, y):
return np.matmul(x, y)
return infun(x, y) + np.ones_like(x)
x = npr.uniform(size=(10,10))
y = npr.uniform(size=(10,10))
self.assertRaises(ValueError, lambda: fun(x, y))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_backend={}".format(backend),
"backend": backend,}
for backend in ['cpu', 'gpu', 'tpu']
))
def testGpuMultiBackendOpByOpReturn(self, backend):
if backend not in ('cpu', jtu.device_under_test()):
raise SkipTest()
@partial(api.jit, backend=backend)
def fun(x, y):
return np.matmul(x, y)
x = npr.uniform(size=(10,10))
y = npr.uniform(size=(10,10))
z = fun(x, y)
w = np.sin(z)
self.assertEqual(z.device_buffer.platform(), backend)
self.assertEqual(w.device_buffer.platform(), backend)
@jtu.skip_on_devices("cpu") # test can only fail with non-cpu backends
def testJitCpu(self):
@partial(api.jit, backend='cpu')
def get_arr(scale):
return scale + np.ones((2, 2))
x = get_arr(0.1)
a = x / x.shape[0]
b = x + np.ones_like(x)
c = x + np.eye(2)
self.assertEqual(a.device_buffer.device(), api.devices('cpu')[0])
self.assertEqual(b.device_buffer.device(), api.devices('cpu')[0])
self.assertEqual(c.device_buffer.device(), api.devices('cpu')[0])
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/multibackend_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Stax library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
from jax import test_util as jtu
from jax import random
from jax.experimental import stax
from jax.config import config
config.parse_flags_with_absl()
def random_inputs(rng, input_shape):
if type(input_shape) is tuple:
return rng.randn(*input_shape).astype(onp.float32)
elif type(input_shape) is list:
return [random_inputs(rng, shape) for shape in input_shape]
else:
raise TypeError(type(input_shape))
def _CheckShapeAgreement(test_case, init_fun, apply_fun, input_shape):
rng_key = random.PRNGKey(0)
rng_key, init_key = random.split(rng_key)
result_shape, params = init_fun(init_key, input_shape)
inputs = random_inputs(onp.random.RandomState(0), input_shape)
result = apply_fun(params, inputs, rng=rng_key)
test_case.assertEqual(result.shape, result_shape)
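# Each stax layer used below is an (init_fun, apply_fun) pair: init_fun takes
# (rng, input_shape) and returns (output_shape, params), while apply_fun takes
# (params, inputs, **kwargs); this helper checks that the shape reported by
# init_fun matches the shape actually produced by apply_fun.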
class StaxTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(shape), "shape": shape}
for shape in [(2, 3), (5,)]))
def testRandnInitShape(self, shape):
key = random.PRNGKey(0)
out = stax.randn()(key, shape)
self.assertEqual(out.shape, shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(shape), "shape": shape}
for shape in [(2, 3), (2, 3, 4)]))
def testGlorotInitShape(self, shape):
key = random.PRNGKey(0)
out = stax.glorot()(key, shape)
self.assertEqual(out.shape, shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_channels={}_filter_shape={}_padding={}_strides={}_input_shape={}"
.format(channels, filter_shape, padding, strides, input_shape),
"channels": channels, "filter_shape": filter_shape, "padding": padding,
"strides": strides, "input_shape": input_shape}
for channels in [2, 3]
for filter_shape in [(1, 1), (2, 3)]
for padding in ["SAME", "VALID"]
for strides in [None, (2, 1)]
for input_shape in [(2, 10, 11, 1)]))
def testConvShape(self, channels, filter_shape, padding, strides,
input_shape):
init_fun, apply_fun = stax.Conv(channels, filter_shape, strides=strides,
padding=padding)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_channels={}_filter_shape={}_padding={}_strides={}_input_shape={}"
.format(channels, filter_shape, padding, strides, input_shape),
"channels": channels, "filter_shape": filter_shape, "padding": padding,
"strides": strides, "input_shape": input_shape}
for channels in [2, 3]
for filter_shape in [(1, 1), (2, 3), (3, 3)]
for padding in ["SAME", "VALID"]
for strides in [None, (2, 1), (2, 2)]
for input_shape in [(2, 10, 11, 1)]))
def testConvTransposeShape(self, channels, filter_shape, padding, strides,
input_shape):
init_fun, apply_fun = stax.ConvTranspose(channels, filter_shape, # 2D
strides=strides, padding=padding)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_channels={}_filter_shape={}_padding={}_strides={}_input_shape={}"
.format(channels, filter_shape, padding, strides, input_shape),
"channels": channels, "filter_shape": filter_shape, "padding": padding,
"strides": strides, "input_shape": input_shape}
for channels in [2, 3]
for filter_shape in [(1,), (2,), (3,)]
for padding in ["SAME", "VALID"]
for strides in [None, (1,), (2,)]
for input_shape in [(2, 10, 1)]))
def testConv1DTransposeShape(self, channels, filter_shape, padding, strides,
input_shape):
init_fun, apply_fun = stax.Conv1DTranspose(channels, filter_shape,
strides=strides, padding=padding)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_out_dim={}_input_shape={}"
.format(out_dim, input_shape),
"out_dim": out_dim, "input_shape": input_shape}
for out_dim in [3, 4]
for input_shape in [(2, 3), (3, 4)]))
def testDenseShape(self, out_dim, input_shape):
init_fun, apply_fun = stax.Dense(out_dim)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_input_shape={}_nonlinear={}"
.format(input_shape, nonlinear),
"input_shape": input_shape, "nonlinear": nonlinear}
for input_shape in [(2, 3), (2, 3, 4)]
for nonlinear in ["Relu", "Sigmoid", "Elu", "LeakyRelu"]))
def testNonlinearShape(self, input_shape, nonlinear):
init_fun, apply_fun = getattr(stax, nonlinear)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_window_shape={}_padding={}_strides={}_input_shape={}"
"_maxpool={}_spec={}"
.format(window_shape, padding, strides, input_shape,
max_pool, spec),
"window_shape": window_shape, "padding": padding, "strides": strides,
"input_shape": input_shape, "max_pool": max_pool, "spec": spec}
for window_shape in [(1, 1), (2, 3)]
for padding in ["VALID"]
for strides in [None, (2, 1)]
for input_shape in [(2, 5, 6, 4)]
for max_pool in [False, True]
for spec in ["NHWC", "NCHW", "WHNC", "WHCN"]))
def testPoolingShape(self, window_shape, padding, strides, input_shape,
max_pool, spec):
layer = stax.MaxPool if max_pool else stax.AvgPool
init_fun, apply_fun = layer(window_shape, padding=padding, strides=strides,
spec=spec)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(input_shape),
"input_shape": input_shape}
for input_shape in [(2, 3), (2, 3, 4)]))
def testFlattenShape(self, input_shape):
init_fun, apply_fun = stax.Flatten
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_input_shape={}_spec={}".format(input_shape, i),
"input_shape": input_shape, "spec": spec}
for input_shape in [(2, 5, 6, 1)]
for i, spec in enumerate([
[stax.Conv(3, (2, 2))],
[stax.Conv(3, (2, 2)), stax.Flatten, stax.Dense(4)]])))
def testSerialComposeLayersShape(self, input_shape, spec):
init_fun, apply_fun = stax.serial(*spec)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_input_shape={}".format(input_shape),
"input_shape": input_shape}
for input_shape in [(3, 4), (2, 5, 6, 1)]))
def testDropoutShape(self, input_shape):
init_fun, apply_fun = stax.Dropout(0.9)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_input_shape={}".format(input_shape),
"input_shape": input_shape}
for input_shape in [(3, 4), (2, 5, 6, 1)]))
def testFanInSum(self, input_shape):
init_fun, apply_fun = stax.FanInSum
_CheckShapeAgreement(self, init_fun, apply_fun, [input_shape, input_shape])
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshapes={}_axis={}".format(input_shapes, axis),
"input_shapes": input_shapes, "axis": axis}
for input_shapes, axis in [
([(2, 3), (2, 1)], 1),
([(2, 3), (2, 1)], -1),
([(1, 2, 4), (1, 1, 4)], 1),
]))
def testFanInConcat(self, input_shapes, axis):
init_fun, apply_fun = stax.FanInConcat(axis)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shapes)
def testIssue182(self):
key = random.PRNGKey(0)
init_fun, apply_fun = stax.Softmax
input_shape = (10, 3)
inputs = onp.arange(30.).astype("float32").reshape(input_shape)
out_shape, params = init_fun(key, input_shape)
out = apply_fun(params, inputs)
assert out_shape == out.shape
assert onp.allclose(onp.sum(onp.asarray(out), -1), 1.)
def testBatchNormShapeNHWC(self):
key = random.PRNGKey(0)
init_fun, apply_fun = stax.BatchNorm(axis=(0, 1, 2))
input_shape = (4, 5, 6, 7)
inputs = random_inputs(onp.random.RandomState(0), input_shape)
out_shape, params = init_fun(key, input_shape)
out = apply_fun(params, inputs)
self.assertEqual(out_shape, input_shape)
beta, gamma = params
self.assertEqual(beta.shape, (7,))
self.assertEqual(gamma.shape, (7,))
self.assertEqual(out_shape, out.shape)
def testBatchNormShapeNCHW(self):
key = random.PRNGKey(0)
# Regression test for https://github.com/google/jax/issues/461
init_fun, apply_fun = stax.BatchNorm(axis=(0, 2, 3))
input_shape = (4, 5, 6, 7)
inputs = random_inputs(onp.random.RandomState(0), input_shape)
out_shape, params = init_fun(key, input_shape)
out = apply_fun(params, inputs)
self.assertEqual(out_shape, input_shape)
beta, gamma = params
self.assertEqual(beta.shape, (5,))
self.assertEqual(gamma.shape, (5,))
self.assertEqual(out_shape, out.shape)
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/stax_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
from functools import partial
import numpy.random as npr
from absl.testing import absltest
from absl.testing import parameterized
import itertools as it
import jax.numpy as np
from jax import jit, jvp, vjp
import jax.test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
npr.seed(0)
from jax.util import unzip2, safe_zip, safe_map
map = safe_map
zip = safe_zip
subfun_prob = 0.5
thin_prob = 0.1
size_reduction_factor = 3
Eqn = namedtuple('Eqn', ['in_vars', 'out_vars', 'fun'])
Prim = namedtuple('Prim', ['fun'])
ArrayType = namedtuple('ArrayType', ['shape', 'dtype'])
Var = namedtuple('Var', ['name', 'vartype'])
Fun = namedtuple('Fun', ['in_vars', 'out_vars', 'eqns'])
def gen_fun_and_types(size):
in_types = [gen_array_type(size) for _ in range(gen_nonneg_int(size))]
fun, _ = gen_function(size, in_types)
return fun
def gen_function(size, in_types):
eqns = []
in_vars = map(fresh_var, in_types)
cur_vars = in_vars[:]
for _ in range(gen_nonneg_int(size)):
if not cur_vars:
break
if npr.rand() < subfun_prob:
arg_vars = gen_subset(cur_vars)
arg_types = [v.vartype for v in arg_vars]
fun, out_types = gen_function(size / size_reduction_factor, arg_types)
fun = partial(eval_fun, fun)
fun = maybe_jit(fun, len(arg_types))
else:
arity = choice(list(primitive_generators))
arg_vars = gen_sized_subset(cur_vars, arity)
arg_types = [v.vartype for v in arg_vars]
prim_gen = weighted_choice(primitive_generators[arity])
fun, out_type = prim_gen(size, *arg_types)
fun = wrap_singleton(fun)
out_types = [out_type]
out_vars = map(fresh_var, out_types)
eqns.append(Eqn(arg_vars, out_vars, fun))
cur_vars.extend(out_vars)
cur_vars = thin(cur_vars, thin_prob)
out_vars = gen_subset(cur_vars)
return Fun(in_vars, out_vars, eqns), [v.vartype for v in out_vars]
def eval_fun(fun, *args):
def read(v):
return env[v]
def write(v, x):
env[v] = x
env = {}
map(write, fun.in_vars, args)
for in_vars, out_vars, f in fun.eqns:
out_vals = f(*map(read, in_vars))
map(write, out_vars, out_vals)
return map(read, fun.out_vars)
def maybe_jit(f, num_args):
static_argnums = thin(range(num_args), 0.5)
return jit(f, static_argnums=static_argnums)
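# `thin(range(num_args), 0.5)` keeps each argument index with probability 0.5,
# so roughly half of the arguments are marked static; jit then re-traces
# whenever one of those static arguments takes a new value.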
counter = it.count()
def fresh_var(ty):
return Var(next(counter), ty)
def gen_array_type(size):
# TODO(dougalm): randomize this
return ArrayType((2,2), np.float32)
def gen_array_val(array_type):
# TODO(dougalm): different sizes and dtypes
return npr.randn(*array_type.shape)
def gen_neg(size, t):
return (lambda x: -x), t
def gen_trig(size, t):
op = choice([np.sin, np.cos])
return op, t
def gen_binop(size, t1, t2):
unifier, t_out = gen_broadcasting_unifier(t1, t2)
binop = choice([lambda x, y: x + y,
lambda x, y: x * y])
def unify_and_binop(x, y):
x_, y_ = unifier(x, y)
return binop(x_, y_)
return unify_and_binop, t_out
def thin(xs, p):
return [x for x in xs if npr.rand() > p]
def gen_broadcasting_unifier(t1, t2):
assert t1.shape == t2.shape
return lambda x, y: (x,y), t1
# TODO: generate slices and paddings to match shapes
def wrap_singleton(f):
return lambda *xs: (f(*xs),)
unary_primitive_generators = [
(3, gen_trig),
(1, gen_neg) ]
binary_primitive_generators = [
(1, gen_binop)]
primitive_generators = { 1: unary_primitive_generators,
2: binary_primitive_generators }
def gen_nonneg_int(size):
return npr.randint(size)
def choice(xs, weights=None):
  # npr.choice isn't really a generic `RandomState -> [a] -> a` chooser
  # because it inspects the components to see if they're array-like
assert xs
n = len(xs)
if weights is None:
i = npr.randint(n)
else:
normalizer = float(sum(weights))
weights = [w / normalizer for w in weights]
i = npr.choice(range(n), p=weights)
return xs[i]
def weighted_choice(weighted_choices):
weights, choices = unzip2(weighted_choices)
return choice(choices, weights)
def gen_sized_subset(xs, size):
return [choice(xs) for _ in range(size)]
def gen_subset(xs):
if not xs:
return []
return gen_sized_subset(xs, npr.randint(len(xs) + 1))
def gen_vals(vs):
return [gen_array_val(v.vartype) for v in vs]
def inner_prod(xs, ys):
xys = zip(xs, ys)
assert all(x.shape == y.shape for x, y in xys)
return sum(np.sum(x * y) for x, y in xys)
def jvp_fd(fun, args, tangents):
EPS = 1e-4
def eval_eps(eps):
return fun(*[x if t is None else x + eps * t
for x, t in zip(args, tangents)])
ys_neg = eval_eps(-EPS)
ys_pos = eval_eps(EPS)
ys = eval_eps(0.0)
deriv = [(y_pos - y_neg) / (2 * EPS) for y_neg, y_pos in zip(ys_neg, ys_pos)]
return ys, deriv
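# A minimal, self-contained sanity sketch of the helper above (added for
# illustration; `_jvp_fd_example` is not part of the original test suite):
# for f(x) = [x ** 2] at x = 3.0 with tangent 1.0, the central difference
# (f(x + eps) - f(x - eps)) / (2 * eps) should match the analytic JVP of 6.
def _jvp_fd_example():
  _, (deriv,) = jvp_fd(lambda x: [x ** 2], [3.0], [1.0])
  assert abs(deriv - 6.0) < 1e-3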
def check_all_close(xs, ys, tol=1e-3):
for x, y in zip(xs, ys):
check_close(x, y, tol)
def check_close(x, y, tol=1e-3):
assert np.shape(x) == np.shape(y)
  # TODO(dougalm): re-enable once we've tackled the less pedantic bugs
# assert x.dtype == y.dtype
assert np.allclose(x, y, rtol=tol, atol=tol), \
"Value mismatch:\n{}\n vs\n{}\n".format(x, y)
def partial_argnums(f, args, dyn_argnums):
fixed_args = [None if i in dyn_argnums else arg for i, arg in enumerate(args)]
def f_(*dyn_args):
args = fixed_args[:]
for i, arg in zip(dyn_argnums, dyn_args):
args[i] = arg
return f(*args)
dyn_args = [args[i] for i in dyn_argnums]
return f_, dyn_args
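# partial_argnums closes over the non-dynamic arguments and returns
# (f_, dyn_args) such that f_(*dyn_args) == f(*args), leaving only the
# indices in dyn_argnums free to vary.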
class GeneratedFunTest(jtu.JaxTestCase):
"""Tests of transformations on randomly generated functions."""
@parameterized.named_parameters(jtu.cases_from_gens(gen_fun_and_types))
def testJitIsIdentity(self, fun):
vals = gen_vals(fun.in_vars)
fun = partial(eval_fun, fun)
ans = fun(*vals)
static_argnums = thin(range(len(vals)), 0.5)
ans_jitted = jit(fun, static_argnums=static_argnums)(*vals)
try:
check_all_close(ans, ans_jitted)
except:
print(fun)
raise
@parameterized.named_parameters(jtu.cases_from_gens(gen_fun_and_types))
def testJVPMatchesFD(self, fun):
vals = gen_vals(fun.in_vars)
tangents = gen_vals(fun.in_vars)
fun = partial(eval_fun, fun)
dyn_argnums = thin(range(len(vals)), 0.5)
tangents = [tangents[i] for i in dyn_argnums]
fun, vals = partial_argnums(fun, vals, dyn_argnums)
ans1, deriv1 = jvp_fd(fun, vals, tangents)
ans2, deriv2 = jvp(fun, tuple(vals), tuple(tangents))
check_all_close(ans1, ans2)
check_all_close(deriv1, deriv2)
@parameterized.named_parameters(jtu.cases_from_gens(gen_fun_and_types))
def vjp_matches_fd(self, fun):
vals = gen_vals(fun.in_vars)
in_tangents = gen_vals(fun.in_vars)
in_cotangents = gen_vals(fun.out_vars)
fun = partial(eval_fun, fun)
dyn_argnums = thin(range(len(vals)), 0.5)
in_tangents = [in_tangents[i] for i in dyn_argnums]
fun, vals = partial_argnums(fun, vals, dyn_argnums)
ans1, out_tangents = jvp_fd(fun, vals, in_tangents)
ans2, vjpfun = vjp(fun, *vals)
out_cotangents = vjpfun(in_cotangents)
check_all_close(ans1, ans2)
inner_prod_fd = inner_prod(out_tangents, in_cotangents)
inner_prod_ad = inner_prod(in_tangents, out_cotangents)
check_close(inner_prod_fd, inner_prod_ad)
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/generated_fun_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the optimizers module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
from absl.testing import absltest
import numpy as onp
import jax.numpy as np
import jax.test_util as jtu
from jax import jit, grad, jacfwd, jacrev
from jax import core, tree_util
from jax import lax
from jax.experimental import optimizers
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
class OptimizerTests(jtu.JaxTestCase):
def _CheckOptimizer(self, optimizer, loss, x0, num_steps, *args, **kwargs):
self._CheckFuns(optimizer, loss, x0, *args)
self._CheckRun(optimizer, loss, x0, num_steps, *args, **kwargs)
def _CheckFuns(self, optimizer, loss, x0, *args):
init_fun, update_fun, get_params = optimizer(*args)
opt_state = init_fun(x0)
self.assertAllClose(x0, get_params(opt_state), check_dtypes=True)
opt_state2 = update_fun(0, grad(loss)(x0), opt_state) # doesn't crash
self.assertEqual(tree_util.tree_structure(opt_state),
tree_util.tree_structure(opt_state2))
@jtu.skip_on_devices('gpu')
def _CheckRun(self, optimizer, loss, x0, num_steps, *args, **kwargs):
init_fun, update_fun, get_params = optimizer(*args)
opt_state = init_fun(x0)
for i in range(num_steps):
x = get_params(opt_state)
g = grad(loss)(x)
opt_state = update_fun(i, g, opt_state)
xstar = get_params(opt_state)
self.assertLess(loss(xstar), 1e-2)
update_fun_jitted = jit(update_fun)
opt_state = init_fun(x0)
for i in range(num_steps):
x = get_params(opt_state)
g = grad(loss)(x)
opt_state = update_fun_jitted(i, g, opt_state)
xstar = get_params(opt_state)
self.assertLess(loss(xstar), 1e-2)
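  # A typical training loop with this (init_fun, update_fun, get_params)
  # triple looks like the sketch below; `params`, `loss`, and `num_steps`
  # stand in for whatever the individual tests use.
  #
  #   opt_state = init_fun(params)
  #   for i in range(num_steps):
  #     g = grad(loss)(get_params(opt_state))
  #     opt_state = update_fun(i, g, opt_state)
  #   trained_params = get_params(opt_state)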
def testSgdScalar(self):
def loss(x): return x**2
x0 = 1.
num_iters = 100
step_size = 0.1
self._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_size)
def testSgdVector(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
num_iters = 100
step_size = 0.1
self._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_size)
def testSgdNestedTuple(self):
def loss(xyz):
x, (y, z) = xyz
return sum(np.dot(a, a) for a in [x, y, z])
x0 = (np.ones(2), (np.ones(2), np.ones(2)))
num_iters = 100
step_size = 0.1
self._CheckOptimizer(optimizers.sgd, loss, x0, num_iters, step_size)
def testMomentumVector(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
num_iters = 100
step_size = 0.1
mass = 0.
self._CheckOptimizer(optimizers.momentum, loss, x0, num_iters, step_size, mass)
def testMomentumDict(self):
def loss(dct): return np.dot(dct['x'], dct['x'])
x0 = {'x': np.ones(2)}
num_iters = 100
step_size = 0.1
mass = 0.
self._CheckOptimizer(optimizers.momentum, loss, x0, num_iters, step_size, mass)
def testRmspropVector(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
num_iters = 100
step_size = 0.1
self._CheckOptimizer(optimizers.rmsprop, loss, x0, num_iters, step_size)
def testAdamVector(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
num_iters = 100
step_size = 0.1
self._CheckOptimizer(optimizers.adam, loss, x0, num_iters, step_size)
def testSgdClosure(self):
def loss(y, x): return y**2 * x**2
x0 = 1.
y = 1.
num_iters = 20
step_size = 0.1
partial_loss = functools.partial(loss, y)
self._CheckRun(optimizers.sgd, partial_loss, x0, num_iters, step_size)
def testAdagrad(self):
def loss(xs):
x1, x2 = xs
return np.sum(x1**2) + np.sum(x2**2)
num_iters = 100
step_size = 0.1
x0 = (np.ones(2), np.ones((2, 2)))
self._CheckOptimizer(optimizers.adagrad, loss, x0, num_iters, step_size)
def testSM3(self):
def loss(xs):
x1, x2 = xs
return np.sum(x1 ** 2) + np.sum(x2 ** 2)
num_iters = 100
step_size = 0.1
x0 = (np.ones(2), np.ones((2, 2)))
self._CheckOptimizer(optimizers.sm3, loss, x0, num_iters, step_size)
def testSgdVectorExponentialDecaySchedule(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
step_schedule = optimizers.exponential_decay(0.1, 3, 2.)
self._CheckFuns(optimizers.sgd, loss, x0, step_schedule)
def testSgdVectorInverseTimeDecaySchedule(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
step_schedule = optimizers.inverse_time_decay(0.1, 3, 2.)
self._CheckFuns(optimizers.sgd, loss, x0, step_schedule)
def testAdamVectorInverseTimeDecaySchedule(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
step_schedule = optimizers.inverse_time_decay(0.1, 3, 2.)
self._CheckFuns(optimizers.adam, loss, x0, step_schedule)
def testMomentumVectorInverseTimeDecayStaircaseSchedule(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
step_sched = optimizers.inverse_time_decay(0.1, 3, 2., staircase=True)
mass = 0.9
self._CheckFuns(optimizers.momentum, loss, x0, step_sched, mass)
def testRmspropmomentumVectorPolynomialDecaySchedule(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
step_schedule = optimizers.polynomial_decay(1.0, 50, 0.1)
self._CheckFuns(optimizers.rmsprop_momentum, loss, x0, step_schedule)
def testRmspropVectorPiecewiseConstantSchedule(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
step_schedule = optimizers.piecewise_constant([25, 75], [1.0, 0.5, 0.1])
self._CheckFuns(optimizers.rmsprop, loss, x0, step_schedule)
def testTracedStepSize(self):
def loss(x): return np.dot(x, x)
x0 = np.ones(2)
step_size = 0.1
init_fun, _, _ = optimizers.sgd(step_size)
opt_state = init_fun(x0)
@jit
def update(opt_state, step_size):
_, update_fun, get_params = optimizers.sgd(step_size)
x = get_params(opt_state)
g = grad(loss)(x)
return update_fun(0, g, opt_state)
update(opt_state, 0.9) # doesn't crash
# TODO(mattjj): re-enable
# def testDeviceTupleState(self):
# init_fun, update_fun, _ = optimizers.sgd(0.1)
# opt_state = init_fun(np.zeros(3))
# self.assertIsInstance(opt_state, optimizers.OptimizerState)
# self.assertIsInstance(opt_state.packed_state, core.JaxTuple)
# opt_state = jit(update_fun)(0, np.zeros(3), opt_state)
# self.assertIsInstance(opt_state, optimizers.OptimizerState)
# self.assertIsInstance(opt_state.packed_state, xla.DeviceTuple)
def testUpdateFunStructureMismatchErrorMessage(self):
@optimizers.optimizer
def opt_maker():
def init_fun(x0):
return {'x': x0}
def update_fun(i, g, opt_state):
x = opt_state['x']
return {'x': x - 0.1 * g, 'v': g} # bug!
def get_params(opt_state):
return opt_state['x']
return init_fun, update_fun, get_params
init_fun, update_fun, get_params = opt_maker()
opt_state = init_fun(np.zeros(3))
self.assertRaises(TypeError, lambda: update_fun(opt_state))
def testUtilityNorm(self):
x0 = (np.ones(2), (np.ones(3), np.ones(4)))
norm = optimizers.l2_norm(x0)
expected = onp.sqrt(onp.sum(onp.ones(2+3+4)**2))
self.assertAllClose(norm, expected, check_dtypes=False)
def testUtilityClipGrads(self):
g = (np.ones(2), (np.ones(3), np.ones(4)))
norm = optimizers.l2_norm(g)
ans = optimizers.clip_grads(g, 1.1 * norm)
expected = g
self.assertAllClose(ans, expected, check_dtypes=False)
ans = optimizers.l2_norm(optimizers.clip_grads(g, 0.9 * norm))
expected = 0.9 * norm
self.assertAllClose(ans, expected, check_dtypes=False)
def testIssue758(self):
# code from https://github.com/google/jax/issues/758
# this is more of a scan + jacfwd/jacrev test, but it lives here to use the
# optimizers.py code
def harmonic_bond(conf, params):
return np.sum(conf * params)
opt_init, opt_update, get_params = optimizers.sgd(5e-2)
x0 = onp.array([0.5], dtype=onp.float64)
params = onp.array([0.3], dtype=onp.float64)
def minimize_structure(test_params):
energy_fn = functools.partial(harmonic_bond, params=test_params)
grad_fn = grad(energy_fn, argnums=(0,))
opt_state = opt_init(x0)
def apply_carry(carry, _):
i, x = carry
g = grad_fn(get_params(x))[0]
new_state = opt_update(i, g, x)
new_carry = (i+1, new_state)
return new_carry, _
carry_final, _ = lax.scan(apply_carry, (0, opt_state), np.zeros((75, 0)))
trip, opt_final = carry_final
assert trip == 75
return opt_final
initial_params = np.float64(0.5)
minimize_structure(initial_params)
def loss(test_params):
opt_final = minimize_structure(test_params)
return 1.0 - get_params(opt_final)[0]
loss_opt_init, loss_opt_update, loss_get_params = optimizers.sgd(5e-2)
J1 = jacrev(loss, argnums=(0,))(initial_params)
J2 = jacfwd(loss, argnums=(0,))(initial_params)
self.assertAllClose(J1, J2, check_dtypes=True, rtol=1e-6)
def testUnpackPackRoundTrip(self):
opt_init, _, _ = optimizers.momentum(0.1, mass=0.9)
params = [{'w': onp.random.randn(1, 2), 'bias': onp.random.randn(2)}]
expected = opt_init(params)
ans = optimizers.pack_optimizer_state(
optimizers.unpack_optimizer_state(expected))
self.assertEqual(ans, expected)
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/optimizers_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import operator
import unittest
import six
if six.PY3:
import enum
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import jax
from jax import dtypes
from jax import numpy as np
from jax import test_util as jtu
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
bool_dtypes = [onp.dtype('bool')]
signed_dtypes = [onp.dtype('int8'), onp.dtype('int16'), onp.dtype('int32'),
onp.dtype('int64')]
unsigned_dtypes = [onp.dtype('uint8'), onp.dtype('uint16'), onp.dtype('uint32'),
onp.dtype('uint64')]
onp_float_dtypes = [onp.dtype('float16'), onp.dtype('float32'),
onp.dtype('float64')]
float_dtypes = [onp.dtype(dtypes.bfloat16)] + onp_float_dtypes
complex_dtypes = [onp.dtype('complex64'), onp.dtype('complex128')]
all_dtypes = (bool_dtypes + signed_dtypes + unsigned_dtypes + float_dtypes +
complex_dtypes)
scalar_types = [np.bool_, np.int8, np.int16, np.int32, np.int64,
np.uint8, np.uint16, np.uint32, np.uint64,
np.bfloat16, np.float16, np.float32, np.float64,
np.complex64, np.complex128]
class DtypesTest(jtu.JaxTestCase):
@parameterized.named_parameters(
{"testcase_name": "_type={}".format(type.__name__), "type": type,
"dtype": dtype}
for type, dtype in [(bool, np.bool_), (int, np.int_), (float, np.float_),
(complex, np.complex_)])
def testDefaultTypes(self, type, dtype):
for f in [np.array, jax.jit(np.array), jax.jit(lambda x: x)]:
y = f(type(0))
self.assertTrue(isinstance(y, np.ndarray), msg=(f, y))
self.assertEqual(y.dtype, dtypes.canonicalize_dtype(dtype), msg=(f, y))
@parameterized.named_parameters(
{"testcase_name": "_swap={}_jit={}".format(swap, jit),
"swap": swap, "jit": jit}
for swap in [False, True] for jit in [False, True])
@jtu.skip_on_devices("tpu") # F16 not supported on TPU
def testBinaryPromotion(self, swap, jit):
testcases = [
(np.array(1.), 0., np.float_),
(np.array(1.), np.array(0.), np.float_),
(np.array(1.), np.array(0., dtype=np.float16), np.float_),
(np.array(1.), np.array(0., dtype=np.float32), np.float_),
(np.array(1.), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float16), 0., np.float16),
(np.array(1., dtype=np.float32), 0., np.float32),
(np.array(1., dtype=np.float64), 0., np.float64),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float16), np.float16),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
(np.array(1., dtype=np.float16), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float32), np.array(0., dtype=np.float32), np.float32),
(np.array(1., dtype=np.float32), np.array(0., dtype=np.float64), np.float64),
(np.array(1., dtype=np.float64), np.array(0., dtype=np.float64), np.float64),
(np.array([1.]), 0., np.float_),
(np.array([1.]), np.array(0.), np.float_),
(np.array([1.]), np.array(0., dtype=np.float16), np.float_),
(np.array([1.]), np.array(0., dtype=np.float32), np.float_),
(np.array([1.]), np.array(0., dtype=np.float64), np.float64),
(np.array([1.], dtype=np.float32), np.array(0., dtype=np.float16), np.float32),
(np.array([1.], dtype=np.float16), np.array(0., dtype=np.float32), np.float32),
(np.array([1.], dtype=np.float16), 0., np.float16),
]
op = jax.jit(operator.add) if jit else operator.add
for x, y, dtype in testcases:
x, y = (y, x) if swap else (x, y)
z = x + y
self.assertTrue(isinstance(z, np.ndarray), msg=(x, y, z))
self.assertEqual(z.dtype, dtypes.canonicalize_dtype(dtype), msg=(x, y, z))
def testPromoteDtypes(self):
for t1 in all_dtypes:
self.assertEqual(t1, dtypes.promote_types(t1, t1))
self.assertEqual(t1, dtypes.promote_types(t1, onp.bool_))
self.assertEqual(onp.dtype(onp.complex128),
dtypes.promote_types(t1, onp.complex128))
for t2 in all_dtypes:
# Symmetry
self.assertEqual(dtypes.promote_types(t1, t2),
dtypes.promote_types(t2, t1))
self.assertEqual(onp.dtype(onp.float32),
dtypes.promote_types(onp.float16, dtypes.bfloat16))
# Promotions of non-inexact types against inexact types always prefer
# the inexact types.
for t in float_dtypes + complex_dtypes:
for i in bool_dtypes + signed_dtypes + unsigned_dtypes:
self.assertEqual(t, dtypes.promote_types(t, i))
# Promotions between exact types, or between inexact types, match NumPy.
for groups in [bool_dtypes + signed_dtypes + unsigned_dtypes,
onp_float_dtypes + complex_dtypes]:
for t1, t2 in itertools.combinations(groups, 2):
self.assertEqual(onp.promote_types(t1, t2),
dtypes.promote_types(t1, t2))
def testScalarInstantiation(self):
for t in [np.bool_, np.int32, np.bfloat16, np.float32, np.complex64]:
a = t(1)
self.assertEqual(a.dtype, np.dtype(t))
self.assertIsInstance(a, xla.DeviceArray)
self.assertEqual(0, np.ndim(a))
def testIsSubdtype(self):
for t in scalar_types:
self.assertTrue(dtypes.issubdtype(t, t))
self.assertTrue(dtypes.issubdtype(onp.dtype(t).type, t))
self.assertTrue(dtypes.issubdtype(t, onp.dtype(t).type))
if t != np.bfloat16:
for category in [onp.generic, np.inexact, np.integer, np.signedinteger,
np.unsignedinteger, np.floating, np.complexfloating]:
self.assertEqual(dtypes.issubdtype(t, category),
onp.issubdtype(onp.dtype(t).type, category))
self.assertEqual(dtypes.issubdtype(t, category),
onp.issubdtype(onp.dtype(t).type, category))
def testArrayCasts(self):
for t in [np.bool_, np.int32, np.bfloat16, np.float32, np.complex64]:
a = onp.array([1, 2.5, -3.7])
self.assertEqual(a.astype(t).dtype, np.dtype(t))
self.assertEqual(np.array(a).astype(t).dtype, np.dtype(t))
@unittest.skipIf(six.PY2, "Test requires Python 3")
def testEnumPromotion(self):
class AnEnum(enum.IntEnum):
A = 42
B = 101
onp.testing.assert_equal(onp.array(42), onp.array(AnEnum.A))
onp.testing.assert_equal(np.array(42), np.array(AnEnum.A))
onp.testing.assert_equal(onp.int32(101), onp.int32(AnEnum.B))
onp.testing.assert_equal(np.int32(101), np.int32(AnEnum.B))
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/dtypes_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from functools import partial
import itertools
import operator
import unittest
from unittest import SkipTest
import warnings
from absl.testing import absltest
from absl.testing import parameterized
import six
import numpy as onp
import jax
import jax.ops
from jax import api
from jax import lax
from jax import linear_util
from jax import numpy as lnp
from jax import test_util as jtu
from jax import dtypes
from jax import tree_util
from jax.interpreters import partial_eval
from jax.test_util import check_grads
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
nonempty_nonscalar_array_shapes = [(4,), (3, 4), (3, 1), (1, 4), (2, 1, 4), (2, 3, 4)]
nonempty_array_shapes = [()] + nonempty_nonscalar_array_shapes
empty_array_shapes = [(0,), (0, 4), (3, 0),]
scalar_shapes = [jtu.NUMPY_SCALAR_SHAPE, jtu.PYTHON_SCALAR_SHAPE]
array_shapes = nonempty_array_shapes + empty_array_shapes
nonzerodim_shapes = nonempty_nonscalar_array_shapes + empty_array_shapes
nonempty_shapes = scalar_shapes + nonempty_array_shapes
all_shapes = scalar_shapes + array_shapes
float_dtypes = list(jtu.supported_dtypes().intersection(
{lnp.bfloat16, onp.float16, onp.float32, onp.float64}))
complex_dtypes = [onp.complex64, onp.complex128]
int_dtypes = [onp.int32, onp.int64]
unsigned_dtypes = [onp.uint32, onp.uint64]
bool_dtypes = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
inexact_dtypes = float_dtypes + complex_dtypes
number_dtypes = float_dtypes + complex_dtypes + int_dtypes
all_dtypes = number_dtypes + bool_dtypes
python_scalar_dtypes = [lnp.bool_, lnp.int_, lnp.float_, lnp.complex_]
def _valid_dtypes_for_shape(shape, dtypes):
# Not all (shape, dtype) pairs are valid. In particular, Python scalars only
# have one type in each category (float, bool, etc.)
if shape is jtu.PYTHON_SCALAR_SHAPE:
return [t for t in dtypes if t in python_scalar_dtypes]
return dtypes
def _shape_and_dtypes(shapes, dtypes):
for shape in shapes:
for dtype in _valid_dtypes_for_shape(shape, dtypes):
yield (shape, dtype)
OpRecord = collections.namedtuple(
"OpRecord",
["name", "nargs", "dtypes", "shapes", "rng_factory", "diff_modes",
"test_name", "check_dtypes", "tolerance", "inexact"])
def op_record(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name=None, check_dtypes=True, tolerance=None, inexact=False):
test_name = test_name or name
return OpRecord(name, nargs, dtypes, shapes, rng_factory, diff_modes,
test_name, check_dtypes, tolerance, inexact)
JAX_ONE_TO_ONE_OP_RECORDS = [
op_record("abs", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("add", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("ceil", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("conj", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("exp", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("fabs", 1, float_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("float_power", 2, inexact_dtypes, all_shapes,
partial(jtu.rand_default, scale=1), ["rev"],
tolerance={lnp.bfloat16: 1e-2, onp.float32: 1e-3,
onp.float64: 1e-12, onp.complex64: 2e-4,
onp.complex128: 1e-12}, check_dtypes=False),
op_record("floor", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("greater", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("greater_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("less", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("less_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, []),
op_record("log", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("logical_and", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_not", 1, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_or", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("logical_xor", 2, all_dtypes, all_shapes, jtu.rand_bool, []),
op_record("maximum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("minimum", 2, all_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("multiply", 2, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("negative", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("nextafter", 2, [f for f in float_dtypes if f != lnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"], inexact=True, tolerance=0),
op_record("not_equal", 2, all_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("array_equal", 2, number_dtypes, all_shapes, jtu.rand_some_equal, ["rev"]),
op_record("reciprocal", 1, inexact_dtypes, all_shapes, jtu.rand_default, []),
op_record("subtract", 2, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("signbit", 1, default_dtypes + bool_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"]),
op_record("sin", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cos", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("tan", 1, number_dtypes, all_shapes,
partial(jtu.rand_uniform, -1.5, 1.5), ["rev"], inexact=True),
op_record("sinh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("cosh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
# TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
# ~float32 precision.
# TODO(b/143135720): on GPU, tanh has only ~float32 precision.
op_record("tanh", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.float64: 1e-7, onp.complex128: 1e-7},
inexact=True),
op_record("arcsin", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arccos", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan", 1, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arctan2", 2, float_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
op_record("arcsinh", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arccosh", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("arctanh", 1, number_dtypes, all_shapes, jtu.rand_small, ["rev"],
inexact=True),
]
JAX_COMPOUND_OP_RECORDS = [
# angle has inconsistent 32/64-bit return types across numpy versions.
op_record("angle", 1, number_dtypes, all_shapes, jtu.rand_default, [],
check_dtypes=False, inexact=True),
op_record("atleast_1d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_2d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("atleast_3d", 1, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("cbrt", 1, default_dtypes, all_shapes, jtu.rand_default, ["rev"],
inexact=True),
op_record("conjugate", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("deg2rad", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero, ["rev"],
inexact=six.PY3),
op_record("divmod", 2, int_dtypes + float_dtypes, all_shapes,
jtu.rand_nonzero, []),
op_record("exp2", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"],
tolerance={lnp.bfloat16: 2e-2, onp.float16: 1e-2}, inexact=True),
# TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
# precision.
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="expm1_large", tolerance={onp.float64: 1e-8}, inexact=True),
op_record("expm1", 1, number_dtypes, all_shapes, jtu.rand_small_positive,
[], tolerance={onp.float64: 1e-8}, inexact=True),
op_record("fix", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("floor_divide", 2, number_dtypes, all_shapes, jtu.rand_nonzero,
["rev"]),
op_record("heaviside", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("hypot", 2, default_dtypes, all_shapes, jtu.rand_default, [],
inexact=True),
op_record("kron", 2, number_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("outer", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("imag", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("iscomplex", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isfinite", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isinf", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isnan", 1, inexact_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isneginf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isposinf", 1, float_dtypes, all_shapes, jtu.rand_some_inf_and_nan, []),
op_record("isreal", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("isrealobj", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("log2", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log10", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_positive, [],
test_name="log1p_large", tolerance={onp.float64: 1e-12},
inexact=True),
op_record("log1p", 1, number_dtypes, all_shapes, jtu.rand_small_positive, [],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float64: 1e-12}, inexact=True),
op_record("logaddexp2", 2, float_dtypes, all_shapes,
jtu.rand_some_inf_and_nan, ["rev"],
tolerance={onp.float16: 1e-2}, inexact=True),
op_record("polyval", 2, number_dtypes, nonempty_nonscalar_array_shapes,
jtu.rand_default, [], check_dtypes=False,
tolerance={onp.float16: 1e-2, onp.float64: 1e-12}),
op_record("positive", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("power", 2, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
tolerance={onp.complex128: 1e-14}),
op_record("rad2deg", 1, float_dtypes, all_shapes, jtu.rand_default, []),
op_record("ravel", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("real", 1, number_dtypes, all_shapes, jtu.rand_some_inf, []),
op_record("remainder", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-2}),
op_record("mod", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("sinc", 1, [t for t in number_dtypes if t != lnp.bfloat16],
all_shapes, jtu.rand_default, ["rev"],
tolerance={onp.complex64: 1e-5}, inexact=True,
check_dtypes=False),
op_record("square", 1, number_dtypes, all_shapes, jtu.rand_default, ["rev"]),
op_record("sqrt", 1, number_dtypes, all_shapes, jtu.rand_positive, ["rev"],
inexact=True),
op_record("transpose", 1, all_dtypes, all_shapes, jtu.rand_default, ["rev"],
check_dtypes=False),
op_record("true_divide", 2, all_dtypes, all_shapes, jtu.rand_nonzero,
["rev"], inexact=True),
op_record("diff", 1, number_dtypes, nonzerodim_shapes, jtu.rand_default, ["rev"]),
]
JAX_BITWISE_OP_RECORDS = [
op_record("bitwise_and", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_not", 1, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_or", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
op_record("bitwise_xor", 2, int_dtypes + unsigned_dtypes, all_shapes,
jtu.rand_bool, []),
]
JAX_REDUCER_RECORDS = [
op_record("mean", 1, number_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("prod", 1, all_dtypes, all_shapes, jtu.rand_small_positive, []),
op_record("sum", 1, all_dtypes, all_shapes, jtu.rand_default, []),
op_record("nanmean", 1, inexact_dtypes, nonempty_shapes, jtu.rand_some_nan,
[], inexact=True),
op_record("nanprod", 1, inexact_dtypes, all_shapes, jtu.rand_some_nan, []),
op_record("nansum", 1, number_dtypes, all_shapes, jtu.rand_some_nan, []),
]
JAX_REDUCER_NO_DTYPE_RECORDS = [
op_record("all", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("any", 1, all_dtypes, all_shapes, jtu.rand_some_zero, []),
op_record("max", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("min", 1, all_dtypes, nonempty_shapes, jtu.rand_default, []),
op_record("var", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
op_record("std", 1, all_dtypes, nonempty_shapes, jtu.rand_default, [],
inexact=True),
]
JAX_ARGMINMAX_RECORDS = [
op_record("argmin", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
op_record("argmax", 1, all_dtypes, nonempty_shapes, jtu.rand_some_equal, []),
]
JAX_OPERATOR_OVERLOADS = [
op_record("__add__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__sub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__mul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__eq__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ne__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__lt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__gt__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__ge__", 2, default_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pos__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__neg__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__pow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 2e-4, onp.complex128: 1e-14}),
op_record("__mod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__floordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("__truediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
op_record("__abs__", 1, number_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): __invert__ fails on bool dtypes because ~True == -2
op_record("__invert__", 1, int_dtypes, all_shapes, jtu.rand_default, []),
# TODO(mattjj): investigate these failures
# op_record("__or__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__and__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__xor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__divmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
# TODO(mattjj): lshift, rshift
]
JAX_RIGHT_OPERATOR_OVERLOADS = [
op_record("__radd__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rsub__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rmul__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
op_record("__rpow__", 2, inexact_dtypes, all_shapes, jtu.rand_positive, [],
tolerance={onp.float32: 2e-4, onp.complex64: 1e-3}),
op_record("__rmod__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, [],
tolerance={onp.float16: 1e-1}),
op_record("__rfloordiv__", 2, default_dtypes, all_shapes, jtu.rand_nonzero, []),
op_record("__rtruediv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, [],
inexact=True),
# op_record("__ror__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rand__", 2, number_dtypes, all_shapes, jtu.rand_default, []),
# op_record("__rxor__", 2, number_dtypes, all_shapes, jtu.rand_bool, []),
# op_record("__rdivmod__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
]
numpy_version = tuple(map(int, onp.version.version.split('.')))
if numpy_version >= (1, 15):
JAX_COMPOUND_OP_RECORDS += [
op_record("isclose", 2, [t for t in all_dtypes if t != lnp.bfloat16],
all_shapes, jtu.rand_small_positive, []),
op_record("gcd", 2, int_dtypes, all_shapes, jtu.rand_default, []),
op_record("lcm", 2, int_dtypes, all_shapes, jtu.rand_default, []),
]
JAX_REDUCER_NO_DTYPE_RECORDS += [
op_record("ptp", 1, number_dtypes, nonempty_shapes, jtu.rand_default, []),
]
if six.PY2:
JAX_OPERATOR_OVERLOADS += [
op_record("__div__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
]
JAX_RIGHT_OPERATOR_OVERLOADS += [
op_record("__rdiv__", 2, number_dtypes, all_shapes, jtu.rand_nonzero, []),
]
CombosWithReplacement = itertools.combinations_with_replacement
def _dtypes_are_compatible_for_bitwise_ops(args):
if len(args) <= 1:
return True
is_signed = lambda dtype: lnp.issubdtype(dtype, onp.signedinteger)
width = lambda dtype: lnp.iinfo(dtype).bits
x, y = args
if width(x) > width(y):
x, y = y, x
  # The following condition is a little ad hoc, but it seems to capture what
  # numpy actually implements.
return (
is_signed(x) == is_signed(y)
or (width(x) == 32 and width(y) == 32)
or (width(x) == 32 and width(y) == 64 and is_signed(y)))
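# For example, (uint32, int32) and (uint32, int64) count as compatible under
# this heuristic, while (int32, uint64) does not.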
def _shapes_are_broadcast_compatible(shapes):
accumulator = onp.zeros([])
for shape in shapes:
try:
accumulator = accumulator + onp.zeros(shape)
except ValueError:
return False
return True
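# This relies on onp's own broadcasting rules: e.g. [(3, 4), (1, 4)] is
# accepted, while [(3, 4), (2, 4)] raises and is filtered out.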
def _shapes_are_equal_length(shapes):
return all(len(shape) == len(shapes[0]) for shape in shapes[1:])
def _promote_like_lnp(fun, inexact=False):
"""Decorator that promotes the arguments of `fun` to `lnp.result_type(*args)`.
lnp and onp have different type promotion semantics; this decorator allows
  tests to make an onp reference implementation act more like an lnp
implementation.
"""
def wrapper(*args, **kw):
flat_args = tree_util.tree_leaves(args)
if inexact and not any(lnp.issubdtype(lnp.result_type(x), lnp.inexact)
for x in flat_args):
dtype = lnp.result_type(lnp.float_, *flat_args)
else:
dtype = lnp.result_type(*flat_args)
args = tree_util.tree_map(lambda a: onp.asarray(a, dtype), args)
return fun(*args, **kw)
return wrapper
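# For example, wrapping onp.add this way casts both operands to the dtype lnp
# would choose for the operation, so dtype comparisons against the lnp
# implementation compare like with like.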
class LaxBackedNumpyTests(jtu.JaxTestCase):
"""Tests for LAX-backed Numpy implementation."""
def _GetArgsMaker(self, rng, shapes, dtypes, onp_arrays=True):
def f():
out = [rng(shape, dtype or lnp.float_)
for shape, dtype in zip(shapes, dtypes)]
return out if onp_arrays else [lnp.asarray(a) for a in out]
return f
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"check_dtypes": rec.check_dtypes, "tolerance": rec.tolerance,
"inexact": rec.inexact}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in itertools.chain(JAX_ONE_TO_ONE_OP_RECORDS,
JAX_COMPOUND_OP_RECORDS)))
def testOp(self, onp_op, lnp_op, rng_factory, shapes, dtypes, check_dtypes,
tolerance, inexact):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
tol = max(jtu.tolerance(dtype, tolerance) for dtype in dtypes)
tol = functools.reduce(jtu.join_tolerance,
[tolerance, tol, jtu.default_tolerance()])
self._CheckAgainstNumpy(_promote_like_lnp(onp_op, inexact), lnp_op,
args_maker, check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"tol": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_OPERATOR_OVERLOADS))
def testOperatorOverload(self, name, rng_factory, shapes, dtypes, tol):
rng = rng_factory()
# onp and lnp arrays have different type promotion rules; force the use of
# lnp arrays.
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
fun = lambda *xs: getattr(operator, name.strip('_'))(*xs)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
fun, args_maker, check_dtypes=True, #not scalar_arg and not empty_shape,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(rec.test_name, shapes,
dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes, "name": rec.name,
"op_tolerance": rec.tolerance}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in itertools.product(
*(_valid_dtypes_for_shape(s, rec.dtypes) for s in shapes)))
for rec in JAX_RIGHT_OPERATOR_OVERLOADS))
def testRightOperatorOverload(self, name, rng_factory, shapes, dtypes,
op_tolerance):
if shapes[1] is jtu.PYTHON_SCALAR_SHAPE:
raise SkipTest() # TODO(mattjj): clean up
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, shapes, dtypes, onp_arrays=False)
fun = lambda fst, snd: getattr(snd, name)(fst)
tol = max(jtu.tolerance(dtype, op_tolerance) for dtype in dtypes)
scalar_arg = (jtu.PYTHON_SCALAR_SHAPE in shapes or
jtu.NUMPY_SCALAR_SHAPE in shapes or
() in shapes)
empty_shape = any(isinstance(s, tuple) and 0 in s for s in shapes)
self._CompileAndCheck(
fun, args_maker, check_dtypes=True, # not scalar_arg and not empty_shape,
atol=tol, rtol=tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.test_name, shapes, dtypes),
"rng_factory": rec.rng_factory, "shapes": shapes, "dtypes": dtypes,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name)}
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(rec.shapes, rec.nargs))
for dtypes in filter(
_dtypes_are_compatible_for_bitwise_ops,
CombosWithReplacement(rec.dtypes, rec.nargs)))
for rec in JAX_BITWISE_OP_RECORDS))
def testBitwiseOp(self, onp_op, lnp_op, rng_factory, shapes, dtypes):
rng = rng_factory()
if not FLAGS.jax_enable_x64 and any(
lnp.iinfo(dtype).bits == 64 for dtype in dtypes):
self.skipTest("x64 types are disabled by jax_enable_x64")
args_maker = self._GetArgsMaker(rng, shapes, dtypes)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
check_dtypes=jtu.PYTHON_SCALAR_SHAPE not in shapes)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_dtype={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis,
"None" if out_dtype is None else onp.dtype(out_dtype).name, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for shape in rec.shapes for dtype in rec.dtypes
for out_dtype in [None] + rec.dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
for keepdims in [False, True])
for rec in JAX_REDUCER_RECORDS))
def testReducer(self, onp_op, lnp_op, rng_factory, shape, dtype, out_dtype,
axis, keepdims, inexact):
rng = rng_factory()
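    # NumPy cannot reduce in bfloat16, so the reference runs in float32;
    # dtype checks are relaxed below when bfloat16 is involved.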
def onp_fun(x):
x_cast = x if dtype != lnp.bfloat16 else x.astype(onp.float32)
t = out_dtype if out_dtype != lnp.bfloat16 else onp.float32
return onp_op(x_cast, axis, dtype=t, keepdims=keepdims)
onp_fun = _promote_like_lnp(onp_fun, inexact)
lnp_fun = lambda x: lnp_op(x, axis, dtype=out_dtype, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-3, onp.complex64: 1e-3,
onp.float64: 1e-5, onp.complex128: 1e-5}
tol = jtu.tolerance(dtype, tol_spec)
tol = max(tol, jtu.tolerance(out_dtype, tol_spec)) if out_dtype else tol
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=lnp.bfloat16 not in (dtype, out_dtype),
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}_keepdims={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis, keepdims),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis, "keepdims": keepdims, "inexact": rec.inexact}
for rec in JAX_REDUCER_NO_DTYPE_RECORDS
for shape in rec.shapes for dtype in rec.dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])
for keepdims in [False, True]))
def testReducerNoDtype(self, onp_op, lnp_op, rng_factory, shape, dtype, axis,
keepdims, inexact):
rng = rng_factory()
onp_fun = lambda x: onp_op(x, axis, keepdims=keepdims)
onp_fun = _promote_like_lnp(onp_fun, inexact)
lnp_fun = lambda x: lnp_op(x, axis, keepdims=keepdims)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"shape": shape, "dtype": dtype, "axis": axis}
for shape in all_shapes for dtype in all_dtypes
for axis in set(range(-len(shape), len(shape))) | set([None])))
def testCountNonzero(self, shape, dtype, axis):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.count_nonzero(x, axis)
lnp_fun = lambda x: lnp.count_nonzero(x, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testNonzero(self, shape, dtype):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.nonzero(x)
lnp_fun = lambda x: lnp.nonzero(x)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "{}_inshape={}_axis={}".format(
rec.test_name.capitalize(),
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rec.rng_factory, "shape": shape, "dtype": dtype,
"onp_op": getattr(onp, rec.name), "lnp_op": getattr(lnp, rec.name),
"axis": axis}
for rec in JAX_ARGMINMAX_RECORDS
for shape, dtype in _shape_and_dtypes(rec.shapes, rec.dtypes)
for axis in range(-len(shape), len(shape))))
def testArgMinMax(self, onp_op, lnp_op, rng_factory, shape, dtype, axis):
rng = rng_factory()
if dtype == onp.complex128 and jtu.device_under_test() == "gpu":
raise unittest.SkipTest("complex128 reductions not supported on GPU")
def onp_fun(array_to_reduce):
return onp_op(array_to_reduce, axis).astype(lnp.int_)
def lnp_fun(array_to_reduce):
return lnp_op(array_to_reduce, axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(2,), (2,), (-1, -1, -1, None)], # scalar output
[(2, 4), (2, 4), (-1, -1, -1, 0)], # 2D vectors
[(3, 4), (3, 4), (-1, -1, -1, 0)], # 3D vectors
[(3, 4), (3, 6, 5, 4), (-1, -1, -1, 0)], # broadcasting
[(4, 3), (3, 6, 5, 4), (1, 0, -1, None)], # different axes
[(6, 1, 3), (5, 3), (-1, -1, -1, None)], # more broadcasting
[(6, 1, 2), (5, 3), (-1, -1, -1, None)], # mixed 2D and 3D vectors
[(10, 5, 2, 8), (1, 5, 1, 3), (-2, -1, -3, None)], # axes/broadcasting
[(4, 5, 2), (4, 5, 2), (-1, -1, 0, None)], # axisc should do nothing
[(4, 5, 2), (4, 5, 2), (-1, -1, -1, None)] # same as before
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testCross(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
axisa, axisb, axisc, axis = axes
lnp_fun = lambda a, b: lnp.cross(a, b, axisa, axisb, axisc, axis)
def onp_fun(a, b):
a = a.astype(onp.float32) if lhs_dtype == lnp.bfloat16 else a
b = b.astype(onp.float32) if rhs_dtype == lnp.bfloat16 else b
out = onp.cross(a, b, axisa, axisb, axisc, axis)
return out.astype(lnp.promote_types(lhs_dtype, rhs_dtype))
tol_spec = {dtypes.bfloat16: 3e-1, onp.float16: 0.15}
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("matrix-scalar", (3, 3), ()),
("scalar-matrix", (), (3, 3)),
("matrix-vector", (4, 5), (5,)),
("vector-matrix", (6,), (6, 4)),
("matrix-matrix", (3, 4), (4, 5)),
("tensor-vector", (4, 3, 2), (2,)),
("vector-tensor", (2,), (3, 2, 4)),
("tensor-matrix", (4, 3, 2), (2, 5)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-tensor", (2, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testDot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 1e-14,
onp.complex128: 1e-14}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
def onp_dot(x, y):
x = x.astype(onp.float32) if lhs_dtype == lnp.bfloat16 else x
y = y.astype(onp.float32) if rhs_dtype == lnp.bfloat16 else y
return onp.dot(x, y).astype(lnp.promote_types(lhs_dtype, rhs_dtype))
self._CheckAgainstNumpy(onp_dot, lnp.dot, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(lnp.dot, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
name,
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for name, lhs_shape, rhs_shape in [
("vector-vector", (3,), (3,)),
("matrix-vector", (3, 3), (3,)),
("vector-matrix", (3,), (3, 3)),
("matrix-matrix", (3, 3), (3, 3)),
("vector-tensor", (3,), (5, 3, 2)),
("tensor-vector", (5, 3, 2), (2,)),
("matrix-tensor", (5, 2), (3, 2, 4)),
("tensor-matrix", (5, 2, 3), (3, 2)),
("tensor-tensor", (5, 3, 4), (5, 4, 1)),
("tensor-tensor-broadcast", (3, 1, 3, 4), (5, 4, 1))]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testMatmul(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
def onp_fun(x, y):
dtype = lnp.promote_types(lhs_dtype, rhs_dtype)
return onp.matmul(x, y).astype(dtype)
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
tol = {onp.float16: 1e-2, onp.float32: 2e-2, onp.float64: 1e-12,
onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 4e-2
self._CheckAgainstNumpy(onp_fun, lnp.matmul, args_maker,
check_dtypes=True, tol=tol)
self._CompileAndCheck(lnp.matmul, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype),
axes),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"axes": axes, "rng_factory": rng_factory}
for rng_factory in [jtu.rand_default]
for lhs_shape, rhs_shape, axes in [
[(2, 3, 4), (5, 6, 7), 0], # from issue #740
[(2, 3, 4), (3, 4, 5, 6), 2],
[(2, 3, 4), (5, 4, 3, 6), [1, 2]],
[(2, 3, 4), (5, 4, 3, 6), [[1, 2], [2, 1]]],
[(1, 2, 3, 4), (4, 5, 3, 6), [[2, 3], [2, 0]]],
]
for lhs_dtype, rhs_dtype in CombosWithReplacement(number_dtypes, 2)))
def testTensordot(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, axes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
lnp_fun = lambda a, b: lnp.tensordot(a, b, axes)
def onp_fun(a, b):
a = a if lhs_dtype != lnp.bfloat16 else a.astype(onp.float32)
b = b if rhs_dtype != lnp.bfloat16 else b.astype(onp.float32)
dtype = lnp.promote_types(lhs_dtype, rhs_dtype)
return onp.tensordot(a, b, axes).astype(dtype)
tol = {onp.float16: 1e-1, onp.float32: 1e-3, onp.float64: 1e-12,
onp.complex64: 1e-3, onp.complex128: 1e-12}
if jtu.device_under_test() == "tpu":
tol[onp.float32] = tol[onp.complex64] = 2e-1
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(
jtu.format_shape_dtype_string(lhs_shape, lhs_dtype),
jtu.format_shape_dtype_string(rhs_shape, rhs_dtype)),
"lhs_shape": lhs_shape, "lhs_dtype": lhs_dtype,
"rhs_shape": rhs_shape, "rhs_dtype": rhs_dtype,
"rng_factory": jtu.rand_default}
# TODO(phawkins): support integer dtypes too.
for lhs_shape, lhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
for rhs_shape, rhs_dtype in _shape_and_dtypes(all_shapes, inexact_dtypes)
if len(jtu._dims_of_shape(lhs_shape)) == 0
or len(jtu._dims_of_shape(rhs_shape)) == 0
or lhs_shape[-1] == rhs_shape[-1]))
def testInner(self, lhs_shape, lhs_dtype, rhs_shape, rhs_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, lhs_dtype), rng(rhs_shape, rhs_dtype)]
def onp_fun(lhs, rhs):
lhs = lhs if lhs_dtype != lnp.bfloat16 else lhs.astype(onp.float32)
rhs = rhs if rhs_dtype != lnp.bfloat16 else rhs.astype(onp.float32)
dtype = lnp.promote_types(lhs_dtype, rhs_dtype)
return onp.inner(lhs, rhs).astype(dtype)
lnp_fun = lambda lhs, rhs: lnp.inner(lhs, rhs)
tol_spec = {onp.float16: 1e-2, onp.float32: 1e-5, onp.float64: 1e-13}
if jtu.device_under_test() == "tpu":
tol_spec[onp.float32] = tol_spec[onp.complex64] = 2e-1
tol = max(jtu.tolerance(lhs_dtype, tol_spec),
jtu.tolerance(rhs_dtype, tol_spec))
# TODO(phawkins): there are float32/float64 disagreements for some inputs.
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False, atol=tol,
rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_amin={}_amax={}".format(
jtu.format_shape_dtype_string(shape, dtype), a_min, a_max),
"shape": shape, "dtype": dtype, "a_min": a_min, "a_max": a_max,
"rng_factory": jtu.rand_default}
for shape in all_shapes for dtype in number_dtypes
for a_min, a_max in [(-1, None), (None, 1), (-1, 1),
(-onp.ones(1), None),
(None, onp.ones(1)),
(-onp.ones(1), onp.ones(1))]))
def testClipStaticBounds(self, shape, dtype, a_min, a_max, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.clip(x, a_min=a_min, a_max=a_max)
lnp_fun = lambda x: lnp.clip(x, a_min=a_min, a_max=a_max)
args_maker = lambda: [rng(shape, dtype)]
# TODO(phawkins): the promotion behavior changed in Numpy 1.17.
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_decimals={}".format(
jtu.format_shape_dtype_string(shape, dtype), decimals),
"shape": shape, "dtype": dtype, "decimals": decimals,
"rng_factory": jtu.rand_default}
for shape, dtype in _shape_and_dtypes(all_shapes, number_dtypes)
for decimals in [0, 1, -2]))
def testRoundStaticDecimals(self, shape, dtype, decimals, rng_factory):
rng = rng_factory()
if lnp.issubdtype(dtype, onp.integer) and decimals < 0:
self.skipTest("Integer rounding with decimals < 0 not implemented")
onp_fun = lambda x: onp.round(x, decimals=decimals)
lnp_fun = lambda x: lnp.round(x, decimals=decimals)
args_maker = lambda: [rng(shape, dtype)]
tol = {lnp.bfloat16: 5e-2, onp.float16: 1e-2}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=check_dtypes,
atol=tol, rtol=tol)
def testOperatorRound(self):
self.assertAllClose(round(onp.float32(7.532), 1),
round(lnp.float32(7.5), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(lnp.float32(1.234), 2), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234)),
round(lnp.float32(1.234)), check_dtypes=False)
self.assertAllClose(round(onp.float32(7.532), 1),
round(lnp.array(7.5, lnp.float32), 1), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234), 2),
round(lnp.array(1.234, lnp.float32), 2), check_dtypes=True)
self.assertAllClose(round(onp.float32(1.234)),
round(lnp.array(1.234, lnp.float32)),
check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_mode={}_rpadwidth={}_rconstantvalues={}".format(
jtu.format_shape_dtype_string(shape, dtype), mode, pad_width_rank,
constant_values_rank),
"shape": shape, "dtype": dtype, "mode": mode,
"pad_width_rank": pad_width_rank,
"constant_values_rank": constant_values_rank,
"rng_factory": jtu.rand_default,
"irng_factory": partial(jtu.rand_int, 3)}
for mode, constant_values_rank, shapes in [
('constant', 0, all_shapes),
('constant', 1, all_shapes),
('constant', 2, all_shapes),
('symmetric', None, nonempty_shapes),
('reflect', None, nonempty_shapes),
('wrap', None, nonempty_shapes),
]
for shape, dtype in _shape_and_dtypes(shapes, all_dtypes)
for pad_width_rank in range(3)))
def testPad(self, shape, dtype, mode, pad_width_rank, constant_values_rank,
rng_factory, irng_factory):
rng = rng_factory()
irng = irng_factory()
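    # Slicing [len(shape), 2] yields a pad_width of rank 0, 1, or 2, covering
    # the scalar, pair, and per-axis-pair forms accepted by pad.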
pad_width = irng([len(shape), 2][2 - pad_width_rank:], onp.int32)
def onp_fun(x, kwargs):
if pad_width.size == 0:
return x
return onp.pad(x, pad_width, mode=mode, **kwargs)
def lnp_fun(x, kwargs):
return lnp.pad(x, pad_width, mode=mode, **kwargs)
def args_maker():
kwargs = {}
if constant_values_rank:
kwargs["constant_values"] = rng(
[len(shape), 2][2 - constant_values_rank:], dtype)
return rng(shape, dtype), kwargs
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_reps={}".format(
jtu.format_shape_dtype_string(shape, dtype), reps),
"shape": shape, "dtype": dtype, "reps": reps,
"rng_factory": jtu.rand_default}
for reps in [(), (2,), (3, 4), (2, 3, 4)]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
))
def testTile(self, shape, dtype, reps, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.tile(arg, reps)
lnp_fun = lambda arg: lnp.tile(arg, reps)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=shape is not jtu.PYTHON_SCALAR_SHAPE)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for num_arrs in [3]
for arg_dtypes in CombosWithReplacement(default_dtypes, num_arrs)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testConcatenate(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
wrapped_axis = axis % len(base_shape)
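    # Vary the size along the concatenation axis (cycling 3, 1, 4) while
    # keeping the other dimensions of base_shape fixed.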
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(*args):
args = [x if x.dtype != lnp.bfloat16 else x.astype(onp.float32)
for x in args]
dtype = functools.reduce(lnp.promote_types, arg_dtypes)
return onp.concatenate(args, axis=axis).astype(dtype)
lnp_fun = lambda *args: lnp.concatenate(args, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_axis={}_baseshape=[{}]_dtypes=[{}]".format(
axis, ",".join(str(d) for d in base_shape),
",".join(onp.dtype(dtype).name for dtype in arg_dtypes)),
"axis": axis, "base_shape": base_shape, "arg_dtypes": arg_dtypes,
"rng_factory": jtu.rand_default}
for arg_dtypes in CombosWithReplacement(default_dtypes, 2)
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for axis in range(-len(base_shape)+1, len(base_shape))))
def testAppend(self, axis, base_shape, arg_dtypes, rng_factory):
rng = rng_factory()
wrapped_axis = axis % len(base_shape)
shapes = [base_shape[:wrapped_axis] + (size,) + base_shape[wrapped_axis+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), arg_dtypes)]
def onp_fun(arr, values):
arr = arr.astype(onp.float32) if arr.dtype == lnp.bfloat16 else arr
values = (values.astype(onp.float32) if values.dtype == lnp.bfloat16
else values)
out = onp.append(arr, values, axis=axis)
return out.astype(lnp.promote_types(*arg_dtypes))
lnp_fun = lambda arr, values: lnp.append(arr, values, axis=axis)
def args_maker():
return [rng(shape, dtype) for shape, dtype in zip(shapes, arg_dtypes)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape=[{}]_axis={}_repeats={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis, repeats),
"axis": axis, "shape": shape, "dtype": dtype, "repeats": repeats,
"rng_factory": jtu.rand_default}
for repeats in [0, 1, 2]
for shape, dtype in _shape_and_dtypes(all_shapes, default_dtypes)
for axis in [None] + list(range(-len(shape), len(shape)))))
def testRepeat(self, axis, shape, dtype, repeats, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.repeat(arg, repeats=repeats, axis=axis)
onp_fun = _promote_like_lnp(onp_fun)
lnp_fun = lambda arg: lnp.repeat(arg, repeats=repeats, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
def testIssue1233(self):
    """Mirrors the `test_repeat` cases from NumPy's test suite at
    https://github.com/numpy/numpy/blob/master/numpy/core/tests/test_multiarray.py
    """
tol = 1e-5
def test_single(m, args_maker, repeats, axis):
lax_ans = lnp.repeat(m, repeats, axis)
numpy_ans = onp.repeat(m, repeats, axis)
self.assertAllClose(lax_ans, numpy_ans, check_dtypes=True, rtol=tol, atol=tol)
      lnp_fun = lambda arg: lnp.repeat(arg, repeats=repeats, axis=axis)
      self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
    m = lnp.array([1, 2, 3, 4, 5, 6])
    args_maker = lambda: [m]
    for repeats in [2, [1, 3, 2, 1, 1, 2], [1, 3, 0, 1, 1, 2], [2],
                    lnp.array([1, 3, 2, 1, 1, 2]), lnp.array([2])]:
      test_single(m, args_maker, repeats, None)
    m_rect = m.reshape((2, 3))
    args_maker = lambda: [m_rect]
    for repeats in [2, [2, 1], [2], lnp.array([2, 1]), lnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=0)
    for repeats in [2, [1, 3, 2], [2], lnp.array([1, 3, 2]), lnp.array([2])]:
      test_single(m_rect, args_maker, repeats, axis=1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "op={}_shape=[{}]_axis={}_out_dtype={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), axis, out_dtype),
"axis": axis, "shape": shape, "dtype": dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default, "lnp_op": getattr(lnp, op),
"onp_op": getattr(onp, op)}
for op in ["cumsum", "cumprod"]
for dtype in default_dtypes
for out_dtype in default_dtypes
for shape in all_shapes
for axis in [None] + list(range(-len(shape), len(shape)))))
def testCumSumProd(self, axis, shape, dtype, out_dtype, onp_op, lnp_op, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp_op(arg, axis=axis, dtype=out_dtype)
lnp_fun = lambda arg: lnp_op(arg, axis=axis, dtype=out_dtype)
args_maker = lambda: [rng(shape, dtype)]
tol = max(jtu.tolerance(dtype), jtu.tolerance(out_dtype))
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_m={}_n={}_k={}".format(
onp.dtype(dtype).name, m, n, k),
"m": m, "n": n, "k": k, "dtype": dtype, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for n in [0, 4]
for m in [None, 0, 1, 3, 4]
for k in list(range(-4, 4))))
def testTri(self, m, n, k, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda: onp.tri(n, M=m, k=k, dtype=dtype)
lnp_fun = lambda: lnp.tri(n, M=m, k=k, dtype=dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_shape={}_k={}".format(
op, jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "op": op, "k": k,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for op in ["tril", "triu"]
for k in list(range(-3, 3))))
def testTriLU(self, dtype, shape, op, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: getattr(onp, op)(arg, k=k)
lnp_fun = lambda arg: getattr(lnp, op)(arg, k=k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_ndim={}_n={}".format(ndim, n),
"ndim": ndim, "n": n}
for ndim in [0, 1, 4]
for n in [0, 1, 7]))
def testDiagIndices(self, ndim, n):
onp.testing.assert_equal(onp.diag_indices(n, ndim),
lnp.diag_indices(n, ndim))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_k={}".format(
jtu.format_shape_dtype_string(shape, dtype), k),
"dtype": dtype, "shape": shape, "k": k, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) in (1, 2)]
for k in list(range(-4, 4))))
def testDiag(self, shape, dtype, k, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diag(arg, k)
lnp_fun = lambda arg: lnp.diag(arg, k)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype), offset, axis1, axis2),
"dtype": dtype, "shape": shape, "offset": offset, "axis1": axis1,
"axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in [a for a in range(-len(shape), len(shape))
if a % len(shape) != axis1 % len(shape)]
for offset in list(range(-4, 4))))
def testDiagonal(self, shape, dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
onp_fun = lambda arg: onp.diagonal(arg, offset, axis1, axis2)
lnp_fun = lambda arg: lnp.diagonal(arg, offset, axis1, axis2)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}".format(onp.dtype(dtype).name, n),
"dtype": dtype, "n": n}
for dtype in default_dtypes
for n in list(range(4))))
def testIdentity(self, n, dtype):
onp_fun = lambda: onp.identity(n, dtype)
lnp_fun = lambda: lnp.identity(n, dtype)
args_maker = lambda: []
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype_{}_offset={}_axis1={}_axis2={}".format(
jtu.format_shape_dtype_string(shape, dtype),
out_dtype, offset, axis1, axis2),
"dtype": dtype, "out_dtype": out_dtype, "shape": shape, "offset": offset,
"axis1": axis1, "axis2": axis2, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for out_dtype in [None] + number_dtypes
for shape in [shape for shape in all_shapes if len(shape) >= 2]
for axis1 in range(-len(shape), len(shape))
for axis2 in range(-len(shape), len(shape))
if (axis1 % len(shape)) != (axis2 % len(shape))
for offset in list(range(-4, 4))))
def testTrace(self, shape, dtype, out_dtype, offset, axis1, axis2, rng_factory):
rng = rng_factory()
def onp_fun(arg):
if out_dtype == lnp.bfloat16:
return onp.trace(arg, offset, axis1, axis2, onp.float32).astype(lnp.bfloat16)
else:
return onp.trace(arg, offset, axis1, axis2, out_dtype)
lnp_fun = lambda arg: lnp.trace(arg, offset, axis1, axis2, out_dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes), axis),
"shape": shape, "axis": axis, "dtypes": dtypes, "rng_factory": rng_factory}
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100)]
for axis in range(-len(shape), len(shape) + 1)
for rng_factory in [jtu.rand_default]))
def testStack(self, shape, axis, dtypes, rng_factory):
rng = rng_factory()
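    # stack takes a single sequence of arrays, hence the nested list from args_maker.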
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_lnp(partial(onp.stack, axis=axis))
lnp_fun = partial(lnp.stack, axis=axis)
self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_{}".format(
op, jtu.format_test_name_suffix("", [shape] * len(dtypes), dtypes)),
"shape": shape, "op": op, "dtypes": dtypes, "rng_factory": rng_factory}
for op in ["hstack", "vstack", "dstack"]
for dtypes in [
[onp.float32],
[onp.float32, onp.float32],
[onp.float32, onp.int32, onp.float32],
[onp.float32, onp.int64, onp.float32],
[onp.float32, onp.int32, onp.float64],
]
for shape in [(), (2,), (3, 4), (1, 100), (2, 3, 4)]
for rng_factory in [jtu.rand_default]))
def testHVDStack(self, shape, op, dtypes, rng_factory):
rng = rng_factory()
args_maker = lambda: [[rng(shape, dtype) for dtype in dtypes]]
onp_fun = _promote_like_lnp(getattr(onp, op))
lnp_fun = getattr(lnp, op)
self._CheckAgainstNumpy(lnp_fun, onp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, fill_value_dtype),
onp.dtype(out_dtype).name if out_dtype else "None"),
"shape": shape, "fill_value_dtype": fill_value_dtype,
"out_dtype": out_dtype, "rng_factory": jtu.rand_default}
for shape in array_shapes + [3, onp.array(7, dtype=onp.int32)]
for fill_value_dtype in default_dtypes
for out_dtype in [None] + default_dtypes))
def testFull(self, shape, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda fill_value: onp.full(shape, fill_value, dtype=out_dtype)
lnp_fun = lambda fill_value: lnp.full(shape, fill_value, dtype=out_dtype)
args_maker = lambda: [rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_op={}_shape={}_dtype={}").format(op, shape, dtype),
"onp_op": getattr(onp, op), "lnp_op": getattr(lnp, op),
"shape": shape, "dtype": dtype}
for op in ["zeros", "ones"]
for shape in [2, (), (2,), (3, 0), onp.array((4, 5, 6), dtype=onp.int32),
onp.array(4, dtype=onp.int32)]
for dtype in all_dtypes))
def testZerosOnes(self, onp_op, lnp_op, shape, dtype):
rng = jtu.rand_default()
def args_maker(): return []
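    # Bind shape and dtype up front so both ops can be called with no arguments.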
onp_op = partial(onp_op, shape, dtype)
lnp_op = partial(lnp_op, shape, dtype)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_filldtype={}_outdtype={}".format(
jtu.format_shape_dtype_string(shape, in_dtype),
onp.dtype(fill_value_dtype).name,
onp.dtype(out_dtype).name),
"shape": shape, "in_dtype": in_dtype,
"fill_value_dtype": fill_value_dtype, "out_dtype": out_dtype,
"rng_factory": jtu.rand_default}
for shape in array_shapes
for in_dtype in default_dtypes
for fill_value_dtype in default_dtypes
for out_dtype in default_dtypes))
def testFullLike(self, shape, in_dtype, fill_value_dtype, out_dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x, fill_value: onp.full_like(x, fill_value, dtype=out_dtype)
lnp_fun = lambda x, fill_value: lnp.full_like(x, fill_value, dtype=out_dtype)
args_maker = lambda: [rng(shape, in_dtype), rng((), fill_value_dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((3,), 0, 3), ((12,), 0, 3), ((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), -1, 2), ((2, 3, 4), -2, 3)]
for dtype in default_dtypes))
def testSplitStaticInt(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.split(x, num_sections, axis=axis)
lnp_fun = lambda x: lnp.split(x, num_sections, axis=axis)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}_{}sections".format(
jtu.format_shape_dtype_string(shape, dtype), axis, num_sections),
"shape": shape, "num_sections": num_sections, "axis": axis,
"dtype": dtype, "rng_factory": jtu.rand_default}
for shape, axis, num_sections in [
((12, 4), 0, 4), ((12, 4), 1, 2),
((2, 3, 4), 2, 2), ((4, 3, 4), 0, 2)]
for dtype in default_dtypes))
def testHVDSplit(self, shape, num_sections, axis, dtype, rng_factory):
rng = rng_factory()
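    # vsplit, hsplit, and dsplit split along axes 0, 1, and 2, respectively.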
def fn(module, axis):
if axis == 0:
return module.vsplit
elif axis == 1:
return module.hsplit
else:
assert axis == 2
return module.dsplit
onp_fun = lambda x: fn(onp, axis)(x, num_sections)
lnp_fun = lambda x: fn(lnp, axis)(x, num_sections)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_order={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
order),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"order": order, "rng_factory": jtu.rand_default}
for dtype in default_dtypes
for order in ["C", "F"]
for arg_shape, out_shape in [
(jtu.NUMPY_SCALAR_SHAPE, (1, 1, 1)),
((), (1, 1, 1)),
((7, 0), (0, 42, 101)),
((3, 4), 12),
((3, 4), (12,)),
((3, 4), -1),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshape(self, arg_shape, out_shape, dtype, order, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.reshape(x, out_shape, order=order)
lnp_fun = lambda x: lnp.reshape(x, out_shape, order=order)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"rng_factory": jtu.rand_default}
for dtype in default_dtypes
for arg_shape, out_shape in [
((7, 0), (0, 42, 101)),
((2, 1, 4), (-1,)),
((2, 2, 4), (2, 8))
]))
def testReshapeMethod(self, arg_shape, out_shape, dtype, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.reshape(x, out_shape)
lnp_fun = lambda x: x.reshape(*out_shape)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_expanddim={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), dim),
"arg_shape": arg_shape, "dtype": dtype, "dim": dim,
"rng_factory": jtu.rand_default}
for arg_shape in [(), (3,), (3, 4)]
for dtype in default_dtypes
for dim in range(-len(arg_shape)+1, len(arg_shape))))
def testExpandDimsStaticDim(self, arg_shape, dtype, dim, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.expand_dims(x, dim)
lnp_fun = lambda x: lnp.expand_dims(x, dim)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axes=({},{})".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax1, ax2),
"arg_shape": arg_shape, "dtype": dtype, "ax1": ax1, "ax2": ax2,
"rng_factory": jtu.rand_default}
for arg_shape, ax1, ax2 in [
((3, 4), 0, 1), ((3, 4), 1, 0), ((3, 4, 5), 1, 2),
((3, 4, 5), -1, -2), ((3, 4, 5), 0, 1)]
for dtype in default_dtypes))
def testSwapAxesStaticAxes(self, arg_shape, dtype, ax1, ax2, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.swapaxes(x, ax1, ax2)
lnp_fun = lambda x: lnp.swapaxes(x, ax1, ax2)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_axis={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype), ax),
"arg_shape": arg_shape, "dtype": dtype, "ax": ax,
"rng_factory": jtu.rand_default}
for arg_shape, ax in [
((3, 1), None),
((3, 1), 1),
((1, 3, 1), (0, 2)),
((1, 4, 1), (0,))]
for dtype in default_dtypes))
def testSqueeze(self, arg_shape, dtype, ax, rng_factory):
rng = rng_factory()
onp_fun = lambda x: onp.squeeze(x, ax)
lnp_fun = lambda x: lnp.squeeze(x, ax)
args_maker = lambda: [rng(arg_shape, dtype)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}_weights={}_returned={}".format(
jtu.format_shape_dtype_string(shape, dtype),
axis,
(None if weights_shape is None else jtu.format_shape_dtype_string(weights_shape, dtype)),
returned),
"rng_factory": jtu.rand_default, "shape": shape, "dtype": dtype, "axis": axis,
"weights_shape": weights_shape, "returned": returned}
for shape, dtype in _shape_and_dtypes(nonempty_shapes, number_dtypes)
for axis in set(range(-len(shape), len(shape))) | set([None])
# `weights_shape` is either `None`, same as the averaged axis, or same as
# that of the input
for weights_shape in ([None, shape] if axis is None or len(shape) == 1
else [None, (shape[axis],), shape])
for returned in [False, True]))
def testAverage(self, shape, dtype, axis, weights_shape, returned, rng_factory):
rng = rng_factory()
if weights_shape is None:
onp_fun = lambda x: onp.average(x, axis, returned=returned)
lnp_fun = lambda x: lnp.average(x, axis, returned=returned)
args_maker = lambda: [rng(shape, dtype)]
else:
onp_fun = lambda x, weights: onp.average(x, axis, weights, returned)
lnp_fun = lambda x, weights: lnp.average(x, axis, weights, returned)
args_maker = lambda: [rng(shape, dtype), rng(weights_shape, dtype)]
onp_fun = _promote_like_lnp(onp_fun, inexact=True)
tol = {lnp.bfloat16: 1e-1, onp.float16: 1e-1, onp.float32: 1e-3,
onp.float64: 1e-10, onp.complex64: 1e-3, onp.complex128: 1e-10}
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
try:
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker,
check_dtypes=check_dtypes, tol=tol)
except ZeroDivisionError:
self.skipTest("don't support checking for ZeroDivisionError")
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=check_dtypes,
rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_arg{}_ndmin={}".format(i, ndmin),
"arg": arg, "ndmin": ndmin, "dtype": dtype}
for i, (arg, dtype) in enumerate([
([True, False, True], lnp.bool_),
(3., lnp.float_),
([1, 2, 3], lnp.int_),
([1., 2., 3.], lnp.float_),
([[1, 2], [3, 4], [5, 6]], lnp.int_),
([[1, 2.], [3, 4], [5, 6]], lnp.float_),
([[1., 2j], [3., 4.], [5., 6.]], lnp.complex_),
([[3, onp.array(2, dtype=lnp.float_), 1],
onp.arange(3., dtype=lnp.float_)], lnp.float_),
])
for ndmin in [None, onp.ndim(arg), onp.ndim(arg) + 1, onp.ndim(arg) + 2]))
def testArray(self, arg, ndmin, dtype):
args_maker = lambda: [arg]
dtype = dtypes.canonicalize_dtype(dtype)
if ndmin is not None:
onp_fun = partial(onp.array, ndmin=ndmin, dtype=dtype)
lnp_fun = partial(lnp.array, ndmin=ndmin)
else:
onp_fun = partial(onp.array, dtype=dtype)
lnp_fun = lnp.array
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
def testIssue121(self):
assert not onp.isscalar(lnp.array(3))
def testArrayMethod(self):
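    # lnp.array should defer to an object's __array__ method, even when it
    # returns a plain Python scalar.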
class arraylike(object):
dtype = onp.float32
def __array__(self, dtype=None):
return 3.
a = arraylike()
ans = lnp.array(a)
assert ans == 3.
@jtu.skip_on_devices("tpu") # TODO(b/32368900): TPUs don't support uint8 yet.
def testMemoryView(self):
ans = lnp.array(bytearray(b'\x2a'))
self.assertAllClose(
ans,
onp.array([0x2a], dtype=onp.uint8),
check_dtypes=True)
def testAllClose(self):
rng = onp.random.RandomState(0)
x = rng.randn(2, 2)
y = rng.randn(2)
def same(list1, list2):
allclose = functools.partial(lnp.allclose, atol=1e-3, rtol=1e-3)
elements_close = list(map(allclose, list1, list2))
return lnp.all(lnp.array(elements_close))
csame = api.jit(same)
a1 = same((x, y), (x, y))
a2 = csame((x, y), (x, y))
a3 = csame((x, y), (x, 2 * y))
self.assertTrue(a1)
self.assertTrue(a2)
self.assertFalse(a3)
@jtu.skip_on_devices("tpu") # TODO(mattjj): investigate this failure
def testOnesBroadcastingConstantHandler(self):
# TODO(mattjj): update this test for jax3
self.skipTest("test needs jax3 update")
def fun(x):
ones = lnp.ones((3, 4))
assert isinstance(ones, onp.ndarray) and ones.strides == (0, 0)
# To check that the constant handler generates a Broadcast for stride-zero
# arrays, we monkey-patch the client instance.
# TODO(mattjj): once we have better HLO dumping and inspecting facilities,
# we can check the HLO more directly.
c = x._node.c
Broadcast = c.Broadcast # pylint: disable=invalid-name
was_called = []
c.Broadcast = lambda *args: was_called.append(True) or Broadcast(*args)
out = x + ones # the ndarray constant handler should call Broadcast here
assert was_called, "Broadcast was not called."
return out
fun = api.jit(fun)
out_val = fun(lnp.ones(4))
self.assertAllClose(out_val, onp.full((3, 4), 2.), check_dtypes=False)
def testZeroStridesConstantHandler(self):
raw_const = onp.random.RandomState(0).randn(1, 2, 1, 1, 5, 1)
const = onp.broadcast_to(raw_const, (3, 2, 3, 4, 5, 6))
def fun(x):
return x * const
fun = api.jit(fun)
out_val = fun(3.)
self.assertAllClose(out_val, 3. * const, check_dtypes=False)
def testIsInstanceNdarrayDuringTracing(self):
arr = onp.ones(3)
@api.jit
def f(x):
self.assertIsInstance(x, lnp.ndarray)
return lnp.sum(x)
f(arr)
def testNonArrayErrorMessage(self):
x = [1., 2.]
y = onp.array([3., 4.])
def g(x, y):
return lnp.add(x, y)
def f(x, y):
return lnp.dot(x, y)
self.assertRaises(TypeError, lambda: g(x, y))
self.assertRaises(TypeError, lambda: f(x, y))
self.assertRaises(TypeError, lambda: api.jit(g)(x, y))
self.assertRaises(TypeError, lambda: api.jit(f)(x, y))
def testAbstractionErrorMessage(self):
@api.jit
def f(x, n):
for _ in range(n):
x = x * x
return x
self.assertRaises(TypeError, lambda: f(3., 3))
@api.jit
def g(x):
if x > 0.:
return x * 2
else:
return x + 2
self.assertRaises(TypeError, lambda: g(3.))
def testTracingPrimitiveWithNoTranslationErrorMessage(self):
# TODO(mattjj): update this for jax3
self.skipTest("test needs jax3 update")
foo = lnp._not_implemented(lambda x: x)
# No error if there's no tracing.
foo(onp.arange(3))
cfoo = api.jit(foo)
self.assertRaises(NotImplementedError, lambda: cfoo(onp.arange(3)))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis}
for shape in [(3,), (2, 3)]
for dtype in default_dtypes
for axis in list(range(-len(shape), len(shape))) + [None] # Test negative axes
for rng_factory in [jtu.rand_default]))
def testFlip(self, shape, dtype, axis, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.flip(x, axis)
onp_op = lambda x: onp.flip(x, axis)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3,), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFlipud(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.flipud(x)
onp_op = lambda x: onp.flipud(x)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype}
for shape in [(3, 2), (2, 3), (3, 2, 4)]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testFliplr(self, shape, dtype, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.fliplr(x)
onp_op = lambda x: onp.fliplr(x)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_k={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), k, axes),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "k": k, "axes": axes}
for shape, axes in [
[(2, 3), (0, 1)],
[(2, 3), (1, 0)],
[(4, 3, 2), (0, 2)],
[(4, 3, 2), (2, 1)],
]
for k in range(-3, 4)
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testRot90(self, shape, dtype, k, axes, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
lnp_op = lambda x: lnp.rot90(x, k, axes)
onp_op = lambda x: onp.rot90(x, k, axes)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
# TODO(mattjj): test infix operator overrides
def testRavel(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
self._CompileAndCheck(lambda x: x.ravel(), args_maker, check_dtypes=True)
def testAstype(self):
rng = onp.random.RandomState(0)
args_maker = lambda: [rng.randn(3, 4).astype("float32")]
op = lambda x: x.astype(lnp.int32)
self._CheckAgainstNumpy(op, op, args_maker, check_dtypes=True)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
# TODO(mattjj): test other ndarray-like method overrides
def testOnpMean(self):
# from https://github.com/google/jax/issues/125
x = lax.add(lnp.eye(3, dtype=lnp.float_), 0.)
ans = onp.mean(x)
self.assertAllClose(ans, onp.array(1./3), check_dtypes=False)
def testArangeOnFloats(self):
# from https://github.com/google/jax/issues/145
expected = onp.arange(0.0, 1.0, 0.1, dtype=lnp.float_)
ans = lnp.arange(0.0, 1.0, 0.1)
self.assertAllClose(expected, ans, check_dtypes=True)
def testSortManually(self):
# manual tests for sort are nice because we don't have to worry about ties.
# lax.sort is tested combinatorially.
ans = lnp.sort(onp.array([16, 15, 23, 42, 8, 4]))
expected = onp.array([4, 8, 15, 16, 23, 42])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = lnp.sort(a, axis=None)
expected = onp.array([1, 1, 3, 4])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = lnp.sort(a) # last axis
expected = onp.array([[1, 4], [1, 3]])
self.assertAllClose(expected, ans, check_dtypes=True)
a = onp.array([[1, 4], [3, 1]])
ans = lnp.sort(a, axis=0)
expected = onp.array([[1, 1], [3, 4]])
self.assertAllClose(expected, ans, check_dtypes=True)
def testArgsortManually(self):
x = onp.array([16, 15, 23, 42, 8, 4])
ans = lnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x, axis=0)
expected = onp.argsort(x, axis=0)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x, axis=1)
expected = onp.argsort(x, axis=1)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x, axis=None)
expected = onp.argsort(x, axis=None)
self.assertAllClose(expected, ans, check_dtypes=False)
x = onp.array([[16, 15, 23], [42, 8, 4]])
ans = lnp.argsort(x)
expected = onp.argsort(x)
self.assertAllClose(expected, ans, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_shifts={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype),
shifts, axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "shifts": shifts,
"axis": axis}
for dtype in all_dtypes
for shape in [(3, 4), (3, 4, 5), (7, 4, 0)]
for shifts, axis in [
(3, None),
(1, 1),
((3,), (0,)),
((-2,), (-2,)),
((1, 2), (0, -1))
]
for rng_factory in [jtu.rand_default]))
def testRoll(self, shape, dtype, shifts, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), onp.array(shifts)]
lnp_op = partial(lnp.roll, axis=axis)
onp_op = partial(onp.roll, axis=axis)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_index={}_axis={}_mode={}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(index_shape, index_dtype),
axis, mode),
"rng_factory": rng_factory, "rng_indices_factory": rng_indices_factory,
"shape": shape, "index_shape": index_shape, "dtype": dtype,
"index_dtype": index_dtype, "axis": axis, "mode": mode}
for shape in [(3,), (3, 4), (3, 4, 5)]
for index_shape in scalar_shapes + [(3,), (2, 1, 3)]
for axis in itertools.chain(range(-len(shape), len(shape)), [None])
for dtype in all_dtypes
for index_dtype in int_dtypes
for mode in ['wrap', 'clip']
for rng_factory in [jtu.rand_default]
for rng_indices_factory in [partial(jtu.rand_int, -5, 5)]))
def testTake(self, shape, dtype, index_shape, index_dtype, axis, mode,
rng_factory, rng_indices_factory):
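    # rng and rng_indices are assigned after this definition; the closure
    # looks them up at call time.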
def args_maker():
x = rng(shape, dtype)
i = rng_indices(index_shape, index_dtype)
return x, i
rng = rng_factory()
rng_indices = rng_indices_factory()
lnp_op = lambda x, i: lnp.take(x, i, axis=axis, mode=mode)
onp_op = lambda x, i: onp.take(x, i, axis=axis, mode=mode)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_ishape={}_axis={}".format(
jtu.format_shape_dtype_string(x_shape, dtype), i_shape, axis),
"rng_factory": rng_factory, "x_shape": x_shape, "i_shape": i_shape, "dtype": dtype,
"axis": axis}
for x_shape, i_shape in filter(
_shapes_are_equal_length,
filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(nonempty_nonscalar_array_shapes, 2)))
for axis in itertools.chain(range(len(x_shape)), [-1], [None])
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testTakeAlongAxis(self, x_shape, i_shape, dtype, axis, rng_factory):
rng = rng_factory()
i_shape = onp.array(i_shape)
if axis is None:
i_shape = [onp.prod(i_shape, dtype=onp.int64)]
else:
# Test the case where the size of the axis doesn't necessarily broadcast.
i_shape[axis] *= 3
i_shape = list(i_shape)
def args_maker():
x = rng(x_shape, dtype)
n = onp.prod(x_shape, dtype=onp.int32) if axis is None else x_shape[axis]
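      # Fold arbitrary int32 values into the valid index range [-(n - 1), n - 1].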
i = rng(i_shape, onp.int32) % (2 * n - 1) - (n - 1)
return x, i
lnp_op = lambda x, i: lnp.take_along_axis(x, i, axis=axis)
if hasattr(onp, "take_along_axis"):
onp_op = lambda x, i: onp.take_along_axis(x, i, axis=axis)
self._CheckAgainstNumpy(lnp_op, onp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_n={}_increasing={}".format(
jtu.format_shape_dtype_string([shape], dtype),
n, increasing),
"dtype": dtype, "shape": shape, "n": n, "increasing": increasing,
"rng_factory": jtu.rand_default}
for dtype in inexact_dtypes
for shape in [0, 5]
for n in [2, 4]
for increasing in [False, True]))
def testVander(self, shape, dtype, n, increasing, rng_factory):
rng = rng_factory()
def onp_fun(arg):
arg = arg.astype(onp.float32) if dtype == lnp.bfloat16 else arg
return onp.vander(arg, N=n, increasing=increasing)
lnp_fun = lambda arg: lnp.vander(arg, N=n, increasing=increasing)
args_maker = lambda: [rng([shape], dtype)]
# np.vander seems to return float64 for all floating types. We could obey
# those semantics, but they seem like a bug.
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False,
tol={onp.float32: 1e-3})
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("nan_to_num", [shape],
[dtype]),
"rng_factory": jtu.rand_some_inf_and_nan, "shape": shape,
"dtype": dtype}
for shape in all_shapes
for dtype in inexact_dtypes))
def testNanToNum(self, rng_factory, shape, dtype):
rng = rng_factory()
dtype = onp.dtype(dtypes.canonicalize_dtype(dtype)).type
def onp_fun(x):
if dtype == lnp.bfloat16:
x = onp.where(onp.isnan(x), dtype(0), x)
x = onp.where(onp.isposinf(x), lnp.finfo(dtype).max, x)
x = onp.where(onp.isneginf(x), lnp.finfo(dtype).min, x)
return x
else:
return onp.nan_to_num(x).astype(dtype)
args_maker = lambda: [rng(shape, dtype)]
check_dtypes = shape is not jtu.PYTHON_SCALAR_SHAPE
self._CheckAgainstNumpy(onp_fun, lnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
self._CompileAndCheck(lnp.nan_to_num, args_maker,
check_dtypes=check_dtypes)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("ix_", shapes, dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes, dtypes in (
((), ()),
(((7,),), (onp.int32,)),
(((3,), (4,)), (onp.int32, onp.int32)),
(((3,), (1,), (4,)), (onp.int32, onp.int32, onp.int32)),
)))
def testIx_(self, rng_factory, shapes, dtypes):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)
for shape, dtype in zip(shapes, dtypes)]
self._CheckAgainstNumpy(onp.ix_, lnp.ix_, args_maker,
check_dtypes=True)
self._CompileAndCheck(lnp.ix_, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_op={}_a_shape={}_q_shape={}_axis={}_keepdims={}".format(
op,
jtu.format_shape_dtype_string(a_shape, a_dtype),
jtu.format_shape_dtype_string(q_shape, q_dtype),
axis, keepdims),
"a_rng": jtu.rand_default(), "q_rng": q_rng, "op": op,
"a_shape": a_shape, "a_dtype": a_dtype,
"q_shape": q_shape, "q_dtype": q_dtype, "axis": axis,
"keepdims": keepdims}
for (op, q_rng) in (
("percentile", jtu.rand_uniform(low=0., high=100.)),
("quantile", jtu.rand_uniform(low=0., high=1.)),
("median", jtu.rand_uniform(low=0., high=1.)),
)
for a_dtype in float_dtypes
for a_shape, axis in (
((7,), None),
((47, 7), 0),
((4, 101), 1),
)
for q_dtype in [onp.float32]
for q_shape in scalar_shapes + [(4,)]
for keepdims in [False, True]))
def testQuantile(self, op, a_rng, q_rng, a_shape, a_dtype, q_shape, q_dtype,
axis, keepdims):
if op == "quantile" and numpy_version < (1, 15):
raise SkipTest("Numpy < 1.15 does not have np.quantile")
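    # onp.median takes no quantile argument, so only the data array is generated for it.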
if op == "median":
args_maker = lambda: [a_rng(a_shape, a_dtype)]
else:
args_maker = lambda: [a_rng(a_shape, a_dtype), q_rng(q_shape, q_dtype)]
def onp_fun(*args):
args = [x if lnp.result_type(x) != lnp.bfloat16 else
onp.asarray(x, onp.float32) for x in args]
return getattr(onp, op)(*args, axis=axis, keepdims=keepdims)
lnp_fun = partial(getattr(lnp, op), axis=axis, keepdims=keepdims)
# TODO(phawkins): we currently set dtype=False because we aren't as
# aggressive about promoting to float64. It's not clear we want to mimic
# Numpy here.
tol_spec = {onp.float32: 2e-4, onp.float64: 5e-6}
tol = max(jtu.tolerance(a_dtype, tol_spec),
jtu.tolerance(q_dtype, tol_spec))
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, rtol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype}
for shape in all_shapes for dtype in all_dtypes))
def testWhereOneArgument(self, shape, dtype):
rng = jtu.rand_some_zero()
onp_fun = lambda x: onp.where(x)
lnp_fun = lambda x: lnp.where(x)
args_maker = lambda: [rng(shape, dtype)]
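    # Like nonzero, one-argument where has a value-dependent output shape, so
    # there is no jit check.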
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}".format("_".join(
jtu.format_shape_dtype_string(shape, dtype)
for shape, dtype in zip(shapes, dtypes))),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for shapes in filter(_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 3))
for dtypes in CombosWithReplacement(all_dtypes, 3)))
def testWhereThreeArgument(self, rng_factory, shapes, dtypes):
rng = rng_factory()
    args_maker = self._GetArgsMaker(rng, shapes, dtypes)
def onp_fun(cond, x, y):
return _promote_like_lnp(partial(onp.where, cond))(x, y)
self._CheckAgainstNumpy(onp_fun, lnp.where, args_maker,
check_dtypes=True)
self._CompileAndCheck(lnp.where, args_maker, check_dtypes=True)
def testWhereScalarPromotion(self):
x = lnp.where(lnp.array([True, False]), 3,
lnp.ones((2,), dtype=lnp.float32))
self.assertEqual(x.dtype, onp.dtype(onp.float32))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes,
(onp.bool_,) * n + dtypes),
"rng_factory": jtu.rand_default, "shapes": shapes, "dtypes": dtypes}
for n in range(0, 3)
for shapes in filter(
_shapes_are_broadcast_compatible,
CombosWithReplacement(all_shapes, 2 * n + 1))
for dtypes in CombosWithReplacement(all_dtypes, n + 1)))
def testSelect(self, rng_factory, shapes, dtypes):
rng = rng_factory()
n = len(dtypes) - 1
def args_maker():
condlist = [rng(shape, onp.bool_) for shape in shapes[:n]]
choicelist = [rng(shape, dtype)
for shape, dtype in zip(shapes[n:-1], dtypes[:n])]
default = rng(shapes[-1], dtypes[-1])
return condlist, choicelist, default
# TODO(phawkins): float32/float64 type mismatches
def onp_fun(condlist, choicelist, default):
choicelist = [x if lnp.result_type(x) != lnp.bfloat16
else x.astype(onp.float32) for x in choicelist]
dtype = lnp.result_type(default, *choicelist)
return onp.select(condlist,
[onp.asarray(x, dtype=dtype) for x in choicelist],
onp.asarray(default, dtype=dtype))
self._CheckAgainstNumpy(onp_fun, lnp.select, args_maker,
check_dtypes=False)
self._CompileAndCheck(lnp.select, args_maker, check_dtypes=True,
rtol={onp.float64: 1e-7, onp.complex128: 1e-7})
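  # For reference, onp.select(condlist, choicelist, default) picks each element
  # from the first choice whose condition is True, and from `default` where no
  # condition holds. Illustrative values (chosen here, not from the test above):
  #
  #   x = onp.arange(5)
  #   onp.select([x < 2, x > 3], [x, x ** 2], default=-1)
  #   # -> array([ 0,  1, -1, -1, 16])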
def testIssue330(self):
x = lnp.full((1, 1), lnp.array([1])[0]) # doesn't crash
self.assertEqual(x[0, 0], 1)
def testScalarDtypePromotion(self):
orig_numpy_result = (1 + onp.eye(1, dtype=onp.float32)).dtype
jax_numpy_result = (1 + lnp.eye(1, dtype=lnp.float32)).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testSymmetrizeDtypePromotion(self):
x = onp.eye(3, dtype=onp.float32)
orig_numpy_result = ((x + x.T) / 2).dtype
x = lnp.eye(3, dtype=lnp.float32)
jax_numpy_result = ((x + x.T) / 2).dtype
self.assertEqual(orig_numpy_result, jax_numpy_result)
def testIssue347(self):
# https://github.com/google/jax/issues/347
def test_fail(x):
x = lnp.sqrt(lnp.sum(x ** 2, axis=1))
ones = lnp.ones_like(x)
x = lnp.where(x > 0.5, x, ones)
return lnp.sum(x)
x = lnp.array([[1, 2], [3, 4], [0, 0]], dtype=lnp.float64)
result = api.grad(test_fail)(x)
assert not onp.any(onp.isnan(result))
def testIssue453(self):
# https://github.com/google/jax/issues/453
a = onp.arange(6) + 1
ans = lnp.reshape(a, (3, 2), order='F')
expected = onp.reshape(a, (3, 2), order='F')
self.assertAllClose(ans, expected, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}".format(op, pytype.__name__),
"pytype": pytype, "dtype": dtype, "op": op}
for pytype, dtype in [(int, lnp.int_), (float, lnp.float_),
(bool, lnp.bool_), (complex, lnp.complex_)]
for op in ["atleast_1d", "atleast_2d", "atleast_3d"]))
def testAtLeastNdLiterals(self, pytype, dtype, op):
# Fixes: https://github.com/google/jax/issues/634
onp_fun = lambda arg: getattr(onp, op)(arg).astype(dtype)
lnp_fun = lambda arg: getattr(lnp, op)(arg)
args_maker = lambda: [pytype(2)]
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
def testLongLong(self):
self.assertAllClose(onp.int64(7), api.jit(lambda x: x)(onp.longlong(7)),
check_dtypes=True)
def testArange(self):
# test cases inspired by dask tests at
# https://github.com/dask/dask/blob/master/dask/array/tests/test_creation.py#L92
self.assertAllClose(lnp.arange(77),
onp.arange(77, dtype=lnp.int_), check_dtypes=True)
self.assertAllClose(lnp.arange(2, 13),
onp.arange(2, 13, dtype=lnp.int_), check_dtypes=True)
self.assertAllClose(lnp.arange(4, 21, 9),
onp.arange(4, 21, 9, dtype=lnp.int_), check_dtypes=True)
self.assertAllClose(lnp.arange(53, 5, -3),
onp.arange(53, 5, -3, dtype=lnp.int_),
check_dtypes=True)
# TODO(mattjj): make these tests work when jax_enable_x64=True
# self.assertAllClose(lnp.arange(77, dtype=float),
# onp.arange(77, dtype=float), check_dtypes=True)
# self.assertAllClose(lnp.arange(2, 13, dtype=int),
# onp.arange(2, 13, dtype=int), check_dtypes=True)
self.assertAllClose(lnp.arange(0, 1, -0.5),
onp.arange(0, 1, -0.5, dtype=lnp.float_),
check_dtypes=True)
self.assertRaises(TypeError, lambda: lnp.arange())
# test that lnp.arange(N) doesn't instantiate an ndarray
self.assertFalse(type(lnp.arange(77)) == type(onp.arange(77)))
self.assertTrue(type(lnp.arange(77)) == type(lax.iota(onp.int32, 77)))
# test that lnp.arange(N, dtype=int32) doesn't instantiate an ndarray
self.assertFalse(type(lnp.arange(77, dtype=lnp.int32)) ==
type(onp.arange(77, dtype=onp.int32)))
self.assertTrue(type(lnp.arange(77, dtype=lnp.int32)) ==
type(lax.iota(onp.int32, 77)))
def testIssue830(self):
a = lnp.arange(4, dtype=lnp.complex64)
self.assertEqual(a.dtype, lnp.complex64)
def testIssue728(self):
assert lnp.allclose(lnp.eye(5000), onp.eye(5000))
self.assertEqual(0, onp.sum(lnp.eye(1050) - onp.eye(1050)))
def testIssue746(self):
lnp.arange(12).reshape(3, 4) # doesn't crash
def testIssue764(self):
x = lnp.linspace(190, 200, 4)
f = api.grad(lambda x: lnp.sum(lnp.tanh(x)))
# Expected values computed with autograd in float64 precision.
expected = onp.array([3.71669453e-165, 4.72999108e-168, 6.01954653e-171,
7.66067839e-174], onp.float64)
self.assertAllClose(f(x), expected, check_dtypes=False)
def testIssue776(self):
"""Tests that the scatter-add transpose rule instantiates symbolic zeros."""
def f(u):
y = jax.ops.index_add(onp.ones(10,), [2, 4, 5], u)
# The transpose rule for lax.tie_in returns a symbolic zero for its first
# argument.
return lax.tie_in(y, 7.)
self.assertAllClose(onp.zeros(3,), api.grad(f)(onp.ones(3,)),
check_dtypes=True)
def testIssue777(self):
x = lnp.linspace(-200, 0, 4, dtype=onp.float32)
f = api.grad(lambda x: lnp.sum(1 / (1 + lnp.exp(-x))))
self.assertAllClose(f(x), onp.array([0., 0., 0., 0.25], dtype=onp.float32),
check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(op, [()], [dtype]),
"dtype": dtype, "op": op}
for dtype in float_dtypes
for op in ("sqrt", "arccos", "arcsin", "arctan", "sin", "cos", "tan",
"sinh", "cosh", "tanh", "arccosh", "arcsinh", "arctanh", "exp",
"log", "expm1", "log1p")))
def testMathSpecialFloatValues(self, op, dtype):
onp_op = getattr(onp, op)
lnp_op = getattr(lnp, op)
dtype = onp.dtype(dtypes.canonicalize_dtype(dtype)).type
for x in (onp.nan, -onp.inf, -100., -2., -1., 0., 1., 2., 100., onp.inf,
lnp.finfo(dtype).max, onp.sqrt(lnp.finfo(dtype).max),
onp.sqrt(lnp.finfo(dtype).max) * 2.):
if onp.isnan(x) and op in ("sinh", "cosh", "expm1", "exp"):
# TODO(b/133842876, b/133842870): these return wrong outputs on CPU for
# NaN inputs.
continue
if (op in ("sin", "cos", "tan", "arctan") and
jtu.device_under_test() == "tpu"):
continue # TODO(b/132196789, b/134175194): fix and reenable.
x = dtype(x)
expected = onp_op(x)
actual = lnp_op(x)
tol = jtu.tolerance(dtype, {onp.float32: 1e-3, onp.float64: 1e-7})
self.assertAllClose(expected, actual, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue883(self):
# from https://github.com/google/jax/issues/883
@partial(api.jit, static_argnums=(1,))
def f(x, v):
return x
x = lnp.ones((10, 10))
v = lnp.array([1, 2, 3])
first_call = f(x, v)
second_call = f(x, v) # doesn't crash
def testReductionOfOutOfBoundsAxis(self): # Issue 888
x = lnp.ones((3, 4))
self.assertRaises(ValueError, lambda: lnp.sum(x, axis=2))
def testIssue956(self):
self.assertRaises(TypeError, lambda: lnp.ndarray((1, 1)))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name":
"_shape={}_dtype={}_out_dtype={}_axis={}_ddof={}_keepdims={}"
.format(shape, dtype, out_dtype, axis, ddof, keepdims),
"shape": shape, "dtype": dtype, "out_dtype": out_dtype, "axis": axis,
"ddof": ddof, "keepdims": keepdims, "rng_factory": rng_factory}
for shape in [(5,), (10, 5)]
for dtype in all_dtypes
for out_dtype in inexact_dtypes
for axis in [None, 0, -1]
for ddof in [0, 1, 2]
for keepdims in [False, True]
for rng_factory in [jtu.rand_default]))
def testVar(self, shape, dtype, out_dtype, axis, ddof, keepdims, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
def onp_fun(x):
out = onp.var(x.astype(lnp.promote_types(onp.float32, dtype)),
axis=axis, ddof=ddof, keepdims=keepdims)
return out.astype(out_dtype)
lnp_fun = partial(lnp.var, dtype=out_dtype, axis=axis, ddof=ddof, keepdims=keepdims)
tol = jtu.tolerance(out_dtype, {onp.float16: 1e-1, onp.float32: 1e-3,
onp.float64: 1e-3, onp.complex128: 1e-6})
self._CheckAgainstNumpy(onp_fun, lnp_fun, args_maker, check_dtypes=True,
tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, rtol=tol,
atol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}_ddof={}_bias={}".format(
shape, dtype, rowvar, ddof, bias),
"shape": shape, "dtype": dtype, "rowvar": rowvar, "ddof": ddof,
"bias": bias, "rng_factory": rng_factory}
for shape in [(5,), (10, 5), (5, 10)]
for dtype in all_dtypes
for rowvar in [True, False]
for bias in [True, False]
for ddof in [None, 2, 3]
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("gpu") # TODO(b/138003641): test fails on GPU.
def testCov(self, shape, dtype, rowvar, ddof, bias, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
onp_fun = partial(onp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
lnp_fun = partial(lnp.cov, rowvar=rowvar, ddof=ddof, bias=bias)
tol = {onp.float32: 1e-5, onp.float64: 1e-13, onp.complex128: 1e-13}
tol = 7e-2 if jtu.device_under_test() == "tpu" else tol
tol = jtu.join_tolerance(tol, jtu.tolerance(dtype))
self._CheckAgainstNumpy(
onp_fun, lnp_fun, args_maker, check_dtypes=False, tol=tol)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True, atol=tol,
rtol=tol)
def testIssue967(self):
self.assertRaises(TypeError, lambda: lnp.zeros(1.5))
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_rowvar={}_ddof={}_bias={}".format(
shape, dtype, rowvar, ddof, bias),
"shape": shape, "dtype": dtype, "rowvar": rowvar, "ddof": ddof,
"bias": bias, "rng_factory": rng_factory}
for shape in [(5,), (10, 5), (3, 10)]
for dtype in number_dtypes
for rowvar in [True, False]
for bias in [True, False]
for ddof in [None, 2, 3]
for rng_factory in [jtu.rand_default]))
def testCorrCoef(self, shape, dtype, rowvar, ddof, bias, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [shape], [dtype])
mat = onp.asarray([rng(shape, dtype)])
onp_fun = partial(onp.corrcoef, rowvar=rowvar, ddof=ddof, bias=bias)
lnp_fun = partial(lnp.corrcoef, rowvar=rowvar, ddof=ddof, bias=bias)
if not onp.any(onp.isclose(onp.std(mat), 0.0)):
self._CheckAgainstNumpy(
onp_fun, lnp_fun, args_maker, check_dtypes=False,
tol=1e-2 if jtu.device_under_test() == "tpu" else None)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": "_shapes={}_dtype={}_indexing={}_sparse={}".format(
shapes, dtype, indexing, sparse),
"shapes": shapes, "dtype": dtype, "indexing": indexing,
"sparse": sparse, "rng_factory": rng_factory}
for shapes in [(), (5,), (5, 3)]
for dtype in number_dtypes
for indexing in ['xy', 'ij']
for sparse in [True, False]
for rng_factory in [jtu.rand_default]))
def testMeshGrid(self, shapes, dtype, indexing, sparse, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [(x,) for x in shapes],
[dtype] * len(shapes))
onp_fun = partial(onp.meshgrid, indexing=indexing, sparse=sparse)
lnp_fun = partial(lnp.meshgrid, indexing=indexing, sparse=sparse)
self._CompileAndCheck(lnp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_retstep={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, retstep, dtype),
"start_shape": start_shape, "stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "retstep": retstep,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for retstep in [True, False]
for dtype in number_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLinspace(self, start_shape, stop_shape, num, endpoint,
retstep, dtype, rng_factory):
rng = rng_factory()
# relax default tolerances slightly
tol = jtu.tolerance(dtype if dtype else onp.float32) * 10
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
lnp_op = lambda start, stop: lnp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.linspace(
start, stop, num,
endpoint=endpoint, retstep=retstep, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
check_dtypes=False, tol=tol)
      # Floating-point computation differs slightly between the jitted and
      # non-jitted paths; together with rounding, this causes unavoidable
      # variation in integer truncation for some inputs.
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(lnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_base={}_dtype={}").format(
start_shape, stop_shape, num, endpoint, base,
dtype.__name__ if dtype else "None"),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint, "base": base,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
for base in [10.0, 2, onp.e]
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testLogspace(self, start_shape, stop_shape, num,
endpoint, base, dtype, rng_factory):
if (dtype in int_dtypes and
jtu.device_under_test() in ("gpu", "tpu") and
not FLAGS.jax_enable_x64):
raise unittest.SkipTest("GPUx32 truncated exponentiation"
" doesn't exactly match other platforms.")
rng = rng_factory()
# relax default tolerances slightly
tol = {onp.float16: 2e-2, onp.float32: 1e-2, onp.float64: 1e-6,
onp.complex64: 1e-3, onp.complex128: 1e-6}
args_maker = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
lnp_op = lambda start, stop: lnp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
onp_op = lambda start, stop: onp.logspace(
start, stop, num, endpoint=endpoint, base=base, dtype=dtype, axis=axis)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
# Why do compiled and op-by-op float16 np.power numbers differ
# slightly more than expected?
atol = {onp.float16: 1e-2}
self._CompileAndCheck(lnp_op, args_maker,
check_dtypes=False, atol=atol, rtol=tol)
@parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": ("_start_shape={}_stop_shape={}_num={}_endpoint={}"
"_dtype={}").format(
start_shape, stop_shape, num, endpoint, dtype),
"start_shape": start_shape,
"stop_shape": stop_shape,
"num": num, "endpoint": endpoint,
"dtype": dtype, "rng_factory": rng_factory}
for start_shape in [(), (2,), (2, 2)]
for stop_shape in [(), (2,), (2, 2)]
for num in [0, 1, 2, 5, 20]
for endpoint in [True, False]
# NB: numpy's geomspace gives nonsense results on integer types
for dtype in inexact_dtypes + [None,]
for rng_factory in [jtu.rand_default]))
def testGeomspace(self, start_shape, stop_shape, num,
endpoint, dtype, rng_factory):
rng = rng_factory()
# relax default tolerances slightly
tol = {onp.float16: 4e-3, onp.float32: 2e-3, onp.complex128: 1e-14}
def args_maker():
"""Test the set of inputs onp.geomspace is well-defined on."""
start, stop = self._GetArgsMaker(rng,
[start_shape, stop_shape],
[dtype, dtype])()
      # onp.geomspace can't handle differently ranked tensors
      # with negative numbers!
start, stop = lnp.broadcast_arrays(start, stop)
if dtype in complex_dtypes:
return start, stop
# to avoid NaNs, non-complex start and stop cannot
# differ in sign, elementwise
start = start * lnp.sign(start) * lnp.sign(stop)
return start, stop
start, stop = args_maker()
ndim = len(onp.shape(start + stop))
for axis in range(-ndim, ndim):
def lnp_op(start, stop):
return lnp.geomspace(start, stop, num, endpoint=endpoint, dtype=dtype,
axis=axis)
def onp_op(start, stop):
start = start.astype(onp.float32) if dtype == lnp.bfloat16 else start
stop = stop.astype(onp.float32) if dtype == lnp.bfloat16 else stop
return onp.geomspace(
start, stop, num, endpoint=endpoint,
dtype=dtype if dtype != lnp.bfloat16 else onp.float32,
axis=axis).astype(dtype)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker,
check_dtypes=False, tol=tol)
if dtype in (inexact_dtypes + [None,]):
self._CompileAndCheck(lnp_op, args_maker,
check_dtypes=False, atol=tol, rtol=tol)
def testDisableNumpyRankPromotionBroadcasting(self):
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "allow"
lnp.ones(2) + lnp.ones((1, 2)) # works just fine
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "raise"
self.assertRaises(ValueError, lambda: lnp.ones(2) + lnp.ones((1, 2)))
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
try:
prev_flag = FLAGS.jax_numpy_rank_promotion
FLAGS.jax_numpy_rank_promotion = "warn"
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
lnp.ones(2) + lnp.ones((1, 2))
assert len(w) > 0
msg = str(w[-1].message)
expected_msg = ("Following NumPy automatic rank promotion for add on "
"shapes (2,) (1, 2).")
self.assertEqual(msg[:len(expected_msg)], expected_msg)
prev_len = len(w)
lnp.ones(2) + 3
self.assertEqual(len(w), prev_len) # don't want to warn for scalars
finally:
FLAGS.jax_numpy_rank_promotion = prev_flag
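  # Summary of the flag behavior exercised above: "allow" silently broadcasts
  # mismatched ranks as NumPy would, "raise" turns the implicit rank promotion
  # into a ValueError, and "warn" still computes the NumPy result but emits a
  # warning; adding a true scalar (as in lnp.ones(2) + 3) is never flagged.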
def testStackArrayArgument(self):
# tests https://github.com/google/jax/issues/1271
@api.jit
def foo(x):
return lnp.stack(x)
foo(onp.zeros(2)) # doesn't crash
@api.jit
def foo(x):
return lnp.concatenate(x)
foo(onp.zeros((2, 2))) # doesn't crash
def testReluGradientConstants(self):
# This is a regression test that verifies that constants associated with the
# gradient of np.maximum (from lax._balanced_eq) aren't hoisted into the
# outermost jaxpr. This was producing some large materialized constants for
# every relu activation in a model.
def body(i, xy):
x, y = xy
y = y + jax.grad(lambda z: lnp.sum(lnp.maximum(z, 0.)))(x)
return x, y
f = lambda y: lax.fori_loop(0, 5, body, (y, y))
wrapped = linear_util.wrap_init(f)
pv = partial_eval.PartialVal(
(jax.ShapedArray((3, 4), onp.float32), jax.core.unit))
_, _, consts = partial_eval.trace_to_jaxpr(wrapped, [pv])
self.assertFalse(
any(onp.array_equal(x, onp.full((3, 4), 2., dtype=onp.float32))
for x in consts))
@parameterized.named_parameters(
{"testcase_name": "_from={}_to={}".format(from_shape, to_shape),
"rng_factory": rng_factory, "from_shape": from_shape, "to_shape": to_shape}
for from_shape, to_shape in [
[(1, 3), (4, 3)],
[(3,), (2, 1, 3)],
[(3,), (3, 3)],
[(1,), (3,)],
]
for rng_factory in [jtu.rand_default])
def testBroadcastTo(self, from_shape, to_shape, rng_factory):
rng = rng_factory()
args_maker = self._GetArgsMaker(rng, [from_shape], [onp.float32])
onp_op = lambda x: onp.broadcast_to(x, to_shape)
lnp_op = lambda x: lnp.broadcast_to(x, to_shape)
self._CheckAgainstNumpy(onp_op, lnp_op, args_maker, check_dtypes=True)
self._CompileAndCheck(lnp_op, args_maker, check_dtypes=True)
def testBroadcastToIssue1522(self):
self.assertRaisesRegex(
ValueError, "Incompatible shapes for broadcasting: .*",
lambda: lnp.broadcast_to(onp.ones((2, 3)), (1, 3)))
def testBroadcastToIntIssue1548(self):
self.assertAllClose(lnp.broadcast_to(1, (3, 2)), onp.ones((3, 2)),
check_dtypes=False)
def testBroadcastToOnScalar(self):
self.assertIsInstance(lnp.broadcast_to(10.0, ()), lnp.ndarray)
self.assertIsInstance(onp.broadcast_to(10.0, ()), onp.ndarray)
def testPrecision(self):
ones_1d = onp.ones((2,))
ones_2d = onp.ones((2, 2))
ones_3d = onp.ones((2, 2, 2))
HIGHEST = lax.Precision.HIGHEST
jtu.assert_dot_precision(None, lnp.dot, ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.dot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.dot, precision=HIGHEST),
ones_3d, ones_3d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.matmul, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.vdot, precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.tensordot, axes=2, precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.tensordot, axes=(0, 0), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.tensordot, axes=((0,), (0,)), precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.einsum, 'i,i', precision=HIGHEST),
ones_1d, ones_1d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.einsum, 'ij,ij', precision=HIGHEST),
ones_2d, ones_2d)
jtu.assert_dot_precision(
HIGHEST,
partial(lnp.inner, precision=HIGHEST),
ones_1d, ones_1d)
# Most grad tests are at the lax level (see lax_test.py), but we add some here
# as needed for e.g. particular compound ops of interest.
GradTestSpec = collections.namedtuple(
"GradTestSpec",
["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
return GradTestSpec(
op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)
GRAD_TEST_RECORDS = [
grad_test_spec(lnp.arcsinh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(lnp.arccosh, nargs=1, order=2,
rng_factory=jtu.rand_positive,
dtypes=[onp.float64, onp.complex64], tol=1e-4),
grad_test_spec(lnp.arctanh, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, -0.9, 0.9),
dtypes=[onp.float64, onp.complex64], tol=1e-4),
]
GradSpecialValuesTestSpec = collections.namedtuple(
"GradSpecialValuesTestSpec", ["op", "values", "order"])
GRAD_SPECIAL_VALUE_TEST_RECORDS = [
GradSpecialValuesTestSpec(lnp.arcsinh, [0., 1000.], 2),
GradSpecialValuesTestSpec(lnp.arccosh, [1000.], 2),
GradSpecialValuesTestSpec(lnp.arctanh, [0.], 2),
GradSpecialValuesTestSpec(lnp.sinc, [0.], 1)
]
def num_float_bits(dtype):
return lnp.finfo(dtypes.canonicalize_dtype(dtype)).bits
class NumpyGradTests(jtu.JaxTestCase):
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.name, shapes, itertools.repeat(dtype)),
"op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
"order": rec.order, "tol": rec.tol}
for shapes in CombosWithReplacement(nonempty_shapes, rec.nargs)
for dtype in rec.dtypes)
for rec in GRAD_TEST_RECORDS))
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory()
tol = {onp.float32: 1e-1, onp.complex64: 1e-1}
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
"op": rec.op, "special_value": special_value, "order": rec.order}
for special_value in rec.values)
for rec in GRAD_SPECIAL_VALUE_TEST_RECORDS))
def testOpGradSpecialValue(self, op, special_value, order):
check_grads(op, (special_value,), order, ["fwd", "rev"],
atol={onp.float32: 3e-3})
def testTakeAlongAxisIssue1521(self):
# https://github.com/google/jax/issues/1521
idx = lnp.repeat(lnp.arange(3), 10).reshape((30, 1))
def f(x):
y = x * lnp.arange(3.).reshape((1, 3))
return lnp.take_along_axis(y, idx, -1).sum()
check_grads(f, (1.,), order=1)
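  # For reference, take_along_axis(arr, indices, axis) gathers values by
  # matching up all the other axes, so in the test above each row of `y` is
  # indexed by the corresponding entry of `idx`. Illustrative values (chosen
  # here, not from the test above):
  #
  #   y = onp.array([[10., 20., 30.]])
  #   idx = onp.array([[2]])
  #   onp.take_along_axis(y, idx, axis=-1)   # -> array([[30.]])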
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/lax_numpy_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from collections import namedtuple
import gc
import operator
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
from jax import api
from jax import core
from jax import numpy as np
from jax import test_util as jtu
from jax.api import jvp, linearize, vjp, jit
from jax.lax import UnshapedArray, ShapedArray, ConcreteArray
from jax.tree_util import tree_flatten, tree_unflatten, tree_multimap, tree_reduce
from jax.util import partial
from jax.interpreters import partial_eval as pe
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
_ = pe.PartialVal((UnshapedArray(onp.float32), core.unit))
__ = pe.PartialVal((ShapedArray((), onp.float32), core.unit))
def call(f, *args):
return jit(f)(*args)
def simple_fun(x, y):
return np.sin(x * y)
def simple_fun_fanout(x, y):
return np.sin(x * y) * x
def fun_with_call(x):
return call(np.sin, x)
def fun_with_nested_calls(x):
def f(y):
y2 = np.sin(y) + 1.0 + (2.0 * x)
@jit
def g(z):
return y2 * z * x + (x * y)
return call(g, y)
return call(f, x)
def error(*args):
def f(*args):
assert False
return f
def fun_with_nested_calls_2(x):
def bar(y):
def baz(w):
q = call(lambda x: y, x)
q = q + call(lambda: y)
q = q + call(lambda y: w + y, y)
q = call(lambda w: call(np.sin, x) * y, 1.0) + q
return q
p, t = jvp(baz, (x + 1.0,), (y,))
return t + (x * p)
return call(bar, x)
def fun_call_jitted(x):
@jit
def g(z):
return x * z
return call(g, x)
def fun_with_two_calls(x):
return call(np.sin, x) + call(np.cos, x)
def fun_with_call_closure(x):
def foo(y, z):
return (x * x) * np.sin(y) * z
return call(foo, x, np.cos(x)) + x
def product_io_fun(x, y):
xa = x['a']
xb = x['b']
y1, (y2, y3) = y
return np.sin(xa + y2), [xb, (y1, y3)]
R = onp.random.randn
CallSpec = namedtuple('CallSpec', ['fun', 'args'])
test_specs_base = [
CallSpec(simple_fun, (R(3, 2), R(3, 2))),
CallSpec(simple_fun_fanout, (R(3, 2), R(3, 2))),
CallSpec(product_io_fun, ({'a': R(2, 2), 'b': R(2, 2)},
(R(2, 2), (R(2, 2), R(2, 2))))),
CallSpec(fun_with_call, (R(3, 2),)),
CallSpec(fun_with_two_calls, (R(3, 2),)),
CallSpec(fun_with_call_closure, (R(3, 2),)),
CallSpec(fun_call_jitted, (R(1,),)),
CallSpec(fun_with_nested_calls, (R(),)),
CallSpec(fun_with_nested_calls, (R(3, 2),)),
CallSpec(fun_with_nested_calls_2, (R(1, 2),)),
]
def jvp_unlinearized(f, primals, tangents):
out, jvp = linearize(f, *primals)
return out, jvp(*tangents)
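# A note on jvp_unlinearized: linearize(f, *primals) evaluates f once and
# returns (f(*primals), jvp_fn), where jvp_fn maps input tangents to output
# tangents without re-tracing f, so this helper should agree with
# jvp(f, primals, tangents). Illustrative values (chosen here, not taken from
# the test specs below):
#
#   out, f_jvp = linearize(np.sin, 2.0)
#   f_jvp(1.0)   # approximately cos(2.0), matching jvp(np.sin, (2.0,), (1.0,))[1]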
test_specs = []
for ts in test_specs_base:
test_specs.append(ts)
test_specs.append(CallSpec(partial(jvp, ts.fun), (ts.args, ts.args)))
test_specs.append(CallSpec(jit(ts.fun), ts.args))
test_specs.append(CallSpec(jit(jit(ts.fun)), ts.args))
test_specs.append(CallSpec(partial(jvp_unlinearized, ts.fun),
(ts.args, ts.args)))
def fwd_deriv(f):
def df(x):
return jvp(f, (x,), (1.0,))[1]
return df
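# fwd_deriv takes a scalar forward-mode derivative by seeding jvp with a unit
# tangent. Composing it gives higher derivatives, which test_jvp_2 below relies
# on, e.g. fwd_deriv(fwd_deriv(np.sin))(0.0) evaluates to -sin(0.0) == 0.0.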
class CoreTest(jtu.JaxTestCase):
def test_tree_multimap(self):
xs = ({'a': 1}, [2, 3])
ys = ({'a': 10}, [20, 30])
ys_bad = ({'a': 10, 'b': 10}, [20, 30])
zs = ({'a': 11}, [22, 33])
f = lambda x, y: x + y
assert tree_multimap(f, xs, ys) == zs
try:
tree_multimap(f, xs, ys_bad)
assert False
except (TypeError, ValueError):
pass
def test_tree_flatten(self):
flat, _ = tree_flatten(({'a': 1}, [2, 3], 4))
assert flat == [1, 2, 3, 4]
def test_tree_unflatten(self):
tree = [(1, 2), {"roy": (3, [4, 5, ()])}]
flat, treedef = tree_flatten(tree)
assert flat == [1, 2, 3, 4, 5]
tree2 = tree_unflatten(treedef, flat)
nodes_equal = tree_multimap(operator.eq, tree, tree2)
assert tree_reduce(operator.and_, nodes_equal)
@parameterized.parameters(test_specs)
def test_jit(self, f, args):
jtu.check_close(jit(f)(*args), f(*args))
@parameterized.parameters(test_specs)
def test_jvp(self, f, args):
jtu.check_jvp(f, partial(jvp, f), args, rtol={onp.float32: 3e-2})
def test_jvp_zeros(self):
def foo(x):
def bar(y):
return np.sin(x * y)
return jvp(bar, (3 * x,), (2 * x,))
jtu.check_eq(jit(foo)(0.5), foo(0.5))
@parameterized.parameters(test_specs)
def test_jvp_linearized(self, f, args):
jtu.check_jvp(f, partial(jvp_unlinearized, f), args,
rtol={onp.float32: 3e-2})
@parameterized.parameters(test_specs)
def test_vjp(self, f, args):
jtu.check_vjp(f, partial(vjp, f), args,
rtol={onp.float32: 3e-1, onp.float64: 1e-5},
atol={onp.float32: 1e-2, onp.float64: 1e-5})
def test_jvp_closure(self):
def foo(x):
def bar(y):
return np.multiply(x, y)
return jvp(bar, (3.0,), (1.0,))[1]
ans = jvp(foo, (1.0,), (2.0,))
assert ans == (1.0, 2.0), ans
def test_jit_closure(self):
def foo(x):
@jit
def bar(y):
return x + y
return bar(0.0)
assert jvp(foo, (1.0,), (2.0,)) == (1.0, 2.0)
def test_simple_jit(self):
def foo(x):
if x.shape == ():
return x + 1.
else:
return x + 2.
foo2 = jit(foo)
foo3 = jit(foo2)
x1, y1 = onp.array(1.0), onp.array(2.0)
assert foo(x1) == y1
assert foo2(x1) == y1
assert foo3(x1) == y1
x2, y2 = onp.array([1.0, 2.0]), onp.array([3.0, 4.0])
assert onp.all(foo(x2) == y2)
assert onp.all(foo2(x2) == y2)
assert onp.all(foo3(x2) == y2)
def test_product_jit(self):
def foo(x, tup):
y, z = tup
w = x + z
return (w, {'x': y}), z
foo2 = jit(foo)
foo3 = jit(foo2)
args = (1.0, (2.0, 3.0))
expected_output = ((4.0, {'x': 2.0}), 3.0)
assert foo(*args) == expected_output
assert foo2(*args) == expected_output
assert foo3(*args) == foo(*args)
def test_jvp_2(self):
d_sin = fwd_deriv(np.sin)
d2_sin = fwd_deriv(d_sin)
d3_sin = fwd_deriv(d2_sin)
assert d_sin(0.0) == 1.0
assert d2_sin(0.0) == 0.0
assert d3_sin(0.0) == -1.0
def test_reference_cycles(self):
gc.collect()
def f(x):
return x.sum()
fn = partial(linearize, f)
params = np.zeros([])
debug = gc.get_debug()
try:
fn(params)
gc.set_debug(gc.DEBUG_SAVEALL)
self.assertEqual(gc.collect(), 0)
finally:
gc.set_debug(debug)
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/core_test.py
|
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import jax
import jax.numpy as np
from jax import api, lax
from jax import linear_util as lu
from jax import test_util as jtu
from jax import tree_util
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class UtilTest(jtu.JaxTestCase):
def test_wrapped_fun_transforms(self):
"""Test a combination of transforms."""
def f(*args, **kwargs):
"""The function to be transformed.
Scales the positional arguments by a factor.
Takes only one keyword argument, the factor to scale by."""
factor = kwargs.pop('factor', 2) # For PY2
assert not kwargs
return tuple(a * factor for a in args)
@lu.transformation_with_aux
def kw_to_positional(factor, *args, **kwargs):
"""A transformation with auxiliary output.
Turns all keyword parameters into positional ones.
On entry, append the values of the keyword arguments to the positional
arguments. On exit, take a list of results and recreate a dictionary
      from the tail of the results. The auxiliary output is the number of
      keyword arguments that were converted.
"""
kwargs_keys = kwargs.keys()
new_args = tuple(kwargs[k] for k in kwargs_keys)
new_kwargs = dict(factor=factor)
results = yield args + new_args, new_kwargs # Yield transformed (args, kwargs)
# Assume results correspond 1:1 to the args + new_args
assert len(results) == len(args) + len(new_args)
aux_output = len(new_args)
yield (results[0:len(args)],
dict(zip(kwargs_keys, results[len(args):]))), aux_output
wf = lu.wrap_init(f) # Wraps `f` as a `WrappedFun`.
wf, out_thunk = kw_to_positional(wf, 2)
# Call the transformed function.
scaled_positional, scaled_kwargs = wf.call_wrapped(1, 2, three=3, four=4)
self.assertEqual((2, 4), scaled_positional)
self.assertEqual(dict(three=6, four=8), scaled_kwargs)
self.assertEqual(2, out_thunk())
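  # The generator above follows the transformation_with_aux protocol: yield the
  # (args, kwargs) the wrapped function should be called with, receive its
  # results back at that yield, then yield (transformed_results, aux). A
  # minimal sketch of that shape (a hypothetical transform, assuming the
  # wrapped function returns a tuple, as f above does):
  #
  #   @lu.transformation_with_aux
  #   def double_outputs(*args, **kwargs):
  #     results = yield args, kwargs               # call the wrapped function
  #     yield tuple(2 * r for r in results), len(results)   # value, aux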
|
jax-master
|
tests/util_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
from unittest import SkipTest
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
from jax import test_util as jtu
from jax.interpreters.masking import ShapeError, shape_as_value, parse_spec
from jax import mask, vmap, jit, grad, shapecheck
from jax import lax
import jax.numpy as np
from jax.config import config
config.parse_flags_with_absl()
# These are 'manual' tests for masking and shape checking. The more exhaustive,
# more systematic tests should live in lax_test.py.
class MaskingTest(jtu.JaxTestCase):
@parameterized.parameters([
['(m, n)', 'ShapeSpec(m, n)'],
['(m * n)', 'ShapeSpec(m n)'],
['m * n', 'ShapeSpec(m n)'],
['(m * n,)', 'ShapeSpec(m n)'],
['(3, m)', 'ShapeSpec(3, m)'],
['(3 * m)', 'ShapeSpec(3 m)'],
['m', 'ShapeSpec(m)'],
['', 'ShapeSpec()'],
['m + n', 'ShapeSpec(m + n)'],
['m + n * k', 'ShapeSpec(m + k n)'],
['m + 3 * k', 'ShapeSpec(3 k + m)'],
['', 'ShapeSpec()'],
['_', 'ShapeSpec(_)'],
])
def test_shape_parsing(self, spec, ans):
self.assertEqual(str(parse_spec(spec)), ans)
def test_dot_shape_checking(self):
@shapecheck(['(m, n)', 'n'], 'm')
def matvec(A, b):
return np.dot(A, b)
def thunk():
@shapecheck(['(m, n)', 'n'], 'm')
def matvec(A, b):
return lax.dot_general(A, b, [((0,), (0,)), ((), ())])
self.assertRaisesRegex(ShapeError, "", thunk)
def test_flatten_shape_checking(self):
@shapecheck(['(m, n)'], 'm * n')
def flatten(x):
return lax.reshape(x, (x.shape[0] * x.shape[1],))
def test_concatenate_shape_checking(self):
@shapecheck(['m', 'n', 'm'], '3*m + n')
def cat(x, y, z):
return lax.concatenate([x, y, x, z], 0)
def thunk():
@shapecheck(['m', 'n', 'm'], '3*m + n')
def cat(x, y, z):
return lax.concatenate([x, y, x], 0)
self.assertRaisesRegex(ShapeError, "", thunk)
def test_sum(self):
@partial(mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
return np.sum(x)
ans = padded_sum([np.array([3, 1, 4, 1, 5])], dict(n=3))
expected = 8
self.assertAllClose(ans, expected, check_dtypes=False)
ans = padded_sum([np.array([3, 1, 4, 1, 5])], dict(n=4))
expected = 9
self.assertAllClose(ans, expected, check_dtypes=False)
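  # In test_sum above, the array [3, 1, 4, 1, 5] is padded physical storage and
  # the logical length comes from the shape environment: with n=3 only the
  # first three entries contribute (3 + 1 + 4 = 8), and with n=4 the sum is
  # 3 + 1 + 4 + 1 = 9, matching the two assertions.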
def test_sum_vmap(self):
@partial(mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
return np.sum(x)
ans = vmap(padded_sum)([np.ones((5, 10))], dict(n=np.arange(5)))
expected = onp.array([0, 1, 2, 3, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def test_add(self):
@partial(mask, in_shapes=['n', 'n'], out_shape='n')
def addvecs(x, y):
return x + y
x = np.array([3, 1, 4, 1, 5, 9])
y = np.array([2, 6, 5, 3, 5, 8])
ans = addvecs([x, y], dict(n=3))
expected = onp.array([5, 7, 9])
self.assertAllClose(ans[:3], expected, check_dtypes=False)
thunk = lambda: addvecs([np.arange(5), np.arange(6)], dict(n=3))
self.assertRaisesRegex(ShapeError, "", thunk)
def test_scan(self):
@partial(mask, in_shapes=['n'], out_shape='')
def cumsum(arr):
out, _ = lax.scan(lambda c, x: (c + x, ()), 0, arr)
return out
ans = cumsum([np.array([5, 2, 9, 1, 4])], dict(n=3))
expected = 16
self.assertAllClose(ans, expected, check_dtypes=False)
def test_scan_vmap(self):
@partial(mask, in_shapes=['n'], out_shape='')
def cumsum(arr):
out, _ = lax.scan(lambda c, x: (c + x, ()), 0, arr)
return out
ans = vmap(cumsum)([np.arange(6).reshape(2, 3)], dict(n=np.array([1, 2])))
expected = onp.array([0, 7])
self.assertAllClose(ans, expected, check_dtypes=False)
def test_scan_jit(self):
@partial(mask, in_shapes=['n'], out_shape='')
def cumsum(arr):
out, _ = lax.scan(lambda c, x: (c + x, ()), 0, arr)
return out
@jit
def jit_cumsum(args, shape_env):
assert python_should_be_executing
return cumsum(args, shape_env)
python_should_be_executing = True
ans = jit_cumsum([np.array([5, 2, 9, 1, 4])], dict(n=3))
expected = 16
self.assertAllClose(ans, expected, check_dtypes=False)
python_should_be_executing = False
ans = jit_cumsum([np.array([5, 2, 9, 1, 4])], dict(n=4))
expected = 17
self.assertAllClose(ans, expected, check_dtypes=False)
python_should_be_executing = False
ans = jit_cumsum([np.array([5, 2, 9, 1, 4])], dict(n=1))
expected = 5
self.assertAllClose(ans, expected, check_dtypes=False)
def test_concatenate(self):
@partial(mask, in_shapes=['n', 'm', 'n'], out_shape='m + 2 * n')
def cat(x, y, z):
return lax.concatenate([x, y, z], 0)
ans = cat([np.array([1, 9]), np.array([2, 4, 9]), np.array([3, 9])],
dict(n=1, m=2))
expected = onp.array([1, 2, 4, 3])
self.assertAllClose(ans[:4], expected, check_dtypes=False)
def test_dot(self):
@partial(mask, in_shapes=['(m, k)', '(k, n)'], out_shape='(m, n)')
def dot(x, y):
return lax.dot(x, y)
x = onp.arange(6, dtype=onp.float32).reshape((2, 3))
y = onp.arange(12, dtype=onp.float32).reshape((3, 4))
ans = dot([x, y], dict(m=2, k=2, n=2))
expected = onp.dot(x[:2, :2], y[:2, :2])
self.assertAllClose(ans[:2, :2], expected, check_dtypes=False)
def test_mean(self):
@partial(mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
return np.sum(x) / shape_as_value(x.shape)[0]
ans = padded_sum([np.array([3, 1, 4, 1, 5])], dict(n=3))
expected = 8 / 3
self.assertAllClose(ans, expected, check_dtypes=False)
def test_monomorphic(self):
@partial(mask, in_shapes=['(_, n)'], out_shape='')
def padded_sum(x):
return np.sum(x)
ans = padded_sum([np.array([[3, 4], [5, 6]])], dict(n=1))
expected = 8
self.assertAllClose(ans, expected, check_dtypes=False)
def test_monomorphic2(self):
@partial(mask, in_shapes=['(_, n)'], out_shape='n')
def padded_sum(x):
return np.sum(x, axis=0)
ans = padded_sum([np.array([[3, 4], [5, 6]])], dict(n=2))
expected = np.array([8, 10])
self.assertAllClose(ans, expected, check_dtypes=False)
def test_monomorphic3(self):
@partial(mask, in_shapes=['(_, n)'], out_shape='_')
def padded_sum(x):
return np.sum(x, axis=1)
ans = padded_sum([np.array([[3, 4], [5, 6]])], dict(n=1))
expected = np.array([3, 5])
self.assertAllClose(ans, expected, check_dtypes=False)
def test_rnn(self):
n = 3
@partial(mask, in_shapes=['(_, _)', '(t, _)'], out_shape='_')
def rnn(W, xs):
def step(h, x):
new_h = np.dot(W, h) + np.dot(W, x)
return new_h, ()
predicted, _ = lax.scan(step, np.zeros(n), xs)
return predicted
rng = onp.random.RandomState(0)
W = np.eye(n)
xs = rng.randn(10, n).astype(np.float_)
ans = rnn([W, xs], dict(t=4))
expected = xs[:4].sum(0)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_rnn_grad(self):
n = 3
@partial(mask, in_shapes=['(_, _)', '(t, _)', '_'], out_shape='')
def rnn(W, xs, target):
def step(h, x):
new_h = np.tanh(np.dot(W, h) + np.dot(W, x))
return new_h, ()
predicted, _ = lax.scan(step, np.zeros(n), xs)
return np.sum((predicted - target)**2)
rng = onp.random.RandomState(0)
W = rng.randn(n, n).astype(np.float_)
xs = rng.randn(10, n).astype(np.float_)
y = rng.randn(n).astype(np.float_)
ans = grad(lambda W: rnn([W, xs, y], dict(t=4)))(W)
def rnn_reference(W, xs, target):
h = np.zeros(n)
for x in xs:
h = np.tanh(np.dot(W, h) + np.dot(W, x))
predicted = h
return np.sum((predicted - target)**2)
expected = grad(lambda W: rnn_reference(W, xs[:4], y))(W)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_ragged_batched_rnn(self):
n = 3
@partial(mask, in_shapes=('(_, _)', '(t, _)', '_'), out_shape='')
def rnn(W, xs, target):
def step(h, x):
new_h = np.tanh(np.dot(W, h) + np.dot(W, x))
return new_h, ()
predicted, _ = lax.scan(step, np.zeros(n), xs)
return np.sum((predicted - target)**2)
rng = onp.random.RandomState(0)
W = rng.randn(n, n).astype(np.float_)
seqs = rng.randn(3, 10, n).astype(np.float_)
ts = np.array([2, 5, 4])
ys = rng.randn(3, n)
ans = grad(lambda W: vmap(rnn, ((None, 0, 0), 0))((W, seqs, ys), dict(t=ts)).sum())(W)
def rnn_reference(W, seqs, targets):
total_loss = 0
for xs, target in zip(seqs, targets):
h = np.zeros(n)
for x in xs:
h = np.tanh(np.dot(W, h) + np.dot(W, x))
predicted = h
total_loss = total_loss + np.sum((predicted - target)**2)
return total_loss
seqs_ = [xs[:t] for xs, t in zip(seqs, ts)]
expected = grad(lambda W: rnn_reference(W, seqs_, ys).sum())(W)
self.assertAllClose(
ans, expected, check_dtypes=False,
rtol=2e-2 if jtu.device_under_test() == "tpu" else 1e-5)
def test_nesting(self):
raise SkipTest("not yet implemented")
@partial(mask, in_shapes=['n'], out_shape='')
def padded_sum(x):
return np.sum(x)
batched_sum = vmap(padded_sum)
@partial(mask, in_shapes=['(m, _)', 'm'], out_shape='')
def fun(x, ns):
return batched_sum([x], dict(n=ns)).sum()
x = np.array([[3, 1, 4, 1],
[5, 9, 2, 6],
[5, 3, 5, 8]])
ns = np.array([2, 3, 2])
ans = fun([x, ns], dict(m=2))
expected = 3+1 + 5+9+2
self.assertAllClose(ans, expected, check_dtypes=False)
def test_arange(self):
raise SkipTest("not yet implemented")
@partial(mask, in_shapes=['n'], out_shape='n')
def padded_add(x):
return x + lax.iota(x.shape[0])
ans = padded_add([np.array([3, 1, 4, 1, 5])], dict(n=3))
expected = onp.array([3, 2, 6])
self.assertAllClose(ans[:3], expected, check_dtypes=False)
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/masking_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
import unittest
from absl.testing import absltest, parameterized
import numpy as onp
import scipy.stats as osp_stats
from scipy.stats import random_correlation
from jax import test_util as jtu
from jax.scipy import stats as lsp_stats
from jax.scipy.special import expit
from jax.config import config
config.parse_flags_with_absl()
all_shapes = [(), (4,), (3, 4), (3, 1), (1, 4), (2, 1, 4)]
float_dtypes = [onp.float32, onp.float64]
CombosWithReplacement = itertools.combinations_with_replacement
def genNamedParametersNArgs(n, rng_factory):
return parameterized.named_parameters(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix("", shapes, dtypes),
"rng_factory": rng_factory, "shapes": shapes, "dtypes": dtypes}
for shapes in CombosWithReplacement(all_shapes, n)
for dtypes in CombosWithReplacement(float_dtypes, n)))
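# genNamedParametersNArgs(n, rng_factory) builds a parameterized decorator that
# covers every combination (with replacement) of n shapes and n float dtypes,
# so each distribution test below is exercised across broadcasting patterns of
# its n array arguments.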
class LaxBackedScipyStatsTests(jtu.JaxTestCase):
"""Tests for LAX-backed scipy.stats implementations"""
@genNamedParametersNArgs(3, jtu.rand_default)
def testPoissonLogPmf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.poisson.logpmf
lax_fun = lsp_stats.poisson.logpmf
def args_maker():
k, mu, loc = map(rng, shapes, dtypes)
k = onp.floor(k)
# clipping to ensure that rate parameter is strictly positive
mu = onp.clip(onp.abs(mu), a_min=0.1, a_max=None)
loc = onp.floor(loc)
return [k, mu, loc]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_default)
def testPoissonPmf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.poisson.pmf
lax_fun = lsp_stats.poisson.pmf
def args_maker():
k, mu, loc = map(rng, shapes, dtypes)
k = onp.floor(k)
# clipping to ensure that rate parameter is strictly positive
mu = onp.clip(onp.abs(mu), a_min=0.1, a_max=None)
loc = onp.floor(loc)
return [k, mu, loc]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_default)
def testBernoulliLogPmf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.bernoulli.logpmf
lax_fun = lsp_stats.bernoulli.logpmf
def args_maker():
x, logit, loc = map(rng, shapes, dtypes)
x = onp.floor(x)
p = expit(logit)
loc = onp.floor(loc)
return [x, p, loc]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(5, jtu.rand_positive)
def testBetaLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.beta.logpdf
lax_fun = lsp_stats.beta.logpdf
def args_maker():
x, a, b, loc, scale = map(rng, shapes, dtypes)
return [x, a, b, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True, rtol=1e-4)
@genNamedParametersNArgs(3, jtu.rand_default)
def testCauchyLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.cauchy.logpdf
lax_fun = lsp_stats.cauchy.logpdf
def args_maker():
x, loc, scale = map(rng, shapes, dtypes)
# clipping to ensure that scale is not too low
scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)
return [x, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(2, jtu.rand_positive)
def testDirichletLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.cauchy.logpdf
lax_fun = lsp_stats.cauchy.logpdf
dim = 4
shapes = (shapes[0] + (dim,), shapes[1] + (dim,))
def args_maker():
x, alpha = map(rng, shapes, dtypes)
x = x / onp.sum(x, axis=-1, keepdims=True)
return [x, alpha]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_positive)
def testExponLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.expon.logpdf
lax_fun = lsp_stats.expon.logpdf
def args_maker():
x, loc, scale = map(rng, shapes, dtypes)
return [x, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(4, jtu.rand_positive)
def testGammaLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.gamma.logpdf
lax_fun = lsp_stats.gamma.logpdf
def args_maker():
x, a, loc, scale = map(rng, shapes, dtypes)
return [x, a, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=5e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_positive)
def testLaplaceLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.laplace.logpdf
lax_fun = lsp_stats.laplace.logpdf
def args_maker():
x, loc, scale = map(rng, shapes, dtypes)
# clipping to ensure that scale is not too low
scale = onp.clip(scale, a_min=0.1, a_max=None)
return [x, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_default)
def testLaplaceCdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.laplace.cdf
lax_fun = lsp_stats.laplace.cdf
def args_maker():
x, loc, scale = map(rng, shapes, dtypes)
# ensure that scale is not too low
scale = onp.clip(scale, a_min=0.1, a_max=None)
return [x, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-6)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
# TODO: currently it ignores the argument "shapes" and only tests dim=4
@genNamedParametersNArgs(3, jtu.rand_default)
def testMultivariateNormalLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.multivariate_normal.logpdf
lax_fun = lsp_stats.multivariate_normal.logpdf
dim = 4
shapex = (dim,)
def args_maker():
x, mean, cov = map(rng, (shapex, shapex, (dim, dim)), dtypes)
cov = random_correlation.rvs(onp.arange(1, 1+dim) * 2 / (dim + 1))
return [x, mean, cov]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_default)
def testNormLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.norm.logpdf
lax_fun = lsp_stats.norm.logpdf
def args_maker():
x, loc, scale = map(rng, shapes, dtypes)
# clipping to ensure that scale is not too low
scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)
return [x, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_default)
def testNormLogCdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.norm.logcdf
lax_fun = lsp_stats.norm.logcdf
def args_maker():
x, loc, scale = map(rng, shapes, dtypes)
# clipping to ensure that scale is not too low
scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)
return [x, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_default)
def testNormCdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.norm.cdf
lax_fun = lsp_stats.norm.cdf
def args_maker():
x, loc, scale = map(rng, shapes, dtypes)
# clipping to ensure that scale is not too low
scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)
return [x, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-6)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_default)
def testNormPpf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.norm.ppf
lax_fun = lsp_stats.norm.ppf
def args_maker():
q, loc, scale = map(rng, shapes, dtypes)
# ensure probability is between 0 and 1:
q = onp.clip(onp.abs(q / 3), a_min=None, a_max=1)
# clipping to ensure that scale is not too low
scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)
return [q, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=True,
tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True, rtol=1e-5)
@genNamedParametersNArgs(4, jtu.rand_positive)
def testParetoLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.pareto.logpdf
lax_fun = lsp_stats.pareto.logpdf
def args_maker():
x, b, loc, scale = map(rng, shapes, dtypes)
return [x, b, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(4, jtu.rand_default)
def testTLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.t.logpdf
lax_fun = lsp_stats.t.logpdf
def args_maker():
x, df, loc, scale = map(rng, shapes, dtypes)
# clipping to ensure that scale is not too low
scale = onp.clip(onp.abs(scale), a_min=0.1, a_max=None)
return [x, df, loc, scale]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-3)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
@genNamedParametersNArgs(3, jtu.rand_default)
def testUniformLogPdf(self, rng_factory, shapes, dtypes):
rng = rng_factory()
scipy_fun = osp_stats.uniform.logpdf
lax_fun = lsp_stats.uniform.logpdf
def args_maker():
x, loc, scale = map(rng, shapes, dtypes)
return [x, loc, onp.abs(scale)]
self._CheckAgainstNumpy(scipy_fun, lax_fun, args_maker, check_dtypes=False,
tol=1e-4)
self._CompileAndCheck(lax_fun, args_maker, check_dtypes=True)
def testIssue972(self):
self.assertAllClose(
onp.ones((4,), onp.float32),
lsp_stats.norm.cdf(onp.full((4,), onp.inf, onp.float32)),
check_dtypes=False)
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/scipy_stats_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for nn module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
from jax import test_util as jtu
from jax.test_util import check_grads
from jax import nn
from jax import random
import jax
import jax.numpy as np
from jax.config import config
config.parse_flags_with_absl()
class NNFunctionsTest(jtu.JaxTestCase):
def testSoftplusGrad(self):
check_grads(nn.softplus, (1e-8,), 4,
rtol=1e-2 if jtu.device_under_test() == "tpu" else None)
def testSoftplusValue(self):
val = nn.softplus(89.)
self.assertAllClose(val, 89., check_dtypes=False)
def testEluGrad(self):
check_grads(nn.elu, (1e4,), 4, eps=1.)
def testEluValue(self):
val = nn.elu(1e4)
self.assertAllClose(val, 1e4, check_dtypes=False)
@jtu.skip_on_devices("gpu", "tpu")
def testEluMemory(self):
# see https://github.com/google/jax/pull/1640
jax.make_jaxpr(nn.elu)(np.ones((10 ** 12,))) # don't oom
@jtu.skip_on_devices("gpu", "tpu")
def testHardTanhMemory(self):
# see https://github.com/google/jax/pull/1640
jax.make_jaxpr(nn.hard_tanh)(np.ones((10 ** 12,))) # don't oom
InitializerRecord = collections.namedtuple(
"InitializerRecord",
["name", "initializer", "shapes"])
ALL_SHAPES = [(2,), (2, 2), (2, 3), (3, 2), (2, 3, 4), (4, 3, 2), (2, 3, 4, 5)]
def initializer_record(name, initializer, min_dims=2, max_dims=4):
shapes = [shape for shape in ALL_SHAPES
if min_dims <= len(shape) <= max_dims]
return InitializerRecord(name, initializer, shapes)
INITIALIZER_RECS = [
initializer_record("uniform", nn.initializers.uniform(), 1),
initializer_record("normal", nn.initializers.normal(), 1),
initializer_record("he_normal", nn.initializers.he_normal()),
initializer_record("he_uniform", nn.initializers.he_uniform()),
initializer_record("glorot_normal", nn.initializers.glorot_normal()),
initializer_record("glorot_uniform", nn.initializers.glorot_uniform()),
initializer_record("lecun_normal", nn.initializers.lecun_normal()),
initializer_record("lecun_uniform", nn.initializers.lecun_uniform()),
initializer_record("orthogonal", nn.initializers.orthogonal(), 2, 2),
initializer_record("orthogonal", nn.initializers.delta_orthogonal(), 4, 4)
]
class NNInitializersTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_{}_{}".format(
rec.name,
jtu.format_shape_dtype_string(shape, dtype)),
"initializer": rec.initializer,
"shape": shape, "dtype": dtype}
for rec in INITIALIZER_RECS
for shape in rec.shapes
for dtype in [onp.float32, onp.float64]))
def testInitializer(self, initializer, shape, dtype):
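# Smoke test: only checks that the initializer can be called with a PRNG key,
# shape, and dtype without raising; the returned value is not inspected.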
rng = random.PRNGKey(0)
val = initializer(rng, shape, dtype)
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/nn_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the LAPAX linear algebra module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import itertools
import unittest
import numpy as onp
import scipy as osp
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.lib
from jax import jit, grad, jvp, vmap
from jax import lax
from jax import lax_linalg
from jax import numpy as np
from jax import scipy as jsp
from jax import test_util as jtu
from jax.lib import xla_bridge
from jax.lib import lapack
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
T = lambda x: onp.swapaxes(x, -1, -2)
float_types = [onp.float32, onp.float64]
complex_types = [onp.complex64, onp.complex128]
def _skip_if_unsupported_type(dtype):
dtype = onp.dtype(dtype)
if (not FLAGS.jax_enable_x64 and
dtype in (onp.dtype('float64'), onp.dtype('complex128'))):
raise unittest.SkipTest("--jax_enable_x64 is not set")
class NumpyLinalgTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (1000, 0, 0)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testCholesky(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
def args_maker():
factor_shape = shape[:-1] + (2 * shape[-1],)
a = rng(factor_shape, dtype)
return [onp.matmul(a, np.conj(T(a)))]
if np.issubdtype(dtype, np.complexfloating) and (
len(shape) > 2 or jtu.device_under_test() != "cpu"):
self.skipTest("Unimplemented case for complex Cholesky decomposition.")
self._CheckAgainstNumpy(onp.linalg.cholesky, np.linalg.cholesky, args_maker,
check_dtypes=True, tol=1e-3)
self._CompileAndCheck(np.linalg.cholesky, args_maker, check_dtypes=True)
if np.finfo(dtype).bits == 64:
jtu.check_grads(np.linalg.cholesky, args_maker(), order=2)
def testCholeskyGradPrecision(self):
rng = jtu.rand_default()
a = rng((3, 3), onp.float32)
a = onp.dot(a, a.T)
jtu.assert_dot_precision(
lax.Precision.HIGHEST, partial(jvp, np.linalg.cholesky), (a,), (a,))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
"n": n, "dtype": dtype, "rng_factory": rng_factory}
for n in [0, 4, 5, 25] # TODO(mattjj): complex64 unstable on large sizes?
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testDet(self, n, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
args_maker = lambda: [rng((n, n), dtype)]
self._CheckAgainstNumpy(onp.linalg.det, np.linalg.det, args_maker,
check_dtypes=True, tol=1e-3)
self._CompileAndCheck(np.linalg.det, args_maker, check_dtypes=True,
rtol={onp.float64: 1e-13})
def testDetOfSingularMatrix(self):
x = np.array([[-1., 3./2], [2./3, -1.]], dtype=onp.float32)
self.assertAllClose(onp.float32(0), jsp.linalg.det(x), check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(0, 0), (1, 1), (3, 3), (4, 4), (10, 10), (200, 200),
(2, 2, 2), (2, 3, 3), (3, 2, 2)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("tpu")
def testSlogdet(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp.linalg.slogdet, np.linalg.slogdet, args_maker,
check_dtypes=True, tol=1e-3)
self._CompileAndCheck(np.linalg.slogdet, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(1, 1), (4, 4), (5, 5), (2, 7, 7)]
for dtype in float_types
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("tpu")
def testSlogdetGrad(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
a = rng(shape, dtype)
jtu.check_grads(np.linalg.slogdet, (a,), 2, atol=1e-1, rtol=1e-1)
def testIssue1213(self):
for n in range(5):
mat = np.array([onp.diag(onp.ones([5], dtype=onp.float32))*(-.01)] * 2)
args_maker = lambda: [mat]
self._CheckAgainstNumpy(onp.linalg.slogdet, np.linalg.slogdet, args_maker,
check_dtypes=True, tol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(0, 0), (4, 4), (5, 5), (50, 50), (2, 6, 6)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
# TODO(phawkins): enable when there is an eigendecomposition implementation
# for GPU/TPU.
@jtu.skip_on_devices("gpu", "tpu")
def testEig(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
n = shape[-1]
args_maker = lambda: [rng(shape, dtype)]
# Norm, adjusted for dimension and type.
def norm(x):
norm = onp.linalg.norm(x, axis=(-2, -1))
return norm / ((n + 1) * np.finfo(dtype).eps)
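# Dividing by (n + 1) * eps expresses the residual in units of the expected
# rounding error, which keeps the thresholds below independent of the matrix
# size and dtype.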
a, = args_maker()
w, v = np.linalg.eig(a)
self.assertTrue(onp.all(norm(onp.matmul(a, v) - w[..., None, :] * v) < 100))
self._CompileAndCheck(partial(np.linalg.eig), args_maker,
check_dtypes=True, rtol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(4, 4), (5, 5), (50, 50)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
# TODO: enable when there is an eigendecomposition implementation
# for GPU/TPU.
@jtu.skip_on_devices("gpu", "tpu")
def testEigvals(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
n = shape[-1]
args_maker = lambda: [rng(shape, dtype)]
a, = args_maker()
w1, _ = np.linalg.eig(a)
w2 = np.linalg.eigvals(a)
self.assertAllClose(w1, w2, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(1, 1), (4, 4), (5, 5)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("gpu", "tpu")
def testEigBatching(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
shape = (10,) + shape
args = rng(shape, dtype)
ws, vs = vmap(np.linalg.eig)(args)
self.assertTrue(onp.all(onp.linalg.norm(
onp.matmul(args, vs) - ws[..., None, :] * vs) < 1e-3))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_n={}_lower={}".format(
jtu.format_shape_dtype_string((n,n), dtype), lower),
"n": n, "dtype": dtype, "lower": lower, "rng_factory": rng_factory}
for n in [0, 4, 5, 50]
for dtype in float_types + complex_types
for lower in [False, True]
for rng_factory in [jtu.rand_default]))
def testEigh(self, n, dtype, lower, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
tol = 30
if jtu.device_under_test() == "tpu":
if np.issubdtype(dtype, onp.complexfloating):
raise unittest.SkipTest("No complex eigh on TPU")
# TODO(phawkins): this tolerance is unpleasantly high.
tol = 1500
args_maker = lambda: [rng((n, n), dtype)]
uplo = "L" if lower else "U"
# Norm, adjusted for dimension and type.
def norm(x):
norm = onp.linalg.norm(x, axis=(-2, -1))
return norm / ((n + 1) * np.finfo(dtype).eps)
a, = args_maker()
a = (a + onp.conj(a.T)) / 2
w, v = np.linalg.eigh(onp.tril(a) if lower else onp.triu(a),
UPLO=uplo, symmetrize_input=False)
self.assertTrue(norm(onp.eye(n) - onp.matmul(onp.conj(T(v)), v)) < 5)
self.assertTrue(norm(onp.matmul(a, v) - w * v) < tol)
self._CompileAndCheck(partial(np.linalg.eigh, UPLO=uplo), args_maker,
check_dtypes=True, rtol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(4, 4), (5, 5), (50, 50)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testEigvalsh(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
if jtu.device_under_test() == "tpu":
if np.issubdtype(dtype, np.complexfloating):
raise unittest.SkipTest("No complex eigh on TPU")
n = shape[-1]
def args_maker():
a = rng((n, n), dtype)
a = (a + onp.conj(a.T)) / 2
return [a]
self._CheckAgainstNumpy(onp.linalg.eigvalsh, np.linalg.eigvalsh, args_maker,
check_dtypes=True, tol=1e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_lower={}".format(jtu.format_shape_dtype_string(shape, dtype),
lower),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "lower":lower}
for shape in [(1, 1), (4, 4), (5, 5), (50, 50), (2, 10, 10)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]
for lower in [True, False]))
def testEighGrad(self, shape, dtype, rng_factory, lower):
rng = rng_factory()
self.skipTest("Test fails with numeric errors.")
uplo = "L" if lower else "U"
a = rng(shape, dtype)
a = (a + onp.conj(T(a))) / 2
ones = onp.ones((a.shape[-1], a.shape[-1]), dtype=dtype)
a *= onp.tril(ones) if lower else onp.triu(ones)
# Gradient checks will fail without symmetrization as the eigh jvp rule
# is only correct for tangents in the symmetric subspace, whereas the
# checker checks against unconstrained (co)tangents.
if dtype not in complex_types:
f = partial(np.linalg.eigh, UPLO=uplo, symmetrize_input=True)
else: # only check eigenvalue grads for complex matrices
f = lambda a: partial(np.linalg.eigh, UPLO=uplo, symmetrize_input=True)(a)[0]
jtu.check_grads(f, (a,), 2, rtol=1e-1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_lower={}".format(jtu.format_shape_dtype_string(shape, dtype),
lower),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "lower":lower, "eps":eps}
for shape in [(1, 1), (4, 4), (5, 5), (50, 50)]
for dtype in complex_types
for rng_factory in [jtu.rand_default]
for lower in [True, False]
for eps in [1e-4]))
# TODO(phawkins): enable when there is a complex eigendecomposition
# implementation for TPU.
@jtu.skip_on_devices("tpu")
def testEighGradVectorComplex(self, shape, dtype, rng_factory, lower, eps):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
# Special case to test for complex eigenvector grad correctness.
# Exact eigenvector coordinate gradients are hard to test numerically for complex
# eigensystem solvers given the extra degrees of per-eigenvector phase freedom.
# Instead, we numerically verify the eigensystem properties on the perturbed
# eigenvectors. You only ever want to optimize eigenvector directions, not coordinates!
uplo = "L" if lower else "U"
a = rng(shape, dtype)
a = (a + onp.conj(a.T)) / 2
a = onp.tril(a) if lower else onp.triu(a)
a_dot = eps * rng(shape, dtype)
a_dot = (a_dot + onp.conj(a_dot.T)) / 2
a_dot = onp.tril(a_dot) if lower else onp.triu(a_dot)
# evaluate eigenvector gradient and groundtruth eigensystem for perturbed input matrix
f = partial(np.linalg.eigh, UPLO=uplo)
(w, v), (dw, dv) = jvp(f, primals=(a,), tangents=(a_dot,))
new_a = a + a_dot
new_w, new_v = f(new_a)
new_a = (new_a + onp.conj(new_a.T)) / 2
# Assert that the eigenvalues recovered from the perturbed eigenvectors match
# the new true eigenvalues to within the relative tolerance RTOL.
RTOL=1e-2
assert onp.max(
onp.abs((onp.diag(onp.dot(onp.conj((v+dv).T), onp.dot(new_a,(v+dv)))) - new_w) / new_w)) < RTOL
# Redundant with the above, but also check the eigenvector residual against
# the new true eigenvalues to the same relative tolerance.
assert onp.max(
onp.linalg.norm(onp.abs(new_w*(v+dv) - onp.dot(new_a, (v+dv))), axis=0) /
onp.linalg.norm(onp.abs(new_w*(v+dv)), axis=0)
) < RTOL
def testEighGradPrecision(self):
rng = jtu.rand_default()
a = rng((3, 3), onp.float32)
jtu.assert_dot_precision(
lax.Precision.HIGHEST, partial(jvp, np.linalg.eigh), (a,), (a,))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(1, 1), (4, 4), (5, 5)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testEighBatching(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
if (jtu.device_under_test() == "tpu" and
np.issubdtype(dtype, onp.complexfloating)):
raise unittest.SkipTest("No complex eigh on TPU")
shape = (10,) + shape
args = rng(shape, dtype)
args = (args + onp.conj(T(args))) / 2
ws, vs = vmap(jsp.linalg.eigh)(args)
self.assertTrue(onp.all(onp.linalg.norm(
onp.matmul(args, vs) - ws[..., None, :] * vs) < 1e-3))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_ord={}_axis={}_keepdims={}".format(
jtu.format_shape_dtype_string(shape, dtype), ord, axis, keepdims),
"shape": shape, "dtype": dtype, "axis": axis, "keepdims": keepdims,
"ord": ord, "rng_factory": rng_factory}
for axis, shape in [
(None, (1,)), (None, (7,)), (None, (5, 8)),
(0, (9,)), (0, (4, 5)), ((1,), (10, 7, 3)), ((-2,), (4, 8)),
(-1, (6, 3)), ((0, 2), (3, 4, 5)), ((2, 0), (7, 8, 9)),
(None, (7, 8, 11))]
for keepdims in [False, True]
for ord in (
[None] if axis is None and len(shape) > 2
else [None, 0, 1, 2, 3, -1, -2, -3, np.inf, -np.inf]
if (axis is None and len(shape) == 1) or
isinstance(axis, int) or
(isinstance(axis, tuple) and len(axis) == 1)
else [None, 'fro', 1, 2, -1, -2, np.inf, -np.inf, 'nuc'])
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testNorm(self, shape, dtype, ord, axis, keepdims, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
if (ord in ('nuc', 2, -2) and (
jtu.device_under_test() != "cpu" or
(isinstance(axis, tuple) and len(axis) == 2))):
raise unittest.SkipTest("No adequate SVD implementation available")
args_maker = lambda: [rng(shape, dtype)]
onp_fn = partial(onp.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
np_fn = partial(np.linalg.norm, ord=ord, axis=axis, keepdims=keepdims)
self._CheckAgainstNumpy(onp_fn, np_fn, args_maker,
check_dtypes=False, tol=1e-3)
self._CompileAndCheck(np_fn, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_n={}_full_matrices={}_compute_uv={}".format(
jtu.format_shape_dtype_string(b + (m, n), dtype), full_matrices,
compute_uv),
"b": b, "m": m, "n": n, "dtype": dtype, "full_matrices": full_matrices,
"compute_uv": compute_uv, "rng_factory": rng_factory}
for b in [(), (3,), (2, 3)]
for m in [2, 7, 29, 53]
for n in [2, 7, 29, 53]
for dtype in float_types + complex_types
for full_matrices in [False, True]
for compute_uv in [False, True]
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("gpu", "tpu") # TODO(b/145608614): SVD crashes on GPU.
def testSVD(self, b, m, n, dtype, full_matrices, compute_uv, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
args_maker = lambda: [rng(b + (m, n), dtype)]
# Norm, adjusted for dimension and type.
def norm(x):
norm = onp.linalg.norm(x, axis=(-2, -1))
return norm / (max(m, n) * np.finfo(dtype).eps)
a, = args_maker()
out = np.linalg.svd(a, full_matrices=full_matrices, compute_uv=compute_uv)
if compute_uv:
# Check the reconstructed matrices
if full_matrices:
k = min(m, n)
if m < n:
self.assertTrue(onp.all(
norm(a - onp.matmul(out[1][..., None, :] * out[0], out[2][..., :k, :])) < 50))
else:
self.assertTrue(onp.all(
norm(a - onp.matmul(out[1][..., None, :] * out[0][..., :, :k], out[2])) < 350))
else:
self.assertTrue(onp.all(
norm(a - onp.matmul(out[1][..., None, :] * out[0], out[2])) < 300))
# Check the unitary properties of the singular vector matrices.
self.assertTrue(onp.all(norm(onp.eye(out[0].shape[-1]) - onp.matmul(onp.conj(T(out[0])), out[0])) < 10))
if m >= n:
self.assertTrue(onp.all(norm(onp.eye(out[2].shape[-1]) - onp.matmul(onp.conj(T(out[2])), out[2])) < 10))
else:
self.assertTrue(onp.all(norm(onp.eye(out[2].shape[-2]) - onp.matmul(out[2], onp.conj(T(out[2])))) < 20))
else:
self.assertTrue(onp.allclose(onp.linalg.svd(a, compute_uv=False), onp.asarray(out), atol=1e-4, rtol=1e-4))
self._CompileAndCheck(partial(np.linalg.svd, full_matrices=full_matrices, compute_uv=compute_uv),
args_maker, check_dtypes=True)
if not full_matrices:
svd = partial(np.linalg.svd, full_matrices=False)
jtu.check_jvp(svd, partial(jvp, svd), (a,), rtol=1e-2, atol=1e-1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_fullmatrices={}".format(
jtu.format_shape_dtype_string(shape, dtype), full_matrices),
"shape": shape, "dtype": dtype, "full_matrices": full_matrices,
"rng_factory": rng_factory}
for shape in [(1, 1), (3, 3), (3, 4), (2, 10, 5), (2, 200, 100)]
for dtype in float_types + complex_types
for full_matrices in [False, True]
for rng_factory in [jtu.rand_default]))
def testQr(self, shape, dtype, full_matrices, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
if (np.issubdtype(dtype, onp.complexfloating) and
jtu.device_under_test() == "tpu"):
raise unittest.SkipTest("No complex QR implementation")
m, n = shape[-2:]
if full_matrices:
mode, k = "complete", m
else:
mode, k = "reduced", min(m, n)
a = rng(shape, dtype)
lq, lr = np.linalg.qr(a, mode=mode)
# onp.linalg.qr doesn't support batch dimensions. But it seems like an
# inevitable extension so we support it in our version.
nq = onp.zeros(shape[:-2] + (m, k), dtype)
nr = onp.zeros(shape[:-2] + (k, n), dtype)
for index in onp.ndindex(*shape[:-2]):
nq[index], nr[index] = onp.linalg.qr(a[index], mode=mode)
max_rank = max(m, n)
# Norm, adjusted for dimension and type.
def norm(x):
n = onp.linalg.norm(x, axis=(-2, -1))
return n / (max_rank * np.finfo(dtype).eps)
def compare_orthogonal(q1, q2):
# Q is unique up to sign, so normalize the sign first.
sum_of_ratios = onp.sum(onp.divide(q1, q2), axis=-2, keepdims=True)
phases = onp.divide(sum_of_ratios, onp.abs(sum_of_ratios))
q1 *= phases
self.assertTrue(onp.all(norm(q1 - q2) < 30))
# Check a ~= qr
self.assertTrue(onp.all(norm(a - onp.matmul(lq, lr)) < 30))
# Compare the first 'k' vectors of Q; the remainder form an arbitrary
# orthonormal basis for the null space.
compare_orthogonal(nq[..., :k], lq[..., :k])
# Check that q is close to unitary.
self.assertTrue(onp.all(
norm(onp.eye(k) - onp.matmul(onp.conj(T(lq)), lq)) < 5))
if not full_matrices and m >= n:
jtu.check_jvp(np.linalg.qr, partial(jvp, np.linalg.qr), (a,), atol=3e-3)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype,
"rng_factory": rng_factory}
for shape in [(10, 4, 5), (5, 3, 3), (7, 6, 4)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testQrBatching(self, shape, dtype, rng_factory):
rng = rng_factory()
args = rng(shape, np.float32)
qs, rs = vmap(jsp.linalg.qr)(args)
self.assertTrue(onp.all(onp.linalg.norm(args - onp.matmul(qs, rs)) < 1e-3))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs={}_rhs={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype)),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"rng_factory": rng_factory}
for lhs_shape, rhs_shape in [
((1, 1), (1, 1)),
((4, 4), (4,)),
((8, 8), (8, 4)),
((1, 2, 2), (3, 2)),
((2, 1, 3, 3), (2, 4, 3, 4)),
]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testSolve(self, lhs_shape, rhs_shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
self._CheckAgainstNumpy(onp.linalg.solve, np.linalg.solve, args_maker,
check_dtypes=True, tol=1e-3)
self._CompileAndCheck(np.linalg.solve, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(1, 1), (4, 4), (2, 5, 5), (200, 200), (5, 5, 5)]
for dtype in float_types
for rng_factory in [jtu.rand_default]))
def testInv(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
if jtu.device_under_test() == "gpu" and shape == (200, 200):
raise unittest.SkipTest("Test is flaky on GPU")
def args_maker():
invertible = False
while not invertible:
a = rng(shape, dtype)
try:
onp.linalg.inv(a)
invertible = True
except onp.linalg.LinAlgError:
pass
return [a]
self._CheckAgainstNumpy(onp.linalg.inv, np.linalg.inv, args_maker,
check_dtypes=True, tol=1e-3)
self._CompileAndCheck(np.linalg.inv, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(1, 1), (4, 4), (2, 70, 7), (2000, 7), (7, 10000), (70, 7, 2)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("tpu") # SVD is not implemented on the TPU backend
def testPinv(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
args_maker = lambda: [rng(shape, dtype)]
self._CheckAgainstNumpy(onp.linalg.pinv, np.linalg.pinv, args_maker,
check_dtypes=True, tol=1e-3)
self._CompileAndCheck(np.linalg.pinv, args_maker, check_dtypes=True)
# Regression test for incorrect type for eigenvalues of a complex matrix.
@jtu.skip_on_devices("tpu") # TODO(phawkins): No complex eigh implementation on TPU.
def testIssue669(self):
def test(x):
val, vec = np.linalg.eigh(x)
return np.real(np.sum(val))
grad_test_jc = jit(grad(jit(test)))
xc = onp.eye(3, dtype=onp.complex128)
self.assertAllClose(xc, grad_test_jc(xc), check_dtypes=True)
def testIssue1151(self):
A = np.array(onp.random.randn(100, 3, 3), dtype=np.float32)
b = np.array(onp.random.randn(100, 3), dtype=np.float32)
x = np.linalg.solve(A, b)
self.assertAllClose(vmap(np.dot)(A, x), b, atol=1e-3, rtol=1e-2,
check_dtypes=True)
jac0 = jax.jacobian(np.linalg.solve, argnums=0)(A, b)
jac1 = jax.jacobian(np.linalg.solve, argnums=1)(A, b)
jac0 = jax.jacobian(np.linalg.solve, argnums=0)(A[0], b[0])
jac1 = jax.jacobian(np.linalg.solve, argnums=1)(A[0], b[0])
def testIssue1383(self):
seed = jax.random.PRNGKey(0)
tmp = jax.random.uniform(seed, (2,2))
a = np.dot(tmp, tmp.T)
def f(inp):
val, vec = np.linalg.eigh(inp)
return np.dot(np.dot(vec, inp), vec.T)
grad_func = jax.jacfwd(f)
hess_func = jax.jacfwd(grad_func)
cube_func = jax.jacfwd(hess_func)
self.assertFalse(onp.any(onp.isnan(cube_func(a))))
class ScipyLinalgTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(1, 1), (4, 5), (10, 5), (50, 50)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testLu(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
args_maker = lambda: [rng(shape, dtype)]
x, = args_maker()
p, l, u = jsp.linalg.lu(x)
self.assertAllClose(x, onp.matmul(p, onp.matmul(l, u)), check_dtypes=True,
rtol={onp.float32: 1e-4, onp.float64: 1e-12,
onp.complex64: 1e-4, onp.complex128: 1e-12})
self._CompileAndCheck(jsp.linalg.lu, args_maker, check_dtypes=True)
def testLuOfSingularMatrix(self):
x = np.array([[-1., 3./2], [2./3, -1.]], dtype=onp.float32)
p, l, u = jsp.linalg.lu(x)
self.assertAllClose(x, onp.matmul(p, onp.matmul(l, u)), check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(1, 1), (4, 5), (10, 5), (10, 10), (6, 7, 7)]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("tpu") # TODO(phawkins): precision problems on TPU.
def testLuGrad(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
a = rng(shape, dtype)
lu = vmap(jsp.linalg.lu) if len(shape) > 2 else jsp.linalg.lu
jtu.check_grads(lu, (a,), 2, atol=5e-2, rtol=1e-1)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}".format(jtu.format_shape_dtype_string(shape, dtype)),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory}
for shape in [(4, 5), (6, 5)]
for dtype in [np.float32]
for rng_factory in [jtu.rand_default]))
def testLuBatching(self, shape, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
args = [rng(shape, np.float32) for _ in range(10)]
expected = list(osp.linalg.lu(x) for x in args)
ps = onp.stack([out[0] for out in expected])
ls = onp.stack([out[1] for out in expected])
us = onp.stack([out[2] for out in expected])
actual_ps, actual_ls, actual_us = vmap(jsp.linalg.lu)(np.stack(args))
self.assertAllClose(ps, actual_ps, check_dtypes=True)
self.assertAllClose(ls, actual_ls, check_dtypes=True)
self.assertAllClose(us, actual_us, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_n={}".format(jtu.format_shape_dtype_string((n,n), dtype)),
"n": n, "dtype": dtype, "rng_factory": rng_factory}
for n in [1, 4, 5, 200]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testLuFactor(self, n, dtype, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
args_maker = lambda: [rng((n, n), dtype)]
x, = args_maker()
lu, piv = jsp.linalg.lu_factor(x)
l = onp.tril(lu, -1) + onp.eye(n, dtype=dtype)
u = onp.triu(lu)
for i in range(n):
x[[i, piv[i]],] = x[[piv[i], i],]
self.assertAllClose(x, onp.matmul(l, u), check_dtypes=True, rtol=1e-3,
atol=1e-3)
self._CompileAndCheck(jsp.linalg.lu_factor, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs={}_rhs={}_trans={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
trans),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"trans": trans, "rng_factory": rng_factory}
for lhs_shape, rhs_shape in [
((1, 1), (1, 1)),
((4, 4), (4,)),
((8, 8), (8, 4, 2)),
]
for trans in [0, 1, 2]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testLuSolve(self, lhs_shape, rhs_shape, dtype, trans, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
osp_fun = lambda lu, piv, rhs: osp.linalg.lu_solve((lu, piv), rhs, trans=trans)
jsp_fun = lambda lu, piv, rhs: jsp.linalg.lu_solve((lu, piv), rhs, trans=trans)
def args_maker():
a = rng(lhs_shape, dtype)
lu, piv = osp.linalg.lu_factor(a)
return [lu, piv, rng(rhs_shape, dtype)]
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker,
check_dtypes=True, tol=1e-3)
self._CompileAndCheck(jsp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs={}_rhs={}_sym_pos={}_lower={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
sym_pos, lower),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"sym_pos": sym_pos, "lower": lower, "rng_factory": rng_factory}
for lhs_shape, rhs_shape in [
((1, 1), (1, 1)),
((4, 4), (4,)),
((8, 8), (8, 4)),
]
for sym_pos, lower in [
(False, False),
(True, False),
(True, True),
]
for dtype in float_types + complex_types
for rng_factory in [jtu.rand_default]))
def testSolve(self, lhs_shape, rhs_shape, dtype, sym_pos, lower, rng_factory):
rng = rng_factory()
_skip_if_unsupported_type(dtype)
if (sym_pos and np.issubdtype(dtype, onp.complexfloating) and
jtu.device_under_test() == "tpu"):
raise unittest.SkipTest(
"Complex Cholesky decomposition not implemented on TPU")
osp_fun = lambda lhs, rhs: osp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower)
jsp_fun = lambda lhs, rhs: jsp.linalg.solve(lhs, rhs, sym_pos=sym_pos, lower=lower)
def args_maker():
a = rng(lhs_shape, dtype)
if sym_pos:
a = onp.matmul(a, onp.conj(T(a)))
a = onp.tril(a) if lower else onp.triu(a)
return [a, rng(rhs_shape, dtype)]
self._CheckAgainstNumpy(osp_fun, jsp_fun, args_maker,
check_dtypes=True, tol=1e-3)
self._CompileAndCheck(jsp_fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs={}_rhs={}_lower={}_transposea={}_unit_diagonal={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
lower, transpose_a, unit_diagonal),
"lower": lower, "transpose_a": transpose_a,
"unit_diagonal": unit_diagonal, "lhs_shape": lhs_shape,
"rhs_shape": rhs_shape, "dtype": dtype, "rng_factory": rng_factory}
for lower in [False, True]
for transpose_a in [False, True]
for unit_diagonal in [False, True]
for lhs_shape, rhs_shape in [
((4, 4), (4,)),
((4, 4), (4, 3)),
((2, 8, 8), (2, 8, 10)),
]
for dtype in float_types
for rng_factory in [jtu.rand_default]))
def testSolveTriangular(self, lower, transpose_a, unit_diagonal, lhs_shape,
rhs_shape, dtype, rng_factory):
_skip_if_unsupported_type(dtype)
rng = rng_factory()
k = rng(lhs_shape, dtype)
l = onp.linalg.cholesky(onp.matmul(k, T(k))
+ lhs_shape[-1] * onp.eye(lhs_shape[-1]))
l = l.astype(k.dtype)
b = rng(rhs_shape, dtype)
if unit_diagonal:
a = onp.tril(l, -1) + onp.eye(lhs_shape[-1], dtype=dtype)
else:
a = l
a = a if lower else T(a)
inv = onp.linalg.inv(T(a) if transpose_a else a).astype(a.dtype)
if len(lhs_shape) == len(rhs_shape):
onp_ans = onp.matmul(inv, b)
else:
onp_ans = onp.einsum("...ij,...j->...i", inv, b)
# The standard scipy.linalg.solve_triangular doesn't support broadcasting.
# But it seems like an inevitable extension so we support it.
ans = jsp.linalg.solve_triangular(
l if lower else T(l), b, trans=1 if transpose_a else 0, lower=lower,
unit_diagonal=unit_diagonal)
self.assertAllClose(onp_ans, ans, check_dtypes=True,
rtol={onp.float32: 1e-4, onp.float64: 1e-11})
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_A={}_B={}_lower={}_transposea={}_conja={}_unitdiag={}_leftside={}".format(
jtu.format_shape_dtype_string(a_shape, dtype),
jtu.format_shape_dtype_string(b_shape, dtype),
lower, transpose_a, conjugate_a, unit_diagonal, left_side),
"lower": lower, "transpose_a": transpose_a, "conjugate_a": conjugate_a,
"unit_diagonal": unit_diagonal, "left_side": left_side,
"a_shape": a_shape, "b_shape": b_shape, "dtype": dtype,
"rng_factory": rng_factory}
for lower in [False, True]
for unit_diagonal in [False, True]
for dtype in float_types + complex_types
for transpose_a in [False, True]
for conjugate_a in (
[False] if np.issubdtype(dtype, np.floating) else [False, True])
for left_side, a_shape, b_shape in [
(False, (4, 4), (1, 4,)),
(False, (3, 3), (4, 3)),
(True, (4, 4), (4, 1)),
(True, (4, 4), (4, 3)),
(True, (2, 8, 8), (2, 8, 10)),
]
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("tpu") # TODO(phawkins): Test fails on TPU.
def testTriangularSolveGrad(
self, lower, transpose_a, conjugate_a, unit_diagonal, left_side, a_shape,
b_shape, dtype, rng_factory):
_skip_if_unsupported_type(dtype)
rng = rng_factory()
# Test lax_linalg.triangular_solve instead of scipy.linalg.solve_triangular
# because it exposes more options.
A = np.tril(rng(a_shape, dtype) + 5 * onp.eye(a_shape[-1], dtype=dtype))
A = A if lower else T(A)
B = rng(b_shape, dtype)
f = partial(lax_linalg.triangular_solve, lower=lower,
transpose_a=transpose_a, conjugate_a=conjugate_a,
unit_diagonal=unit_diagonal, left_side=left_side)
jtu.check_grads(f, (A, B), 2, rtol=4e-2, eps=1e-3)
def testTriangularSolveGradPrecision(self):
rng = jtu.rand_default()
a = np.tril(rng((3, 3), onp.float32))
b = rng((1, 3), onp.float32)
jtu.assert_dot_precision(
lax.Precision.HIGHEST,
partial(jvp, lax_linalg.triangular_solve),
(a, b),
(a, b))
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/linalg_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import enum
from functools import partial
import itertools
import unittest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
from jax import api
from jax import lax
from jax import numpy as lnp
from jax import ops
from jax import test_util as jtu
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
# We disable the whitespace continuation check in this file because otherwise it
# makes the test name formatting unwieldy.
# pylint: disable=bad-continuation
float_dtypes = [onp.float32, onp.float64]
int_dtypes = [onp.int32, onp.int64]
bool_types = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
all_dtypes = float_dtypes + int_dtypes + bool_types
IndexSpec = collections.namedtuple("IndexTest", ["shape", "indexer"])
def check_grads(f, args, order, atol=None, rtol=None, eps=None):
# TODO(mattjj,dougalm): add higher-order check
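# Checks both forward-mode (jvp) and reverse-mode (vjp) derivatives against
# numerical differences, with looser default tolerances when x64 is disabled.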
default_tol = 1e-6 if FLAGS.jax_enable_x64 else 1e-2
atol = atol or default_tol
rtol = rtol or default_tol
eps = eps or default_tol
jtu.check_jvp(f, partial(api.jvp, f), args, atol, rtol, eps)
jtu.check_vjp(f, partial(api.vjp, f), args, atol, rtol, eps)
STATIC_INDEXING_TESTS = [
("OneIntIndex", [
IndexSpec(shape=(3,), indexer=1),
IndexSpec(shape=(3, 3), indexer=0),
IndexSpec(shape=(3, 4, 5), indexer=2),
IndexSpec(shape=(3,), indexer=-1),
IndexSpec(shape=(3,), indexer=-2),
]),
("TwoIntIndices", [
IndexSpec(shape=(3, 3), indexer=(2, 1)),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)),
]),
("ThreeIntIndices", [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),
("OneSliceIndex", [
IndexSpec(shape=(10,), indexer=slice(1, 3)),
IndexSpec(shape=(10,), indexer=slice(1, -1)),
IndexSpec(shape=(10,), indexer=slice(None, -1)),
IndexSpec(shape=(10,), indexer=slice(None, None, None)),
IndexSpec(shape=(10, 8), indexer=slice(1, 3)),
IndexSpec(shape=(10, 8), indexer=slice(1, None)),
IndexSpec(shape=(10, 8), indexer=slice(None, 3)),
IndexSpec(shape=(10, 8), indexer=slice(-3, None)),
]),
("OneSliceIndexNegativeStride", [
IndexSpec(shape=(10,), indexer=slice(3, 1, -1)),
IndexSpec(shape=(10,), indexer=slice(1, 8, -1)), # empty result
IndexSpec(shape=(10,), indexer=slice(None, 1, -2)),
IndexSpec(shape=(10,), indexer=slice(None, None, -1)),
IndexSpec(shape=(10, 8), indexer=slice(3, 1, -1)),
IndexSpec(shape=(10, 8), indexer=slice(0, 8, -1)), # empty result
IndexSpec(shape=(10, 8), indexer=slice(None, None, -1)),
]),
("OneSliceIndexNonUnitStride", [
IndexSpec(shape=(10,), indexer=slice(0, 8, 2)),
IndexSpec(shape=(10,), indexer=slice(0, 8, 3)),
IndexSpec(shape=(10,), indexer=slice(1, 3, 2)),
IndexSpec(shape=(10,), indexer=slice(1, None, 2)),
IndexSpec(shape=(10,), indexer=slice(None, 1, -2)),
IndexSpec(shape=(10, 8), indexer=slice(1, 8, 3)),
IndexSpec(shape=(10, 8), indexer=slice(None, None, 2)),
IndexSpec(shape=(10, 8), indexer=slice(None, 1, -2)),
IndexSpec(shape=(10, 8), indexer=slice(None, None, -2)),
]),
("TwoSliceIndices", [
IndexSpec(shape=(10, 8), indexer=(slice(1, 3), slice(0, 2))),
IndexSpec(shape=(10, 8), indexer=(slice(1, None), slice(None, 2))),
IndexSpec(
shape=(10, 8), indexer=(slice(None, None, -1), slice(None, 2))),
IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, 2))),
IndexSpec(shape=(10, 8, 3), indexer=(slice(1, 3), slice(0, None))),
IndexSpec(shape=(10, 8, 3), indexer=(slice(1, None), slice(0, 2))),
]),
("OneColonIndex", [
IndexSpec(shape=(3,), indexer=slice(None)),
IndexSpec(shape=(3, 4), indexer=slice(None)),
]),
("MultipleColonIndices", [
IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None))),
IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None))),
]),
("MixedSliceIndices", [
IndexSpec(shape=(10, 4), indexer=(slice(None), slice(0, 2))),
IndexSpec(shape=(10, 4), indexer=(1, slice(None))),
]),
("EllipsisIndex", [
IndexSpec(shape=(3,), indexer=Ellipsis),
IndexSpec(shape=(3, 4), indexer=Ellipsis),
IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis)),
IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3)),
]),
("NoneIndex", [
IndexSpec(shape=(), indexer=None),
IndexSpec(shape=(), indexer=(None, None)),
IndexSpec(shape=(), indexer=(Ellipsis, None)),
IndexSpec(shape=(3,), indexer=None),
IndexSpec(shape=(3, 4), indexer=None),
IndexSpec(shape=(3, 4), indexer=(Ellipsis, None)),
IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis)),
IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis)),
]),
("EmptyIndex", [
IndexSpec(shape=(), indexer=()),
IndexSpec(shape=(3,), indexer=()),
IndexSpec(shape=(3, 4), indexer=()),
]),
]
STATIC_INDEXING_GRAD_TESTS = [
("OneIntIndex", [
IndexSpec(shape=(3,), indexer=1),
IndexSpec(shape=(3, 3), indexer=0),
IndexSpec(shape=(3, 4, 5), indexer=2),
IndexSpec(shape=(3,), indexer=-1),
IndexSpec(shape=(3,), indexer=-2),
]),
("TwoIntIndices", [
IndexSpec(shape=(3, 3), indexer=(2, 1)),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)),
]),
("ThreeIntIndices", [IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),
("OneSliceIndex", [
IndexSpec(shape=(5,), indexer=slice(1, 3)),
IndexSpec(shape=(5,), indexer=slice(1, -1)),
IndexSpec(shape=(5,), indexer=slice(None, -1)),
IndexSpec(shape=(5,), indexer=slice(None, None, None)),
IndexSpec(shape=(5, 4), indexer=slice(1, 3)),
IndexSpec(shape=(5, 4), indexer=slice(1, None)),
IndexSpec(shape=(5, 4), indexer=slice(None, 3)),
IndexSpec(shape=(5, 4), indexer=slice(-3, None)),
]),
("TwoSliceIndices", [
IndexSpec(shape=(5, 4), indexer=(slice(1, 3), slice(0, 2))),
IndexSpec(shape=(5, 4), indexer=(slice(1, None), slice(None, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, None))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, None), slice(0, 2))),
]),
("OneColonIndex", [
IndexSpec(shape=(3,), indexer=slice(None)),
IndexSpec(shape=(3, 4), indexer=slice(None)),
]),
("MultipleColonIndices", [
IndexSpec(shape=(3, 4), indexer=(slice(None), slice(None))),
IndexSpec(shape=(3, 4, 5), indexer=(slice(None), slice(None))),
]),
("MixedSliceIndices", [
IndexSpec(shape=(5, 4), indexer=(slice(None), slice(0, 2))),
IndexSpec(shape=(5, 4), indexer=(1, slice(None))),
]),
("EllipsisIndex", [
IndexSpec(shape=(3,), indexer=Ellipsis),
IndexSpec(shape=(3, 4), indexer=Ellipsis),
IndexSpec(shape=(3, 4, 5), indexer=(0, Ellipsis)),
IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis, 2, 3)),
]),
("NoneIndex", [
IndexSpec(shape=(), indexer=None),
IndexSpec(shape=(), indexer=(None, None)),
IndexSpec(shape=(), indexer=(Ellipsis, None)),
IndexSpec(shape=(3,), indexer=None),
IndexSpec(shape=(3, 4), indexer=None),
IndexSpec(shape=(3, 4), indexer=(Ellipsis, None)),
IndexSpec(shape=(3, 4), indexer=(0, None, Ellipsis)),
IndexSpec(shape=(3, 4, 5), indexer=(1, None, Ellipsis)),
]),
# TODO(mattjj): these fail for uninteresting dtype reasons
# ("EmptyIndex",
# [IndexSpec(shape=(), indexer=()),
# IndexSpec(shape=(3,), indexer=()),
# IndexSpec(shape=(3, 4), indexer=()),
# ]),
]
ADVANCED_INDEXING_TESTS = [
("One1DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=onp.array([0, 1])),
IndexSpec(shape=(3, 3), indexer=onp.array([1, 2, 1])),
IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 0, 1])),
IndexSpec(shape=(3,), indexer=onp.array([-1, 1])),
IndexSpec(shape=(3,), indexer=onp.array([-2, -1])),
IndexSpec(shape=(0,), indexer=onp.array([], dtype=onp.int32)),
]),
("One2DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=onp.array([[0, 0]])),
IndexSpec(shape=(3, 3), indexer=onp.array([[1, 2, 1],
[0, 1, -1]])),
IndexSpec(shape=(3, 4, 5), indexer=onp.array([[0, 2, 0, 1],
[-1, -2, 1, 0]])),
]),
("Two1DIntArrayIndicesNoBroadcasting",
[IndexSpec(shape=(3, 3), indexer=[onp.array([0, 1]),
onp.array([1, 2])]),
IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2, 0, 1]),
onp.array([-1, 0, -1, 2])]),
]),
("Two1DIntArrayIndicesWithBroadcasting",
[IndexSpec(shape=(3, 3), indexer=[onp.array([[0, 1]]),
onp.array([1, 2])]),
IndexSpec(shape=(3, 4, 5), indexer=[onp.array([[0, 2, 0, 1]]),
onp.array([-1, 0, -1, 2])]),
]),
("ListOfPythonInts",
[IndexSpec(shape=(3,), indexer=[0, 1, 0]),
IndexSpec(shape=(3, 4, 5), indexer=[0, -1]),
]),
("ListOfListsOfPythonInts",
[IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]),
IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0, 3]]]),
]),
("TupleOfListsOfPythonInts",
[IndexSpec(shape=(3, 4, 5), indexer=([0, 1])),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0, 3]])),
]),
("ListOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=[0, onp.array([0, 1])]),
IndexSpec(shape=(3, 4, 5), indexer=[0, 1,
onp.array([[2, 3, 0, 3]])]),
]),
("ListOfListsOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=[[0, 1], onp.array([0])]),
IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]],
onp.array([[2, 3, 0, 3]])]),
]),
]
ADVANCED_INDEXING_TESTS_NO_REPEATS = [
("One1DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=onp.array([0, 1])),
IndexSpec(shape=(3, 3), indexer=onp.array([1, 2, 0])),
IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 1])),
IndexSpec(shape=(3,), indexer=onp.array([-1, 1])),
IndexSpec(shape=(3,), indexer=onp.array([-2, -1])),
IndexSpec(shape=(0,), indexer=onp.array([], dtype=onp.int32)),
]),
("One2DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=onp.array([[0, 1]])),
IndexSpec(shape=(6, 6), indexer=onp.array([[1, 2, 0],
[3, 4, -1]])),
]),
("Two1DIntArrayIndicesNoBroadcasting",
[IndexSpec(shape=(3, 3), indexer=[onp.array([0, 1]),
onp.array([1, 2])]),
IndexSpec(shape=(4, 5, 6), indexer=[onp.array([0, 2, 1, 3]),
onp.array([-1, 0, -2, 1])]),
]),
("Two1DIntArrayIndicesWithBroadcasting",
[IndexSpec(shape=(3, 3), indexer=[onp.array([[0, 1]]),
onp.array([1, 2])]),
IndexSpec(shape=(4, 5, 6), indexer=[onp.array([[0, 2, -1, 1]]),
onp.array([-1, 0, -2, 2])]),
]),
("ListOfPythonInts",
[IndexSpec(shape=(3,), indexer=[0, 2, 1]),
IndexSpec(shape=(3, 4, 5), indexer=[0, -1]),
]),
("ListOfListsOfPythonInts",
[IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]),
IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0]]]),
]),
("TupleOfListsOfPythonInts",
[IndexSpec(shape=(3, 4, 5), indexer=([0, 1])),
IndexSpec(shape=(3, 4, 5), indexer=([[0], [-1]], [[2, 3, 0]])),
]),
("ListOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=[0, onp.array([0, 1])]),
IndexSpec(shape=(3, 4, 5), indexer=[0, 1,
onp.array([[2, 3, 0]])]),
]),
("ListOfListsOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=[[0, 1], onp.array([0])]),
IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]],
onp.array([[2, 3, 0]])]),
]),
]
MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS = [
("SlicesAndOneIntArrayIndex",
[IndexSpec(shape=(2, 3), indexer=(onp.array([0, 1]), slice(1, 2))),
IndexSpec(shape=(2, 3), indexer=(slice(0, 2),
onp.array([0, 2]))),
IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,
onp.array([0, 2]),
slice(None))),
IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,
onp.array([[0, 2], [1, 3]]),
slice(None))),
]),
("SlicesAndTwoIntArrayIndices",
[IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,
onp.array([0, 2]),
onp.array([-1, 2]))),
IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),
Ellipsis,
onp.array([-1, 2]))),
IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),
onp.array([-1, 2]),
Ellipsis)),
IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),
onp.array([-1, 2]),
slice(1, 3))),
IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),
slice(1, 3),
onp.array([-1, 2]))),
IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2, -2]),
slice(None, None, 2),
onp.array([-1, 2, 1]))),
]),
("NonesAndIntArrayIndices",
[IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2]),
None,
onp.array([-1, 2])]),
IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2]),
None,
None,
onp.array([-1, 2]))),
IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,
onp.array([0, 2]),
None,
None,
onp.array([-1, 2]))),
]),
("IntArrayWithInt32Type",
[IndexSpec(shape=(3, 4), indexer=(Ellipsis, onp.array(1, dtype=onp.int32)))
]),
]
MIXED_ADVANCED_INDEXING_TESTS = MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS + [
("SlicesAndOneIntArrayIndex",
[
IndexSpec(shape=(3, 4, 5), indexer=(Ellipsis,
onp.array([[0, 2], [1, 1]]),
slice(None))),
]),
("SlicesAndTwoIntArrayIndices",
[IndexSpec(shape=(3, 4, 5), indexer=(onp.array([0, 2, -2]),
slice(None, None, 2),
onp.array([-1, 2, -1]))),
IndexSpec(shape=(3, 4, 5), indexer=(onp.array([[0, 2], [2, 0]]),
Ellipsis,
onp.array([[1, 0], [1, 0]]))),
]),]
class IndexingTest(jtu.JaxTestCase):
"""Tests for Numpy indexing translation rules."""
@parameterized.named_parameters(jtu.cases_from_list({
"testcase_name": "{}_inshape={}_indexer={}".format(
name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer
} for name, index_specs in STATIC_INDEXING_TESTS
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default]))
def testStaticIndexing(self, shape, dtype, rng_factory, indexer):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
fun = lambda x: x[indexer]
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters({
"testcase_name":
"{}_inshape={}_indexer={}".format(name,
jtu.format_shape_dtype_string(
shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer
} for name, index_specs in STATIC_INDEXING_GRAD_TESTS
for shape, indexer in index_specs
for dtype in float_dtypes
for rng_factory in [jtu.rand_default])
def testStaticIndexingGrads(self, shape, dtype, rng_factory, indexer):
rng = rng_factory()
tol = 1e-2 if lnp.finfo(dtype).bits == 32 else None
arg = rng(shape, dtype)
fun = lambda x: x[indexer]**2
check_grads(fun, (arg,), 2, tol, tol, tol)
def _ReplaceSlicesWithTuples(self, idx):
"""Helper method to replace slices with tuples for dynamic indexing args."""
if isinstance(idx, slice):
triple = idx.start, idx.stop, idx.step
isnone = [i for i, elt in enumerate(triple) if elt is None]
zeros = itertools.repeat(0)
nones = itertools.repeat(None)
out = lax.subvals(triple, zip(isnone, zeros))
return out, lambda out: slice(*lax.subvals(out, zip(isnone, nones)))
elif isinstance(idx, (tuple, list)) and idx:
t = type(idx)
elts, packs = zip(*map(self._ReplaceSlicesWithTuples, idx))
return elts, lambda elts: t((pack(i) for pack, i in zip(packs, elts)))
else:
return idx, lambda x: x
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer}
for name, index_specs in [
("OneSliceIndex",
[IndexSpec(shape=(5,), indexer=slice(1, 3)),
IndexSpec(shape=(5, 4), indexer=slice(1, 3))]),
("TwoSliceIndices",
[IndexSpec(shape=(5, 4), indexer=(slice(1, 3), slice(0, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, 2)))]),
("NonUnitStrides", [
IndexSpec(shape=(3,), indexer=slice(None, None, -1)),
IndexSpec(shape=(3, 3), indexer=slice(0, 3, -2)),
IndexSpec(shape=(3, 4, 5), indexer=slice(0, 4, 2))
]),
("OnlyStartOrStopDynamic", [
IndexSpec(shape=(5, 4), indexer=(slice(None, 3), slice(0, 2))),
IndexSpec(shape=(5, 4, 3), indexer=(slice(1, 3), slice(0, None)))
]),
]
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default])
def testDynamicIndexingWithSlicesErrors(self, shape, dtype, rng_factory, indexer):
rng = rng_factory()
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
@api.jit
def fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return x[indexer]
args_maker = lambda: [rng(shape, dtype), unpacked_indexer]
self.assertRaises(IndexError, lambda: fun(*args_maker()))
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer}
for name, index_specs in [
("OneIntIndex",
[IndexSpec(shape=(3,), indexer=1),
IndexSpec(shape=(3, 3), indexer=0),
IndexSpec(shape=(3, 4, 5), indexer=2),
IndexSpec(shape=(3,), indexer=-1),
IndexSpec(shape=(3,), indexer=-2)]),
("TwoIntIndices",
[IndexSpec(shape=(3, 3), indexer=(2, 1)),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2))]),
("ThreeIntIndices",
[IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),
]
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default])
def testDynamicIndexingWithIntegers(self, shape, dtype, rng_factory, indexer):
rng = rng_factory()
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
def fun(x, unpacked_indexer):
indexer = pack_indexer(unpacked_indexer)
return x[indexer]
args_maker = lambda: [rng(shape, dtype), unpacked_indexer]
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer}
for name, index_specs in [
("OneIntIndex",
[IndexSpec(shape=(3,), indexer=1),
IndexSpec(shape=(3, 3), indexer=0),
IndexSpec(shape=(3, 4, 5), indexer=2),
IndexSpec(shape=(3,), indexer=-1),
IndexSpec(shape=(3,), indexer=-2),
]),
("TwoIntIndices",
[IndexSpec(shape=(3, 3), indexer=(2, 1)),
IndexSpec(shape=(3, 4, 5), indexer=(1, 2)),
IndexSpec(shape=(3, 4, 5), indexer=(-1, 2)),
]),
("ThreeIntIndices",
[IndexSpec((3, 4, 5), indexer=(1, 2, 3))]),
]
for shape, indexer in index_specs
for dtype in float_dtypes
for rng_factory in [jtu.rand_default])
def testDynamicIndexingWithIntegersGrads(self, shape, dtype, rng_factory, indexer):
rng = rng_factory()
tol = 1e-2 if lnp.finfo(dtype).bits == 32 else None
unpacked_indexer, pack_indexer = self._ReplaceSlicesWithTuples(indexer)
@api.jit
def fun(unpacked_indexer, x):
indexer = pack_indexer(unpacked_indexer)
return x[indexer]
arr = rng(shape, dtype)
check_grads(partial(fun, unpacked_indexer), (arr,), 2, tol, tol, tol)
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer}
for name, index_specs in ADVANCED_INDEXING_TESTS
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default])
def testAdvancedIntegerIndexing(self, shape, dtype, rng_factory, indexer):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), indexer]
fun = lambda x, idx: x[idx]
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer}
for name, index_specs in [
("One1DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=onp.array([0, 1])),
IndexSpec(shape=(3, 3), indexer=onp.array([1, 2, 1])),
IndexSpec(shape=(3, 4, 5), indexer=onp.array([0, 2, 0, 1])),
IndexSpec(shape=(3,), indexer=onp.array([-1, 1])),
IndexSpec(shape=(3,), indexer=onp.array([-2, -1])),
]),
("One2DIntArrayIndex",
[IndexSpec(shape=(3,), indexer=onp.array([[0, 0]])),
IndexSpec(shape=(3, 3), indexer=onp.array([[1, 2, 1],
[0, 1, -1]])),
IndexSpec(shape=(3, 4, 5), indexer=onp.array([[0, 2, 0, 1],
[-1, -2, 1, 0]])),
]),
("Two1DIntArrayIndicesNoBroadcasting",
[IndexSpec(shape=(3, 3), indexer=[onp.array([0, 1]),
onp.array([1, 2])]),
IndexSpec(shape=(3, 4, 5), indexer=[onp.array([0, 2, 0, 1]),
onp.array([-1, 0, -1, 2])]),
]),
("Two1DIntArrayIndicesWithBroadcasting",
[IndexSpec(shape=(3, 3), indexer=[onp.array([[0, 1]]),
onp.array([1, 2])]),
IndexSpec(shape=(3, 4, 5), indexer=[onp.array([[0, 2, 0, 1]]),
onp.array([-1, 0, -1, 2])]),
]),
("ListOfPythonInts",
[IndexSpec(shape=(3,), indexer=[0, 1, 0]),
IndexSpec(shape=(3, 4, 5), indexer=[0, -1]),
]),
("ListOfListsOfPythonInts",
[IndexSpec(shape=(3, 4, 5), indexer=[[0, 1]]),
IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]], [[2, 3, 0, 3]]]),
]),
("ListOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=[0, onp.array([0, 1])]),
IndexSpec(shape=(3, 4, 5), indexer=[0, 1,
onp.array([[2, 3, 0, 3]])]),
]),
("ListOfListsOfPythonIntsAndIntArrays",
[IndexSpec(shape=(3, 4, 5), indexer=[[0, 1], onp.array([0])]),
IndexSpec(shape=(3, 4, 5), indexer=[[[0], [-1]],
onp.array([[2, 3, 0, 3]])]),
]),
]
for shape, indexer in index_specs
for dtype in float_dtypes
for rng_factory in [jtu.rand_default])
def testAdvancedIntegerIndexingGrads(self, shape, dtype, rng_factory, indexer):
rng = rng_factory()
tol = 1e-2 if lnp.finfo(dtype).bits == 32 else None
arg = rng(shape, dtype)
fun = lambda x: x[indexer]**2
check_grads(fun, (arg,), 2, tol, tol, tol)
@parameterized.named_parameters(
{"testcase_name": "{}_inshape={}_indexer={}"
.format(name, jtu.format_shape_dtype_string(shape, dtype), indexer),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer}
for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS
for shape, indexer in index_specs
for dtype in all_dtypes
for rng_factory in [jtu.rand_default])
def testMixedAdvancedIntegerIndexing(self, shape, dtype, rng_factory, indexer):
rng = rng_factory()
indexer_with_dummies = [e if isinstance(e, onp.ndarray) else ()
for e in indexer]
substitutes = [(i, e) for i, e in enumerate(indexer)
if not isinstance(e, onp.ndarray)]
args_maker = lambda: [rng(shape, dtype), indexer_with_dummies]
def fun(x, indexer_with_dummies):
idx = type(indexer)(lax.subvals(indexer_with_dummies, substitutes))
return x[idx]
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
def testAdvancedIndexingManually(self):
x = onp.random.RandomState(0).randn(3, 4, 5)
index_array = onp.array([0, 2, -1, 0])
op = lambda x, index_array: x[..., index_array, :]
cop = api.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2, check_dtypes=True)
op = lambda x, index_array: x[..., index_array, :, index_array, None]
cop = api.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2, check_dtypes=True)
op = lambda x, index_array: x[index_array, ..., index_array[:, None], None]
cop = api.jit(op)
a1 = op(x, index_array)
a2 = cop(x, index_array)
self.assertAllClose(a1, a2, check_dtypes=True)
def testUnpacking(self):
def foo(x):
a, b, c = x
return a + b + c
cfoo = api.jit(foo)
a1 = foo(onp.arange(3))
a2 = cfoo(onp.arange(3))
self.assertAllClose(a1, a2, check_dtypes=True)
def testBooleanIndexingArray1D(self):
idx = onp.array([True, True, False])
x = api.device_put(onp.arange(3))
ans = x[idx]
expected = onp.arange(3)[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingList1D(self):
idx = [True, True, False]
x = api.device_put(onp.arange(3))
ans = x[idx]
expected = onp.arange(3)[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingArray2DBroadcast(self):
idx = onp.array([True, True, False, True])
x = onp.arange(8).reshape(4, 2)
ans = api.device_put(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingList2DBroadcast(self):
idx = [True, True, False, True]
x = onp.arange(8).reshape(4, 2)
ans = api.device_put(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingArray2D(self):
idx = onp.array([[True, False],
[False, True],
[False, False],
[True, True]])
x = onp.arange(8).reshape(4, 2)
ans = api.device_put(x)[idx]
expected = x[idx]
self.assertAllClose(ans, expected, check_dtypes=False)
def testBooleanIndexingDynamicShapeError(self):
x = onp.zeros(3)
i = onp.array([True, True, False])
self.assertRaises(IndexError, lambda: api.jit(lambda x, i: x[i])(x, i))
def testIssue187(self):
x = lnp.ones((5, 5))
x[[0, 2, 4], [0, 2, 4]] # doesn't crash
x = onp.arange(25).reshape((5, 5))
ans = api.jit(lambda x: x[[0, 2, 4], [0, 2, 4]])(x)
expected = x[[0, 2, 4], [0, 2, 4]]
self.assertAllClose(ans, expected, check_dtypes=False)
def testJVPOfGradOfIndexing(self):
# Should return a value, even though we didn't pass a symbolic zero as the
# index tangent.
x = lnp.ones((3, 4), lnp.float32)
i = lnp.ones((3,), lnp.int32)
f = lambda x, i: lnp.sum(x[i])
primals, tangents = api.jvp(api.grad(f), (x, i), (x, onp.zeros_like(i)))
expected = onp.broadcast_to(
onp.array([0, 3, 0], dtype=onp.float32)[:, None], (3, 4))
self.assertAllClose(expected, primals, check_dtypes=True)
self.assertAllClose(onp.zeros_like(x), tangents, check_dtypes=True)
def testTrivialGatherIsntGenerated(self):
# https://github.com/google/jax/issues/1621
jaxpr = api.make_jaxpr(lambda x: x[:, None])(onp.arange(4))
self.assertEqual(len(jaxpr.jaxpr.eqns), 1)
self.assertNotIn('gather', str(jaxpr))
def testBooleanIndexingWithEmptyResult(self):
# based on a TensorFlow Probability test that started failing after #1622
x = lnp.array([-1])
mask = lnp.array([False])
ans = x[mask] # doesn't crash
expected = onp.array([-1])[onp.array([False])]
self.assertAllClose(ans, expected, check_dtypes=False)
def testFloatIndexingError(self):
x = lnp.array([1, 2, 3])
self.assertRaises(TypeError, lambda: x[3.5])
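# Illustrative sketch (not part of the test suite): boolean masks produce result
# shapes that depend on runtime values, which is why the eager lookups above
# succeed while the jitted version in testBooleanIndexingDynamicShapeError
# raises IndexError. Assumes the `lnp`, `onp`, and `api` aliases defined at the
# top of this file.
def _demo_boolean_indexing_is_shape_dynamic():
  x = lnp.arange(3)
  mask = onp.array([True, False, True])
  eager = x[mask]                        # fine: mask is concrete, result has shape (2,)
  try:
    api.jit(lambda x, m: x[m])(x, mask)  # mask becomes a tracer, output shape is unknown
  except IndexError:
    pass                                 # mirrors testBooleanIndexingDynamicShapeError
  return eager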
def _broadcastable_shapes(shape):
"""Returns all shapes that broadcast to `shape`."""
def f(rshape):
yield []
if rshape:
for s in f(rshape[1:]):
yield rshape[0:1] + s
if rshape[0] != 1:
for s in f(rshape[1:]):
yield [1] + s
for x in f(list(reversed(shape))):
yield list(reversed(x))
def _update_shape(shape, indexer):
return onp.zeros(shape)[indexer].shape
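# Illustrative sketch (not executed by the tests): for a target shape such as
# (2, 3), `_broadcastable_shapes` yields every shape NumPy broadcasting would
# accept against it, e.g. (), (3,), (1, 3), (2, 1), (1, 1), (1,) and (2, 3)
# itself, while `_update_shape` simply reports the shape of x[indexer].
def _demo_broadcastable_shapes():
  shapes = {tuple(s) for s in _broadcastable_shapes((2, 3))}
  assert (2, 3) in shapes and (1, 3) in shapes and () in shapes
  assert _update_shape((3, 4, 5), onp.index_exp[1:3]) == (2, 4, 5)
  return shapes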
class UpdateOps(enum.Enum):
UPDATE = 0
ADD = 1
MIN = 2
MAX = 3
def onp_fn(op, indexer, x, y):
x = x.copy()
x[indexer] = {
UpdateOps.UPDATE: lambda: y,
UpdateOps.ADD: lambda: x[indexer] + y,
UpdateOps.MIN: lambda: onp.minimum(x[indexer], y),
UpdateOps.MAX: lambda: onp.maximum(x[indexer], y),
}[op]()
return x
def jax_fn(op, indexer, x, y):
return {
UpdateOps.UPDATE: ops.index_update,
UpdateOps.ADD: ops.index_add,
UpdateOps.MIN: ops.index_min,
UpdateOps.MAX: ops.index_max,
}[op](x, indexer, y)
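# Illustrative sketch of the equivalence the tests below rely on: for a given
# op, UpdateOps.onp_fn applies the update in place on a NumPy copy, while
# UpdateOps.jax_fn routes it through the functional jax.ops indexed-update
# primitives; away from repeated indices the two should agree. Example only;
# not invoked by the test cases.
def _demo_update_ops_agree():
  x = onp.arange(6.0).reshape(2, 3)
  y = onp.ones((2,))
  idx = onp.index_exp[:, 1]
  ref = UpdateOps.onp_fn(UpdateOps.ADD, idx, x, y)
  ans = UpdateOps.jax_fn(UpdateOps.ADD, idx, x, y)
  onp.testing.assert_allclose(onp.asarray(ans), ref)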
class IndexedUpdateTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list({
"testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format(
name, jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op
} for name, index_specs in STATIC_INDEXING_TESTS
for shape, indexer in index_specs
for op in UpdateOps
for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes)
for update_shape in _broadcastable_shapes(_update_shape(shape, indexer))
for update_dtype in ([dtype] if op == UpdateOps.ADD else all_dtypes)
for rng_factory in [jtu.rand_default]))
def testStaticIndexing(self, shape, dtype, update_shape, update_dtype,
rng_factory, indexer, op):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
onp_fn = lambda x, y: UpdateOps.onp_fn(op, indexer, x, y)
jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y)
self._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True)
self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list({
"testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format(
name, jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op
} for name, index_specs in ADVANCED_INDEXING_TESTS_NO_REPEATS
for shape, indexer in index_specs
for op in UpdateOps
for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes)
for update_shape in _broadcastable_shapes(_update_shape(shape, indexer))
for update_dtype in ([dtype] if op == UpdateOps.ADD else all_dtypes)
for rng_factory in [jtu.rand_default]))
def testAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,
rng_factory, indexer, op):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
onp_fn = lambda x, y: UpdateOps.onp_fn(op, indexer, x, y)
jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y)
self._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True)
self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list({
"testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format(
name, jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op
} for name, index_specs in MIXED_ADVANCED_INDEXING_TESTS_NO_REPEATS
for shape, indexer in index_specs
for op in UpdateOps
for dtype in (all_dtypes if op == UpdateOps.UPDATE else default_dtypes)
for update_shape in _broadcastable_shapes(_update_shape(shape, indexer))
for update_dtype in ([dtype] if op == UpdateOps.ADD else all_dtypes)
for rng_factory in [jtu.rand_default]))
def testMixedAdvancedIndexing(self, shape, dtype, update_shape, update_dtype,
rng_factory, indexer, op):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), rng(update_shape, update_dtype)]
onp_fn = lambda x, y: UpdateOps.onp_fn(op, indexer, x, y)
jax_fn = lambda x, y: UpdateOps.jax_fn(op, indexer, x, y)
self._CheckAgainstNumpy(onp_fn, jax_fn, args_maker, check_dtypes=True)
self._CompileAndCheck(jax_fn, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list({
"testcase_name": "{}_inshape={}_indexer={}_update={}_op={}".format(
name, jtu.format_shape_dtype_string(shape, dtype), indexer,
jtu.format_shape_dtype_string(update_shape, update_dtype), op.name),
"shape": shape, "dtype": dtype, "rng_factory": rng_factory, "indexer": indexer,
"update_shape": update_shape, "update_dtype": update_dtype,
"op": op
} for name, index_specs in STATIC_INDEXING_TESTS
for shape, indexer in index_specs
for op in UpdateOps
for dtype in float_dtypes
for update_shape in _broadcastable_shapes(_update_shape(shape, indexer))
for update_dtype in ([dtype] if op == UpdateOps.ADD else float_dtypes)
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("tpu") # TODO(mattjj,phawkins): tpu issues
def testStaticIndexingGrads(self, shape, dtype, update_shape, update_dtype,
rng_factory, indexer, op):
rng = rng_factory()
jax_op = ops.index_update if op == UpdateOps.UPDATE else ops.index_add
jax_fn = lambda x, y: jax_op(x, indexer, y)
x = rng(shape, dtype)
y = rng(update_shape, update_dtype)
check_grads(jax_fn, (x, y), 2, rtol=1e-3, atol=1e-3, eps=1.)
def testSegmentSumBehavior(self):
# testAdvancedIndexing compares against NumPy, and as a result doesn't check
# repeated indices. This test is just a simple manual check, based on
# https://www.tensorflow.org/api_docs/python/tf/math/segment_sum
data = onp.array([5, 1, 7, 2, 3, 4, 1, 3])
segment_ids = onp.array([0, 0, 0, 1, 2, 2, 3, 3])
ans = ops.index_add(onp.zeros(onp.max(segment_ids) + 1), segment_ids, data)
expected = onp.array([13, 2, 7, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
def testSegmentSum(self):
data = onp.array([5, 1, 7, 2, 3, 4, 1, 3])
segment_ids = onp.array([0, 0, 0, 1, 2, 2, 3, 3])
# test with explicit num_segments
ans = ops.segment_sum(data, segment_ids, num_segments=4)
expected = onp.array([13, 2, 7, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
# test without explicit num_segments
ans = ops.segment_sum(data, segment_ids)
expected = onp.array([13, 2, 7, 4])
self.assertAllClose(ans, expected, check_dtypes=False)
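# Illustrative sketch of why testSegmentSumBehavior exists: with repeated
# indices, NumPy's fancy assignment keeps only one of the colliding writes,
# whereas jax.ops.index_add accumulates every contribution (segment-sum
# semantics). Example only; not run as part of the suite.
def _demo_repeated_index_add():
  idx = onp.array([0, 0, 1])
  vals = onp.array([1.0, 2.0, 3.0])
  buf = onp.zeros(2)
  buf[idx] += vals                                      # NumPy: last write wins -> [2., 3.]
  accumulated = ops.index_add(onp.zeros(2), idx, vals)  # JAX: sums -> [3., 3.]
  return buf, accumulated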
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/lax_numpy_indexing_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import os
from random import shuffle
from unittest import SkipTest
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
import jax
import jax.numpy as np
from jax import test_util as jtu
from jax import core
from jax import lax
from jax import random
from jax.api import (pmap, soft_pmap, jit, vmap, jvp, grad, make_jaxpr,
linearize, device_put)
from jax.lib import xla_bridge
from jax.util import prod
from jax.interpreters import pxla
from jax.interpreters import xla
from jax.config import config
config.parse_flags_with_absl()
prev_xla_flags = None
# Run all tests with 8 CPU devices.
def setUpModule():
global prev_xla_flags
prev_xla_flags = os.getenv("XLA_FLAGS")
flags_str = prev_xla_flags or ""
# Don't override user-specified device count, or other XLA flags.
if "xla_force_host_platform_device_count" not in flags_str:
os.environ["XLA_FLAGS"] = (flags_str +
" --xla_force_host_platform_device_count=8")
# Clear any cached backends so new CPU backend will pick up the env var.
xla_bridge.get_backend.cache_clear()
# Reset to the previous configuration in case other test modules are run.
def tearDownModule():
if prev_xla_flags is None:
del os.environ["XLA_FLAGS"]
else:
os.environ["XLA_FLAGS"] = prev_xla_flags
xla_bridge.get_backend.cache_clear()
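# Minimal sketch (assumes the host platform is CPU and that setUpModule above
# has run): the XLA_FLAGS override makes a single host expose eight virtual CPU
# devices, which is what lets these pmap tests shard over a leading axis of
# size xla_bridge.device_count() even on a machine with no accelerators.
def _demo_virtual_cpu_devices():
  n = xla_bridge.device_count()
  x = onp.arange(n, dtype=onp.float32)
  return pmap(lambda v: v + 1)(x)  # one shard per virtual device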
class PmapTest(jtu.JaxTestCase):
def _getMeshShape(self, device_mesh_shape):
device_count = xla_bridge.device_count()
if any(size == -1 for size in device_mesh_shape):
try:
return onp.arange(device_count).reshape(device_mesh_shape).shape
except ValueError:
msg = "device mesh shape {} not compatible with device count {}"
raise SkipTest(msg.format(device_mesh_shape, device_count))
else:
if device_count % prod(device_mesh_shape):
msg = "device mesh size {} does not divide available device count {}"
raise SkipTest(msg.format(prod(device_mesh_shape), device_count))
else:
return device_mesh_shape
def testBasic(self):
f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i')
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
expected = x - onp.sum(x, 0)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testComplexPsum(self):
f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i')
shape = (xla_bridge.device_count(), 4 * 2)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape).view(onp.complex64)
expected = x - onp.sum(x, 0)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testNestedBasic(self):
f = lambda x: lax.psum(lax.psum(x, 'i'), 'j')
f = pmap(pmap(f, 'i'), 'j')
def sum_and_broadcast(x, axis):
return onp.repeat(onp.sum(x, axis, keepdims=True), x.shape[axis], axis)
shape = (xla_bridge.device_count(), 1, 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
ans = f(x)
expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1)
self.assertAllClose(ans, expected, check_dtypes=False)
def testMismatchedAxisSizes(self):
n = xla_bridge.device_count()
f = pmap(lambda x, y: x + y)
self.assertRaisesRegex(
ValueError,
"Axis size .* does not match leading dimension of shape .*",
lambda: f(onp.random.randn(n), onp.random.randn(n - 1)))
@parameterized.named_parameters(
{"testcase_name": "_mesh={}".format(device_mesh_shape),
"device_mesh_shape": device_mesh_shape}
for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testNestedShardingAndStacking(self, device_mesh_shape):
mesh_shape = self._getMeshShape(device_mesh_shape)
f = lambda x: x
f = pmap(pmap(f, 'i'), 'j')
shape = mesh_shape + (4,)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
ans = f(x)
expected = x
self.assertEqual(ans.shape, expected.shape)
self.assertAllClose(ans, expected, check_dtypes=False)
def testJvpAndPartialEval(self):
@partial(pmap, axis_name='i')
def f(x):
return np.sin(x)
def splitjvp(x):
_, jvp = linearize(f, x)
return jvp(np.ones_like(x))
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
expected = onp.cos(x)
ans = splitjvp(x)
self.assertAllClose(ans, expected, check_dtypes=False)
make_jaxpr(splitjvp)(x) # doesn't crash
def testGradBasic(self):
@partial(pmap, axis_name='i')
def f(x):
return np.sin(x)
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
ans = grad(lambda x: np.sum(np.sin(x)))(x)
expected = grad(lambda x: np.sum(f(x)))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testGradOfPsum(self):
@partial(pmap, axis_name='i')
def f(x):
return lax.psum(x, axis_name='i')
shape = (jax.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
jtu.check_grads(f, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, eps=1.)
def testGradOfJvp(self):
@partial(pmap, axis_name='i')
def f(x):
return np.sin(x)
def splitjvp(x):
_, jvp = linearize(f, x)
return jvp(np.ones_like(x))
fun = lambda x: np.sum(jvp(np.sin, (x,), (np.ones_like(x),))[1])
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
ans = grad(lambda x: np.sum(splitjvp(x)))(x)
expected = grad(fun)(x)
self.assertAllClose(ans, expected, check_dtypes=True)
def testTwoArgsGrad(self):
def f(x, y):
return lax.psum(5. * np.cos(x) * np.sin(y), 'i')
f = pmap(f, 'i')
def g(x, y):
tot = np.sum(5. * np.cos(x) * np.sin(y))
return tot * np.ones_like(x) # broadcast to map like pjit does
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
y = 4 + x
    ans = grad(lambda x, y: np.sum(f(x, y)))(x, y)
expected = grad(lambda x, y: np.sum(g(x, y)))(x, y)
self.assertAllClose(ans, expected, check_dtypes=False)
@parameterized.named_parameters(
{"testcase_name": "_mesh={}".format(device_mesh_shape),
"device_mesh_shape": device_mesh_shape}
for device_mesh_shape in [(1, 1), (2, -1), (-1, 2)])
def testNestedWithClosure(self, device_mesh_shape):
mesh_shape = self._getMeshShape(device_mesh_shape)
@partial(pmap, axis_name='i')
def test_fun(x):
y = np.sum(np.sin(x))
@partial(pmap, axis_name='j')
def g(z):
return 3. * np.exp(np.sin(x).sum() * np.cos(y) * np.tan(z))
return grad(lambda w: np.sum(g(w)))(x)
@vmap
def baseline_fun(x):
y = np.sum(np.sin(x))
@vmap
def g(z):
return 3. * np.exp(np.sin(x).sum() * np.cos(y) * np.tan(z))
return grad(lambda w: np.sum(g(w)))(x)
shape = mesh_shape + (4,)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
ans = grad(lambda x: np.sum(test_fun(x)))(x)
expected = grad(lambda x: np.sum(baseline_fun(x)))(x)
self.assertAllClose(ans, expected, check_dtypes=True, atol=1e-3)
def testShardedDeviceArrays(self):
f = lambda x: 2 * x
f = pmap(f, axis_name='i')
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
# test that we can pass in and out ShardedDeviceArrays
y = f(x)
self.assertIsInstance(y, np.ndarray)
self.assertIsInstance(y, pxla.ShardedDeviceArray)
self.assertAllClose(y, 2 * x, check_dtypes=False)
z = f(y)
self.assertIsInstance(z, pxla.ShardedDeviceArray)
self.assertAllClose(z, 2 * 2 * x, check_dtypes=False)
# test that we can pass in a regular DeviceArray
y = f(device_put(x))
self.assertIsInstance(y, pxla.ShardedDeviceArray)
self.assertAllClose(y, 2 * x, check_dtypes=False)
# test that we can pass a ShardedDeviceArray to a regular jit computation
z = y + y
self.assertAllClose(z, 2 * 2 * x, check_dtypes=False)
# test that we can handle device movement on dispatch
y.device_buffers = y.device_buffers[::-1]
z = f(y)
self.assertAllClose(z, 2 * 2 * x[::-1], check_dtypes=False)
# test that the repr doesn't crash
repr(z)
def testPsumMultiple(self):
f = lambda x: lax.psum(x, ('i', 'j'))
f = pmap(pmap(f, 'i'), 'j')
def sum_and_broadcast(x, axis):
return onp.repeat(onp.sum(x, axis, keepdims=True), x.shape[axis], axis)
device_count = xla_bridge.device_count()
num_pairs, ragged = divmod(device_count, 2)
if num_pairs > 1 and not ragged:
shape = (num_pairs, 2, 4)
else:
shape = (device_count, 1, 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
ans = f(x)
expected = sum_and_broadcast(sum_and_broadcast(x, 0), 1)
self.assertAllClose(ans, expected, check_dtypes=False)
def testAxisGroups(self):
axis_env = xla.AxisEnv(8, ['i', 'j'], [4, 2])
groups = xla.axis_groups(axis_env, 'i')
self.assertEqual(groups, ((0, 2, 4, 6), (1, 3, 5, 7)))
groups = xla.axis_groups(axis_env, 'j')
self.assertEqual(groups, ((0, 1), (2, 3), (4, 5), (6, 7)))
groups = xla.axis_groups(axis_env, ('i', 'j'))
self.assertEqual(groups, ((0, 1, 2, 3, 4, 5, 6, 7,),))
groups = xla.axis_groups(axis_env, ('j', 'i'))
self.assertEqual(len(groups), 1)
self.assertEqual((tuple(sorted(groups[0])),),
((0, 1, 2, 3, 4, 5, 6, 7,),)) # order doesn't matter
@jtu.skip_on_devices("cpu", "gpu")
def testCollectivePermute(self):
device_count = xla_bridge.device_count()
rotation = [(i, (i + 1) % device_count) for i in range(device_count)]
f = lambda x: lax.ppermute(x, perm=rotation, axis_name='i')
f = pmap(f, 'i')
x = np.arange(4 * device_count).reshape((device_count, 4))
ans = f(x)
expected = onp.roll(x, shift=1, axis=0)
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("cpu", "gpu")
def testCollectivePermuteGrad(self):
device_count = xla_bridge.device_count()
shift_right = [(i, (i + 1)) for i in range(device_count - 1)]
f = lambda x: lax.ppermute(x, perm=shift_right, axis_name='i')
y = onp.pi + onp.arange(device_count, dtype=onp.float32)
g = lambda x: np.sum(y * pmap(f, 'i')(x))
x = onp.arange(device_count, dtype=onp.float32)
ans = grad(g)(x)
expected = onp.concatenate([onp.pi + onp.arange(1, device_count), [0]])
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("cpu", "gpu")
def testCollectivePermuteCyclicGrad(self):
device_count = xla_bridge.device_count()
shift_right = [(i, (i + 1) % device_count) for i in range(device_count)]
f = lambda x: lax.ppermute(x, perm=shift_right, axis_name='i')
y = onp.pi + onp.arange(device_count, dtype=onp.float32)
g = lambda x: np.sum(y * pmap(f, 'i')(x))
x = onp.arange(device_count, dtype=onp.float32)
ans = grad(g)(x)
expected = onp.roll(onp.pi + onp.arange(device_count), 1)
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("cpu", "gpu")
def testPpermuteWithZipObject(self):
# https://github.com/google/jax/issues/1703
num_devices = xla_bridge.device_count()
perm = [num_devices - 1] + list(range(num_devices - 1))
f = pmap(
lambda x: lax.ppermute(x, "i", zip(range(num_devices), perm)), "i")
result = f(np.arange(num_devices, dtype=np.float32))
expected = np.asarray(perm, dtype=np.float32)
self.assertAllClose(result, expected, check_dtypes=True)
@jtu.skip_on_devices("cpu", "gpu")
def testRule30(self):
# This is a test of collective_permute implementing a simple halo exchange
# to run a rule 30 simulation: https://en.wikipedia.org/wiki/Rule_30
# Halo exchange should be useful in spatially-sharded convolutions and in
# other simulations.
device_count = xla_bridge.device_count()
def send_right(x, axis_name):
left_perm = [(i, (i + 1) % device_count) for i in range(device_count)]
return lax.ppermute(x, perm=left_perm, axis_name=axis_name)
def send_left(x, axis_name):
left_perm = [((i + 1) % device_count, i) for i in range(device_count)]
return lax.ppermute(x, perm=left_perm, axis_name=axis_name)
def update_board(board):
left = board[:-2]
right = board[2:]
center = board[1:-1]
return lax.bitwise_xor(left, lax.bitwise_or(center, right))
@partial(pmap, axis_name='i')
def step(board_slice):
left, right = board_slice[:1], board_slice[-1:]
right, left = send_left(left, 'i'), send_right(right, 'i')
enlarged_board_slice = np.concatenate([left, board_slice, right])
return update_board(enlarged_board_slice)
board = onp.zeros(40, dtype=bool)
board[board.shape[0] // 2] = True
reshaped_board = board.reshape((device_count, -1))
boards = []
def print_board(board):
boards.append(''.join('*' if x else ' ' for x in board.ravel()))
print_board(reshaped_board)
for _ in range(20):
reshaped_board = step(reshaped_board)
print_board(reshaped_board)
ans = '\n'.join(boards)
expected = '\n'.join((
' * ',
' *** ',
' ** * ',
' ** **** ',
' ** * * ',
' ** **** *** ',
' ** * * * ',
' ** **** ****** ',
' ** * *** * ',
' ** **** ** * *** ',
' ** * * **** ** * ',
' ** **** ** * * **** ',
' ** * *** ** ** * * ',
' ** **** ** *** *** ** *** ',
' ** * * *** * *** * * ',
' ** **** ** * * ***** ******* ',
' ** * *** **** * *** * ',
' ** **** ** *** ** ** * *** ',
' ** * * *** * ** *** **** ** * ',
' ** **** ** * ****** * * *** ****',
' * * *** **** **** *** ** * ',
))
print(ans)
self.assertEqual(ans, expected)
@jtu.skip_on_devices("cpu", "gpu")
def testReduceMax(self):
f = pmap(lambda x: x - lax.pmax(x, 'i'), axis_name='i')
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
expected = x - onp.max(x, 0)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("cpu", "gpu")
def testReduceMin(self):
f = pmap(lambda x: x - lax.pmin(x, 'i'), axis_name='i')
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
expected = x - onp.min(x, 0)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testDeviceCountError(self):
device_count = xla_bridge.device_count()
f = pmap(lambda x: x)
x = np.arange(device_count + 1)
self.assertRaisesRegex(ValueError, ".*requires.*replicas", lambda: f(x))
f = pmap(lambda x: x)
x = onp.ones((device_count + 1, 10))
self.assertRaisesRegex(ValueError, ".*requires.*replicas", lambda: f(x))
f = pmap(lambda x: pmap(lambda x: x)(x))
x = onp.ones((device_count, 2, 10))
self.assertRaisesRegex(ValueError, ".*requires.*replicas", lambda: f(x))
def testPmapConstant(self):
device_count = xla_bridge.device_count()
f = pmap(lambda x: 3)
x = np.arange(device_count)
with jtu.count_jit_and_pmap_compiles() as count:
ans = f(x)
self.assertEqual(count[0], 0)
expected = onp.repeat(3, device_count)
self.assertAllClose(ans, expected, check_dtypes=False)
f = pmap(lambda x: (x, 3))
with jtu.count_jit_and_pmap_compiles() as count:
_, ans = f(x)
self.assertEqual(count[0], 1)
self.assertAllClose(ans, expected, check_dtypes=False)
def testPmapConstantDevices(self):
if xla_bridge.device_count() == 1:
raise SkipTest("this test requires multiple devices")
devices = xla_bridge.devices()[:-1]
shuffle(devices)
f = pmap(lambda x: 3, devices=devices)
x = np.arange(len(devices))
with jtu.count_jit_and_pmap_compiles() as count:
ans = f(x)
self.assertEqual(count[0], 0)
expected = onp.repeat(3, len(devices))
self.assertAllClose(ans, expected, check_dtypes=False)
# Test that 'ans' was properly replicated across devices.
self.assertEqual([b.device() for b in ans.device_buffers], devices)
def testPmapConstantError(self):
device_count = xla_bridge.device_count()
f = pmap(lambda x: 3)
x = np.arange(device_count + 1)
self.assertRaisesRegex(
ValueError, r"Cannot replicate across \d+ replicas because only \d+ "
r"local devices are available.", lambda: f(x))
f = pmap(lambda x: 3, devices=[xla_bridge.devices()[0]])
x = np.arange(2)
self.assertRaisesRegex(
ValueError, "Cannot replicate across 2 replicas because only 1 "
"local devices are available.", lambda: f(x))
def testNestedPmapConstant(self):
if xla_bridge.device_count() == 1:
raise SkipTest("this test requires multiple devices")
f = pmap(pmap(lambda x: 3))
shape = (2, xla_bridge.device_count() // 2, 3)
x = np.arange(prod(shape)).reshape(shape)
with jtu.count_jit_and_pmap_compiles() as count:
ans = f(x)
self.assertEqual(count[0], 0)
expected = 3 * onp.ones(shape[:2])
self.assertAllClose(ans, expected, check_dtypes=False)
# Test that 'ans' was properly replicated across devices.
expected_sharded = pmap(pmap(lambda x: x))(expected)
self.assertEqual([b.device() for b in ans.device_buffers],
[b.device() for b in expected_sharded.device_buffers])
f = pmap(pmap(lambda x: (x, 3)))
x_sharded, ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=False)
self.assertEqual([b.device() for b in ans.device_buffers],
[b.device() for b in x_sharded.device_buffers])
def testNestedPmapConstantDevices(self):
raise SkipTest("Nested pmaps with devices not yet implemented")
if xla_bridge.device_count() < 6:
raise SkipTest("this test requires >= 6 devices")
devices = xla_bridge.devices()[:-2]
shuffle(devices)
f = pmap(pmap(lambda x: 3), devices=devices)
shape = (2, len(devices) // 2, 3)
x = np.arange(prod(shape)).reshape(shape)
with jtu.count_jit_and_pmap_compiles() as count:
ans = f(x)
self.assertEqual(count[0], 0)
expected = 3 * onp.ones(shape[:2])
self.assertAllClose(ans, expected, check_dtypes=False)
# Test that 'ans' was properly replicated across devices.
expected_sharded = pmap(pmap(lambda x: x), devices=devices)(expected)
self.assertEqual([b.device() for b in ans.device_buffers],
[b.device() for b in expected_sharded.device_buffers])
def testNestedPmapConstantError(self):
f = pmap(pmap(lambda x: 3))
shape = (2, xla_bridge.device_count() // 2 + 1, 3)
x = np.arange(prod(shape)).reshape(shape)
self.assertRaisesRegex(
ValueError, r"Cannot replicate across \d+ replicas because only \d+ "
r"local devices are available.", lambda: f(x))
if xla_bridge.device_count() > 1:
f = pmap(pmap(lambda x: 3), devices=xla_bridge.devices()[:-1])
shape = (2, xla_bridge.device_count() // 2, 3)
x = np.arange(prod(shape)).reshape(shape)
self.assertRaisesRegex(
ValueError, r"Cannot replicate across \d+ replicas because only \d+ "
r"local devices are available.", lambda: f(x))
def testCollectiveConstant(self):
device_count = xla_bridge.device_count()
f = pmap(lambda x: lax.psum(1, 'i'), 'i')
x = np.arange(device_count)
ans = f(x)
expected = onp.repeat(device_count, device_count)
self.assertAllClose(ans, expected, check_dtypes=False)
def testCollectiveConstantNested(self):
device_count = xla_bridge.device_count()
@partial(pmap, axis_name='i')
def f(x):
@partial(pmap, axis_name='j')
def g(y):
a = lax.psum(1, 'i')
b = lax.psum(1, 'j')
c = lax.psum(1, ('i', 'j'))
return a, b, c
return g(x)
shape = (device_count, 1, 4)
x = np.arange(prod(shape)).reshape(shape)
a, b, c = f(x)
self.assertEqual(a.shape, shape[:-1])
self.assertEqual(b.shape, shape[:-1])
self.assertEqual(c.shape, shape[:-1])
self.assertEqual(a.ravel()[0], device_count)
self.assertEqual(b.ravel()[0], 1)
self.assertEqual(c.ravel()[0], device_count * 1)
def testAxisIndex(self):
device_count = xla_bridge.device_count()
f = pmap(lambda x: x + pxla.axis_index('i'), 'i')
x = np.ones(device_count)
ans = f(x)
expected = 1 + onp.arange(device_count)
self.assertAllClose(ans, expected, check_dtypes=False)
def testVmapOfPmap(self):
device_count = xla_bridge.device_count()
f0 = lambda x: x
f1 = pmap(f0, axis_name='i')
ax = onp.random.randn(2, device_count, 50, 60)
bx = vmap(f1)(ax)
self.assertAllClose(ax, bx, check_dtypes=False)
def testVmapOfPmap2(self):
N_DEVICES = xla_bridge.device_count()
keys = random.split(random.PRNGKey(1), 13) # [13, 2]
@pmap
def g(key):
params = random.normal(key, ())
return 0.
@vmap
def s(keys):
keys = np.broadcast_to(keys, (N_DEVICES,) + keys.shape)
return g(keys)
ans = s(keys) # doesn't crash
self.assertEqual(ans.shape, (13, N_DEVICES))
def testVmapOfPmapNonLeadingAxis(self):
device_count = xla_bridge.device_count()
f0 = lambda x: x
f1 = pmap(f0, axis_name='i')
ax = onp.random.randn(device_count, 2, 50, 60)
bx = vmap(f1, in_axes=2, out_axes=2)(ax)
self.assertAllClose(ax, bx, check_dtypes=False)
def testVmapOfPmapTuple(self):
device_count = xla_bridge.device_count()
f0 = lambda *x: x
f1 = pmap(f0, axis_name='i')
ax = onp.random.randn(device_count, 2, 50, 60)
ay = onp.random.randn(device_count, 30, 2)
az1 = onp.random.randn(device_count, 20)
az2 = onp.random.randn(2, device_count, 20)
bx, by, bz = vmap(f1, in_axes=(1, 2, (None, 0)), out_axes=(1, 2, 0))(ax, ay, (az1, az2))
self.assertAllClose(ax, bx, check_dtypes=False)
self.assertAllClose(ay, by, check_dtypes=False)
bz1, bz2 = bz
expected_bz1 = onp.broadcast_to(az1, (2,) + az1.shape)
self.assertAllClose(expected_bz1, bz1, check_dtypes=False)
    self.assertAllClose(az2, bz2, check_dtypes=False)
@jtu.skip_on_devices("gpu")
def testPswapaxes(self):
device_count = xla_bridge.device_count()
# TODO: AllToAll not yet implemented on XLA:CPU
if jtu.device_under_test() == "cpu":
device_count = 1
shape = (device_count, 3, device_count, 5)
x = onp.arange(prod(shape)).reshape(shape)
ans = pmap(lambda x: lax.pswapaxes(x, 'i', 1), axis_name='i')(x)
expected = onp.swapaxes(x, 0, 2)
self.assertAllClose(ans, expected, check_dtypes=False)
def testSoftPmapPsum(self):
n = 4 * xla_bridge.device_count()
def f(x):
return x / lax.psum(x, 'i')
ans = soft_pmap(f, 'i')(np.ones(n))
expected = onp.ones(n) / n
self.assertAllClose(ans, expected, check_dtypes=False)
def testSoftPmapAxisIndex(self):
n = 4 * xla_bridge.device_count()
def f(x):
return x * lax.axis_index('i')
ans = soft_pmap(f, 'i')(2 * np.ones(n))
expected = 2 * onp.arange(n)
self.assertAllClose(ans, expected, check_dtypes=False)
def testSoftPmapOfJit(self):
n = 4 * xla_bridge.device_count()
def f(x):
return 3 * x
ans = soft_pmap(jit(f), 'i')(onp.arange(n))
expected = 3 * onp.arange(n)
self.assertAllClose(ans, expected, check_dtypes=False)
def testSoftPmapNested(self):
n = 4 * xla_bridge.device_count()
@partial(soft_pmap, axis_name='i')
@partial(soft_pmap, axis_name='j')
def f(x):
i_size = lax.psum(1, 'i')
return x + lax.axis_index('i') + i_size * lax.axis_index('j')
ans = f(np.zeros((n, n)))
expected = onp.arange(n ** 2).reshape(n, n).T
self.assertAllClose(ans, expected, check_dtypes=False)
def testGradOfSoftPmap(self):
n = 4 * xla_bridge.device_count()
@partial(soft_pmap, axis_name='i')
def f(x):
return x * lax.axis_index('i')
ans = grad(lambda x: np.sum(f(x)))(np.zeros((n, n)))
expected = onp.repeat(onp.arange(n)[:, None], n, axis=1)
self.assertAllClose(ans, expected, check_dtypes=False)
def testSoftPmapDevicePersistence(self):
device_count = xla_bridge.device_count()
shape = (2 * 2 * device_count, 2, 3)
# check that we can maintain device persistence across calls
x = onp.arange(prod(shape)).reshape(shape)
x = soft_pmap(lambda x: x)(x)
self.assertIsInstance(x, pxla.ChunkedDeviceArray)
x._npy_value = onp.float32(onp.nan) # can't be coerced to ndarray for xfer
x = soft_pmap(lambda x: x)(x) # doesn't crash
self.assertIsInstance(x, pxla.ChunkedDeviceArray)
# check that we don't crash when we can't maintain device persistence
x = onp.arange(prod(shape)).reshape(shape)
x = soft_pmap(lambda x: x)(x)
self.assertIsInstance(x, pxla.ChunkedDeviceArray)
y = x.reshape(device_count, -1)
self.assertIsInstance(y, xla.DeviceArray) # should have forced collection
soft_pmap(lambda x: x)(y) # doesn't crash
z = x + 2
self.assertIsInstance(z, xla.DeviceArray) # should have forced collection
x._npy_value = onp.float32(onp.nan) # can't be coerced to ndarray for xfer
self.assertRaisesRegex(
RuntimeError,
'.*does not match host shape or layout of computation parameter 0.*',
lambda: x + 2)
# check that different axis merges aren't a problem
x = onp.arange(prod(shape)).reshape(shape)
x = soft_pmap(lambda x: x)(x)
self.assertIsInstance(x, pxla.ChunkedDeviceArray)
x = x.reshape(2 * device_count, 2, 2, 3) # axis merge of the wrong size
self.assertIsInstance(x, xla.DeviceArray) # should have forced collection
@jtu.skip_on_devices("gpu")
def DISABLED_testSoftPmapAllToAll(self):
n = 4 * xla_bridge.device_count()
def f(x):
return lax.all_to_all(x, 'i', 0, 0)
ans = soft_pmap(f, 'i')(np.arange(n ** 2).reshape(n, n))
expected = onp.arange(n ** 2).reshape(n, n).T
self.assertAllClose(ans, expected, check_dtypes=False)
def testShardedDeviceArrayBlockUntilReady(self):
x = onp.arange(xla_bridge.device_count())
x = pmap(lambda x: x)(x)
x.block_until_ready() # doesn't crash
def testJitPmapComposition(self):
f = lambda x: x - lax.psum(x, 'i')
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
expected = x - onp.sum(x, 0)
ans = jit(pmap(f, 'i'))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = pmap(jit(f), 'i')(x)
self.assertAllClose(ans, expected, check_dtypes=False)
def testMakeJaxprOfOpenSpmd(self):
f = lambda x: x - lax.psum(x, 'i')
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
make_jaxpr(f)(x) # doesn't crash
def testCompositionWithJitTwice(self):
@jit
def f(x):
y = 2 * x
@jit
def g(z):
return pmap(lambda x: x * y)(z)
return g(x)
f(onp.arange(1.).reshape((1, 1))) # doesn't crash
def testIssue1065(self):
# from https://github.com/google/jax/issues/1065
device_count = xla_bridge.device_count()
def multi_step_pmap(state, count):
@partial(pmap, axis_name='x')
@jit
def exchange_and_multi_step(state):
return state
@jit
def time_evolution(state):
return lax.fori_loop(0, count, lambda i, s: exchange_and_multi_step(s), state)
return time_evolution(state)
multi_step_pmap(np.zeros((device_count,)), count=1)
def testShardedDeviceArrayGetItem(self):
f = lambda x: 2 * x
f = pmap(f, axis_name='i')
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
y = f(x)
self.assertIsInstance(y, np.ndarray)
self.assertIsInstance(y, pxla.ShardedDeviceArray)
z = y[0] # doesn't crash
self.assertAllClose(z, 2 * x[0], check_dtypes=False)
def testPostProcessMap(self):
# TODO(mattjj): this fails with multiple devices (unless we add a jit)
# because we assume eager ops (like scan here) can't require more than 1
# replica.
raise SkipTest("need eager multi-replica support")
# test came from https://github.com/google/jax/issues/1369
nrep = xla_bridge.device_count()
def pmvm(a, b):
a = a.reshape((nrep, -1, a.shape[1]))
func = pmap(lambda z: np.dot(z, b))
return func(a).reshape(b.shape)
n = nrep * 2
rng = onp.random.RandomState(0)
a = rng.randn(n, n)
b = rng.randn(n)
iters = np.arange(5)
def body(carry, i):
return pmvm(a, carry), i
ans, _ = lax.scan(body, b, iters)
expected = onp.linalg.matrix_power(a, 5).dot(b)
self.assertAllClose(ans, expected, check_dtypes=False)
def testManyArgs(self):
@pmap
def f(args_list):
return sum(args_list)
vals = list(range(500))
ndevices = xla_bridge.device_count()
self.assertAllClose(f(np.array([vals] * ndevices)),
np.array([sum(vals)] * ndevices),
check_dtypes=True)
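# Illustrative sketch of the halo-exchange idiom used in testRule30 above: each
# device ships its right edge to its neighbour with lax.ppermute so every shard
# can see one element of left context. Like that test, this assumes a backend
# on which ppermute is implemented (the tests skip it on CPU/GPU). Example
# only; not invoked by the test cases.
def _demo_halo_exchange():
  n = xla_bridge.device_count()
  shift_right = [(i, (i + 1) % n) for i in range(n)]

  @partial(pmap, axis_name='i')
  def step(shard):
    left_halo = lax.ppermute(shard[-1:], perm=shift_right, axis_name='i')
    return np.concatenate([left_halo, shard])

  return step(np.arange(4 * n, dtype=np.float32).reshape((n, 4)))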
class PmapWithDevicesTest(jtu.JaxTestCase):
def testAllDevices(self):
f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i',
devices=xla_bridge.devices())
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
expected = x - onp.sum(x, 0)
ans = f(x)
self.assertAllClose(ans, expected, check_dtypes=True)
def testOneDevice(self):
if xla_bridge.device_count() == 1:
raise SkipTest("this test requires multiple devices")
d0 = xla_bridge.devices()[0]
d1 = xla_bridge.devices()[1]
f = lambda x: np.dot(x, x.T)
f0 = pmap(f, devices=[d0])
f1 = pmap(f, devices=[d1])
x = onp.random.rand(1, 1000, 1000)
r0 = f0(x)
r1 = f1(x)
expected = onp.expand_dims(onp.dot(x.squeeze(), x.squeeze().T), 0)
self.assertAllClose(r0, expected, check_dtypes=True, atol=1e-6, rtol=1e-3)
self.assertAllClose(r1, expected, check_dtypes=True, atol=1e-6, rtol=1e-3)
def testNoDevicesError(self):
f = pmap(lambda x: x - lax.psum(x, 'i'), axis_name='i', devices=[])
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
with self.assertRaisesRegex(
ValueError, "'devices' argument to pmap must be non-empty, or None."):
f(x)
def testBadAxisSizeError(self):
if xla_bridge.device_count() == 1:
raise SkipTest("this test requires multiple devices")
f = pmap(lambda x: lax.psum(x, 'i'), axis_name='i',
devices=xla_bridge.devices())
with self.assertRaisesRegex(
ValueError, r"Leading axis size of input to pmapped function must "
r"equal the number of local devices passed to pmap. Got axis_size=1, "
r"num_local_devices=\d."):
f(np.ones(1))
with self.assertRaisesRegex(
ValueError, r"Leading axis size of input to pmapped function must "
r"equal the number of local devices passed to pmap. Got axis_size=\d, "
r"num_local_devices=\d."):
f(np.ones(xla_bridge.device_count() + 1))
def testNestedPmapsError(self):
# Devices specified in outer pmap
@partial(pmap, axis_name='i', devices=xla_bridge.devices())
def foo(x):
@partial(pmap, axis_name='j')
def bar(y):
return lax.psum(y, 'j')
return bar(x)
with self.assertRaisesRegex(
ValueError,
"Nested pmaps with explicit devices argument."):
foo(np.ones((xla_bridge.device_count(), 1)))
# Devices specified in inner pmap
@partial(pmap, axis_name='i')
def foo(x):
@partial(pmap, axis_name='j', devices=xla_bridge.devices())
def bar(y):
return lax.psum(y, 'j')
return bar(x)
with self.assertRaisesRegex(
ValueError,
"Nested pmaps with explicit devices argument."):
foo(np.ones((xla_bridge.device_count(), 1)))
def testJitInPmap(self):
@partial(pmap, axis_name='i', devices=xla_bridge.devices())
def foo(x):
@jit
def bar(y):
return y + 1
return lax.psum(bar(x), 'i')
ndevices = xla_bridge.device_count()
ans = foo(np.ones((ndevices, 1)))
expected = onp.ones((ndevices, 1), dtype=np.float_) * ndevices * 2
self.assertAllClose(ans, expected, check_dtypes=True)
def testPmapInJit(self):
@jit
def foo(x):
@partial(pmap, axis_name='i', devices=xla_bridge.devices())
def bar(y):
return lax.psum(y, 'i')
return bar(x)
ndevices = xla_bridge.device_count()
ans = foo(np.ones((ndevices, 1)))
expected = onp.ones((ndevices, 1), dtype=np.float_) * ndevices
self.assertAllClose(ans, expected, check_dtypes=True)
def testGradBasic(self):
@partial(pmap, axis_name='i', devices=xla_bridge.devices())
def f(x):
return np.sin(x)
shape = (xla_bridge.device_count(), 4)
x = onp.arange(prod(shape), dtype=onp.float32).reshape(shape)
ans = grad(lambda x: np.sum(np.sin(x)))(x)
expected = grad(lambda x: np.sum(f(x)))(x)
self.assertAllClose(ans, expected, check_dtypes=False)
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/pmap_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import numpy as onp
from absl.testing import absltest
from absl.testing import parameterized
import scipy.ndimage as osp_ndimage
from jax import test_util as jtu
from jax import dtypes
from jax.scipy import ndimage as lsp_ndimage
from jax.config import config
config.parse_flags_with_absl()
float_dtypes = [onp.float32, onp.float64]
complex_dtypes = [onp.complex64, onp.complex128]
inexact_dtypes = float_dtypes + complex_dtypes
int_dtypes = [onp.int32, onp.int64]
bool_dtypes = [onp.bool_]
all_dtypes = float_dtypes + complex_dtypes + int_dtypes + bool_dtypes
def _fixed_ref_map_coordinates(input, coordinates, order, mode, cval=0.0):
# SciPy's implementation of map_coordinates handles boundaries incorrectly,
# unless mode='reflect'. For order=1, this only affects interpolation outside
# the bounds of the original array.
# https://github.com/scipy/scipy/issues/2640
assert order <= 1
padding = [(max(-onp.floor(c.min()).astype(int) + 1, 0),
max(onp.ceil(c.max()).astype(int) + 1 - size, 0))
for c, size in zip(coordinates, input.shape)]
shifted_coords = [c + p[0] for p, c in zip(padding, coordinates)]
pad_mode = {
'nearest': 'edge', 'mirror': 'reflect', 'reflect': 'symmetric'
}.get(mode, mode)
if mode == 'constant':
padded = onp.pad(input, padding, mode=pad_mode, constant_values=cval)
else:
padded = onp.pad(input, padding, mode=pad_mode)
dtype = onp.result_type(padded, *shifted_coords)
result = osp_ndimage.map_coordinates(
padded, shifted_coords, order=order, mode=mode, cval=cval)
return result
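# Illustrative sketch (not used by the tests): the two reference
# implementations only disagree for coordinates that fall outside the input,
# which is exactly the regime the "fixed" variant pads for. Assumes SciPy is
# available, as elsewhere in this file.
def _demo_reference_difference():
  x = onp.arange(5.0)
  coords = [onp.array([-0.5, 2.0, 4.5])]  # first and last points are out of bounds
  raw = osp_ndimage.map_coordinates(x, coords, order=1, mode='nearest')
  fixed = _fixed_ref_map_coordinates(x, coords, order=1, mode='nearest')
  return raw, fixed  # interior point matches; boundary points may differ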
class NdimageTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_coordinates={}_order={}_mode={}_cval={}_impl={}_round={}".format(
jtu.format_shape_dtype_string(shape, dtype),
jtu.format_shape_dtype_string(coords_shape, coords_dtype),
order, mode, cval, impl, round_),
"rng_factory": rng_factory, "shape": shape,
"coords_shape": coords_shape, "dtype": dtype,
"coords_dtype": coords_dtype, "order": order, "mode": mode,
"cval": cval, "impl": impl, "round_": round_}
for shape in [(5,), (3, 4), (3, 4, 5)]
for coords_shape in [(7,), (2, 3, 4)]
for dtype in float_dtypes
for coords_dtype in float_dtypes
for order in [0, 1]
for mode in ['wrap', 'constant', 'nearest']
for cval in ([0, -1] if mode == 'constant' else [0])
for impl, rng_factory in [
("original", partial(jtu.rand_uniform, 0, 1)),
("fixed", partial(jtu.rand_uniform, -0.75, 1.75)),
]
for round_ in [True, False]))
def testMapCoordinates(self, shape, dtype, coords_shape, coords_dtype, order,
mode, cval, impl, round_, rng_factory):
def args_maker():
x = onp.arange(onp.prod(shape), dtype=dtype).reshape(shape)
coords = [(size - 1) * rng(coords_shape, coords_dtype) for size in shape]
if round_:
coords = [c.round().astype(int) for c in coords]
return x, coords
rng = rng_factory()
lsp_op = lambda x, c: lsp_ndimage.map_coordinates(
x, c, order=order, mode=mode, cval=cval)
impl_fun = (osp_ndimage.map_coordinates if impl == "original"
else _fixed_ref_map_coordinates)
osp_op = lambda x, c: impl_fun(x, c, order=order, mode=mode, cval=cval)
epsilon = max([dtypes.finfo(dtypes.canonicalize_dtype(d)).eps
for d in [dtype, coords_dtype]])
self._CheckAgainstNumpy(lsp_op, osp_op, args_maker, tol=10*epsilon,
check_dtypes=True)
def testMapCoordinatesErrors(self):
x = onp.arange(5.0)
c = [onp.linspace(0, 5, num=3)]
with self.assertRaisesRegex(NotImplementedError, 'requires order<=1'):
lsp_ndimage.map_coordinates(x, c, order=2)
with self.assertRaisesRegex(
NotImplementedError, 'does not yet support mode'):
lsp_ndimage.map_coordinates(x, c, order=1, mode='reflect')
with self.assertRaisesRegex(ValueError, 'sequence of length'):
lsp_ndimage.map_coordinates(x, [c, c], order=1)
def testMapCoordinateDocstring(self):
self.assertIn("Only linear interpolation",
lsp_ndimage.map_coordinates.__doc__)
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/scipy_ndimage_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for Vectorize library."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
from jax import numpy as np
from jax import test_util as jtu
from jax import random
from jax.experimental.vectorize import vectorize
from jax.config import config
config.parse_flags_with_absl()
matmat = vectorize('(n,m),(m,k)->(n,k)')(np.dot)
matvec = vectorize('(n,m),(m)->(n)')(np.dot)
vecmat = vectorize('(m),(m,k)->(k)')(np.dot)
vecvec = vectorize('(m),(m)->()')(np.dot)
@vectorize('(n)->()')
def magnitude(x):
return np.dot(x, x)
mean = vectorize('(n)->()')(np.mean)
@vectorize('()->(n)')
def stack_plus_minus(x):
return np.stack([x, -x])
@vectorize('(n)->(),(n)')
def center(array):
bias = np.mean(array)
debiased = array - bias
return bias, debiased
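# Minimal usage sketch of the gufunc-style signatures defined above: dimensions
# not named in the signature are treated as batch dimensions and broadcast, so
# matvec maps np.dot over a stack of matrices. Example only; not invoked by the
# test cases.
def _demo_signature_broadcasting():
  mats = np.zeros((5, 2, 3))   # batch of five (2, 3) matrices
  vec = np.zeros((3,))
  out = matvec(mats, vec)      # -> shape (5, 2)
  bias, debiased = center(np.zeros((3, 4)), axis=1)  # '(n)->(),(n)' over each row
  return out.shape, bias.shape, debiased.shape       # (5, 2), (3,), (3, 4)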
class VectorizeTest(jtu.JaxTestCase):
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_leftshape={}_rightshape={}".format(left_shape, right_shape),
"left_shape": left_shape, "right_shape": right_shape, "result_shape": result_shape}
for left_shape, right_shape, result_shape in [
((2, 3), (3, 4), (2, 4)),
((2, 3), (1, 3, 4), (1, 2, 4)),
((5, 2, 3), (1, 3, 4), (5, 2, 4)),
((6, 5, 2, 3), (3, 4), (6, 5, 2, 4)),
]))
def test_matmat(self, left_shape, right_shape, result_shape):
self.assertEqual(matmat(np.zeros(left_shape),
np.zeros(right_shape)).shape, result_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_leftshape={}_rightshape={}".format(left_shape, right_shape),
"left_shape": left_shape, "right_shape": right_shape, "result_shape": result_shape}
for left_shape, right_shape, result_shape in [
((2, 3), (3,), (2,)),
((2, 3), (1, 3), (1, 2)),
((4, 2, 3), (1, 3), (4, 2)),
((5, 4, 2, 3), (1, 3), (5, 4, 2)),
]))
def test_matvec(self, left_shape, right_shape, result_shape):
self.assertEqual(matvec(np.zeros(left_shape),
np.zeros(right_shape)).shape, result_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_leftshape={}_rightshape={}".format(left_shape, right_shape),
"left_shape": left_shape, "right_shape": right_shape, "result_shape": result_shape}
for left_shape, right_shape, result_shape in [
((3,), (3,), ()),
((2, 3), (3,), (2,)),
((4, 2, 3), (3,), (4, 2)),
]))
def test_vecvec(self, left_shape, right_shape, result_shape):
self.assertEqual(vecvec(np.zeros(left_shape),
np.zeros(right_shape)).shape, result_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(shape),
"shape": shape, "result_shape": result_shape}
for shape, result_shape in [
((3,), ()),
((2, 3,), (2,)),
((1, 2, 3,), (1, 2)),
]))
def test_magnitude(self, shape, result_shape):
size = 1
for x in shape:
size *= x
self.assertEqual(magnitude(np.arange(size).reshape(shape)).shape, result_shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(shape),
"shape": shape, "result_shape": result_shape}
for shape, result_shape in [
((3,), ()),
((2, 3), (2,)),
((1, 2, 3, 4), (1, 2, 3)),
]))
def test_mean(self, shape, result_shape):
self.assertEqual(mean(np.zeros(shape)).shape, result_shape)
def test_mean_axis(self):
self.assertEqual(mean(np.zeros((2, 3)), axis=0).shape, (3,))
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}".format(shape),
"shape": shape, "result_shape": result_shape}
for shape, result_shape in [
((), (2,)),
((3,), (3,2,)),
]))
def test_stack_plus_minus(self, shape, result_shape):
self.assertEqual(stack_plus_minus(np.zeros(shape)).shape, result_shape)
def test_center(self):
b, a = center(np.arange(3))
self.assertEqual(a.shape, (3,))
self.assertEqual(b.shape, ())
    self.assertAllClose(1.0, b, check_dtypes=False)
X = np.arange(12).reshape((3, 4))
b, a = center(X, axis=1)
self.assertEqual(a.shape, (3, 4))
self.assertEqual(b.shape, (3,))
    self.assertAllClose(np.mean(X, axis=1), b, check_dtypes=True)
b, a = center(X, axis=0)
self.assertEqual(a.shape, (3, 4))
self.assertEqual(b.shape, (4,))
    self.assertAllClose(np.mean(X, axis=0), b, check_dtypes=True)
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/vectorize_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the optix module."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from jax import numpy as jnp
from jax.experimental import optimizers
from jax.experimental import optix
import jax.test_util # imported only for flags
from jax.tree_util import tree_leaves
import numpy as onp
from jax.config import config
config.parse_flags_with_absl()
STEPS = 50
LR = 1e-2
class OptixTest(absltest.TestCase):
def setUp(self):
super(OptixTest, self).setUp()
self.init_params = (jnp.array([1., 2.]), jnp.array([3., 4.]))
self.per_step_updates = (jnp.array([500., 5.]), jnp.array([300., 3.]))
def test_sgd(self):
# experimental/optimizers.py
jax_params = self.init_params
opt_init, opt_update, get_params = optimizers.sgd(LR)
state = opt_init(jax_params)
for i in range(STEPS):
state = opt_update(i, self.per_step_updates, state)
jax_params = get_params(state)
# experimental/optix.py
optix_params = self.init_params
sgd = optix.sgd(LR, 0.0)
state = sgd.init(optix_params)
for _ in range(STEPS):
updates, state = sgd.update(self.per_step_updates, state)
optix_params = optix.apply_updates(optix_params, updates)
# Check equivalence.
for x, y in zip(tree_leaves(jax_params), tree_leaves(optix_params)):
onp.testing.assert_allclose(x, y, rtol=1e-5)
def test_adam(self):
b1, b2, eps = 0.9, 0.999, 1e-8
# experimental/optimizers.py
jax_params = self.init_params
opt_init, opt_update, get_params = optimizers.adam(LR, b1, b2, eps)
state = opt_init(jax_params)
for i in range(STEPS):
state = opt_update(i, self.per_step_updates, state)
jax_params = get_params(state)
# experimental/optix.py
optix_params = self.init_params
adam = optix.adam(LR, b1, b2, eps)
state = adam.init(optix_params)
for _ in range(STEPS):
updates, state = adam.update(self.per_step_updates, state)
optix_params = optix.apply_updates(optix_params, updates)
# Check equivalence.
for x, y in zip(tree_leaves(jax_params), tree_leaves(optix_params)):
onp.testing.assert_allclose(x, y, rtol=1e-4)
def test_rmsprop(self):
decay, eps = .9, 0.1
# experimental/optimizers.py
jax_params = self.init_params
opt_init, opt_update, get_params = optimizers.rmsprop(LR, decay, eps)
state = opt_init(jax_params)
for i in range(STEPS):
state = opt_update(i, self.per_step_updates, state)
jax_params = get_params(state)
# experimental/optix.py
optix_params = self.init_params
rmsprop = optix.rmsprop(LR, decay, eps)
state = rmsprop.init(optix_params)
for _ in range(STEPS):
updates, state = rmsprop.update(self.per_step_updates, state)
optix_params = optix.apply_updates(optix_params, updates)
# Check equivalence.
for x, y in zip(tree_leaves(jax_params), tree_leaves(optix_params)):
onp.testing.assert_allclose(x, y, rtol=1e-5)
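# Minimal sketch of the pattern every test above follows with the optix API: a
# transform is a pair of pure functions (init, update), and parameters are
# advanced functionally with optix.apply_updates. Example only; the gradients
# here are stand-ins rather than real loss gradients.
def _demo_optix_loop(params, grads, steps=3):
  sgd = optix.sgd(LR, 0.0)
  state = sgd.init(params)
  for _ in range(steps):
    updates, state = sgd.update(grads, state)
    params = optix.apply_updates(params, updates)
  return params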
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/optix_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
from functools import partial
import unittest
import warnings
import weakref
from absl import logging
from absl.testing import absltest
import numpy as onp
import six
if six.PY3:
import concurrent.futures
import jax
import jax.numpy as np
from jax import jit, grad, device_put, jacfwd, jacrev, hessian
from jax import api, lax
from jax.core import Primitive
from jax.interpreters import ad
from jax.interpreters import xla
from jax.abstract_arrays import concretization_err_msg
from jax.lib import xla_bridge as xb
from jax import test_util as jtu
from jax import tree_util
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
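# Illustrative sketch of the caching rule that test_jit_static_args below
# relies on: arguments listed in static_argnums are baked into the compiled
# computation, so changing them triggers a retrace, while changing ordinary
# array arguments of the same shape reuses the cached executable. Not invoked
# by the tests.
def _demo_static_argnums_caching():
  traces = []

  @partial(jit, static_argnums=(1,))
  def scale(x, factor):
    traces.append(None)        # side effect only runs when tracing
    return factor * x

  scale(np.ones(3), 2.0)       # traces once
  scale(np.zeros(3), 2.0)      # cache hit: same static value, same shape/dtype
  scale(np.ones(3), 3.0)       # new static value -> retrace
  return len(traces)           # expected to be 2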
class APITest(jtu.JaxTestCase):
def test_grad_argnums(self):
def f(x, y, z, flag=False):
assert flag
return 1.0 * x + 2.0 * y + 3.0 * z
assert grad(f)(1.0, 1.0, 1.0, flag=True) == 1.0
assert grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == 2.0
assert grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (3.0, 1.0)
def test_value_and_grad_argnums(self):
def f(x, y, z, flag=False):
assert flag
return 1.0 * x + 2.0 * y + 3.0 * z
y = f(1.0, 1.0, 1.0, flag=True)
assert api.value_and_grad(f)(1.0, 1.0, 1.0, flag=True) == (y, 1.0)
assert api.value_and_grad(f, argnums=1)(1.0, 1.0, 1.0, flag=True) == (y, 2.0)
assert api.value_and_grad(f, argnums=(2, 0))(1.0, 1.0, 1.0, flag=True) == (y, (3.0, 1.0))
def test_jit_static_args(self):
side = []
def f(x, y, z, flag=False, flag2=False):
assert flag
side.append(None)
return 100*x + 10*y + z
f1 = jit(f, static_argnums=(3, 4))
assert f1(1, 2, 3, True, False) == 123
assert len(side) == 1
assert f1(2, 1, 3, True, False) == 213
assert len(side) == 1
assert f1(2, 1, 3, True, True) == 213
assert len(side) == 2
side[:] = []
f2 = jit(f, static_argnums=(0, 2, 3, 4))
assert f2(1, 2, 3, True, False) == 123
assert len(side) == 1
assert f2(1, 3, 3, True, False) == 133
assert len(side) == 1
assert f2(2, 2, 3, True, False) == 223
assert len(side) == 2
assert f2(2, 4, 3, True, False) == 243
assert len(side) == 2
assert f2(2, 4, 3, True, True) == 243
assert len(side) == 3
assert f2(2, 5, 3, True, True) == 253
assert len(side) == 3
def test_jit_kwargs(self):
side = []
def f(x, y, z):
side.append(None)
return 100*x + 10*y + z
f = jit(f)
assert f(1, 2, 3) == 123
assert len(side) == 1
assert f(1, 2, 3) == 123
assert len(side) == 1
assert f(1, 2, z=3) == 123
assert len(side) == 2 # actually recompiles from kwarg
assert f(1, 2, z=3) == 123
assert len(side) == 2 # but should still cache
f(1, 2, z=onp.zeros(3)) # doesn't crash
def test_jit_many_args_tuples(self):
@jit
def f(args_list):
return sum(args_list)
make_tuple = xla.make_tuple
counts = [0]
def make_tuple_and_count(*args, **kwargs):
counts[0] += 1
return make_tuple(*args, **kwargs)
try:
xla.make_tuple = make_tuple_and_count
ans = f(list(range(500)))
finally:
xla.make_tuple = make_tuple
expected = sum(range(500))
self.assertEqual(counts[0], 1) # formed a tuple on dispatch
self.assertEqual(ans, expected) # computed the correct result
def test_grad_of_jit(self):
side = []
@jit
def f(x):
side.append(None)
return x * x
assert grad(f)(1.0) == 2.0
assert len(side) == 1
assert grad(f)(2.0) == 4.0
assert len(side) == 1
def test_jit_of_grad(self):
side = []
@jit
def f(x):
side.append(None)
return x * x
g = jit(grad(f))
assert g(1.0) == 2.0
assert len(side) == 1
assert g(2.0) == 4.0
assert len(side) == 1
def test_bad_input(self):
def f(x):
return x
self.assertRaisesRegex(
TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
lambda: grad(f)("foo"))
self.assertRaisesRegex(
TypeError, ".* 'foo' of type <.*'str'> is not a valid JAX type",
lambda: jit(f)("foo"))
def test_grad_tuple_output(self):
jtu.check_raises(lambda: grad(lambda x: (x,x))(1.0), TypeError,
"Gradient only defined for scalar-output functions. ")
def test_grad_unit_output(self):
jtu.check_raises(lambda: grad(lambda x: ())(onp.zeros(3)), TypeError,
"Gradient only defined for scalar-output functions. ")
def test_grad_nonscalar_output(self):
jtu.check_raises(lambda: grad(lambda x: x)(onp.zeros(3)), TypeError,
"Gradient only defined for scalar-output functions. ")
def test_unwrapped_numpy(self):
def f(x):
return onp.exp(x)
jtu.check_raises(lambda: grad(f)(onp.zeros(3)), Exception,
"Tracer can't be used with raw numpy functions. "
"You might have\n import numpy as np\ninstead of\n"
" import jax.numpy as np")
def test_binop_mismatch(self):
def f(x, y):
return x + y
jtu.check_raises(
lambda: f(np.zeros(3), np.zeros(4)),
TypeError,
"add got incompatible shapes for broadcasting: (3,), (4,).")
jtu.check_raises(
lambda: grad(f)(onp.zeros(3), onp.zeros(4)),
TypeError,
"add got incompatible shapes for broadcasting: (3,), (4,).")
def test_dot_mismatch(self):
def f(x, y):
return np.dot(x, y)
self.assertRaisesRegex(
TypeError, "Incompatible shapes for dot: got \\(3L?,\\) and \\(4L?,\\).",
lambda: grad(f)(onp.zeros(3), onp.zeros(4)))
def test_switch_value_jit(self):
def f(x):
y = x > 0
if y:
return x
else:
return -x
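# Outside of jit, grad traces f with concrete values, so the Python `if y:` branch can be
# resolved; under jit the comparison yields an abstract tracer, and branching on it raises
# the concretization error checked below.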
assert grad(f)(1.0) == 1.0
assert grad(f)(-1.0) == -1.0
jtu.check_raises(lambda: jit(f)(1), TypeError, concretization_err_msg(bool))
def test_range_err(self):
def f(x, n):
for i in range(n):
x = x + i
return x
assert jit(f, static_argnums=(1,))(0, 5) == 10
self.assertRaisesRegex(
TypeError,
"('JaxprTracer' object cannot be interpreted as an integer"
"|Abstract value passed to .*)",
lambda: jit(f)(0, 5))
def test_casts(self):
for castfun in [float, complex, hex, oct] + list(six.integer_types):
f = lambda x: castfun(x)
self.assertRaisesRegex(
TypeError,
"('JaxprTracer' object cannot be interpreted as an integer"
"|Abstract value passed to .*)", lambda: jit(f)(0))
def test_unimplemented_interpreter_rules(self):
foo_p = Primitive('foo')
def foo(x):
return foo_p.bind(x)
jtu.check_raises(lambda: foo(1.0), NotImplementedError,
"Evaluation rule for 'foo' not implemented")
jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
"Abstract evaluation for 'foo' not implemented")
jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
"Forward-mode differentiation rule for 'foo' not implemented")
foo_p.def_abstract_eval(lambda x: x)
jtu.check_raises(lambda: jit(foo)(1.0), NotImplementedError,
"XLA translation rule for primitive 'foo' not found")
foo_p.def_impl(lambda x: x)
ad.defjvp(foo_p, lambda g, x: foo(g))
jtu.check_raises(lambda: grad(foo)(1.0), NotImplementedError,
"Reverse-mode differentiation rule for 'foo' not implemented")
def test_device_put_and_get(self):
x = onp.arange(12.).reshape((3, 4)).astype("float32")
dx = api.device_put(x)
self.assertIsInstance(dx, xla.DeviceArray)
x2 = api.device_get(dx)
self.assertIsInstance(x2, onp.ndarray)
assert onp.all(x == x2)
y = [x, (2 * x, 3 * x)]
dy = api.device_put(y)
y2 = api.device_get(dy)
self.assertIsInstance(y2, list)
self.assertIsInstance(y2[0], onp.ndarray)
assert onp.all(y2[0] == x)
self.assertIsInstance(y2[1], tuple)
self.assertIsInstance(y2[1][0], onp.ndarray)
assert onp.all(y2[1][0] == 2 * x)
self.assertIsInstance(y2[1][1], onp.ndarray)
assert onp.all(y2[1][1] == 3 * x)
def test_device_put_across_devices(self):
if xb.device_count() == 1:
raise unittest.SkipTest("this test requires multiple devices")
d1, d2 = xb.local_devices()[:2]
x = api.device_put(onp.array([1,2,3]), device=d1)
self.assertEqual(x.device_buffer.device(), d1)
y = api.device_put(x, device=d2)
self.assertEqual(y.device_buffer.device(), d2)
# Make sure these don't crash
api.device_put(x)
api.device_put(y)
@jtu.skip_on_devices("cpu")
def test_device_put_across_platforms(self):
default_device = jax.devices()[0]
cpu_device = jax.devices("cpu")[0]
onp_arr = onp.array([1,2,3])
scalar = 1
device_arr = np.array([1,2,3])
assert device_arr.device_buffer.device() is default_device
for val in [onp_arr, device_arr, scalar]:
x = api.device_put(val, device=cpu_device)
self.assertEqual(x.device_buffer.device(), cpu_device)
y = api.device_put(x)
self.assertEqual(y.device_buffer.device(), default_device)
@jtu.skip_on_devices("tpu")
def test_jacobian(self):
R = onp.random.RandomState(0).randn
A = R(4, 3)
x = R(3)
f = lambda x: np.dot(A, x)
assert onp.allclose(jacfwd(f)(x), A)
assert onp.allclose(jacrev(f)(x), A)
f = lambda x: np.tanh(np.dot(A, x))
assert onp.allclose(jacfwd(f)(x), jacrev(f)(x))
@jtu.skip_on_devices("tpu")
def test_hessian(self):
R = onp.random.RandomState(0).randn
A = R(4, 4)
x = R(4)
f = lambda x: np.dot(x, np.dot(A, x))
assert onp.allclose(hessian(f)(x), A + A.T)
def test_std_basis(self):
basis = api._std_basis(np.zeros(3))
assert getattr(basis, "shape", None) == (3, 3)
assert onp.allclose(basis, onp.eye(3))
basis = api._std_basis(np.zeros((3, 3)))
assert getattr(basis, "shape", None) == (9, 3, 3)
assert onp.allclose(basis, onp.eye(9).reshape(9, 3, 3))
basis = api._std_basis([0., (np.zeros(3), np.zeros((3, 4)))])
assert isinstance(basis, list) and len(basis) == 2
assert getattr(basis[0], "shape", None) == (16,)
assert isinstance(basis[1], tuple) and len(basis[1]) == 2
assert getattr(basis[1][0], "shape", None) == (16, 3)
assert getattr(basis[1][1], "shape", None) == (16, 3, 4)
@jtu.skip_on_devices("tpu")
def test_jacobian_on_pytrees(self):
for jacfun in [jacfwd, jacrev]:
ans = jacfun(lambda x, y: (x, y))(0., 1.)
expected = (1., 0.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jacfun(lambda x, y: (x, y), 1)(0., 1.)
expected = (0., 1.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jacfun(lambda x, y: (x, y), (0, 1))(0., 1.)
expected = ((1., 0.),
(0., 1.),)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = jacfun(lambda x: x[:2])((1., 2., 3.))
expected = ((1., 0., 0.),
(0., 1., 0.))
self.assertAllClose(ans, expected, check_dtypes=False)
R = onp.random.RandomState(0).randn
x = R(2)
y = R(3)
ans = jacfun(lambda x, y: {'x': x, 'xy': np.outer(x, y)})(x, y)
expected = {'x': onp.eye(2),
'xy': onp.kron(onp.eye(2), y[:, None]).reshape(2, 3, 2)}
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("tpu")
def test_hessian_on_pytrees(self):
ans = hessian(lambda x: np.array(x)**2)((1., 2.))
expected = ((onp.array([2., 0.]), onp.array([0., 0.])),
(onp.array([0., 0.]), onp.array([0., 2.])))
self.assertAllClose(ans, expected, check_dtypes=False)
@jtu.skip_on_devices("tpu")
def test_issue1372(self):
def quad(x):
return np.dot(x, x)
def f(x, u):
return quad(x) + quad(u)
x, u = np.ones(5), np.ones(2)
rev = jacrev
fwd = jacfwd
# Diagonal entries
self.assertEqual(rev(rev(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(rev(fwd(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(fwd(rev(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(fwd(fwd(f, 0), 0)(x, u).shape, (5, 5))
self.assertEqual(rev(rev(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(rev(fwd(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(fwd(rev(f, 1), 1)(x, u).shape, (2, 2))
self.assertEqual(fwd(fwd(f, 1), 1)(x, u).shape, (2, 2))
# Off-diagonal entries by reverse-mode on the outside
self.assertEqual(rev(rev(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(rev(fwd(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(rev(rev(f, 0), 1)(x, u).shape, (5, 2))
self.assertEqual(rev(fwd(f, 0), 1)(x, u).shape, (5, 2))
# Off-diagonal entries by forward-mode on the outside
self.assertEqual(fwd(rev(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(fwd(fwd(f, 1), 0)(x, u).shape, (2, 5))
self.assertEqual(fwd(rev(f, 0), 1)(x, u).shape, (5, 2))
self.assertEqual(fwd(fwd(f, 0), 1)(x, u).shape, (5, 2))
def test_disable_jit(self):
effects = []
@api.jit
def f(x):
effects.append(1)
return x
with api.disable_jit():
f(2)
f(2)
assert len(effects) == 2
f(2)
f(2)
assert len(effects) == 3
def test_large_device_constant(self):
ans = jit(lambda x: 2 * x)(np.ones(int(2e6))) # doesn't crash
self.assertAllClose(ans, onp.ones(int(2e6)) * 2., check_dtypes=False)
def test_grad_and_aux_basic(self):
g, aux = grad(lambda x: (x**3, [x**2]), has_aux=True)(3.)
self.assertAllClose(g, grad(lambda x: x**3)(3.), check_dtypes=True)
self.assertAllClose(aux, [9.], check_dtypes=False)
def test_grad_and_aux_nested(self):
def f(x):
g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0]
f2 = lambda x: x**3
self.assertEqual(grad(f)(4.), grad(f2)(4.))
self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
def f(x):
g, aux = grad(lambda x: (x**3, [x**3]), has_aux=True)(x)
return aux[0] * np.sin(x)
f2 = lambda x: x**3 * np.sin(x)
self.assertEqual(grad(f)(4.), grad(f2)(4.))
self.assertEqual(jit(grad(f))(4.), grad(f2)(4.))
self.assertEqual(jit(grad(jit(f)))(4.), grad(f2)(4.))
def test_grad_and_aux_constant(self):
g, aux = grad(lambda x: (x**3, [4.]), has_aux=True)(4.)
self.assertEqual(g, grad(lambda x: x**3)(4.))
self.assertEqual(aux, [4.])
g, aux = grad(lambda x: (x**3, [x**2, 4.]), has_aux=True)(4.)
self.assertEqual(g, grad(lambda x: x**3)(4.))
self.assertEqual(aux, [4.**2, 4.])
def test_jvp_mismatched_arguments(self):
self.assertRaisesRegex(
TypeError,
("primal and tangent arguments to jax.jvp must have the same tree "
"structure"),
lambda: api.jvp(lambda x, y: x * y, (onp.float32(2),), ()))
# primals and tangents must both be tuples or both be lists
self.assertRaisesRegex(
TypeError,
("primal and tangent arguments to jax.jvp must have the same tree "
"structure"),
lambda: api.jvp(lambda x, y: x * y, (onp.float32(2),), [onp.float32(2)]))
self.assertRaisesRegex(
TypeError,
"primal and tangent arguments to jax.jvp must have equal types",
lambda: api.jvp(lambda x: -x, (onp.float16(2),), (onp.float32(4),)))
def test_jvp_non_tuple_arguments(self):
def f(x, y): return x + y
self.assertRaisesRegex(
TypeError,
"primal and tangent arguments to jax.jvp must be tuples or lists; found float and tuple.",
lambda: api.jvp(f, 0., (1.,)))
self.assertRaisesRegex(
TypeError,
"primal and tangent arguments to jax.jvp must be tuples or lists; found tuple and ndarray.",
lambda: api.jvp(f, (0.,), onp.array([1., 2.])))
def test_vjp_mismatched_arguments(self):
_, pullback = api.vjp(lambda x, y: x * y, onp.float32(3), onp.float32(4))
self.assertRaisesRegex(
TypeError,
"Tree structure of cotangent input.*does not match",
lambda: pullback((onp.float32(7), onp.float32(100))))
self.assertRaisesRegex(
TypeError,
"Type of cotangent input to vjp pullback.*does not match type",
lambda: pullback((onp.float16(42))))
def test_jarrett_jvps(self):
def f1(x):
return np.sin(np.sin(np.sin(x)))
f2 = api.jarrett(f1)
for x in [3., onp.array([2., 3., 4.])]:
self.assertAllClose(f1(x), f2(x), check_dtypes=True)
_, f1_vjp = api.vjp(f1, x)
_, f2_vjp = api.vjp(f2, x)
self.assertAllClose(f1_vjp(x), f2_vjp(x), check_dtypes=True)
# TODO(mattjj): test that constants/literals are set up properly
# jaxpr2 = api.make_jaxpr(f2_vjp)(x)
# assert len(jaxpr2.constvars) == 1
def test_jarrett_jvps2(self):
def f1(x, y):
return np.sin(x) * np.cos(y) * np.sin(x) * np.cos(y)
f2 = api.jarrett(f1)
# TODO(mattjj): doesn't work for (3., onp.array([4., 5.]))
for x, y in [(3., 4.), (onp.array([5., 6.]), onp.array([7., 8.]))]:
self.assertAllClose(f1(x, y), f2(x, y), check_dtypes=True)
_, f1_vjp = api.vjp(f1, x, y)
_, f2_vjp = api.vjp(f2, x, y)
self.assertAllClose(f1_vjp(y), f2_vjp(y), check_dtypes=True)
# TODO(mattjj): test that constants/literals are set up properly
# jaxpr2 = api.make_jaxpr(f2_vjp)(y)
# assert len(jaxpr2.constvars) == 2
def test_jvp_jit_cached(self):
"""Bug in caching in presence of JVP and JIT."""
def func(x):
def inner(y):
return y * x
# Must have two calls to the inner jit (the second one hits the cache)
res1 = api.jit(inner)(4.)
res2 = api.jit(inner)(5.)
return res1 + res2
self.assertAllClose((45., 9.), api.jvp(func, (5.,), (1.,)), check_dtypes=True)
def test_complex_grad_raises_error(self):
self.assertRaises(TypeError, lambda: grad(lambda x: np.sin(x))(1 + 2j))
def test_holomorphic_grad(self):
out = grad(lambda x: np.sin(x), holomorphic=True)(1 + 2j)
expected = 2.0327230070196656 - 3.0518977991518j
self.assertAllClose(out, expected, check_dtypes=False)
def test_nonholomorphic_grad(self):
zs = 0.5j * onp.arange(5) + onp.arange(5)
def f(z):
return np.sum(np.cos(np.abs(z)))
ans = grad(f)(zs)
expected = onp.array([ 0. +0.j,
-0.80430663+0.40215331j,
-0.70368982+0.35184491j,
0.1886467 -0.09432335j,
0.86873727-0.43436864j])
self.assertAllClose(ans, expected, check_dtypes=False,
atol=jtu.default_gradient_tolerance,
rtol=jtu.default_gradient_tolerance)
def test_complex_output_jacrev_raises_error(self):
self.assertRaises(TypeError, lambda: jacrev(lambda x: np.sin(x))(1 + 2j))
def test_nonholomorphic_jacrev(self):
# code based on https://github.com/google/jax/issues/603
zs = 0.5j * onp.arange(5) + onp.arange(5)
def f(z):
return np.cos(np.linalg.norm(2 * z))
ans = jacrev(f)(zs)
expected = grad(f)(zs)
self.assertAllClose(ans, expected, check_dtypes=True)
def test_complex_input_jacfwd_raises_error(self):
self.assertRaises(TypeError, lambda: jacfwd(lambda x: np.sin(x))(1 + 2j))
def test_defvjp_all(self):
foo_p = Primitive('foo')
def foo(x): return 2. * foo_p.bind(x)
ad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (4 * g * np.sin(x),)))
val_ans, grad_ans = api.value_and_grad(foo)(3.)
self.assertAllClose(val_ans, 2 * 3.**2, check_dtypes=False)
self.assertAllClose(grad_ans, 4 * 2 * onp.sin(3.), check_dtypes=False)
def test_defvjp_all_const(self):
foo_p = Primitive('foo')
def foo(x): return foo_p.bind(x)
ad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (12.,)))
val_ans, grad_ans = api.value_and_grad(foo)(3.)
self.assertAllClose(val_ans, 9., check_dtypes=False)
self.assertAllClose(grad_ans, 12., check_dtypes=True)
def test_defvjp_all_higher_order_revmode(self):
foo_p = Primitive('foo')
def foo(x): return 2. * foo_p.bind(x)
ad.defvjp_all(foo_p, lambda x: (x**2, lambda g: (g * x ** 2,)))
ans = api.grad(api.grad(foo))(3.)
self.assertAllClose(ans, 2 * 2 * 3., check_dtypes=False)
def test_defvjp_all_multiple_arguments(self):
# also tests passing in symbolic zero tangents, because in one case we
# differentiate with respect to only the first argument
foo_p = Primitive('foo')
def foo(x, y): return foo_p.bind(x, y)
def vjpfun(x, y):
out = x**2 + y**3
vjp = lambda g: (g + x + y, g * x * 9.)
return out, vjp
ad.defvjp_all(foo_p, vjpfun)
val_ans, grad_ans = api.value_and_grad(foo)(3., 4.)
self.assertAllClose(val_ans, 3.**2 + 4.**3, check_dtypes=False)
self.assertAllClose(grad_ans, 1. + 3. + 4., check_dtypes=False)
ans = api.grad(foo, (0, 1))(3., 4.)
self.assertAllClose(ans, (1. + 3. + 4., 1. * 3. * 9.), check_dtypes=False)
def test_defvjp_all_custom_transforms(self):
@api.custom_transforms
def foo(x):
return np.sin(x)
api.defvjp_all(foo, lambda x: (np.sin(x), lambda g: (g * x,)))
val_ans, grad_ans = api.value_and_grad(foo)(3.)
self.assertAllClose(val_ans, onp.sin(3.), check_dtypes=False)
self.assertAllClose(grad_ans, 3., check_dtypes=False)
# TODO(mattjj): add defvjp_all test with pytree arguments
def test_defvjp(self):
@api.custom_transforms
def foo(x, y):
return np.sin(x * y)
api.defvjp(foo, None, lambda g, _, x, y: g * x * y)
val_ans, grad_ans = api.value_and_grad(foo)(3., 4.)
self.assertAllClose(val_ans, onp.sin(3. * 4.), check_dtypes=False)
self.assertAllClose(grad_ans, 0., check_dtypes=False)
ans_0, ans_1 = api.grad(foo, (0, 1))(3., 4.)
self.assertAllClose(ans_0, 0., check_dtypes=False)
self.assertAllClose(ans_1, 3. * 4., check_dtypes=False)
def test_defvjp_higher_order(self):
@api.custom_transforms
def foo(x):
return np.sin(2. * x)
api.defvjp(foo, lambda g, _, x: g * np.cos(x))
ans = api.grad(api.grad(foo))(2.)
expected = api.grad(api.grad(np.sin))(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_defvjp_use_ans(self):
@api.custom_transforms
def foo(x, y):
return np.sin(x * y)
api.defvjp(foo, None, lambda g, ans, x, y: g * x * y + np.cos(ans))
val_ans, grad_ans = api.value_and_grad(foo, 1)(3., 4.)
self.assertAllClose(val_ans, onp.sin(3. * 4.), check_dtypes=False)
self.assertAllClose(grad_ans, 3. * 4. + onp.cos(onp.sin(3. * 4)),
check_dtypes=False)
# TODO
# def test_defjvp_closure_error(self):
# def foo(x):
# @api.custom_transforms
# def bar(y):
# return x * y
# api.defjvp(bar, lambda y_dot, ans, y: x * y)
# return bar(x)
# jtu.check_raises(
# lambda: api.jvp(foo, (1.,), (1.,)), ValueError,
# "Detected differentiation with respect to closed-over values with "
# "custom JVP rule, which isn't supported.")
# TODO
# def test_defvjp_closure_error(self):
# def foo(x):
# @api.custom_transforms
# def bar(y):
# return x * y
# api.defvjp(bar, lambda g, ans, y: x * y)
# return bar(x)
# jtu.check_raises(
# lambda: grad(foo)(1.,), ValueError,
# "Detected differentiation w.r.t. variables from outside "
# "the scope of <jax.custom_transforms function bar>, but defvjp and "
# "defvjp_all only support differentiation w.r.t. positional arguments.")
def test_custom_transforms_eval_with_pytrees(self):
@api.custom_transforms
def f(x):
a, b = x[0], x[1]
return {'hi': 2 * a, 'bye': 2 * b}
ans = f((1, 2))
self.assertEqual(ans, {'hi': 2 * 1, 'bye': 2 * 2})
def test_custom_transforms_jit_with_pytrees(self):
@api.custom_transforms
def f(x):
a, b = x[0], x[1]
return {'hi': 2 * a, 'bye': 2 * b}
ans = jit(f)((1, 2))
self.assertEqual(ans, {'hi': 2 * 1, 'bye': 2 * 2})
def test_custom_transforms_jit_with_pytrees_consts(self):
# The purpose of this test is to exercise the custom_transforms default
# translation rule in how it deals with constants that are too large to be
# treated as literals (at the time of writing).
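# Here the closed-over array `z` is presumably that constant: it is staged into the jitted
# computation as an array constant rather than inlined as a scalar literal.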
z = onp.arange(10.)
@api.custom_transforms
def f(x):
a, b = x[0], x[1]
return {'hi': 2 * a, 'bye': z * b}
ans = jit(f)((1, 2))
self.assertAllClose(ans, {'hi': 2 * 1, 'bye': z * 2}, check_dtypes=False)
def test_custom_transforms_jvp_with_pytrees(self):
@api.custom_transforms
def f(x):
a, b = x[0], x[1]
return {'hi': 2 * a, 'bye': 2 * b}
ans, out_tangent = api.jvp(f, ((1, 2),), ((3, 4),))
self.assertEqual(ans, {'hi': 2 * 1, 'bye': 2 * 2})
self.assertEqual(out_tangent, {'hi': 2 * 3, 'bye': 2 * 4})
def test_custom_transforms_vmap_with_pytrees(self):
@api.custom_transforms
def f(x):
a, b = x[0], x[1]
return {'hi': 2 * a, 'bye': 2 * b}
ans = api.vmap(f)((onp.arange(3), onp.ones((3, 2))))
expected = {'hi': 2 * onp.arange(3), 'bye': 2 * onp.ones((3, 2))}
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_transforms_jvp_with_closure(self):
def f(x):
@api.custom_transforms
def g(y):
return x * y
return g(x)
ans = api.grad(f)(1.)
expected = 2.
self.assertAllClose(ans, expected, check_dtypes=False)
def test_custom_gradient(self):
@api.custom_gradient
def f(x):
return x ** 2, lambda g: (g * x,)
self.assertAllClose(f(3.), 9., check_dtypes=False)
self.assertAllClose(api.grad(f)(3.), 3., check_dtypes=False)
def test_legacy_devicearray_repr(self):
dx = device_put(3.)
str(dx.item()) # doesn't crash
def test_devicearray_repr(self):
x = device_put(np.zeros(3))
self.assertIsInstance(x, xla.DeviceArray)
repr(x) # doesn't crash
x = device_put(np.ones(3) + 1j * np.ones(3))
self.assertIsInstance(x, xla.DeviceArray)
repr(x) # doesn't crash
def test_devicearray_delete(self):
x = device_put(1.)
x.delete()
self.assertRaisesRegex(ValueError, "DeviceValue has been deleted.",
lambda: repr(x))
def test_devicearray_block_until_ready(self):
x = device_put(1.)
y = x.block_until_ready()
# Tests mostly that block_until_ready() does not produce an error.
self.assertTrue(y is x)
def test_namedtuple_transparency(self):
# See https://github.com/google/jax/issues/446
Point = collections.namedtuple("Point", ["x", "y"])
def f(pt):
return np.sqrt(pt.x ** 2 + pt.y ** 2)
pt = Point(1., 2.)
f(pt) # doesn't crash
g = api.grad(f)(pt)
self.assertIsInstance(g, Point)
f_jit = api.jit(f)
self.assertAllClose(f(pt), f_jit(pt), check_dtypes=False)
def test_namedtuple_subclass_transparency(self):
# See https://github.com/google/jax/issues/806
Point = collections.namedtuple("Point", ["x", "y"])
class ZeroPoint(Point):
def is_zero(self):
return (self.x == 0) and (self.y == 0)
pt = ZeroPoint(0., 0.)
def f(pt):
return 0. if pt.is_zero() else np.sqrt(pt.x ** 2 + pt.y ** 2)
f(pt) # doesn't crash
g = api.grad(f)(pt)
self.assertIsInstance(g, ZeroPoint)
def test_eval_shape(self):
def fun(x, y):
return np.tanh(np.dot(x, y) + 3.)
x = np.ones((2, 3))
y = np.ones((3, 4))
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2, 4))
def test_eval_shape_constants(self):
def fun():
x = np.ones((2, 3))
y = np.ones((3, 4))
return np.tanh(np.dot(x, y) + 3.)
out_shape = api.eval_shape(fun)
self.assertEqual(out_shape.shape, (2, 4))
def test_eval_shape_tuple_unpacking(self):
def fun(x, y):
a, b = x
return a + b + y
x = (np.ones(2), np.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2,))
def test_eval_shape_tuple_itemgetting(self):
def fun(x, y):
return x[0] + x[1] + y
x = (np.ones(2), np.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
self.assertEqual(out_shape.shape, (2,))
def test_eval_shape_output_dict(self):
def fun(x, y):
return {'hi': x[0] + x[1] + y}
x = (np.ones(2), np.ones(2))
y = 3.
out_shape = api.eval_shape(fun, x, y)
out_shape = tree_util.tree_map(onp.shape, out_shape)
self.assertEqual(out_shape, {'hi': (2,)})
def test_eval_shape_shape_error(self):
def fun(x, y):
return np.tanh(np.dot(x, y) + 3.)
x = np.ones((3, 3))
y = np.ones((4, 4))
self.assertRaises(TypeError, lambda: api.eval_shape(fun, x, y))
def test_eval_shape_duck_typing(self):
def fun(A, b, x):
return np.dot(A, x) + b
class MyArgArray(object):
def __init__(self, shape, dtype):
self.shape = shape
self.dtype = dtype
A = MyArgArray((3, 4), np.float32)
b = MyArgArray((5,), np.float32)
x = MyArgArray((4, 5), np.float32)
out_shape = api.eval_shape(fun, A, b, x)
self.assertEqual(out_shape.shape, (3, 5))
def test_issue_871(self):
T = np.array([[1., 2.], [3., 4.], [5., 6.]])
x = np.array([1, 2, 3])
y, f_jvp = api.linearize(np.sum, x)
jtu.check_raises(lambda: f_jvp(T), ValueError,
("linearized function called on tangent values "
"inconsistent with the original primal values."))
y, f_jvp = api.linearize(api.jit(np.sum), x)
jtu.check_raises(lambda: f_jvp(T), ValueError,
("linearized function called on tangent values "
"inconsistent with the original primal values."))
def test_partial_eval_lower(self):
# this is a simplified model of a bug that arose when we first used @jit in
# a jvp rule. it's in this file because we want to use make_jaxpr.
# NOTE(mattjj): I no longer understand what this was meant to test. My guess
# is it was related to staging out the broadcast into a jaxpr to be
# transposed, but after #1749 that's no longer a problem. After changing
# make_jaxpr (and jit) to stage out sub-calls fully, this test started to
# fail; I left it in as skipped because deleting tests feels wrong.
raise unittest.SkipTest("obsolete test")
@api.jit
def f(a, b, c):
a = lax.broadcast(a, (2,))
return lax.select(a, b, c)
a = onp.ones((3, 3), dtype=onp.bool_)
b = onp.ones((2, 3, 3))
c = onp.ones((2, 3, 3))
jaxpr = api.make_jaxpr(lambda b, c: f(a, b, c))(b, c)
subjaxpr = next(eqn.bound_subjaxprs[0][0] for eqn in jaxpr.jaxpr.eqns
if eqn.bound_subjaxprs)
self.assertEqual(len(subjaxpr.eqns), 1)
def test_grad_of_int_errors(self):
dfn = grad(lambda x: x ** 2)
self.assertRaisesRegex(
TypeError,
"Primal inputs to reverse-mode differentiation must be of float or "
"complex type, got type int..", lambda: dfn(3))
def test_xla_computation(self):
# these tests basically check the examples in the xla_computation docstring
def h(x):
return np.sin(np.cos(x))
c = api.xla_computation(h)(2.)
self.assertIn('cosine', c.GetHloText())
self.assertIn('sine', c.GetHloText())
def f(x):
return x - lax.psum(x, 'i')
axis_env = [('i', 4)]
c = api.xla_computation(f, axis_env=axis_env)(2)
self.assertIn('all-reduce', c.GetHloText())
self.assertIn('replica_groups={{0,1,2,3}}', c.GetHloText())
def g(x):
rowsum = lax.psum(x, 'i')
colsum = lax.psum(x, 'j')
allsum = lax.psum(x, ('i', 'j'))
return rowsum, colsum, allsum
axis_env = [('i', 4), ('j', 2)]
c = api.xla_computation(g, axis_env=axis_env)(5.)
self.assertIn('all-reduce', c.GetHloText())
self.assertIn('replica_groups={{0,2,4,6},{1,3,5,7}}', c.GetHloText())
self.assertIn('replica_groups={{0,1},{2,3},{4,5},{6,7}}', c.GetHloText())
self.assertIn('replica_groups={{0,1,2,3,4,5,6,7}}', c.GetHloText())
def test_xla_computation_args(self):
def foo(x, y, z):
return x + y + z
c = api.xla_computation(foo)(1., 2., 3.)
self.assertEqual(len(c.GetProgramShape().parameter_shapes()), 3)
c = api.xla_computation(foo, tuple_args=True)(1., 2., 3.)
param_shapes = c.GetProgramShape().parameter_shapes()
self.assertEqual(len(param_shapes), 1)
self.assertEqual(param_shapes[0].xla_element_type(),
xb.xla_client.PrimitiveType.TUPLE)
def test_staging_out_multi_replica(self):
def f(x):
return api.pmap(np.mean)(x)
xla_comp = api.xla_computation(f)
xla_comp(np.arange(8)).GetHloText() # doesn't crash
def test_xla_computation_instantiate_constant_outputs(self):
def f():
return np.zeros((3, 4))
xla_comp = api.xla_computation(f, instantiate_const_outputs=True)()
out_shape, = xla_comp.GetReturnValueShape().tuple_shapes()
self.assertEqual(out_shape.dimensions(), (3, 4))
def test_jit_device(self):
device = xb.devices()[-1]
x = api.jit(lambda x: x, device=device)(3.)
self.assertIsInstance(x, xla.DeviceArray)
self.assertEqual(x.device_buffer.device(), device)
def test_jit_of_noncallable(self):
self.assertRaisesRegex(TypeError, "Expected a callable value.*",
lambda: api.jit(3))
def test_issue_1062(self):
# code from https://github.com/google/jax/issues/1062 @shoyer
# this tests, among other things, whether ShardedDeviceTuple constants work
device_count = xb.device_count()
@jit
def multi_step(state, count):
return lax.fori_loop(0, count, lambda i, s: s, state)
@jit
def multi_step_pmap(state, count=2):
@partial(api.pmap, axis_name='x')
def pmapped_multi_step(state):
return multi_step(state, count)
return pmapped_multi_step(state)
u = np.ones((device_count, 100))
u_final = multi_step_pmap(u) # doesn't crash
@unittest.skipIf(six.PY2, "Test requires Python 3")
def test_concurrent_device_get_and_put(self):
def f(x):
for _ in range(100):
y = jax.device_put(x)
x = jax.device_get(y)
return x
xs = [onp.random.randn(i) for i in range(10)]
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(partial(f, x)) for x in xs]
ys = [f.result() for f in futures]
for x, y in zip(xs, ys):
self.assertAllClose(x, y, check_dtypes=True)
@unittest.skipIf(six.PY2, "Test requires Python 3")
def test_concurrent_jit(self):
@jit
def f(x):
return x + x - 3.
xs = [onp.random.randn(i) for i in range(10)]
with concurrent.futures.ThreadPoolExecutor() as executor:
futures = [executor.submit(partial(f, x)) for x in xs]
ys = [f.result() for f in futures]
for x, y in zip(xs, ys):
self.assertAllClose(x * 2 - 3., y, check_dtypes=True)
def test_dtype_warning(self):
# cf. issue #1230
if FLAGS.jax_enable_x64:
return # test only applies when x64 is disabled
def check_warning(warn, nowarn):
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
nowarn() # get rid of extra startup warning
prev_len = len(w)
nowarn()
assert len(w) == prev_len
warn()
assert len(w) > 0
msg = str(w[-1].message)
expected_prefix = "Explicitly requested dtype "
self.assertEqual(expected_prefix, msg[:len(expected_prefix)])
prev_len = len(w)
nowarn()
assert len(w) == prev_len
check_warning(lambda: np.array([1, 2, 3], dtype="float64"),
lambda: np.array([1, 2, 3], dtype="float32"),)
check_warning(lambda: np.ones(3, dtype=onp.float64),
lambda: np.ones(3))
check_warning(lambda: np.ones_like(3, dtype=onp.int64),
lambda: np.ones_like(3, dtype=onp.int32))
check_warning(lambda: np.zeros(3, dtype="int64"),
lambda: np.zeros(3, dtype="int32"))
check_warning(lambda: np.zeros_like(3, dtype="float64"),
lambda: np.zeros_like(3, dtype="float32"))
check_warning(lambda: np.full((2, 3), 1, dtype="int64"),
lambda: np.full((2, 3), 1))
check_warning(lambda: np.ones(3).astype("float64"),
lambda: np.ones(3).astype("float32"))
check_warning(lambda: np.eye(3, dtype=onp.float64),
lambda: np.eye(3))
check_warning(lambda: np.arange(3, dtype=onp.float64),
lambda: np.arange(3, dtype=onp.float32))
check_warning(lambda: np.linspace(0, 3, dtype=onp.float64),
lambda: np.linspace(0, 3, dtype=onp.float32))
check_warning(lambda: np.tri(2, dtype="float64"),
lambda: np.tri(2, dtype="float32"))
def test_custom_vjp_zeros(self):
@api.custom_transforms
def f(x, y):
return 2 * x, 3 * y
def f_vjp(x, y):
return (2 * x, 3 * y), lambda ts: (4 * ts[0], 5 * ts[1])
api.defvjp_all(f, f_vjp)
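# Only the first output of f is differentiated below, so the cotangent for the second
# output is a (symbolic) zero; the point of this test is that the custom VJP still runs
# in that case.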
api.grad(lambda x, y: f(x, y)[0])(1., 2.) # doesn't crash
def test_custom_transforms_vjp_nones(self):
# issue raised by jsnoek@ and jumper@
@jax.custom_transforms
def solve(a, b):
return np.dot(np.linalg.inv(a), b)
# print(solve(a, b))
def solve_vjp(a, b):
x = solve(a, b)
def vjp(x_tangent):
dx = np.dot(solve(a, x_tangent), x.T)
out = (dx, b * 0.)
return out
return x, vjp
jax.defvjp_all(solve, solve_vjp)
gf = grad(lambda a,b: np.sum(solve(a, b)))
n = 3
a_in = np.linspace(0, 1, n)[:, None]
a = np.dot(a_in, a_in.T) + np.eye(n) * 0.1
real_x = onp.random.RandomState(0).randn(n)
b = np.dot(a + np.eye(a.shape[0]), real_x)
print(gf(a, b)) # doesn't crash
def test_vmap_in_axes_tree_prefix_error(self):
# https://github.com/google/jax/issues/795
self.assertRaisesRegex(
ValueError,
"axes specification must be a tree prefix of the corresponding "
r"value, got specification \(0, 0\) for value "
r"PyTreeDef\(tuple, \[\*\]\).",
lambda: api.vmap(lambda x: x, in_axes=(0, 0))(np.ones(3))
)
def test_vmap_unbatched_object_passthrough_issue_183(self):
# https://github.com/google/jax/issues/183
fun = lambda f, x: f(x)
vfun = api.vmap(fun, (None, 0))
ans = vfun(lambda x: x + 1, np.arange(3))
self.assertAllClose(ans, onp.arange(1, 4), check_dtypes=False)
def test_vmap_mismatched_axis_sizes_error_message_issue_705(self):
# https://github.com/google/jax/issues/705
def h(a, b):
return np.sum(a) + np.sum(b)
X = onp.random.randn(10, 4)
U = onp.random.randn(10, 2)
self.assertRaisesRegex(
ValueError,
"vmap got inconsistent sizes for array axes to be mapped:\n"
r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
"so\n"
"arg 0 has an axis to be mapped of size 10\n"
"arg 1 has an axis to be mapped of size 2",
lambda: api.vmap(h, in_axes=(0, 1))(X, U))
self.assertRaisesRegex(
ValueError,
"vmap got inconsistent sizes for array axes to be mapped:\n"
r"arg 0 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
r"arg 1 has shape \(10, 2\) and axis 1 is to be mapped" "\n"
r"arg 2 has shape \(10, 4\) and axis 0 is to be mapped" "\n"
"so\n"
"args 0, 2 have axes to be mapped of size 10\n"
"arg 1 has an axis to be mapped of size 2",
lambda: api.vmap(lambda x, y, z: None, in_axes=(0, 1, 0))(X, U, X))
self.assertRaisesRegex(
ValueError,
"vmap got inconsistent sizes for array axes to be mapped:\n"
"the tree of axis sizes is:\n"
r"\(10, \[2, 2\]\)",
lambda: api.vmap(h, in_axes=(0, 1))(X, [U, U]))
def test_vmap_structured_in_axes(self):
A, B, C, D = 2, 3, 4, 5
K = 6 # batch size
x = onp.ones((K, A, B)) # batch axis in different locations
y = onp.ones((B, K, C))
z = onp.ones((C, D, K))
def foo(tree_arg):
x, (y, z) = tree_arg
return np.dot(x, np.dot(y, z))
tree = (x, (y, z))
vfoo = api.vmap(foo, in_axes=((0, (1, 2)),))
self.assertEqual(vfoo(tree).shape, (6, 2, 5))
Point = collections.namedtuple("Point", ["x", "y"])
tree = (x, Point(y, z))
vfoo = api.vmap(foo, in_axes=((0, Point(1, 2)),))
self.assertEqual(vfoo(tree).shape, (6, 2, 5))
def foo(tree_arg):
x, dct = tree_arg
y, z = dct['a'], dct['b']
return np.dot(x, np.dot(y, z))
tree = (x, {'a':y, 'b':z})
vfoo = api.vmap(foo, in_axes=((0, {'a':1, 'b':2}),))
self.assertEqual(vfoo(tree).shape, (6, 2, 5))
tree = (x, collections.OrderedDict([('a', y), ('b', z)]))
vfoo = api.vmap(
foo, in_axes=((0, collections.OrderedDict([('a', 1), ('b', 2)])),))
self.assertEqual(vfoo(tree).shape, (6, 2, 5))
def test_jit_reference_dropping(self):
x = onp.ones(10)
f = (lambda x: lambda: x)(x) # reference to x in f's closure
g = jit(f)
x = weakref.ref(x) # no more strong ref to x in this scope
assert x() is not None # x is still around
f() # f runs
g() # g runs
g() # g runs a second time
del f # delete the raw callable
assert x() is not None # x is still around
g() # g still runs
del g # no more references to x
assert x() is None # x is gone
def test_jit_global_cache(self):
def f(x):
assert python_should_be_executing
return x
python_should_be_executing = True
api.jit(f)(2)
python_should_be_executing = False
api.jit(f)(3)
def test_jit_shallow_copy(self):
def f(x):
return copy.copy(x)
api.jit(f)(1)
def test_jit_deep_copy(self):
def f(x):
return copy.deepcopy(x)
api.jit(f)(1)
def test_pmap_global_cache(self):
def f(x):
assert python_should_be_executing
return x
x = onp.ones(1)
python_should_be_executing = True
api.pmap(f)(x)
python_should_be_executing = False
api.pmap(f)(x)
python_should_be_executing = True
api.pmap(f, 'i')(x)
python_should_be_executing = False
api.pmap(f, 'i')(x)
def test_repr(self):
rep = repr(np.ones(()) + 1.)
self.assertStartsWith(rep, 'DeviceArray')
def test_grad_without_enough_args_error_message(self):
# https://github.com/google/jax/issues/1696
def f(x, y): return x + y
df = api.grad(f, argnums=0)
self.assertRaisesRegex(
TypeError,
"differentiating with respect to argnums=0 requires at least 1 "
"positional arguments to be passed by the caller, but got only 0 "
"positional arguments.",
lambda: partial(df, x=0.)(y=1.))
def test_grad_of_jit_compilation_caching(self):
if not hasattr(self, "assertLogs"):
raise unittest.SkipTest("test requires assertLogs (python 3)")
lax.add(1, 2) # make sure some initial warnings are already printed
sin = api.jit(np.sin)
prev_level = logging.get_verbosity()
try:
logging.set_verbosity('DEBUG')
with self.assertLogs(level=logging.DEBUG) as l:
ans1 = api.grad(sin)(2.)
ans2 = api.grad(sin)(3.)
finally:
logging.set_verbosity(prev_level)
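# Exactly two compilations should have been logged across both grad calls; the second
# call, at a different point, hits the compilation cache instead of recompiling.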
self.assertLen(l.output, 2)
self.assertAllClose(ans1, onp.cos(2.), check_dtypes=False)
self.assertAllClose(ans2, onp.cos(3.), check_dtypes=False)
def test_remat_basic(self):
@api.remat
def g(x):
return lax.sin(lax.sin(x)), 3.
def f(x):
x, _ = g(x)
return x
ans = f(2.)
expected = onp.sin(onp.sin(2.))
self.assertAllClose(ans, expected, check_dtypes=False)
ans, f_lin = api.linearize(f, 2.)
expected = onp.sin(onp.sin(2.))
self.assertAllClose(ans, expected, check_dtypes=False)
ans = f_lin(3.)
expected = onp.cos(onp.sin(2.)) * onp.cos(2.) * 3.
self.assertAllClose(ans, expected, check_dtypes=False)
sin_calls = []
cos_calls = []
sin_impl = lax.sin_p.impl
cos_impl = lax.cos_p.impl
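# Calling f_lin below re-runs the rematerialized forward pass, so we expect sin to be
# re-evaluated once (to feed cos(sin(x))) and cos to be evaluated twice, once per factor
# of d/dx sin(sin(x)) = cos(sin(x)) * cos(x).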
try:
lax.sin_p.def_impl(lambda x: sin_calls.append(1) or sin_impl(x))
lax.cos_p.def_impl(lambda x: cos_calls.append(1) or cos_impl(x))
f_lin(3.)
finally:
lax.sin_p.def_impl(sin_impl)
lax.cos_p.def_impl(cos_impl)
self.assertEqual(len(sin_calls), 1)
self.assertEqual(len(cos_calls), 2)
def test_remat_freevars(self):
def f1(x):
y = 2 * np.sin(x)
z = np.cos(x) * np.sin(y)
return z
def f2(x):
y = 2 * np.sin(x)
z = api.remat(lambda x: np.cos(x) * np.sin(y))(x)
return z
ans, f_lin = api.linearize(f2, 2.)
expected, f_lin_expected = api.linearize(f1, 2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = f_lin(3.)
expected = f_lin_expected(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_grad_python_control_flow(self):
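# remat with concrete=True traces g on concrete values, so the Python-level `if x > 0`
# below can branch on the actual argument instead of an abstract tracer.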
@partial(api.remat, concrete=True)
def g(x):
if x > 0:
return lax.sin(x), 3.
else:
return lax.cos(x), 4.
def f(x):
x, _ = g(x)
return x
ans = f(2.)
expected = onp.sin(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(f)(2.)
expected = onp.cos(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_jit(self):
@api.remat
def g(x):
return lax.sin(lax.sin(x))
def f_(x):
return g(x)
f = api.jit(f_)
ans = f(2.)
expected = onp.sin(onp.sin(2.))
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(f)(2.)
expected = onp.cos(onp.sin(2.)) * onp.cos(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jit(api.grad(f_))(2.)
expected = onp.cos(onp.sin(2.)) * onp.cos(2.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_vmap(self):
@api.remat
def g(x):
return lax.sin(lax.sin(x))
x = onp.arange(3.)
ans = api.vmap(g)(x)
expected = onp.sin(onp.sin(x))
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jacfwd(g)(x)
expected = onp.diag(onp.cos(onp.sin(x)) * onp.cos(x))
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.jacrev(g)(x)
expected = onp.diag(onp.cos(onp.sin(x)) * onp.cos(x))
self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_higher_order_autodiff(self):
def f(x):
return lax.cos(lax.sin(x))
g = api.remat(f)
ans = api.grad(api.grad(g))(3.)
expected = api.grad(api.grad(f))(3.)
self.assertAllClose(ans, expected, check_dtypes=False)
def test_remat_scan(self):
to_scan = lambda c, x: (np.sin(c), None)
def f_noremat(x):
y, _ = lax.scan(to_scan, x, onp.arange(3.))
return y
def f_yesremat(x):
y, _ = lax.scan(api.remat(to_scan), x, onp.arange(3.))
return y
ans = f_yesremat(4.)
expected = f_noremat(4.)
self.assertAllClose(ans, expected, check_dtypes=False)
ans = api.grad(f_yesremat)(4.)
expected = api.grad(f_noremat)(4.)
self.assertAllClose(ans, expected, check_dtypes=False)
jaxpr = api.make_jaxpr(api.linearize(f_yesremat, 4.)[1])(1.)
scan_eqn, = jaxpr.jaxpr.eqns
self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))
jaxpr = api.make_jaxpr(api.vjp(f_yesremat, 4.)[1])(1.)
scan_eqn, = jaxpr.jaxpr.eqns
self.assertIn(' cos ', str(scan_eqn.params['jaxpr']))
def test_remat_no_redundant_flops(self):
# see https://github.com/google/jax/pull/1749#issuecomment-558267584
@api.jit
def g(x):
return f(2., x)
@api.remat
def f(x, y):
return np.sin(x) * y
# We swap out sin_p's impl rule to count how many times it's invoked
called = []
sin_impl = lax.sin_p.impl
try:
lax.sin_p.def_impl(lambda x: called.append(1) or sin_impl(x))
api.grad(g)(3.)
finally:
lax.sin_p.def_impl(sin_impl)
num_calls = len(called)
self.assertEqual(num_calls, 1)
def test_remat_binomial_checkpointing(self):
def binom_checkpoint(funs):
if len(funs) == 1:
return funs[0]
else:
f1 = binom_checkpoint(funs[:len(funs)//2])
f2 = binom_checkpoint(funs[len(funs)//2:])
return api.remat(lambda x: f1(f2(x)))
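# binom_checkpoint recursively splits the chain of functions in half and wraps the
# composition of the two checkpointed halves in remat, so roughly O(log n) intermediate
# activations are live at once, at the cost of extra recomputation on the backward pass.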
f1 = binom_checkpoint([np.sin, np.sin, np.sin, np.sin])
f2 = lambda x: np.sin(np.sin(np.sin(np.sin(x))))
x = 4.
self.assertAllClose(f1(x), f2(x), check_dtypes=False)
self.assertAllClose(api.grad(f1)(x), api.grad(f2)(x), check_dtypes=False)
def test_remat_symbolic_zeros(self):
# code from https://github.com/google/jax/issues/1907
test_remat = True
test_scan = True
key = jax.random.PRNGKey(0)
key, split = jax.random.split(key)
n = 5
def func(D0):
def shift(R, dR, **unused_kwargs):
return R + dR
def apply_fn(R):
return D0 * R
Rinit = jax.random.uniform(split, (n,3), minval=0.0, maxval=5.0,
dtype=np.float32)
def move(R,i):
F = apply_fn(R)
return shift(R, 0.001 * F), np.array([0.])
move = api.remat(move)
R, temp = lax.scan(move, Rinit, np.arange(2))
return R[0, 0]
api.grad(func)(5.0) # doesn't crash
def test_trivial_computations(self):
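# jit of a function that simply forwards its inputs should return the very same
# DeviceArray buffers rather than copies, hence the identity (`is`) checks below.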
x = np.array([1, 2, 3])
y = api.jit(lambda x: x)(x)
self.assertIs(x, y)
z1, z2 = api.jit(lambda x: (x, x))(x)
self.assertIs(z1, z2)
x1, x2 = np.array([1, 2]), np.array([2, 3])
z1, z2, z3 = api.jit(lambda x, y: (y, 1, x))(x1, x2)
self.assertIs(z1, x2)
self.assertIs(z3, x1)
self.assertEqual(z2, 1)
def test_nested_jit_hoisting(self):
@api.jit
def f(x, y):
z = 2 * x
return y + z, 3
@api.jit
def g(x):
return f(2, x)
jaxpr_subcomp = xla.jaxpr_subcomp
jaxprs = []
def jaxpr_subcomp_and_collect(c, jaxpr, *args, **kwargs):
jaxprs.append(jaxpr)
return jaxpr_subcomp(c, jaxpr, *args, **kwargs)
try:
xla.jaxpr_subcomp = jaxpr_subcomp_and_collect
ans = g(3)
finally:
xla.jaxpr_subcomp = jaxpr_subcomp
self.assertEqual(ans, (7, 3))
self.assertLen(jaxprs, 2)
outer_jaxpr, inner_jaxpr = jaxprs
self.assertLen(outer_jaxpr.eqns, 1)
self.assertEqual(outer_jaxpr.eqns[0].primitive.name, 'xla_call')
(subjaxpr_1, _, _), = outer_jaxpr.eqns[0].bound_subjaxprs
self.assertEqual(str(subjaxpr_1), str(inner_jaxpr))
self.assertLen(inner_jaxpr.eqns, 2)
self.assertEqual(inner_jaxpr.eqns[0].primitive.name, 'mul')
self.assertEqual(inner_jaxpr.eqns[1].primitive.name, 'add')
def test_primitive_compilation_cache(self):
with jtu.count_primitive_compiles() as count:
lax.add(1, 2)
lax.add(2, 3)
self.assertEqual(count[0], 1)
class JaxprTest(jtu.JaxTestCase):
def test_scalar_literals(self):
jaxpr = api.make_jaxpr(lambda x: x + 2)(42)
self.assertLen(jaxpr.jaxpr.constvars, 0)
def test_const(self):
def fun(x):
return (x, 1., np.zeros(1))
jaxpr = api.make_jaxpr(fun)(0.)
self.assertMultiLineStrippedEqual(str(jaxpr), """
{ lambda b ; ; a.
let
in [a, 1.0, b] }
""")
def test_cond(self):
def f(x):
return lax.cond(x >= 0.,
x + 1.,
lambda xt: xt + x,
x + 2.,
lambda xf: xf - x)
jaxpr = api.make_jaxpr(f)(3.)
self.assertMultiLineStrippedEqual(str(jaxpr), """
{ lambda ; ; a.
let b = ge a 0.0
c = add a 1.0
d = add a 2.0
e = cond[ false_jaxpr={ lambda ; ; b a.
let c = sub a b
in [c] }
false_nconsts=1
true_jaxpr={ lambda ; ; b a.
let c = add a b
in [c] }
true_nconsts=1 ] b a c a d
in [e] }
""")
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/api_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import threading
from absl.testing import absltest
import jax
from jax import lax, numpy as np
from jax.config import config
from jax.lib import xla_client
import jax.test_util
import numpy as onp
config.parse_flags_with_absl()
FLAGS = config.FLAGS
class InfeedTest(jax.test_util.JaxTestCase):
def testInfeed(self):
@jax.jit
def f(x):
token = lax.create_token(x)
(y,), token = lax.infeed(
token, shape=(jax.ShapedArray((3, 4), np.float32),))
(z,), _ = lax.infeed(
token, shape=(jax.ShapedArray((3, 1, 1), np.float32),))
return x + y + z
x = onp.float32(1.5)
y = onp.reshape(onp.arange(12, dtype=onp.float32), (3, 4)) # onp.random.randn(3, 4).astype(onp.float32)
z = onp.random.randn(3, 1, 1).astype(onp.float32)
xla_client.transfer_to_infeed((y,))
xla_client.transfer_to_infeed((z,))
self.assertAllClose(f(x), x + y + z, check_dtypes=True)
def testInfeedThenOutfeed(self):
@jax.jit
def f(x):
token = lax.create_token(x)
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), np.float32))
token = lax.outfeed(token, y + onp.float32(1))
return lax.tie_in(token, x - 1)
x = onp.float32(7.5)
y = onp.random.randn(3, 4).astype(onp.float32)
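# The compiled computation blocks waiting on the infeed, so it runs on a separate thread
# while the host thread feeds the operand and then reads the result from the outfeed.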
execution = threading.Thread(target=lambda: f(x))
execution.start()
xla_client.transfer_to_infeed((y,))
out, = xla_client.transfer_from_outfeed(xla_client.shape_from_pyval((y,)))
execution.join()
self.assertAllClose(out, y + onp.float32(1), check_dtypes=True)
def testInfeedThenOutfeedInALoop(self):
def doubler(_, token):
y, token = lax.infeed(
token, shape=jax.ShapedArray((3, 4), np.float32))
return lax.outfeed(token, y * onp.float32(2))
@jax.jit
def f(n):
token = lax.create_token(n)
token = lax.fori_loop(0, n, doubler, token)
return lax.tie_in(token, n)
n = 10
execution = threading.Thread(target=lambda: f(n))
execution.start()
for _ in range(n):
x = onp.random.randn(3, 4).astype(onp.float32)
xla_client.transfer_to_infeed((x,))
y, = xla_client.transfer_from_outfeed(xla_client.shape_from_pyval((x,)))
self.assertAllClose(y, x * onp.float32(2), check_dtypes=True)
execution.join()
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/infeed_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
from jax.lib import xla_bridge as xb
class XlaBridgeTest(absltest.TestCase):
def test_set_device_assignment_no_partition(self):
compile_options = xb.get_compile_options(
num_replicas=4, device_assignment=[0, 1, 2, 3])
expected_device_assignment = ("Computations: 1 Replicas: 4\nComputation 0: "
"0 1 2 3 \n")
self.assertEqual(compile_options.device_assignment.__repr__(),
expected_device_assignment)
def test_set_device_assignment_with_partition(self):
compile_options = xb.get_compile_options(
num_replicas=2, device_assignment=[[0, 1], [2, 3]])
expected_device_assignment = ("Computations: 2 Replicas: 2\nComputation 0: "
"0 2 \nComputation 1: 1 3 \n")
self.assertEqual(compile_options.device_assignment.__repr__(),
expected_device_assignment)
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/xla_bridge_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
from absl.testing import absltest
from absl.testing import parameterized
from jax import test_util as jtu
from jax import tree_util
def _dummy_func(*args, **kwargs):
return
ATuple = collections.namedtuple("ATuple", ("foo", "bar"))
class ANamedTupleSubclass(ATuple):
pass
class AnObject(object):
def __init__(self, x, y, z):
self.x = x
self.y = y
self.z = z
def __eq__(self, other):
return self.x == other.x and self.y == other.y and self.z == other.z
def __hash__(self):
return hash((self.x, self.y, self.z))
def __repr__(self):
return "AnObject({},{},{})".format(self.x, self.y, self.z)
tree_util.register_pytree_node(AnObject, lambda o: ((o.x, o.y), o.z),
lambda z, xy: AnObject(xy[0], xy[1], z))
PYTREES = [
("foo",),
((),),
(([()]),),
((1, 2),),
(((1, "foo"), ["bar", (3, None, 7)]),),
([3],),
([3, ATuple(foo=(3, ATuple(foo=3, bar=None)), bar={"baz": 34})],),
([AnObject(3, None, [4, "foo"])],),
({"a": 1, "b": 2},),
(collections.OrderedDict([("foo", 34), ("baz", 101), ("something", -42)]),),
(collections.defaultdict(dict,
[("foo", 34), ("baz", 101), ("something", -42)]),),
(ANamedTupleSubclass(foo="hello", bar=3.5),),
]
class TreeTest(jtu.JaxTestCase):
@parameterized.parameters(*PYTREES)
def testRoundtrip(self, inputs):
xs, tree = tree_util.tree_flatten(inputs)
actual = tree_util.tree_unflatten(tree, xs)
self.assertEqual(actual, inputs)
@parameterized.parameters(*PYTREES)
def testRoundtripWithFlattenUpTo(self, inputs):
_, tree = tree_util.tree_flatten(inputs)
if not hasattr(tree, "flatten_up_to"):
self.skipTest("Test requires Jaxlib >= 0.1.23")
xs = tree.flatten_up_to(inputs)
actual = tree_util.tree_unflatten(tree, xs)
self.assertEqual(actual, inputs)
@parameterized.parameters(
(tree_util.Partial(_dummy_func),),
(tree_util.Partial(_dummy_func, 1, 2),),
(tree_util.Partial(_dummy_func, x="a"),),
(tree_util.Partial(_dummy_func, 1, 2, 3, x=4, y=5),),
)
def testRoundtripPartial(self, inputs):
xs, tree = tree_util.tree_flatten(inputs)
actual = tree_util.tree_unflatten(tree, xs)
# functools.partial does not support equality comparisons:
# https://stackoverflow.com/a/32786109/809705
self.assertEqual(actual.func, inputs.func)
self.assertEqual(actual.args, inputs.args)
self.assertEqual(actual.keywords, inputs.keywords)
@parameterized.parameters(*PYTREES)
def testRoundtripViaBuild(self, inputs):
xs, tree = tree_util._process_pytree(tuple, inputs)
actual = tree_util.build_tree(tree, xs)
self.assertEqual(actual, inputs)
def testChildren(self):
_, tree = tree_util.tree_flatten(((1, 2, 3), (4,)))
_, c0 = tree_util.tree_flatten((0, 0, 0))
_, c1 = tree_util.tree_flatten((7,))
if not callable(tree.children):
self.skipTest("Test requires Jaxlib >= 0.1.23")
self.assertEqual([c0, c1], tree.children())
def testFlattenUpTo(self):
_, tree = tree_util.tree_flatten([(1, 2), None, ATuple(foo=3, bar=7)])
if not hasattr(tree, "flatten_up_to"):
self.skipTest("Test requires Jaxlib >= 0.1.23")
out = tree.flatten_up_to([({
"foo": 7
}, (3, 4)), None, ATuple(foo=(11, 9), bar=None)])
self.assertEqual(out, [{"foo": 7}, (3, 4), (11, 9), None])
def testTreeMultimap(self):
x = ((1, 2), (3, 4, 5))
y = (([3], None), ({"foo": "bar"}, 7, [5, 6]))
out = tree_util.tree_multimap(lambda *xs: tuple(xs), x, y)
self.assertEqual(out, (((1, [3]), (2, None)),
((3, {"foo": "bar"}), (4, 7), (5, [5, 6]))))
if __name__ == "__main__":
absltest.main()
|
jax-master
|
tests/tree_util_tests.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the experimental/loops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl.testing import absltest
import numpy as onp
import re
import six
from jax import api, lax, ops
from jax import numpy as np
from jax import test_util as jtu
from jax.experimental import loops
from jax.config import config
config.parse_flags_with_absl()
class LoopsTest(jtu.JaxTestCase):
def test_scope_no_loops(self):
def f_op(r):
with loops.Scope() as s:
s.x = r + 1
return s.x
self.assertAllClose(4.0, f_op(3.), check_dtypes=True)
def test_loop_empty(self):
def f_op(r):
with loops.Scope() as s:
for _ in s.range(5):
pass
return r
self.assertAllClose(3.0, f_op(3.), check_dtypes=True)
def test_loop_1(self):
"""One loop with one state var, with transforms."""
def f_op(inc):
with loops.Scope() as s:
s.out = 10.
for _ in s.range(5):
s.out += inc
return s.out
def f_expected(inc):
return 10 + 5 * inc
self.assertAllClose(f_expected(2.), f_op(2.), check_dtypes=True)
self.assertAllClose(f_expected(2.), api.jit(f_op)(2.), check_dtypes=True)
self.assertAllClose(5., api.grad(f_op)(2.), check_dtypes=True)
self.assertAllClose(5., api.grad(f_op)(2.), check_dtypes=True)
inc_batch = onp.arange(5, dtype=np.float_)
self.assertAllClose(np.array([f_expected(inc) for inc in inc_batch],
dtype=np.float_),
api.vmap(f_op)(inc_batch), check_dtypes=True)
def test_loop_2(self):
"""One loop, two state fields."""
def f_op(inc):
with loops.Scope() as s:
s.out1 = 10.
s.out2 = 20.
for i in s.range(5):
s.out1 += inc
s.out2 += 1.
return (s.out1, s.out2)
self.assertAllClose((10. + 2. * 5, 20. + 1. * 5), f_op(2.), check_dtypes=True)
def test_add_vectors(self):
def add_vec(x, y):
with loops.Scope() as s:
n = x.shape[0]
assert n == y.shape[0]
s.out = np.zeros(shape=[n], dtype=np.float32)
for i in s.range(n):
s.out = ops.index_add(s.out, i, x[i] + y[i])
return s.out
x = np.array([1., 2., 3.], dtype=np.float32)
y = np.array([4., 5., 6.], dtype=np.float32)
self.assertAllClose(np.add(x, y), add_vec(x, y), check_dtypes=True)
def test_matmul(self):
def matmul(x, y):
with loops.Scope() as s:
n, m = x.shape
m1, p = y.shape
assert m == m1
s.out = np.zeros(shape=[n, p], dtype=np.float32)
for i in s.range(n):
for j in s.range(p):
for k in s.range(m):
s.out = ops.index_add(s.out, (i, j), x[i, k] * y[k, j])
return s.out
x = np.array([[1., 2., 3.]], dtype=np.float32) # 1x3
y = np.array([[4.], [5.], [6.]], dtype=np.float32) # 3x1
self.assertAllClose(np.matmul(x, y), matmul(x, y), check_dtypes=True)
def test_reuse_range(self):
"""Ranges can be reused, as long as not nested in each other."""
def f_op():
with loops.Scope() as s:
r1 = s.range(5)
s.out = 0
for _ in r1:
s.out += 1
for _ in r1:
s.out += 1
return s.out
self.assertEqual(10, f_op())
def test_loop_nested(self):
def f_op(inc):
with loops.Scope() as s:
s.out = 10.
for i in s.range(5):
s.out += inc
for j in s.range(6):
s.out += inc
return s.out
self.assertAllClose(10. + 5 * (2. + 6 * 2.), f_op(2.), check_dtypes=True)
def test_example_doc(self):
"The example from the module docstring."
def f_expected():
arr = onp.zeros(5, dtype=np.float_)
for i in range(arr.shape[0]):
arr[i] += 2.
if i % 2 == 0:
arr[i] += 1.
return arr
def f_op_jax():
arr = np.zeros(5)
def loop_body(i, acc_arr):
arr1 = ops.index_update(acc_arr, i, acc_arr[i] + 2.)
return lax.cond(i % 2 == 0,
arr1,
lambda arr1: ops.index_update(arr1, i, arr1[i] + 1.),
arr1,
lambda arr1: arr1)
arr = lax.fori_loop(0, arr.shape[0], loop_body, arr)
return arr
def f_op_loops():
with loops.Scope() as s:
s.arr = np.zeros(5) # Must create the mutable state of the loop as `scope` fields.
for i in s.range(s.arr.shape[0]):
s.arr = ops.index_update(s.arr, i, s.arr[i] + 2.)
for _ in s.cond_range(i % 2 == 0): # Conditionals are also sugared as loops with 0 or 1 iterations
s.arr = ops.index_update(s.arr, i, s.arr[i] + 1.)
return s.arr
self.assertAllClose(f_expected(), f_op_jax(), check_dtypes=True)
self.assertAllClose(f_expected(), f_op_loops(), check_dtypes=True)
def test_loop_mutable_used_but_not_changed(self):
def f_op(inc):
with loops.Scope() as s:
s.read_only = inc
s.out = 10.
for i in s.range(5):
s.out += s.read_only
# It is OK to use regular Python variables outside loops.
save_to_other_var = s.out
return save_to_other_var
self.assertAllClose(10. + 5 * 2., f_op(2.), check_dtypes=True)
def test_range_locations(self):
"""Ranges have locations."""
if six.PY2: self.skipTest("Source location not implemented for PY2")
with loops.Scope() as s:
r = s.range(5)
cr = s.cond_range(True)
wr = s.while_range(lambda: True)
for range in [r, cr, wr]:
self.assertIn("loops_test.py", range.location())
self.assertIn(self._testMethodName, range.location())
def test_error_reuse_range_nested(self):
"""Ranges cannot be reused nested in their own iteration."""
def f_op():
with loops.Scope() as s:
r1 = s.range(5)
s.out = 0
for _ in r1:
for _ in r1:
s.out += 1
return s.out
with self.assertRaisesWithLiteralMatch(ValueError, "Range is reused nested inside itself."):
f_op()
def test_error_early_exit_range(self):
"""Ranges do not support early exit from loop body."""
def bad_function(exit_how="break"):
with loops.Scope() as s:
for i in s.range(555):
if exit_how == "break":
break
elif exit_how == "return":
return 1.
elif exit_how == "exception":
raise ValueError("test exception")
# Start another range, we get here after a "break" above
for i in s.range(5):
pass
return 0.
if six.PY3:
with self.assertRaisesRegex(ValueError,
re.compile(("Some ranges have exited prematurely. The innermost such range is at"
".*s.range.555."), re.DOTALL)):
bad_function("break")
with self.assertRaisesRegex(ValueError, "Some ranges have exited prematurely"):
bad_function("return")
# On exception exit, we let the exception propagate
with self.assertRaisesRegex(ValueError, "test exception"):
bad_function("exception")
def test_error_early_exit_range_nested(self):
"""Exit early from a nested range."""
def bad_function():
with loops.Scope() as s:
for i in s.range(5): # When we end this range, we'll find the inner range still active
for j in s.range(6):
break
return 0.
with self.assertRaisesRegex(ValueError, "Some ranges have exited prematurely."):
bad_function()
def test_loop_index_var_live_expect_fail(self):
"""The index variable is live after the loop."""
self.skipTest("Don't know how to check that index variable is not used after loop.")
def f_op(r):
with loops.Scope() as s:
for i in s.range(r):
pass
return i
self.assertAllClose(4, f_op(4), check_dtypes=True)
def test_error_new_state_in_loop(self):
"""Error when creating new state in a loop."""
def f_op(inc):
with loops.Scope() as s:
s.out = 10.
for i in s.range(5):
s.other_state = 1.
s.out += inc
return s.out
with self.assertRaisesWithLiteralMatch(ValueError,
"New mutable state 'other_state' cannot be created inside a loop."):
f_op(2.)
def test_error_range_ends_static(self):
def f_op(start, end, inc):
with loops.Scope() as s:
s.out = 0.
for i in s.range(start, end):
s.out += inc
return s.out
self.assertAllClose(16., f_op(0, 4, 4.), check_dtypes=True)
# Ok to jit, as long as the start and end are static
self.assertAllClose(16., api.jit(f_op, static_argnums=(0, 1))(0, 4, 4.), check_dtypes=True)
with self.assertRaisesRegex(TypeError, "Abstract value passed to `int`, which requires a concrete value"):
self.assertAllClose(16., api.jit(f_op)(0, 4, 4.), check_dtypes=True)
with self.assertRaisesRegex(TypeError, "Abstract value passed to `int`, which requires a concrete value"):
self.assertAllClose(16., api.vmap(f_op)(np.zeros(10), np.ones(10), np.array([4.] * 10)), check_dtypes=True)
def test_cond(self):
def f_op(inc):
with loops.Scope() as s:
s.out = 10.
for i in s.cond_range(inc > 0):
s.out += inc
return s.out
self.assertAllClose(10. + 2., f_op(2.), check_dtypes=True)
self.assertAllClose(10., f_op(-2.), check_dtypes=True)
def test_cond_state(self):
"""Conditionals predicated on scope fields."""
def f_op(init):
with loops.Scope() as s:
s.out = init
for _ in s.cond_range(s.out > 0.):
s.out *= 2.
return s.out
self.assertAllClose(2. * 2., f_op(2.), check_dtypes=True)
self.assertAllClose(-2., f_op(-2.), check_dtypes=True)
def test_cond_nested(self):
"""Nested conditionals."""
def f_expected(init):
"""Multi-linear function.
x in (..0) x + 1.
x in [0..10) x + 1 + 2 + 4
x in [10..) x + 1 + 2 + 4 + 8
"""
out = init
if out >= 0.:
out += 2.
if out - 2. >= 10.:
out += 8.
out += 4.
out += 1.
return out
def f_op(init):
with loops.Scope() as s:
s.out = init
for _ in s.cond_range(s.out >= 0.):
s.out += 2.
for _ in s.cond_range(s.out - 2. >= 10.):
s.out += 8.
s.out += 4.
s.out += 1.
return s.out
for init in [-1., 0., 9., 10.]:
self.assertAllClose(f_expected(init), f_op(init), check_dtypes=True)
def test_error_cond_using_index_var(self):
"""Conditionals should not use the iteration index value."""
def f_op(inc):
with loops.Scope() as s:
s.out = 10.
for i in s.cond_range(inc > 0):
s.out += i
return s.out
with self.assertRaisesWithLiteralMatch(
ValueError,
"Body of cond_range or while_range should not use the index variable returned by iterator."):
api.make_jaxpr(f_op)(2.)
def test_while(self):
def f_op(init):
with loops.Scope() as s:
s.out = init
for _ in s.while_range(lambda: s.out < 5.):
s.out += 2.
s.out += 1.
return s.out
def f_expected(init):
out = init
while out < 5.:
out += 2.
out += 1.
return out
self.assertAllClose(f_expected(2.), f_op(2.), check_dtypes=True)
self.assertAllClose(f_expected(2.), api.jit(f_op)(2.), check_dtypes=True)
self.assertAllClose(f_expected(1.), f_op(1.), check_dtypes=True)
init_batch = onp.array([1., 2., 3.], dtype=onp.float32)
self.assertAllClose(onp.array([f_expected(init) for init in init_batch],
dtype=onp.float32),
api.vmap(f_op)(init_batch), check_dtypes=True)
def test_error_while_cond_mutation(self):
"""Disallow mutation in the while conditional."""
def f_op(init):
with loops.Scope() as s:
s.out = init
def cond_func():
s.out += 1. # Not allowed
return s.out < 5.
for _ in s.while_range(cond_func):
s.out += 2.
s.out += 1.
return s.out
with self.assertRaisesWithLiteralMatch(ValueError,
"Conditional function modifies scope.out field."):
f_op(0.)
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/loops_test.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
from functools import partial
import itertools
from unittest import skip, SkipTest
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
import numpy.random as npr
import six
from jax import api
from jax import core
from jax import dtypes
from jax import lax
from jax import test_util as jtu
from jax import lax_reference
from jax.test_util import check_grads
from jax.interpreters import xla
from jax.lib import xla_client
import jax.util
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
def num_float_bits(dtype):
return dtypes.finfo(dtypes.canonicalize_dtype(dtype)).bits
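# For example (assuming the default 32-bit mode), num_float_bits(onp.float64)
# returns 32 because canonicalize_dtype maps float64 to float32; with the
# jax_enable_x64 flag set it returns 64.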
### lax tests
# For standard unops and binops, we can generate a large number of tests on
# arguments of appropriate shapes and dtypes using the following table.
float_dtypes = list(jtu.supported_dtypes().intersection(
{dtypes.bfloat16, onp.float16, onp.float32, onp.float64}))
complex_elem_dtypes = list(jtu.supported_dtypes().intersection(
{onp.float32, onp.float64}))
complex_dtypes = list(jtu.supported_dtypes().intersection(
{onp.complex64, onp.complex128}))
inexact_dtypes = float_dtypes + complex_dtypes
int_dtypes = list(jtu.supported_dtypes().intersection({onp.int32, onp.int64}))
bool_dtypes = [onp.bool_]
default_dtypes = float_dtypes + int_dtypes
all_dtypes = float_dtypes + complex_dtypes + int_dtypes + bool_dtypes
compatible_shapes = [[(3,)], [(3, 4), (3, 1), (1, 4)], [(2, 3, 4), (2, 1, 4)]]
OpRecord = collections.namedtuple(
"OpRecord", ["op", "nargs", "dtypes", "rng_factory", "tol"])
def op_record(op, nargs, dtypes, rng_factory, tol=None):
return OpRecord(op, nargs, dtypes, rng_factory, tol)
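# For example, op_record("exp", 1, float_dtypes, jtu.rand_small) describes a
# unary op that is tested over every supported float dtype with small random
# inputs and the default comparison tolerance (tol=None).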
LAX_OPS = [
op_record("neg", 1, default_dtypes + complex_dtypes, jtu.rand_small),
op_record("sign", 1, default_dtypes, jtu.rand_small),
op_record("floor", 1, float_dtypes, jtu.rand_small),
op_record("ceil", 1, float_dtypes, jtu.rand_small),
op_record("round", 1, float_dtypes, jtu.rand_default),
op_record("nextafter", 2, [f for f in float_dtypes if f != dtypes.bfloat16],
jtu.rand_default, tol=0),
op_record("is_finite", 1, float_dtypes, jtu.rand_small),
op_record("exp", 1, float_dtypes + complex_dtypes, jtu.rand_small),
# TODO(b/142975473): on CPU, expm1 for float64 is only accurate to ~float32
# precision.
op_record("expm1", 1, float_dtypes + complex_dtypes, jtu.rand_small,
{onp.float64: 1e-8}),
op_record("log", 1, float_dtypes + complex_dtypes, jtu.rand_positive),
op_record("log1p", 1, float_dtypes + complex_dtypes, jtu.rand_positive),
# TODO(b/142975473): on CPU, tanh for complex128 is only accurate to
# ~float32 precision.
# TODO(b/143135720): on GPU, tanh has only ~float32 precision.
op_record("tanh", 1, float_dtypes + complex_dtypes, jtu.rand_small,
{onp.float64: 1e-9, onp.complex128: 1e-7}),
op_record("sin", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("cos", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("atan2", 2, float_dtypes, jtu.rand_default),
op_record("sqrt", 1, float_dtypes + complex_dtypes, jtu.rand_positive),
op_record("rsqrt", 1, float_dtypes + complex_dtypes, jtu.rand_positive),
op_record("square", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("reciprocal", 1, float_dtypes + complex_dtypes, jtu.rand_positive),
op_record("tan", 1, float_dtypes, jtu.rand_default, {onp.float32: 1e-5}),
op_record("asin", 1, float_dtypes, jtu.rand_small),
op_record("acos", 1, float_dtypes, jtu.rand_small),
op_record("atan", 1, float_dtypes, jtu.rand_small),
op_record("sinh", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("cosh", 1, float_dtypes + complex_dtypes, jtu.rand_default),
op_record("lgamma", 1, float_dtypes, jtu.rand_positive,
{onp.float32: 1e-3 if jtu.device_under_test() == "tpu" else 1e-5,
onp.float64: 1e-14}),
op_record("digamma", 1, float_dtypes, jtu.rand_positive,
{onp.float64: 1e-14}),
op_record("erf", 1, float_dtypes, jtu.rand_small),
op_record("erfc", 1, float_dtypes, jtu.rand_small),
    # TODO(b/142976030): the approximation of erf_inv used by XLA is only
# accurate to float32 precision.
op_record("erf_inv", 1, float_dtypes, jtu.rand_small,
{onp.float64: 1e-9}),
op_record("bessel_i0e", 1, float_dtypes, jtu.rand_default),
op_record("bessel_i1e", 1, float_dtypes, jtu.rand_default),
op_record("real", 1, complex_dtypes, jtu.rand_default),
op_record("imag", 1, complex_dtypes, jtu.rand_default),
op_record("complex", 2, complex_elem_dtypes, jtu.rand_default),
op_record("conj", 1, complex_elem_dtypes + complex_dtypes,
jtu.rand_default),
op_record("abs", 1, default_dtypes + complex_dtypes, jtu.rand_default),
op_record("pow", 2, float_dtypes + complex_dtypes, jtu.rand_positive),
op_record("bitwise_and", 2, bool_dtypes, jtu.rand_small),
op_record("bitwise_not", 1, bool_dtypes, jtu.rand_small),
op_record("bitwise_or", 2, bool_dtypes, jtu.rand_small),
op_record("bitwise_xor", 2, bool_dtypes, jtu.rand_small),
op_record("add", 2, default_dtypes + complex_dtypes, jtu.rand_small),
op_record("sub", 2, default_dtypes + complex_dtypes, jtu.rand_small),
op_record("mul", 2, default_dtypes + complex_dtypes, jtu.rand_small),
op_record("div", 2, default_dtypes + complex_dtypes, jtu.rand_nonzero),
op_record("rem", 2, default_dtypes, jtu.rand_nonzero),
op_record("max", 2, all_dtypes, jtu.rand_small),
op_record("min", 2, all_dtypes, jtu.rand_small),
op_record("eq", 2, all_dtypes, jtu.rand_some_equal),
op_record("ne", 2, all_dtypes, jtu.rand_small),
op_record("ge", 2, default_dtypes, jtu.rand_small),
op_record("gt", 2, default_dtypes, jtu.rand_small),
op_record("le", 2, default_dtypes, jtu.rand_small),
op_record("lt", 2, default_dtypes, jtu.rand_small),
]
CombosWithReplacement = itertools.combinations_with_replacement
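# A minimal stand-alone sketch of the kind of check the parameterized tests in
# LaxTest perform for one unary record. The helper name is illustrative only and
# is not used by the test machinery below.
def _example_unop_check(rng_factory=jtu.rand_small, dtype=onp.float32):
  rng = rng_factory()
  x = rng((3, 4), dtype)
  # The lax op should agree with the NumPy reference to within a loose tolerance.
  onp.testing.assert_allclose(onp.asarray(lax.exp(x)), onp.exp(x),
                              rtol=1e-5, atol=1e-5)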
class LaxTest(jtu.JaxTestCase):
"""Numerical tests for LAX operations."""
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.op, shapes, itertools.repeat(dtype)),
"op_name": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes,
"dtype": dtype}
for shape_group in compatible_shapes
for shapes in CombosWithReplacement(shape_group, rec.nargs)
for dtype in rec.dtypes)
for rec in LAX_OPS))
def testOp(self, op_name, rng_factory, shapes, dtype):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = getattr(lax, op_name)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.op, shapes, itertools.repeat(dtype)),
"op_name": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes,
"dtype": dtype, "tol": rec.tol}
for shape_group in compatible_shapes
for shapes in CombosWithReplacement(shape_group, rec.nargs)
for dtype in rec.dtypes)
for rec in LAX_OPS))
def testOpAgainstNumpy(self, op_name, rng_factory, shapes, dtype, tol):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = getattr(lax, op_name)
numpy_op = getattr(lax_reference, op_name)
self._CheckAgainstNumpy(op, numpy_op, args_maker, tol=tol)
# TODO test shift_left, shift_right_arithmetic, shift_right_logical
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}".format(
from_dtype, to_dtype),
"from_dtype": from_dtype, "to_dtype": to_dtype, "rng_factory": rng_factory}
for from_dtype, to_dtype in itertools.product(
[onp.float32, onp.int32, "float32", "int32"], repeat=2)
for rng_factory in [jtu.rand_default]))
def testConvertElementType(self, from_dtype, to_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng((2, 3), from_dtype)]
op = lambda x: lax.convert_element_type(x, to_dtype)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}"
.format(from_dtype, to_dtype),
"from_dtype": from_dtype, "to_dtype": to_dtype, "rng_factory": rng_factory}
for from_dtype, to_dtype in itertools.product(
[onp.float32, onp.int32, "float32", "int32"], repeat=2)
for rng_factory in [jtu.rand_default]))
def testConvertElementTypeAgainstNumpy(self, from_dtype, to_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng((2, 3), from_dtype)]
op = lambda x: lax.convert_element_type(x, to_dtype)
numpy_op = lambda x: lax_reference.convert_element_type(x, to_dtype)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}"
.format(from_dtype, to_dtype),
"from_dtype": from_dtype, "to_dtype": to_dtype, "rng_factory": rng_factory}
for from_dtype, to_dtype in itertools.product(
[onp.float32, onp.int32, "float32", "int32"], repeat=2)
for rng_factory in [jtu.rand_default]))
def testBitcastConvertType(self, from_dtype, to_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng((2, 3), from_dtype)]
op = lambda x: lax.bitcast_convert_type(x, to_dtype)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}"
.format(from_dtype, to_dtype),
"from_dtype": from_dtype, "to_dtype": to_dtype, "rng_factory": rng_factory}
for from_dtype, to_dtype in itertools.product(
[onp.float32, onp.int32, "float32", "int32"], repeat=2)
for rng_factory in [jtu.rand_default]))
def testBitcastConvertTypeAgainstNumpy(self, from_dtype, to_dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng((2, 3), from_dtype)]
op = lambda x: lax.bitcast_convert_type(x, to_dtype)
numpy_op = lambda x: lax_reference.bitcast_convert_type(x, to_dtype)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_min_shape={}_operand_shape={}_max_shape={}".format(
jtu.format_shape_dtype_string(min_shape, dtype),
jtu.format_shape_dtype_string(operand_shape, dtype),
jtu.format_shape_dtype_string(max_shape, dtype)),
"min_shape": min_shape, "operand_shape": operand_shape,
"max_shape": max_shape, "dtype": dtype, "rng_factory": rng_factory}
for min_shape, operand_shape, max_shape in [
[(), (2, 3), ()],
[(2, 3), (2, 3), ()],
[(), (2, 3), (2, 3)],
[(2, 3), (2, 3), (2, 3)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testClamp(self, min_shape, operand_shape, max_shape, dtype, rng_factory):
rng = rng_factory()
shapes = [min_shape, operand_shape, max_shape]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
self._CompileAndCheck(lax.clamp, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_min_shape={}_operand_shape={}_max_shape={}".format(
jtu.format_shape_dtype_string(min_shape, dtype),
jtu.format_shape_dtype_string(operand_shape, dtype),
jtu.format_shape_dtype_string(max_shape, dtype)),
"min_shape": min_shape, "operand_shape": operand_shape,
"max_shape": max_shape, "dtype": dtype, "rng_factory": rng_factory}
for min_shape, operand_shape, max_shape in [
[(), (2, 3), ()],
[(2, 3), (2, 3), ()],
[(), (2, 3), (2, 3)],
[(2, 3), (2, 3), (2, 3)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testClampAgainstNumpy(self, min_shape, operand_shape, max_shape, dtype,
rng_factory):
rng = rng_factory()
shapes = [min_shape, operand_shape, max_shape]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
self._CheckAgainstNumpy(lax.clamp, lax_reference.clamp, args_maker)
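  # lax.clamp(min, operand, max) clamps each element of the operand into
  # [min, max]; scalar ()-shaped min/max broadcast against the operand, which is
  # what the shape combinations above exercise.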
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dim={}_baseshape=[{}]_dtype={}_narrs={}".format(
dim, ",".join(str(d) for d in base_shape), onp.dtype(dtype).name,
num_arrs),
"dim": dim, "base_shape": base_shape, "dtype": dtype,
"num_arrs": num_arrs, "rng_factory": rng_factory}
for num_arrs in [3]
for dtype in default_dtypes
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for dim in range(len(base_shape))
for rng_factory in [jtu.rand_default]))
def testConcatenate(self, dim, base_shape, dtype, num_arrs, rng_factory):
rng = rng_factory()
shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = lambda *args: lax.concatenate(args, dim)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dim={}_baseshape=[{}]_dtype={}_narrs={}".format(
dim, ",".join(str(d) for d in base_shape), onp.dtype(dtype).name,
num_arrs),
"dim": dim, "base_shape": base_shape, "dtype": dtype,
"num_arrs": num_arrs, "rng_factory": rng_factory}
for num_arrs in [3]
for dtype in default_dtypes
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for dim in range(len(base_shape))
for rng_factory in [jtu.rand_default]))
def testConcatenateAgainstNumpy(self, dim, base_shape, dtype, num_arrs, rng_factory):
rng = rng_factory()
shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]
args_maker = lambda: [rng(shape, dtype) for shape in shapes]
op = lambda *args: lax.concatenate(args, dim)
numpy_op = lambda *args: lax_reference.concatenate(args, dim)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rng_factory": rng_factory}
for lhs_shape, rhs_shape in [
((b, i, 9, 10), (j, i, 4, 5))
for b, i, j in itertools.product([2, 3], repeat=3)]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1)]
for padding in ["VALID", "SAME"]
for rng_factory in [jtu.rand_small]))
def testConv(self, lhs_shape, rhs_shape, dtype, strides, padding, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv(lhs, rhs, strides, padding)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
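  # lax.conv is the convenience form of conv_general_dilated with NCHW/OIHW
  # layouts and unit dilations, hence the (batch, in_chan, 9, 10) lhs and
  # (out_chan, in_chan, 4, 5) rhs shapes above.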
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rng_factory": rng_factory}
for lhs_shape, rhs_shape in [
((b, i, 9, 10), (j, i, 4, 5))
for b, i, j in itertools.product([2, 3], repeat=3)]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1)]
for padding in ["VALID", "SAME"]
for rng_factory in [jtu.rand_small]))
def testConvAgainstNumpy(self, lhs_shape, rhs_shape, dtype, strides, padding,
rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
op = lambda lhs, rhs: lax.conv(lhs, rhs, strides, padding)
numpy_op = lambda lhs, rhs: lax_reference.conv(lhs, rhs, strides, padding)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_strides={}_padding={}"
"_lhs_dilation={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding, lhs_dilation, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "lhs_dilation": lhs_dilation,
"rhs_dilation": rhs_dilation, "rng_factory": rng_factory}
for lhs_shape, rhs_shape in [
((b, i, 9, 10), (j, i, 4, 5))
for b, i, j in itertools.product([1, 2, 3], repeat=3)]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1)]
for padding in [((0, 0), (0, 0)), ((1, 2), (2, 0))]
for lhs_dilation, rhs_dilation in itertools.product(
[(1, 1), (1, 2), (2, 2)], repeat=2)
for rng_factory in [jtu.rand_small]))
def testConvWithGeneralPadding(self, lhs_shape, rhs_shape, dtype, strides,
padding, lhs_dilation, rhs_dilation, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_with_general_padding(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_strides={}_padding={}"
"_lhs_dilation={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding, lhs_dilation, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "lhs_dilation": lhs_dilation,
"rhs_dilation": rhs_dilation, "rng_factory": rng_factory}
for lhs_shape, rhs_shape in [
((b, i, 9, 10), (j, i, 4, 5))
for b, i, j in itertools.product([1, 2, 3], repeat=3)]
for dtype in [onp.float32] for strides in [(1, 1), (1, 2), (2, 1)]
for padding in [((0, 0), (0, 0)), ((1, 2), (2, 0))]
for lhs_dilation, rhs_dilation in itertools.product(
[(1, 1), (1, 2), (2, 2)], repeat=2)
for rng_factory in [jtu.rand_small]))
def DISABLED_testConvWithGeneralPaddingAgainstNumpy(
self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dilation,
rhs_dilation, rng_factory):
rng = rng_factory()
# TODO(mattjj): make this test pass
raise SkipTest("this test is incomplete")
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_with_general_padding(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)
def numpy_fun(lhs, rhs):
return lax_reference.conv_with_general_padding(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation)
self._CheckAgainstNumpy(fun, numpy_fun, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_strides={}_padding={}"
"_lhs_dilation={}_rhs_dilation={}"
"_dims={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding, lhs_dilation, rhs_dilation,
",".join(dim_nums)),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "lhs_dilation": lhs_dilation,
"rhs_dilation": rhs_dilation, "dimension_numbers": dim_nums,
"perms": perms, "rng_factory": rng_factory}
for lhs_shape, rhs_shape in [
((b, i, 9, w), (j, i, 4, 5))
for w in [0, 10]
for b, i, j in itertools.product([2, 3], repeat=3)]
for dtype in float_dtypes for strides in [(1, 1), (2, 1)]
for padding in [((1, 2), (2, 0)), ((10, 8), (7, 13))]
for lhs_dilation, rhs_dilation in itertools.product(
[(1, 1), (1, 2), (1, 4)], repeat=2)
for rng_factory in [jtu.rand_small]
for dim_nums, perms in [
(("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])),
(("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])),
(("NCHW", "HWIO", "NHWC"), ([0, 1, 2, 3], [2, 3, 1, 0])),
]))
def testConvGeneralDilated(self, lhs_shape, rhs_shape, dtype, strides,
padding, lhs_dilation, rhs_dilation,
dimension_numbers, perms, rng_factory):
rng = rng_factory()
lhs_perm, rhs_perm = perms # permute to compatible shapes
def args_maker():
return [lax.transpose(rng(lhs_shape, dtype), lhs_perm),
lax.transpose(rng(rhs_shape, dtype), rhs_perm)]
def fun(lhs, rhs):
return lax.conv_general_dilated(
lhs, rhs, strides, padding, lhs_dilation, rhs_dilation,
dimension_numbers)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
# TODO(mattjj): test conv_general_dilated against numpy
@staticmethod
def _conv_transpose_via_grad(data, kernel, strides, padding,
rhs_dilation=None, dimension_numbers=None):
"""Helper method: calculates conv transpose via grad for testing."""
assert len(data.shape) == len(kernel.shape)
nspatial = len(data.shape) - 2
one = (1,) * nspatial
rhs_dilation = rhs_dilation or one
dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
dimension_numbers)
in_shape = onp.take(data.shape, dn.lhs_spec)
in_sdims = in_shape[2:]
k_shape = onp.take(kernel.shape, dn.rhs_spec)
k_sdims = k_shape[2:]
e_k_sdims = [(k-1) * r + 1 for k, r in zip(k_sdims, rhs_dilation)]
if padding == 'VALID':
o_sdims = [in_sdims[i]*strides[i] + max(e_k_sdims[i]-strides[i],0)
for i in range(nspatial)]
elif padding == 'SAME':
o_sdims = [in_sdims[i]*strides[i] for i in range(nspatial)]
o_shape = [in_shape[0], k_shape[1]] + o_sdims
out_spec_inv = [x[0] for x in
sorted(enumerate(dn.out_spec), key=lambda x: x[1])]
o_layout = onp.take(onp.array(o_shape), out_spec_inv)
placeholder = onp.ones(o_layout, data.dtype)
conv = lambda x: lax.conv_general_dilated(x, kernel, strides, padding,
one, rhs_dilation, dn)
_, g = api.vjp(conv, placeholder)
return g(data)[0]
@staticmethod
def _transpose_conv_kernel(data, kernel, dimension_numbers):
dn = lax.conv_dimension_numbers(data.shape, kernel.shape,
dimension_numbers)
spatial_axes = onp.array(dn.rhs_spec)[2:]
for axis in spatial_axes:
kernel = onp.flip(kernel, axis)
kernel = onp.swapaxes(kernel, dn.rhs_spec[0], dn.rhs_spec[1])
return kernel
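  # Together the two helpers encode the identity checked below: conv_transpose
  # with transpose_kernel=False matches the lhs-gradient of a forward conv whose
  # kernel has its spatial axes flipped and its I/O channel axes swapped.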
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
"rng_factory": rng_factory, 'dspec': dspec}
for lhs_shape, rhs_shape in [
((b, 9, 10, i), (k, k, j, i)) # NB: i,j flipped in RHS for transpose
for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]
for padding in ["VALID", "SAME"]
for dspec in [('NHWC', 'HWIO', 'NHWC'),]
for rhs_dilation in [None, (2, 2)]
for rng_factory in [jtu.rand_small]))
def testConvTranspose2DT(self, lhs_shape, rhs_shape, dtype, strides,
padding, dspec, rhs_dilation, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
# NB: this test calculates conv_transpose performing identically to the
# lhs-grad of conv.
def fun(lhs, rhs):
return lax.conv_transpose(lhs, rhs, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec,
transpose_kernel=True)
def fun_via_grad(lhs, rhs):
return self._conv_transpose_via_grad(lhs, rhs, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec)
# NB: below just checks for agreement, we're not calling numpy.
self._CheckAgainstNumpy(fun, fun_via_grad, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
"rng_factory": rng_factory, 'dspec': dspec}
for lhs_shape, rhs_shape in [
((b, 9, 10, i), (k, k, i, j))
for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]
for dtype in float_dtypes
for strides in [(1, 1), (1, 2), (2, 1), (2, 2), (3, 3)]
for padding in ["VALID", "SAME"]
for dspec in [('NHWC', 'HWIO', 'NHWC'),]
for rhs_dilation in [None, (2, 2)]
for rng_factory in [jtu.rand_small]))
def testConvTranspose2D(self, lhs_shape, rhs_shape, dtype, strides,
padding, dspec, rhs_dilation, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_transpose(lhs, rhs, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec,
transpose_kernel=False)
def fun_via_grad(lhs, rhs):
rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec)
# NB: below just checks for agreement, we're not calling numpy.
self._CheckAgainstNumpy(fun, fun_via_grad, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_rhs_dilation={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype), strides, padding, rhs_dilation),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rhs_dilation": rhs_dilation,
"rng_factory": rng_factory, 'dspec': dspec}
for lhs_shape, rhs_shape in [
((b, 10, i), (k, i, j))
for b, i, j, k in itertools.product([2,3],[2,3],[2,3],[3,4,5])]
for dtype in float_dtypes
for strides in [(1,), (2,), (3,)]
for padding in ["VALID", "SAME"]
for dspec in [('NHC', 'HIO', 'NHC'),]
for rhs_dilation in [None, (2,)]
for rng_factory in [jtu.rand_small]))
def testConvTranspose1D(self, lhs_shape, rhs_shape, dtype, strides,
padding, dspec, rhs_dilation, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.conv_transpose(lhs, rhs, strides, padding,
dimension_numbers=dspec,
rhs_dilation=rhs_dilation,
transpose_kernel=False)
def fun_via_grad(lhs, rhs):
rhs_t = self._transpose_conv_kernel(lhs, rhs, dimension_numbers=dspec)
return self._conv_transpose_via_grad(lhs, rhs_t, strides, padding,
rhs_dilation=rhs_dilation,
dimension_numbers=dspec)
# NB: below just checks for agreement, we're not calling numpy.
self._CheckAgainstNumpy(fun, fun_via_grad, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_precision={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
precision),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"precision": precision, "rng_factory": rng_factory}
for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]
for dtype in all_dtypes
for precision in [None, lax.Precision.DEFAULT, lax.Precision.HIGH,
lax.Precision.HIGHEST]
for rng_factory in [jtu.rand_default]))
def testDot(self, lhs_shape, rhs_shape, dtype, precision, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
self._CompileAndCheck(partial(lax.dot, precision=precision), args_maker,
check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype)),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"rng_factory": rng_factory}
for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]
for dtype in all_dtypes
for rng_factory in [jtu.rand_default]))
def testDotAgainstNumpy(self, lhs_shape, rhs_shape, dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
tol = {
onp.float16: 1e-2,
onp.float64: max(jtu.default_tolerance()[onp.dtype(onp.float64)], 1e-14),
onp.complex128: max(jtu.default_tolerance()[onp.dtype(onp.complex128)],
1e-14)
}
lax_op = partial(lax.dot, precision=lax.Precision.HIGHEST)
self._CheckAgainstNumpy(lax_op, lax_reference.dot, args_maker, tol=tol)
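  # dot_general dimension_numbers are ((lhs_contracting, rhs_contracting),
  # (lhs_batch, rhs_batch)); a plain (m, k) x (k, n) matmul is
  # (([1], [0]), ([], [])) and a batched (b, m, k) x (b, k, n) matmul is
  # (([2], [1]), ([0], [0])), as in the cases below.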
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_lhs_contracting={}_rhs_contracting={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
lhs_contracting, rhs_contracting),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"lhs_contracting": lhs_contracting, "rhs_contracting": rhs_contracting,
"rng_factory": rng_factory}
for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [
[(3, 5), (2, 5), [1], [1]],
[(5, 3), (5, 2), [0], [0]],
[(5, 3, 2), (5, 2, 4), [0], [0]],
[(5, 3, 2), (5, 2, 4), [0,2], [0,1]],
[(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]],
[(3, 2), (2, 4), [1], [0]],
]
for dtype in all_dtypes
for rng_factory in [jtu.rand_small]))
def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype,
lhs_contracting, rhs_contracting, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))
def fun(lhs, rhs):
return lax.dot_general(lhs, rhs, dimension_numbers)
self._CompileAndCheck(fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_dimension_numbers={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
dimension_numbers),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"dimension_numbers": dimension_numbers, "rng_factory": rng_factory}
for lhs_shape, rhs_shape, dimension_numbers in [
((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),
]
for dtype in all_dtypes
for rng_factory in [jtu.rand_small]))
def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype,
dimension_numbers, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
def fun(lhs, rhs):
return lax.dot_general(lhs, rhs, dimension_numbers)
self._CompileAndCheck(fun, args_maker, check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_dimension_numbers={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
dimension_numbers),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"dimension_numbers": dimension_numbers, "rng_factory": rng_factory}
for lhs_shape, rhs_shape, dimension_numbers in [
((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),
]
for dtype in all_dtypes
for rng_factory in [jtu.rand_small]))
def testDotGeneralAgainstNumpy(self, lhs_shape, rhs_shape, dtype,
dimension_numbers, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
op = lambda x, y: lax.dot_general(x, y, dimension_numbers)
numpy_op = lambda x, y: lax_reference.dot_general(x, y, dimension_numbers)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_broadcast_sizes={}".format(
shape, onp.dtype(dtype).name, broadcast_sizes),
"shape": shape, "dtype": dtype, "broadcast_sizes": broadcast_sizes,
"rng_factory": rng_factory}
for shape in [(), (2, 3)]
for dtype in default_dtypes
for broadcast_sizes in [(), (2,), (1, 2)]
for rng_factory in [jtu.rand_default]))
def testBroadcast(self, shape, dtype, broadcast_sizes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.broadcast(x, broadcast_sizes)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_broadcast_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), broadcast_sizes),
"shape": shape, "dtype": dtype, "broadcast_sizes": broadcast_sizes,
"rng_factory": rng_factory}
for shape in [(), (2, 3)]
for dtype in default_dtypes
for broadcast_sizes in [(), (2,), (1, 2)]
for rng_factory in [jtu.rand_default]))
def testBroadcastAgainstNumpy(self, shape, dtype, broadcast_sizes, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.broadcast(x, broadcast_sizes)
numpy_op = lambda x: lax_reference.broadcast(x, broadcast_sizes)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_bcdims={}".format(
jtu.format_shape_dtype_string(inshape, dtype),
outshape, broadcast_dimensions),
"inshape": inshape, "dtype": dtype, "outshape": outshape,
"dimensions": broadcast_dimensions, "rng_factory": rng_factory}
for inshape, outshape, broadcast_dimensions in [
([2], [2, 2], [0]),
([2], [2, 2], [1]),
([2], [2, 3], [0]),
([], [2, 3], []),
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testBroadcastInDim(self, inshape, dtype, outshape, dimensions, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(inshape, dtype)]
op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_bcdims={}".format(
jtu.format_shape_dtype_string(inshape, dtype),
outshape, broadcast_dimensions),
"inshape": inshape, "dtype": dtype, "outshape": outshape,
"dimensions": broadcast_dimensions, "rng_factory": rng_factory}
for inshape, outshape, broadcast_dimensions in [
([2], [2, 2], [0]),
([2], [2, 2], [1]),
([2], [2, 3], [0]),
([], [2, 3], []),
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testBroadcastInDimAgainstNumpy(self, inshape, dtype, outshape,
dimensions, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(inshape, dtype)]
op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
numpy_op = lambda x: lax_reference.broadcast_in_dim(x, outshape, dimensions)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
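  # broadcast_in_dim maps operand axis k to output axis dimensions[k]; in the
  # cases above, ([2], [2, 2], [0]) gives out[i, j] = x[i] while
  # ([2], [2, 2], [1]) gives out[i, j] = x[j].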
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"rng_factory": rng_factory}
for dtype in default_dtypes
for arg_shape, out_shape in [
[(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]
]
for rng_factory in [jtu.rand_default]))
def testReshape(self, arg_shape, out_shape, dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(arg_shape, dtype)]
op = lambda x: lax.reshape(x, out_shape)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype)),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"rng_factory": rng_factory}
for dtype in default_dtypes
for arg_shape, out_shape in [
[(3, 4), (12,)], [(2, 1, 4), (8,)], [(2, 2, 4), (2, 8)]
]
for rng_factory in [jtu.rand_default]))
def testReshapeAgainstNumpy(self, arg_shape, out_shape, dtype, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(arg_shape, dtype)]
op = lambda x: lax.reshape(x, out_shape)
numpy_op = lambda x: lax_reference.reshape(x, out_shape)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_pads={}"
.format(jtu.format_shape_dtype_string(shape, dtype), pads),
"shape": shape, "dtype": dtype, "pads": pads, "rng_factory": jtu.rand_small}
for shape in [(2, 3)]
for dtype in default_dtypes
for pads in [[(1, 2, 1), (0, 1, 0)]]))
def testPad(self, shape, dtype, pads, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
fun = lambda operand: lax.pad(operand, onp.array(0, dtype), pads)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_pads={}"
.format(jtu.format_shape_dtype_string(shape, dtype), pads),
"shape": shape, "dtype": dtype, "pads": pads, "rng_factory": jtu.rand_small}
for shape in [(2, 3)]
for dtype in default_dtypes
for pads in [[(1, 2, 1), (0, 1, 0)]]))
def testPadAgainstNumpy(self, shape, dtype, pads, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.pad(x, onp.array(0, dtype), pads)
numpy_op = lambda x: lax_reference.pad(x, onp.array(0, dtype), pads)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
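  # Each entry of a lax.pad config is (low, high, interior) padding for one axis;
  # [(1, 2, 1), (0, 1, 0)] above adds 1 low / 2 high / 1 between-element pad on
  # axis 0 and 1 high pad on axis 1, all filled with the padding value.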
def testReverse(self):
rev = api.jit(lambda operand: lax.rev(operand, dimensions))
dimensions = [0]
self.assertAllClose(onp.array([3, 2, 1]), rev(onp.array([1, 2, 3])),
check_dtypes=False)
dimensions = [0, 1]
self.assertAllClose(onp.array([[6, 5, 4], [3, 2, 1]]),
rev(onp.array([[1, 2, 3], [4, 5, 6]])),
check_dtypes=False)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_predshape={}_argshapes={}".format(
jtu.format_shape_dtype_string(pred_shape, onp.bool_),
jtu.format_shape_dtype_string(arg_shape, arg_dtype)),
"pred_shape": pred_shape, "arg_shape": arg_shape, "arg_dtype": arg_dtype,
"rng_factory": rng_factory}
for arg_shape in [(), (3,), (2, 3)]
for pred_shape in ([(), arg_shape] if arg_shape else [()])
for arg_dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testSelect(self, pred_shape, arg_shape, arg_dtype, rng_factory):
def args_maker():
return [rng(pred_shape, onp.bool_), rng(arg_shape, arg_dtype),
rng(arg_shape, arg_dtype)]
rng = rng_factory()
return self._CompileAndCheck(lax.select, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_predshape={}_argshapes={}".format(
jtu.format_shape_dtype_string(pred_shape, onp.bool_),
jtu.format_shape_dtype_string(arg_shape, arg_dtype)),
"pred_shape": pred_shape, "arg_shape": arg_shape, "arg_dtype": arg_dtype,
"rng_factory": rng_factory}
for arg_shape in [(), (3,), (2, 3)]
for pred_shape in ([(), arg_shape] if arg_shape else [()])
for arg_dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testSelectAgainstNumpy(self, pred_shape, arg_shape, arg_dtype, rng_factory):
def args_maker():
return [rng(pred_shape, onp.bool_), rng(arg_shape, arg_dtype),
rng(arg_shape, arg_dtype)]
rng = rng_factory()
return self._CheckAgainstNumpy(lax.select, lax_reference.select, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_start_indices={}_limit_indices={}_strides={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, limit_indices, strides),
"shape": shape, "dtype": dtype, "starts": start_indices,
"limits": limit_indices, "strides": strides, "rng_factory": rng_factory}
for shape, start_indices, limit_indices, strides in [
[(3,), (1,), (2,), None],
[(7,), (4,), (7,), None],
[(5,), (1,), (5,), (2,)],
[(8,), (1,), (6,), (2,)],
[(5, 3), (1, 1), (3, 2), None],
[(5, 3), (1, 1), (3, 1), None],
[(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
[(5, 3), (1, 1), (2, 1), (1, 1)],
[(5, 3), (1, 1), (5, 3), (2, 1)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testSlice(self, shape, dtype, starts, limits, strides, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.slice(x, starts, limits, strides)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_start_indices={}_limit_indices={}_strides={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, limit_indices, strides),
"shape": shape, "dtype": dtype, "starts": start_indices,
"limits": limit_indices, "strides": strides, "rng_factory": rng_factory}
for shape, start_indices, limit_indices, strides in [
[(3,), (1,), (2,), None],
[(7,), (4,), (7,), None],
[(5,), (1,), (5,), (2,)],
[(8,), (1,), (6,), (2,)],
[(5, 3), (1, 1), (3, 2), None],
[(5, 3), (1, 1), (3, 1), None],
[(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
[(5, 3), (1, 1), (2, 1), (1, 1)],
[(5, 3), (1, 1), (5, 3), (2, 1)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testSliceAgainstNumpy(self, shape, dtype, starts, limits,
strides, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.slice(x, starts, limits, strides)
numpy_op = lambda x: lax_reference.slice(x, starts, limits, strides)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_size_indices={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, size_indices),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"size_indices": size_indices, "rng_factory": rng_factory}
for shape, start_indices, size_indices in [
[(3,), onp.array((1,)), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(5, 3), onp.array((1, 1)), (3, 1)],
[(7, 5, 3), onp.array((4, 1, 0)), (2, 0, 1)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicSlice(self, shape, dtype, start_indices, size_indices, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), onp.array(start_indices)]
op = lambda x, starts: lax.dynamic_slice(x, starts, size_indices)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_size_indices={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, size_indices),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"size_indices": size_indices, "rng_factory": rng_factory}
for shape, start_indices, size_indices in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicSliceAgainstNumpy(self, shape, dtype, start_indices,
size_indices, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype), onp.array(start_indices)]
op = lambda x, s: lax.dynamic_slice(x, s, size_indices)
numpy_op = lambda x, s: lax_reference.dynamic_slice(x, s, size_indices)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
def testDynamicSliceInDim(self):
# Regression test for mixed type problem in dynamic_slice_in_dim.
rng = jtu.rand_default()
x = rng((6, 7), onp.int32)
onp.testing.assert_equal(lax.dynamic_slice_in_dim(x, 2, 3), x[2:5])
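  # Note on the dynamic_slice semantics exercised above: start indices are
  # runtime values (unlike lax.slice, whose starts/limits/strides are static) and
  # are clamped so the slice_sizes window stays in bounds; dynamic_slice_in_dim
  # is the single-axis convenience wrapper.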
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_update_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, update_shape),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"update_shape": update_shape, "rng_factory": rng_factory}
for shape, start_indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicUpdateSlice(self, shape, dtype, start_indices, update_shape,
rng_factory):
rng = rng_factory()
def args_maker():
return [rng(shape, dtype), rng(update_shape, dtype),
onp.array(start_indices)]
self._CompileAndCheck(lax.dynamic_update_slice, args_maker,
check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_update_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, update_shape),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"update_shape": update_shape, "rng_factory": rng_factory}
for shape, start_indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicUpdateSliceAgainstNumpy(self, shape, dtype, start_indices,
update_shape, rng_factory):
rng = rng_factory()
def args_maker():
return [rng(shape, dtype), rng(update_shape, dtype),
onp.array(start_indices)]
self._CheckAgainstNumpy(lax.dynamic_update_slice,
lax_reference.dynamic_update_slice, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_perm={}".format(
jtu.format_shape_dtype_string(shape, dtype), perm),
"shape": shape, "dtype": dtype, "perm": perm, "rng_factory": rng_factory}
for shape, perm in [
[(3, 4), (1, 0)],
[(3, 4), (0, 1)],
[(3, 4, 5), (2, 1, 0)],
[(3, 4, 5), (1, 0, 2)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testTranspose(self, shape, dtype, perm, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.transpose(x, perm)
self._CompileAndCheck(op, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_perm={}".format(
jtu.format_shape_dtype_string(shape, dtype), perm),
"shape": shape, "dtype": dtype, "perm": perm, "rng_factory": rng_factory}
for shape, perm in [
[(3, 4), (1, 0)],
[(3, 4), (0, 1)],
[(3, 4, 5), (2, 1, 0)],
[(3, 4, 5), (1, 0, 2)],
]
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testTransposeAgainstNumpy(self, shape, dtype, perm, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.transpose(x, perm)
numpy_op = lambda x: lax_reference.transpose(x, perm)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_inshape={}_reducedims={}_initval={}"
.format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims,
init_val),
"op": op, "init_val": init_val, "shape": shape, "dtype": dtype,
"dims": dims, "rng_factory": rng_factory}
for init_val, op, types in [
(0, lax.add, default_dtypes),
(1, lax.mul, default_dtypes),
(0, lax.max, all_dtypes), # non-monoidal
(-onp.inf, lax.max, float_dtypes),
(dtypes.iinfo(onp.int32).min, lax.max, [onp.int32]),
# (dtypes.iinfo(onp.int64).min, lax.max, [onp.int64]), # TODO fails
(dtypes.iinfo(onp.uint32).min, lax.max, [onp.uint32]),
(dtypes.iinfo(onp.uint64).min, lax.max, [onp.uint64]),
(onp.inf, lax.min, float_dtypes),
(dtypes.iinfo(onp.int32).max, lax.min, [onp.int32]),
# (dtypes.iinfo(onp.int64).max, lax.min, [onp.int64]), # TODO fails
(dtypes.iinfo(onp.uint32).max, lax.min, [onp.uint32]),
(dtypes.iinfo(onp.uint64).max, lax.min, [onp.uint64]),
]
for dtype in types
for shape, dims in [
[(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)],
[(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)]
]
for rng_factory in [
jtu.rand_default if dtypes.issubdtype(dtype, onp.integer)
else jtu.rand_small]))
def testReduce(self, op, init_val, shape, dtype, dims, rng_factory):
rng = rng_factory()
init_val = onp.asarray(init_val, dtype=dtype)
fun = lambda operand, init_val: lax.reduce(operand, init_val, op, dims)
args_maker = lambda: [rng(shape, dtype), init_val]
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
# we separately test the version that uses a concrete init_val because it
# can hit different code paths
fun = lambda operand: lax.reduce(operand, init_val, op, dims)
args_maker = lambda: [rng(shape, dtype)]
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
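  # lax.reduce expects `op` to be associative and `init_val` to act as its
  # identity over the reduced dimensions; most pairs above use the monoid
  # identity (0 for add, 1 for mul, +/-inf or dtype extremes for min/max), with
  # the (0, lax.max) case deliberately flagged as non-monoidal.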
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}_padding={}"
.format(op.__name__, onp.dtype(dtype).name, padding),
"op": op, "init_val": init_val, "dtype": dtype, "padding": padding,
"rng_factory": rng_factory}
for init_val, op, dtypes in [
(0, lax.add, [onp.float32]),
(-onp.inf, lax.max, [onp.float32]),
(onp.inf, lax.min, [onp.float32]),
]
for dtype in dtypes
for padding in ["VALID", "SAME"]
for rng_factory in [jtu.rand_small]))
def testReduceWindow(self, op, init_val, dtype, padding, rng_factory):
rng = rng_factory()
init_val = onp.asarray(init_val, dtype=dtype)
all_configs = itertools.chain(
itertools.product(
[(4, 6)],
[(2, 1), (1, 2)],
[(1, 1), (2, 1), (1, 2)]),
itertools.product(
[(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)],
[(1, 2, 2, 1), (1, 1, 1, 1)]))
def fun(operand, init_val):
return lax.reduce_window(operand, init_val, op, dims, strides, padding)
# pylint: disable=cell-var-from-loop
for shape, dims, strides in all_configs:
args_maker = lambda: [rng(shape, dtype), init_val]
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
# pylint: enable=cell-var-from-loop
# we separately test the version that uses a concrete init_val because it
# can hit different code paths
def fun(operand):
return lax.reduce_window(operand, init_val, op, dims, strides, padding)
# pylint: disable=cell-var-from-loop
for shape, dims, strides in all_configs:
args_maker = lambda: [rng(shape, dtype)]
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
# pylint: enable=cell-var-from-loop
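  # reduce_window slides a `dims`-shaped window with the given strides over the
  # operand and reduces each window with (op, init_val); "SAME" padding uses
  # init_val as the pad element so each output dimension keeps roughly
  # ceil(input / stride) extent, while "VALID" keeps only full windows.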
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis}
for dtype in [onp.float32, onp.int32, onp.uint32]
for shape in [(5,), (5, 7)]
for axis in [-1, len(shape) - 1]
for rng_factory in [jtu.rand_default]))
def testSort(self, shape, dtype, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
fun = lambda x: lax.sort(x, axis)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis}
for dtype in [onp.float32, onp.int32, onp.uint32]
for shape in [(5,), (5, 7)]
for axis in [-1, len(shape) - 1]
for rng_factory in [jtu.rand_default]))
def testSortAgainstNumpy(self, shape, dtype, axis, rng_factory):
rng = rng_factory()
args_maker = lambda: [rng(shape, dtype)]
op = lambda x: lax.sort(x, axis)
numpy_op = lambda x: lax_reference.sort(x, axis)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_keyshape={}_valshape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, key_dtype),
jtu.format_shape_dtype_string(shape, val_dtype),
axis),
"rng_factory": rng_factory, "shape": shape,
"key_dtype": key_dtype, "val_dtype": val_dtype, "axis": axis}
for key_dtype in [onp.float32, onp.int32, onp.uint32]
for val_dtype in [onp.float32, onp.int32, onp.uint32]
for shape in [(3,), (5, 3)]
for axis in [-1, len(shape) - 1]
for rng_factory in [jtu.rand_default]))
def testSortKeyVal(self, shape, key_dtype, val_dtype, axis, rng_factory):
rng = rng_factory()
# This test relies on the property that wherever keys are tied, values are
# too, since we don't guarantee the same ordering of values with equal keys.
# To avoid that case, we generate unique keys (globally in the key array).
perm_rng = onp.random.RandomState(0)
def args_maker():
flat_keys = onp.arange(onp.prod(shape, dtype=int), dtype=key_dtype)
keys = perm_rng.permutation(flat_keys).reshape(shape)
values = rng(shape, val_dtype)
return keys, values
fun = lambda keys, values: lax.sort_key_val(keys, values, axis)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_keyshape={}_valshape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, key_dtype),
jtu.format_shape_dtype_string(shape, val_dtype),
axis),
"rng_factory": rng_factory, "shape": shape,
"key_dtype": key_dtype, "val_dtype": val_dtype, "axis": axis}
for key_dtype in [onp.float32, onp.int32, onp.uint32]
for val_dtype in [onp.float32, onp.int32, onp.uint32]
for shape in [(3,), (5, 3)]
for axis in [-1, len(shape) - 1]
for rng_factory in [jtu.rand_default]))
def testSortKeyValAgainstNumpy(self, shape, key_dtype, val_dtype, axis, rng_factory):
rng = rng_factory()
# This test relies on the property that wherever keys are tied, values are
# too, since we don't guarantee the same ordering of values with equal keys.
# To avoid that case, we generate unique keys (globally in the key array).
perm_rng = onp.random.RandomState(0)
def args_maker():
flat_keys = onp.arange(onp.prod(shape, dtype=int), dtype=key_dtype)
keys = perm_rng.permutation(flat_keys).reshape(shape)
values = rng(shape, val_dtype)
return keys, values
op = lambda ks, vs: lax.sort_key_val(ks, vs, axis)
numpy_op = lambda ks, vs: lax_reference.sort_key_val(ks, vs, axis)
self._CheckAgainstNumpy(op, numpy_op, args_maker)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype)),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"rng_factory": rng_factory}
for lhs_shape, rhs_shape in [((3, 2), (2, 4)),
((5, 3, 2), (5, 2, 4)),
((1, 2, 2, 3), (1, 2, 3, 1))]
for dtype in float_dtypes
for rng_factory in [jtu.rand_small]))
def testBatchMatMul(self, lhs_shape, rhs_shape, dtype, rng_factory):
rng = rng_factory()
arg_maker = lambda: [rng(lhs_shape, dtype), rng(rhs_shape, dtype)]
self._CompileAndCheck(lax.batch_matmul, arg_maker, check_dtypes=True)
def testCollapse(self):
@api.jit
def collapse_first_two(x):
return lax.collapse(x, 0, 2)
self.assertEqual((6,), collapse_first_two(onp.zeros((2, 3))).shape)
self.assertEqual((6, 4), collapse_first_two(onp.zeros((2, 3, 4))).shape)
self.assertEqual((2, 3, 4),
collapse_first_two(onp.zeros((1, 2, 3, 4))).shape)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), idxs, axes),
"shape": shape, "dtype": dtype, "idxs": idxs, "axes": axes, "rng_factory": rng_factory}
for dtype in all_dtypes
for shape, idxs, axes in [
[(3, 4, 5), (onp.array([0, 2, 1]),), (0,)],
[(3, 4, 5), (onp.array([-1, -2]),), (0,)],
[(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 1)],
[(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 2)],
]
for rng_factory in [jtu.rand_default]))
def testIndexTake(self, shape, dtype, idxs, axes, rng_factory):
rng = rng_factory()
rand_idxs = lambda: tuple(rng(e.shape, e.dtype) for e in idxs)
args_maker = lambda: [rng(shape, dtype), rand_idxs()]
fun = lambda src, idxs: lax.index_take(src, idxs, axes)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), idxs, dnums,
slice_sizes),
"shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes, "rng_factory": rng_factory,
"rng_idx_factory": rng_idx_factory}
for dtype in all_dtypes
for shape, idxs, dnums, slice_sizes in [
((5,), onp.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
((10,), onp.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
((10, 5,), onp.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
((10, 5), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
]
for rng_idx_factory in [partial(jtu.rand_int, max(shape))]
for rng_factory in [jtu.rand_default]))
def testGather(self, shape, dtype, idxs, dnums, slice_sizes, rng_factory,
rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(shape, dtype), rand_idxs()]
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
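# Rough reading of the first parameterization above (illustrative only):
# with operand shape (5,), an index array of shape (2, 1), slice_sizes=(1,),
# offset_dims=(), collapsed_slice_dims=(0,) and start_index_map=(0,),
# lax.gather picks one element per index row, so the result has shape (2,)
# and equals operand[idxs[:, 0]] (i.e. behaves like onp.take along axis 0).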
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_update={}_dnums={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
idxs, update_shape, dnums),
"arg_shape": arg_shape, "dtype": dtype, "idxs": idxs,
"update_shape": update_shape, "dnums": dnums,
"rng_factory": rng_factory, "rng_idx_factory": rng_idx_factory}
for dtype in float_dtypes
for arg_shape, idxs, update_shape, dnums in [
((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
]
for rng_idx_factory in [partial(jtu.rand_int, max(arg_shape))]
for rng_factory in [jtu.rand_default]))
def testScatterAdd(self, arg_shape, dtype, idxs, update_shape, dnums,
rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),
rng(update_shape, dtype)]
fun = partial(lax.scatter_add, dimension_numbers=dnums)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
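# Rough reading of the first parameterization above (illustrative only):
# with operand shape (5,), an index array of shape (2, 1), updates of shape
# (2,), update_window_dims=(), inserted_window_dims=(0,) and
# scatter_dims_to_operand_dims=(0,), lax.scatter_add adds updates[k] into the
# operand at position idxs[k, 0]; duplicate indices accumulate.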
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_update={}_dnums={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
idxs, update_shape, dnums),
"arg_shape": arg_shape, "dtype": dtype, "idxs": idxs,
"update_shape": update_shape, "dnums": dnums,
"rng_factory": rng_factory, "rng_idx_factory": rng_idx_factory}
for dtype in float_dtypes
for arg_shape, idxs, update_shape, dnums in [
((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
]
for rng_idx_factory in [partial(jtu.rand_int, max(arg_shape))]
for rng_factory in [jtu.rand_default]))
def testScatterMin(self, arg_shape, dtype, idxs, update_shape, dnums,
rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),
rng(update_shape, dtype)]
fun = partial(lax.scatter_min, dimension_numbers=dnums)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_update={}_dnums={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
idxs, update_shape, dnums),
"arg_shape": arg_shape, "dtype": dtype, "idxs": idxs,
"update_shape": update_shape, "dnums": dnums,
"rng_factory": rng_factory, "rng_idx_factory": rng_idx_factory}
for dtype in float_dtypes
for arg_shape, idxs, update_shape, dnums in [
((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
]
for rng_idx_factory in [partial(jtu.rand_int, max(arg_shape))]
for rng_factory in [jtu.rand_default]))
def testScatterMax(self, arg_shape, dtype, idxs, update_shape, dnums,
rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),
rng(update_shape, dtype)]
fun = partial(lax.scatter_max, dimension_numbers=dnums)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_update={}_dnums={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
idxs, update_shape, dnums),
"arg_shape": arg_shape, "dtype": dtype, "idxs": idxs,
"update_shape": update_shape, "dnums": dnums,
"rng_factory": rng_factory, "rng_idx_factory": rng_idx_factory}
for dtype in float_dtypes
for arg_shape, idxs, update_shape, dnums in [
((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
]
for rng_idx_factory in [partial(jtu.rand_int, max(arg_shape))]
for rng_factory in [jtu.rand_default]))
def testScatter(self, arg_shape, dtype, idxs, update_shape, dnums,
rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
rand_idxs = lambda: rng_idx(idxs.shape, idxs.dtype)
args_maker = lambda: [rng(arg_shape, dtype), rand_idxs(),
rng(update_shape, dtype)]
fun = partial(lax.scatter, dimension_numbers=dnums)
self._CompileAndCheck(fun, args_maker, check_dtypes=True)
def testLongConstantHandling(self):
if six.PY3:
self.skipTest("Test is Python 2 specific")
self.assertTrue(api.jit(lambda x: lax.lt(x, long(10)))(long(3))) # noqa: F821
def testIssue831(self):
# Tests the DeviceTuple constant handler
def f(x):
g = lambda *args: args[1]
return api.jit(lax.fori_loop, static_argnums=(2,))(0, 10, g, x)
api.jit(f)(1.) # doesn't crash
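# Note: static_argnums=(2,) marks the third positional argument of
# lax.fori_loop (the loop body `g`) as static, so the loop itself can be
# wrapped in api.jit; the outer api.jit(f) then exercises the constant
# handler mentioned above.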
def testReshapeWithUnusualShapes(self):
ans = lax.reshape(onp.ones((3,), onp.float32), (lax.add(1, 2), 1))
self.assertAllClose(ans, onp.ones((3, 1), onp.float32), check_dtypes=True)
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*",
lambda: lax.reshape(onp.ones(3,), (onp.array([3, 1]),)))
self.assertRaisesRegex(
TypeError,
"Shapes must be 1D sequences of concrete values of integer type.*",
lambda: lax.reshape(onp.ones(3,), (1.5, 2.0)))
@jtu.skip_on_devices("tpu") # S16 not supported on TPU
def testDynamicSliceTypeErrors(self):
self.assertRaisesRegex(
TypeError,
"index arguments to dynamic_slice must be integers of the same type",
lambda: lax.dynamic_slice(onp.ones((3, 4), dtype=onp.float32),
(onp.int32(1), onp.int16(2)), (2, 2)))
@jtu.skip_on_devices("tpu") # S16 not supported on TPU
def testDynamicUpdateSliceTypeErrors(self):
self.assertRaisesRegex(
TypeError,
"index arguments to dynamic_update_slice must be integers of the same "
"type",
lambda: lax.dynamic_update_slice(onp.ones((3, 4), dtype=onp.float32),
onp.zeros((2, 2), dtype=onp.float32),
(onp.int32(1), onp.int16(2))))
class DeviceConstantTest(jtu.JaxTestCase):
def _CheckDeviceConstant(self, make_const, expected):
# check casting to ndarray works
asarray_result = onp.asarray(make_const())
# check passing as an argument works (should hit constant handler)
zero = onp.array(0, expected.dtype)
argument_result = lax.add(zero, make_const())
# check closing over the constant in a compiled computation works
jit_result = api.jit(lambda x: lax.add(x, make_const()))(zero)
# ensure they're all the same
self.assertAllClose(asarray_result, expected, check_dtypes=True)
self.assertAllClose(argument_result, expected, check_dtypes=True)
self.assertAllClose(jit_result, expected, check_dtypes=True)
# ensure repr doesn't crash
repr(make_const())
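# The helper above exercises a device constant three ways -- converted with
# onp.asarray, passed as an argument to a lax op, and closed over inside a
# jitted function -- and requires all three results to agree with `expected`.
# A minimal hypothetical call would look like:
#   self._CheckDeviceConstant(lambda: lax.full((3,), 2., onp.float32),
#                             onp.full((3,), 2., onp.float32))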
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_fill={}".format(
jtu.format_shape_dtype_string(shape, dtype) if dtype else shape,
fill_value),
"shape": shape, "dtype": dtype, "fill_value": fill_value}
for dtype in itertools.chain(default_dtypes, [None])
for shape in [(), (3,), (2, 3), (2, 3, 4), (1001, 1001)]
for fill_value in [0, 1, onp.pi]))
def testFilledConstant(self, shape, fill_value, dtype):
make_const = lambda: lax.full(shape, fill_value, dtype)
expected = onp.full(shape, fill_value,
dtype or dtypes.result_type(fill_value))
self._CheckDeviceConstant(make_const, expected)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_dim={}".format(
jtu.format_shape_dtype_string(shape, dtype), dimension),
"shape": shape, "dtype": dtype, "dimension": dimension}
for dtype in default_dtypes
for shape in [(), (3,), (2, 3), (2, 3, 4),
# TODO(mattjj): re-enable
# (1001, 1001), (101, 101, 101),
]
for dimension in range(len(shape))))
def testIotaConstant(self, dtype, shape, dimension):
make_const = lambda: lax.broadcasted_iota(dtype, shape, dimension)
arr = onp.arange(shape[dimension], dtype=dtypes.canonicalize_dtype(dtype))
singleton_shape = [1] * len(shape)
singleton_shape[dimension] = shape[dimension]
expected = onp.broadcast_to(arr.reshape(singleton_shape), shape)
self._CheckDeviceConstant(make_const, expected)
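# Worked example of the expected-value construction above (illustrative): for
# shape (2, 3) and dimension=1, arr = [0, 1, 2], singleton_shape = [1, 3],
# and broadcasting gives [[0, 1, 2], [0, 1, 2]], matching broadcasted_iota.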
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_{}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), axes),
"shape": shape, "dtype": dtype, "axes": axes}
for dtype in default_dtypes
for shape, axes in [
[(2, 3), (0, 1)],
[(2, 3, 4), (0, 1)],
[(2, 3, 4), (0, 2)],
[(2, 3, 4), (1, 2)],
[(2, 3, 4), (0, 1, 2)],
[(2, 3, 4, 2), (0, 1, 2)],
[(2, 3, 4, 2), (0, 2, 3)],
[(1001, 1001), (0, 1)],
]))
def testEyeConstant(self, dtype, shape, axes):
make_const = lambda: lax.broadcasted_eye(dtype, shape, axes)
# don't check the asarray case, just assume it's right
expected = onp.asarray(make_const())
self._CheckDeviceConstant(make_const, expected)
GradTestSpec = collections.namedtuple(
"GradTestSpec",
["op", "nargs", "order", "rng_factory", "dtypes", "name", "tol"])
def grad_test_spec(op, nargs, order, rng_factory, dtypes, name=None, tol=None):
return GradTestSpec(
op, nargs, order, rng_factory, dtypes, name or op.__name__, tol)
grad_float_dtypes = list(jtu.supported_dtypes().intersection(
{onp.float32, onp.float64}))
grad_complex_dtypes = list(jtu.supported_dtypes().intersection(
{onp.complex64, onp.complex128}))
grad_inexact_dtypes = grad_float_dtypes + grad_complex_dtypes
LAX_GRAD_OPS = [
grad_test_spec(lax.neg, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.floor, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.ceil, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.round, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.exp, nargs=1, order=2, rng_factory=jtu.rand_small,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.expm1, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.log, nargs=1, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.log1p, nargs=1, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.sinh, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes + [onp.complex64], tol=1e-5),
grad_test_spec(lax.cosh, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes, tol=1e-5),
grad_test_spec(lax.tanh, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes, tol=1e-5),
grad_test_spec(lax.sin, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.cos, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.tan, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, -1.3, 1.3),
dtypes=grad_inexact_dtypes, tol=1e-3),
grad_test_spec(lax.asin, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, -1.3, 1.3),
dtypes=grad_float_dtypes, tol=1e-3),
grad_test_spec(lax.acos, nargs=1, order=2,
rng_factory=partial(jtu.rand_uniform, -1.3, 1.3),
dtypes=grad_float_dtypes, tol=1e-3),
# TODO(proteneer): atan2 input is already a representation of a
# complex number. Need to think harder about what this even means
# if each input itself is a complex number.
grad_test_spec(lax.atan2, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.erf, nargs=1, order=2, rng_factory=jtu.rand_small,
dtypes=grad_float_dtypes),
grad_test_spec(lax.erfc, nargs=1, order=2, rng_factory=jtu.rand_small,
dtypes=grad_float_dtypes),
grad_test_spec(lax.erf_inv, nargs=1, order=2, rng_factory=jtu.rand_small,
dtypes=grad_float_dtypes),
# grad_test_spec(lax.lgamma, nargs=1, order=2, rng_factory=jtu.rand_small,
# dtypes=grad_float_dtypes), # TODO(mattjj): enable
grad_test_spec(lax.bessel_i0e, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.bessel_i1e, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.real, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_complex_dtypes),
grad_test_spec(lax.imag, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_complex_dtypes),
grad_test_spec(lax.complex, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.conj, nargs=1, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.abs, nargs=1, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.pow, nargs=2, order=2, rng_factory=jtu.rand_positive,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.add, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.sub, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.mul, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.div, nargs=2, order=1, rng_factory=jtu.rand_not_small,
dtypes=grad_inexact_dtypes),
grad_test_spec(lax.max, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
grad_test_spec(lax.min, nargs=2, order=2, rng_factory=jtu.rand_default,
dtypes=grad_float_dtypes),
# TODO(mattjj): make some-equal checks more robust, enable second-order
# grad_test_spec(lax.max, nargs=2, order=1, rng_factory=jtu.rand_some_equal,
# dtypes=grad_float_dtypes, name="MaxSomeEqual"),
# grad_test_spec(lax.min, nargs=2, order=1, rng_factory=jtu.rand_some_equal,
# dtypes=grad_float_dtypes, name="MinSomeEqual"),
]
GradSpecialValuesTestSpec = collections.namedtuple(
"GradSpecialValuesTestSpec", ["op", "values", "tol"])
def grad_special_values_test_spec(op, values, tol=None):
return GradSpecialValuesTestSpec(op, values, tol)
LAX_GRAD_SPECIAL_VALUE_TESTS = [
grad_special_values_test_spec(
lax.sinh, [0.],
tol={onp.float32: 1e-2} if jtu.device_under_test() == "tpu" else None),
grad_special_values_test_spec(
lax.cosh, [0.],
tol={onp.float32: 1e-2} if jtu.device_under_test() == "tpu" else None),
grad_special_values_test_spec(lax.tanh, [0., 1000.]),
grad_special_values_test_spec(lax.sin, [0., onp.pi, onp.pi/2., onp.pi/4.]),
grad_special_values_test_spec(lax.cos, [0., onp.pi, onp.pi/2., onp.pi/4.]),
grad_special_values_test_spec(lax.tan, [0.]),
grad_special_values_test_spec(lax.asin, [0.]),
grad_special_values_test_spec(lax.acos, [0.]),
grad_special_values_test_spec(lax.atan, [0., 1000.]),
grad_special_values_test_spec(lax.erf, [0., 10.]),
grad_special_values_test_spec(lax.erfc, [0., 10.]),
]
def check_grads_bilinear(f, args, order,
modes=["fwd", "rev"], atol=None, rtol=None):
# We can use a large eps to make up for numerical inaccuracies, since the op
# is bilinear (relying on the fact that we only check one argument at a time)
lhs, rhs = args
check_grads(lambda lhs: f(lhs, rhs), (lhs,), order,
modes=modes, atol=atol, rtol=rtol, eps=1.)
check_grads(lambda rhs: f(lhs, rhs), (rhs,), order,
modes=modes, atol=atol, rtol=rtol, eps=1.)
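# Sketch of how this helper is used below (assuming lhs/rhs come from a
# small-valued rng so products stay well scaled):
#   dot = partial(lax.dot, precision=lax.Precision.HIGHEST)
#   check_grads_bilinear(dot, (lhs, rhs), order=2, modes=["fwd", "rev"])
# Holding one argument fixed makes f linear in the other, which is why a
# large finite-difference eps (1.) is numerically safe here.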
class LaxAutodiffTest(jtu.JaxTestCase):
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": jtu.format_test_name_suffix(
rec.name, shapes, itertools.repeat(dtype)),
"op": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes, "dtype": dtype,
"order": rec.order, "tol": rec.tol}
for shape_group in compatible_shapes
for shapes in CombosWithReplacement(shape_group, rec.nargs)
for dtype in rec.dtypes)
for rec in LAX_GRAD_OPS))
def testOpGrad(self, op, rng_factory, shapes, dtype, order, tol):
rng = rng_factory()
if jtu.device_under_test() == "tpu" and op is lax.pow:
raise SkipTest("pow grad imprecise on tpu")
tol = 1e-1 if num_float_bits(dtype) == 32 else tol
args = tuple(rng(shape, dtype) for shape in shapes)
check_grads(op, args, order, ["fwd", "rev"], tol, tol)
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "_{}_{}".format(rec.op.__name__, special_value),
"op": rec.op, "special_value": special_value, "tol": rec.tol}
for special_value in rec.values)
for rec in LAX_GRAD_SPECIAL_VALUE_TESTS))
def testOpGradSpecialValue(self, op, special_value, tol):
check_grads(op, (special_value,), 2, ["fwd", "rev"], rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_from_dtype={}_to_dtype={}".format(
jtu.dtype_str(from_dtype), jtu.dtype_str(to_dtype)),
"from_dtype": from_dtype, "to_dtype": to_dtype, "rng_factory": rng_factory}
for from_dtype, to_dtype in itertools.product(
float_dtypes + complex_dtypes, repeat=2)
for rng_factory in [jtu.rand_default]))
def testConvertElementTypeGrad(self, from_dtype, to_dtype, rng_factory):
rng = rng_factory()
tol = max(jtu.tolerance(to_dtype, jtu.default_gradient_tolerance),
jtu.tolerance(from_dtype, jtu.default_gradient_tolerance))
args = (rng((2, 3), from_dtype),)
convert_element_type = lambda x: lax.convert_element_type(x, to_dtype)
check_grads(convert_element_type, args, 2, ["fwd", "rev"], tol, tol, eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_min_shape={}_operand_shape={}_max_shape={}".format(
jtu.format_shape_dtype_string(min_shape, dtype),
jtu.format_shape_dtype_string(operand_shape, dtype),
jtu.format_shape_dtype_string(max_shape, dtype)),
"min_shape": min_shape, "operand_shape": operand_shape,
"max_shape": max_shape, "dtype": dtype, "rng_factory": rng_factory}
for min_shape, operand_shape, max_shape in [
[(), (), ()],
[(), (2, 3), ()],
[(2, 3), (2, 3), (2, 3)],
]
# TODO(phawkins): this test fails for bfloat16.
for dtype in [t for t in float_dtypes if t != dtypes.bfloat16]
for rng_factory in [jtu.rand_default]))
def testClampGrad(self, min_shape, operand_shape, max_shape, dtype, rng_factory):
rng = rng_factory()
tol = {dtypes.bfloat16: 1e-1, onp.float16: 1e-1, onp.float32: 1e-2}
shapes = [min_shape, operand_shape, max_shape]
min, operand, max = (rng(shape, dtype) for shape in shapes)
min, max = onp.minimum(min, max), onp.maximum(min, max)  # ensure min <= max elementwise (with broadcasting)
eps = 1e-1 if dtypes.finfo(dtype).bits == 16 else 1e-2
check_grads(lax.clamp, (min, operand, max), 2, ["fwd", "rev"], tol, tol,
eps=eps)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dim={}_baseshape=[{}]_dtype={}_narrs={}".format(
dim, ",".join(str(d) for d in base_shape), onp.dtype(dtype).name,
num_arrs),
"dim": dim, "base_shape": base_shape, "dtype": dtype,
"num_arrs": num_arrs, "rng_factory": rng_factory}
for num_arrs in [3]
for dtype in float_dtypes
for base_shape in [(4,), (3, 4), (2, 3, 4)]
for dim in range(len(base_shape))
for rng_factory in [jtu.rand_default]))
def testConcatenateGrad(self, dim, base_shape, dtype, num_arrs, rng_factory):
rng = rng_factory()
shapes = [base_shape[:dim] + (size,) + base_shape[dim+1:]
for size, _ in zip(itertools.cycle([3, 1, 4]), range(num_arrs))]
operands = tuple(rng(shape, dtype) for shape in shapes)
concatenate = lambda *args: lax.concatenate(args, dim)
check_grads(concatenate, operands, 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "rng_factory": rng_factory,}
for lhs_shape, rhs_shape, all_strides in itertools.chain(
[((b, i, 3, 4), (j, i, 1, 2), [(1, 1), (1, 2), (2, 1)])
for b, i, j in itertools.product([2, 3], repeat=3)],
[((4, 2, 1), (3, 2, 1), [(1,)])])
for strides in all_strides
for dtype in float_dtypes
for padding in ["VALID", "SAME"]
for rng_factory in [jtu.rand_small]))
def testConvGrad(self, lhs_shape, rhs_shape, dtype, strides, padding, rng_factory):
rng = rng_factory()
lhs = rng(lhs_shape, dtype)
rhs = rng(rhs_shape, dtype)
conv = partial(lax.conv, window_strides=strides, padding=padding,
precision=lax.Precision.HIGHEST)
check_grads_bilinear(conv, (lhs, rhs), order=2, modes=["fwd", "rev"],
atol=1e-2, rtol=1e-2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_lhs_dilation={}_"
"rhs_dilation={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding, lhs_dil, rhs_dil),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "lhs_dil": lhs_dil,
"rhs_dil": rhs_dil, "rng_factory": rng_factory}
for lhs_shape, rhs_shape, all_strides, all_pads, lhs_dils, rhs_dils in
itertools.chain(
[((b, i, 3, 4), (j, i, 1, 2), [(1, 1), (1, 2), (2, 1)],
[((0, 0), (0, 0)), ((-1, 0), (0, -1)), ((1, 0), (0, 1))],
[(1, 1), (2, 1)], [(1, 1)])
for b, i, j in itertools.product([2, 3], repeat=3)],
[((4, 2, 1), (3, 2, 1), [(1,)], [((1, 1),), ((0, 0),)],
[(1,), (2,)], [(1,), (2,)])])
for strides in all_strides
for rhs_dil in rhs_dils
for lhs_dil in lhs_dils
for dtype in float_dtypes
for padding in all_pads
for rng_factory in [jtu.rand_small]))
def testConvWithGeneralPaddingGrad(self, lhs_shape, rhs_shape, dtype, strides,
padding, lhs_dil, rhs_dil, rng_factory):
rng = rng_factory()
lhs = rng(lhs_shape, dtype)
rhs = rng(rhs_shape, dtype)
conv = partial(lax.conv_with_general_padding, window_strides=strides,
padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil,
precision=lax.Precision.HIGHEST)
check_grads_bilinear(conv, (lhs, rhs), order=2, modes=["fwd", "rev"],
atol=1e-2, rtol=1e-2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_lhs_dilation={}_"
"rhs_dilation={}_dims={}_feature_group_count={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding, lhs_dil, rhs_dil, ",".join(dim_nums),
feature_group_count),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "lhs_dil": lhs_dil,
"rhs_dil": rhs_dil, "rng_factory": rng_factory, "dimension_numbers": dim_nums,
"perms": perms, "feature_group_count": feature_group_count}
for lhs_shapes, rhs_shape, all_strides, lhs_dils, rhs_dils in [
([(b, i, 6, 7), (b, i, 0, 4)], # lhs_shape
(j, i, 1, 2), # rhs_shape
[(1, 1), (1, 2), (2, 1)], # strides
[(1, 1), (2, 1)], # lhs_dils
[(1, 1), (2, 2)]) # rhs_dils
for b, i, j in itertools.product([1, 2], repeat=3)]
for lhs_shape in lhs_shapes
for feature_group_count in [1, 2]
for strides in all_strides
for rhs_dil in rhs_dils
for lhs_dil in lhs_dils
for dtype in float_dtypes
for padding in ([((0, 0), (0, 0)), ((1, 0), (0, 1))] +
([((0, -1), (0, 0))] if lhs_shape[2] != 0 else []))
for dim_nums, perms in [
(("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])),
(("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])),
(("NHWC", "OIHW", "NCHW"), ([0, 2, 3, 1], [0, 1, 2, 3]))]
for rng_factory in [jtu.rand_default]
))
@jtu.skip_on_devices("tpu") # TODO(phawkins): precision problems on TPU.
def testConvGeneralDilatedGrad(self, lhs_shape, rhs_shape, dtype, strides,
padding, lhs_dil, rhs_dil, dimension_numbers,
perms, feature_group_count, rng_factory):
rng = rng_factory()
tol = {dtypes.bfloat16: 1e-0, onp.float16: 5e-1, onp.float32: 1e-4}
# permute shapes to match dim_spec, scale by feature_group_count
lhs_perm, rhs_perm = perms
lhs_shape = list(onp.take(lhs_shape, lhs_perm))
rhs_shape = list(onp.take(rhs_shape, rhs_perm))
dim_spec = lax.conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers)
lhs_shape[dim_spec.lhs_spec[1]] *= feature_group_count
rhs_shape[dim_spec.rhs_spec[0]] *= feature_group_count
lhs = rng(lhs_shape, dtype)
rhs = rng(rhs_shape, dtype)
conv = partial(lax.conv_general_dilated, window_strides=strides,
padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
precision=lax.Precision.HIGHEST)
check_grads_bilinear(conv, (lhs, rhs), order=2, modes=["fwd", "rev"],
atol=tol, rtol=tol)
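# Why the shapes are rescaled above (illustrative note): for a grouped
# convolution, the lhs channel dimension must equal feature_group_count times
# the rhs input-feature dimension, and the rhs output-feature dimension must
# be divisible by feature_group_count, so both dim_spec.lhs_spec[1] and
# dim_spec.rhs_spec[0] are multiplied by the group count.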
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype)),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"rng_factory": jtu.rand_default}
for lhs_shape in [(2,), (3, 2)] for rhs_shape in [(2,), (2, 4)]
for dtype in float_dtypes))
def testDotGrad(self, lhs_shape, rhs_shape, dtype, rng_factory):
rng = rng_factory()
tol = {onp.float16: 1e-1, onp.float32: 1e-4}
lhs = rng(lhs_shape, dtype)
rhs = rng(rhs_shape, dtype)
dot = partial(lax.dot, precision=lax.Precision.HIGHEST)
check_grads_bilinear(dot, (lhs, rhs), order=2, modes=["fwd", "rev"],
atol=tol, rtol=tol)
# check that precision config is preserved
result, pullback = api.vjp(dot, lhs, rhs)
gresult = lax.zeros_like_array(result)
s = str(api.make_jaxpr(pullback)(gresult))
assert "precision=HIGHEST" in s
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_dimension_numbers={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
dimension_numbers),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"dimension_numbers": dimension_numbers, "rng_factory": jtu.rand_small}
for lhs_shape, rhs_shape, dimension_numbers in [
((3, 2), (2, 4), (([1], [0]), ([], []))),
((3, 5), (2, 5), (([1], [1]), ([], []))),
((5, 3), (5, 2), (([0], [0]), ([], []))),
((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
]
for dtype in float_dtypes))
def testDotGeneralContractAndBatchGrads(self, lhs_shape, rhs_shape, dtype,
dimension_numbers, rng_factory):
rng = rng_factory()
lhs = rng(lhs_shape, dtype)
rhs = rng(rhs_shape, dtype)
dot_general = partial(lax.dot_general, dimension_numbers=dimension_numbers,
precision=lax.Precision.HIGHEST)
check_grads_bilinear(dot_general, (lhs, rhs), order=2, modes=["fwd", "rev"])
# check that precision config is preserved
result, pullback = api.vjp(dot_general, lhs, rhs)
gresult = lax.zeros_like_array(result)
s = str(api.make_jaxpr(pullback)(gresult))
assert "precision=HIGHEST" in s
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_broadcast_sizes={}".format(
shape, onp.dtype(dtype).name, broadcast_sizes),
"shape": shape, "dtype": dtype, "broadcast_sizes": broadcast_sizes,
"rng_factory": rng_factory}
for shape in [(), (2, 3)]
for dtype in float_dtypes
for broadcast_sizes in [(), (2,), (1, 2)]
for rng_factory in [jtu.rand_default]))
def testBroadcastGrad(self, shape, dtype, broadcast_sizes, rng_factory):
rng = rng_factory()
args = (rng(shape, dtype),)
broadcast = lambda x: lax.broadcast(x, broadcast_sizes)
check_grads(broadcast, args, 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_bcdims={}".format(
jtu.format_shape_dtype_string(inshape, dtype),
outshape, broadcast_dimensions),
"inshape": inshape, "dtype": dtype, "outshape": outshape,
"dimensions": broadcast_dimensions, "rng_factory": rng_factory}
for inshape, outshape, broadcast_dimensions in [
([2], [2, 2], [0]),
([2], [2, 2], [1]),
([2], [2, 3], [0]),
([], [2, 3], []),
]
for dtype in float_dtypes
for rng_factory in [jtu.rand_default]))
def testBroadcastInDimGrad(self, inshape, dtype, outshape, dimensions, rng_factory):
rng = rng_factory()
operand = rng(inshape, dtype)
broadcast_in_dim = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
check_grads(broadcast_in_dim, (operand,), 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_perm={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
permutation),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"rng_factory": rng_factory, "permutation": permutation}
for dtype in float_dtypes
for arg_shape, out_shape, permutation in [
[(3, 4), (12,), None],
[(2, 1, 4), (8,), None],
[(2, 2, 4), (2, 8), None],
[(3, 4), (12,), (0, 1)],
[(3, 4), (12,), (1, 0)],
[(2, 1, 4), (8,), (0, 2, 1)],
[(2, 1, 4), (8,), (2, 0, 1)],
[(2, 2, 4), (2, 8), (0, 2, 1)],
[(2, 2, 4), (2, 8), (2, 0, 1)],
]
for rng_factory in [jtu.rand_default]))
def testReshapeGrad(self, arg_shape, out_shape, permutation, dtype, rng_factory):
rng = rng_factory()
operand = rng(arg_shape, dtype)
reshape = lambda x: lax.reshape(x, out_shape, permutation)
check_grads(reshape, (operand,), 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_pads={}"
.format(jtu.format_shape_dtype_string(shape, dtype), pads),
"shape": shape, "dtype": dtype, "pads": pads, "rng_factory": jtu.rand_small}
for shape in [(2, 3)]
for dtype in float_dtypes
for pads in [[(1, 2, 1), (0, 1, 0)], [(-1, 0, 0), (-1, 0, 2)]]))
def testPadGrad(self, shape, dtype, pads, rng_factory):
rng = rng_factory()
operand = rng(shape, dtype)
pad = lambda operand: lax.pad(operand, onp.array(0, dtype), pads)
check_grads(pad, (operand,), 2, ["fwd", "rev"], eps=1.)
operand = rng(shape, dtype)
padding_value = onp.array(0., dtype)
pad = lambda operand, padding_value: lax.pad(operand, padding_value, pads)
check_grads(pad, (operand, padding_value), 2, ["fwd", "rev"], eps=1.)
def testReverseGrad(self):
rev = lambda operand: lax.rev(operand, dimensions)
dimensions = [0]
check_grads(rev, (onp.array([3., 2., 1.]),), 2)
dimensions = [0, 1]
check_grads(rev, (onp.array([[6., 5., 4.], [3., 2., 1.]]),), 2,
rtol={onp.float32: 3e-3})
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_predshape={}_argshapes={}".format(
jtu.format_shape_dtype_string(pred_shape, onp.bool_),
jtu.format_shape_dtype_string(arg_shape, dtype)),
"pred_shape": pred_shape, "arg_shape": arg_shape, "dtype": dtype,
"rng_factory": rng_factory}
for arg_shape in [(), (3,), (2, 3)]
for pred_shape in ([(), arg_shape] if arg_shape else [()])
for dtype in float_dtypes
for rng_factory in [jtu.rand_default]))
def testSelectGrad(self, pred_shape, arg_shape, dtype, rng_factory):
rng = rng_factory()
pred = rng(pred_shape, onp.bool_)
on_true = rng(arg_shape, dtype)
on_false = rng(arg_shape, dtype)
select = lambda on_true, on_false: lax.select(pred, on_true, on_false)
check_grads(select, (on_true, on_false), 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_start_indices={}_limit_indices={}_strides={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, limit_indices, strides),
"shape": shape, "dtype": dtype, "starts": start_indices,
"limits": limit_indices, "strides": strides, "rng_factory": rng_factory}
for shape, start_indices, limit_indices, strides in [
[(3,), (1,), (2,), None],
[(7,), (4,), (7,), None],
[(5,), (1,), (5,), (2,)],
[(8,), (1,), (6,), (2,)],
[(5, 3), (1, 1), (3, 2), None],
[(5, 3), (1, 1), (3, 1), None],
[(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
[(5, 3), (1, 1), (2, 1), (1, 1)],
[(5, 3), (1, 1), (5, 3), (2, 1)],
]
for dtype in float_dtypes
for rng_factory in [jtu.rand_default]))
def testSliceGrad(self, shape, dtype, starts, limits, strides, rng_factory):
rng = rng_factory()
operand = rng(shape, dtype)
slice = lambda x: lax.slice(x, starts, limits, strides)
check_grads(slice, (operand,), 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_size_indices={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, size_indices),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"size_indices": size_indices, "rng_factory": rng_factory}
for shape, start_indices, size_indices in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
]
for dtype in float_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicSliceGrad(self, shape, dtype, start_indices, size_indices,
rng_factory):
rng = rng_factory()
operand = rng(shape, dtype)
dynamic_slice = lambda x: lax.dynamic_slice(x, start_indices, size_indices)
check_grads(dynamic_slice, (operand,), 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_start_indices={}_update_shape={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, update_shape),
"shape": shape, "dtype": dtype, "start_indices": start_indices,
"update_shape": update_shape, "rng_factory": rng_factory}
for shape, start_indices, update_shape in [
[(3,), (1,), (1,)],
[(5, 3), (1, 1), (3, 1)],
[(7, 5, 3), (4, 1, 0), (2, 0, 1)],
]
for dtype in float_dtypes
for rng_factory in [jtu.rand_default]))
def testDynamicUpdateSliceGrad(self, shape, dtype, start_indices,
update_shape, rng_factory):
rng = rng_factory()
operand = rng(shape, dtype)
update = rng(update_shape, dtype)
start_indices = onp.array(start_indices)
dus = lambda x, y: lax.dynamic_update_slice(x, y, start_indices)
check_grads(dus, (operand, update), 2, ["fwd", "rev"], eps=1.)
dus = lambda x: lax.dynamic_update_slice(x, update, start_indices)
check_grads(dus, (operand,), 2, ["fwd", "rev"], eps=1.)
dus = lambda y: lax.dynamic_update_slice(operand, y, start_indices)
check_grads(dus, (update,), 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_perm={}".format(
jtu.format_shape_dtype_string(shape, dtype), perm),
"shape": shape, "dtype": dtype, "perm": perm, "rng_factory": rng_factory}
for shape, perm in [
[(3, 4), (1, 0)],
[(3, 4), (0, 1)],
[(3, 4, 5), (2, 1, 0)],
[(3, 4, 5), (1, 0, 2)],
]
for dtype in float_dtypes
for rng_factory in [jtu.rand_default]))
def testTransposeGrad(self, shape, dtype, perm, rng_factory):
rng = rng_factory()
operand = rng(shape, dtype)
transpose = lambda x: lax.transpose(x, perm)
check_grads(transpose, (operand,), 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_inshape={}_reducedims={}"
.format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims),
"op": op, "init_val": init_val, "shape": shape, "dtype": dtype,
"dims": dims, "rng_factory": rng_factory}
for init_val, op, dtypes in [
(0, lax.add, inexact_dtypes),
# Precision problems for float16 tests.
(-onp.inf, lax.max, [t for t in inexact_dtypes if t != onp.float16]),
(onp.inf, lax.min, [t for t in inexact_dtypes if t != onp.float16]),
# The mul test overflows the range of a float16.
(1, lax.mul, [t for t in inexact_dtypes
if t not in (onp.float16, dtypes.bfloat16)]),
]
for dtype in dtypes
for shape, dims in [
[(3, 4, 5), ()],
[(3, 4, 5), (0,)],
[(3, 4, 5), (1, 2)],
[(3, 4, 5), (0, 2)],
[(3, 4, 5), (0, 1, 2)],
[(3, 1), (1,)],
]
for rng_factory in [jtu.rand_default]))
def testReduceGrad(self, op, init_val, shape, dtype, dims, rng_factory):
rng = rng_factory()
if jtu.device_under_test() == "tpu" and op is lax.mul:
raise SkipTest("unimplemented case")
tol = {dtypes.bfloat16: 2e-1, onp.float16: 1e-1, onp.float32: 4e-2,
onp.float64: 1e-3, onp.complex64: 1e-2}
operand = rng(shape, dtype)
init_val = onp.asarray(init_val, dtype=dtype)
reduce = lambda operand: lax.reduce(operand, init_val, op, dims)
eps = (1.0 if dtypes.finfo(dtype).bits == 16 and op is lax.add else
1e-1 if dtype == dtypes.bfloat16 else
1e-2 if dtypes.finfo(dtype).bits == 32 else None)
check_grads(reduce, (operand,), 1, ["fwd", "rev"], tol, tol, eps)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}_padding={}"
.format(op.__name__, onp.dtype(dtype).name, padding),
"op": op, "init_val": init_val, "dtype": dtype, "padding": padding,
"rng_factory": rng_factory}
for init_val, op, dtypes, rng in [
(0, lax.add, float_dtypes, jtu.rand_small),
(-onp.inf, lax.max, [onp.float32], jtu.rand_default),
(onp.inf, lax.min, [onp.float32], jtu.rand_default),
]
for dtype in dtypes
for padding in ["VALID", "SAME"]
for rng_factory in [jtu.rand_default]))
def testReduceWindowGrad(self, op, init_val, dtype, padding, rng_factory):
rng = rng_factory()
tol = {onp.float16: 1e-1, onp.float32: 1e-3}
init_val = onp.asarray(init_val, dtype=dtype)
# We need this conditional and the corresponding loop logic to be in the
# test method, rather than at the parameterized test level, because it
# depends on FLAGS for the device under test.
# TODO(b/31565929): enable when fixed.
if jtu.device_under_test() == "tpu" and op is not lax.add:
all_configs = [((6, 5, 4, 3), (2, 2, 1, 1), (1, 2, 1, 1))]
# TODO(b/73062247): need variadic reduce-window for better precision.
gradient_order = 1
else:
all_configs = itertools.chain(
itertools.product(
[(4, 6)], # shapes
[(2, 1), (1, 2)], # window_dimensions
[(1, 1), (2, 1), (1, 2)] # strides
),
itertools.product(
[(3, 2, 4, 6)], # shapes
[(1, 1, 2, 1), (2, 1, 2, 1)], # window_dimensions
[(1, 2, 2, 1), (1, 1, 1, 1)]), # strides
)
gradient_order = 3
def fun(operand):
return lax.reduce_window(operand, init_val, op, dims, strides, padding)
for shape, dims, strides in all_configs:
operand = rng(shape, dtype)
if op is lax.add:
eps = 1.
else:
# this test can fail if there are duplicates in operand
self.assertEqual(onp.unique(operand).size, operand.size,
msg="test requires operand elements to be unique.")
eps = 1e-2
check_grads(fun, (operand,), gradient_order, ["fwd", "rev"], tol, tol,
eps)
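# The uniqueness assertion above matters because the gradient of a windowed
# max/min is only well defined when the extremum in each window is attained
# by a single element; with ties, the analytic gradient and the finite
# difference can legitimately pick different elements and disagree.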
# TODO(b/205052657): enable more tests when supported
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, dtype), axis),
"rng_factory": rng_factory, "shape": shape, "dtype": dtype, "axis": axis}
for dtype in [onp.float32]
for shape in [(5,), (5, 7)]
for axis in [len(shape) - 1]
for rng_factory in [jtu.rand_default]))
def testSortGrad(self, shape, dtype, axis, rng_factory):
rng = rng_factory()
operand = rng(shape, dtype)
sort = lambda x: lax.sort(x, axis)
check_grads(sort, (operand,), 2, ["fwd", "rev"], eps=1e-2)
# TODO(b/205052657): enable more tests when supported
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_keyshape={}_valshape={}_axis={}".format(
jtu.format_shape_dtype_string(shape, key_dtype),
jtu.format_shape_dtype_string(shape, val_dtype),
axis),
"rng_factory": rng_factory, "shape": shape,
"key_dtype": key_dtype, "val_dtype": val_dtype, "axis": axis}
for key_dtype in [onp.float32]
for val_dtype in [onp.float32]
for shape in [(3,), (5, 3)]
for axis in [len(shape) - 1]
for rng_factory in [jtu.rand_default]))
def testSortKeyValGrad(self, shape, key_dtype, val_dtype, axis, rng_factory):
rng = rng_factory()
# This test relies on the property that wherever keys are tied, values are
# too, since we don't guarantee the same ordering of values with equal keys.
# To avoid that case, we generate unique keys (globally in the key array).
perm_rng = onp.random.RandomState(0)
def args_maker():
flat_keys = onp.arange(onp.prod(shape, dtype=int), dtype=key_dtype)
keys = perm_rng.permutation(flat_keys).reshape(shape)
values = rng(shape, val_dtype)
return keys, values
keys, values = args_maker()
fun = lambda keys, values: lax.sort_key_val(keys, values, axis)
check_grads(fun, (keys, values), 2, ["fwd", "rev"], 1e-2, 1e-2, 1e-2)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_axes={}".format(
jtu.format_shape_dtype_string(shape, dtype), idxs, axes),
"shape": shape, "dtype": dtype, "idxs": idxs, "axes": axes,
"rng_factory": rng_factory}
for dtype in float_dtypes
for shape, idxs, axes in [
[(3, 4, 5), (onp.array([0, 2, 1]),), (0,)],
[(3, 4, 5), (onp.array([-1, -2]),), (0,)],
[(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 1)],
[(3, 4, 5), (onp.array([0, 2]), onp.array([1, 3])), (0, 2)],
]
for rng_factory in [jtu.rand_default]))
def testIndexTakeGrad(self, shape, dtype, idxs, axes, rng_factory):
rng = rng_factory()
src = rng(shape, dtype)
index_take = lambda src: lax.index_take(src, idxs, axes)
check_grads(index_take, (src,), 2, ["fwd", "rev"], eps=1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_dnums={}_slice_sizes={}".format(
jtu.format_shape_dtype_string(shape, dtype), idxs, dnums,
slice_sizes),
"shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes, "rng_factory": rng_factory,
"rng_idx_factory": rng_idx_factory}
for dtype in float_dtypes
for shape, idxs, dnums, slice_sizes in [
((5,), onp.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
((10,), onp.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
((10, 5,), onp.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
]
for rng_idx_factory in [partial(jtu.rand_int, max(shape))]
for rng_factory in [jtu.rand_default]))
def testGatherGrad(self, shape, dtype, idxs, dnums, slice_sizes, rng_factory,
rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
idxs = rng_idx(idxs.shape, idxs.dtype)
gather = lambda x: lax.gather(x, idxs, dimension_numbers=dnums,
slice_sizes=slice_sizes)
x = rng(shape, dtype)
check_grads(gather, (x,), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_update={}_dnums={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
idxs, update_shape, dnums),
"arg_shape": arg_shape, "dtype": dtype, "idxs": idxs,
"update_shape": update_shape, "dnums": dnums, "rng_factory": rng_factory,
"rng_idx_factory": rng_idx_factory}
for dtype in float_dtypes
for arg_shape, idxs, update_shape, dnums in [
((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
]
for rng_idx_factory in [partial(jtu.rand_int, max(arg_shape))]
for rng_factory in [jtu.rand_default]))
def testScatterAddGrad(self, arg_shape, dtype, idxs, update_shape, dnums,
rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
idxs = rng_idx(idxs.shape, idxs.dtype)
scatter_add = lambda x, y: lax.scatter_add(x, idxs, y,
dimension_numbers=dnums)
x = rng(arg_shape, dtype)
y = rng(update_shape, dtype)
check_grads(scatter_add, (x, y), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_update={}_dnums={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
idxs, update_shape, dnums),
"arg_shape": arg_shape, "dtype": dtype, "idxs": idxs,
"update_shape": update_shape, "dnums": dnums, "rng_factory": rng_factory,
"rng_idx_factory": rng_idx_factory}
for dtype in float_dtypes
for arg_shape, idxs, update_shape, dnums in [
((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
]
for rng_idx_factory in [partial(jtu.rand_int, max(arg_shape))]
for rng_factory in [jtu.rand_default]))
def testScatterGrad(self, arg_shape, dtype, idxs, update_shape, dnums,
rng_factory, rng_idx_factory):
rng = rng_factory()
rng_idx = rng_idx_factory()
idxs = rng_idx(idxs.shape, idxs.dtype)
scatter = lambda x, y: lax.scatter(x, idxs, y, dimension_numbers=dnums)
x = rng(arg_shape, dtype)
y = rng(update_shape, dtype)
check_grads(scatter, (x, y), 2, ["fwd", "rev"], 1e-2, 1e-2, 1.)
def testScatterGradSymbolicZeroUpdate(self):
# https://github.com/google/jax/issues/1901
def f(x):
n = x.shape[0]
y = onp.arange(n, dtype=x.dtype)
return jax.ops.index_update(x, onp.diag_indices(n), y)
rng = jtu.rand_default()
check_grads(f, (rng((5, 5), onp.float32),), 2, ["fwd", "rev"], 1e-2, 1e-2,
1.)
def testStopGradient(self):
def f(x):
return lax.sin(x) * lax.cos(lax.stop_gradient(x))
def f2(x, y):
return lax.sin(x) * lax.cos(y)
x = 3.14
ans = api.grad(f)(x)
expected = api.grad(f2)(x, x)
self.assertAllClose(ans, expected, check_dtypes=True)
ans = api.grad(api.grad(f))(x)
expected = api.grad(api.grad(f2))(x, x)
self.assertAllClose(ans, expected, check_dtypes=True)
ans = api.grad(lambda x: lax.stop_gradient({'foo':x})['foo'])(3.)
expected = onp.array(0.0)
self.assertAllClose(ans, expected, check_dtypes=False)
# TODO(mattjj): make this a more systematic test
def testRemainder(self):
rng = onp.random.RandomState(0)
x = rng.uniform(-0.9, 9, size=(3, 4))
y = rng.uniform(0.7, 1.9, size=(3, 1))
assert not set(onp.unique(x)) & set(onp.unique(y))
tol = 1e-1 if num_float_bits(onp.float64) == 32 else 1e-3
check_grads(lax.rem, (x, y), 2, ["fwd", "rev"], tol, tol)
rng = onp.random.RandomState(0)
x = rng.uniform(-0.9, 9, size=(1, 4))
y = rng.uniform(0.7, 1.9, size=(3, 4))
assert not set(onp.unique(x)) & set(onp.unique(y))
tol = 1e-1 if num_float_bits(onp.float64) == 32 else 1e-3
check_grads(lax.rem, (x, y), 2, ["fwd", "rev"], tol, tol)
def all_bdims(*shapes):
bdims = (itertools.chain([None], range(len(shape) + 1)) for shape in shapes)
return (t for t in itertools.product(*bdims) if not all(e is None for e in t))
def add_bdim(bdim_size, bdim, shape):
shape = list(shape)
if bdim is not None:
shape.insert(bdim, bdim_size)
return tuple(shape)
def slicer(x, bdim):
if bdim is None:
return lambda _: x
else:
return lambda i: lax.index_in_dim(x, i, bdim, keepdims=False)
def args_slicer(args, bdims):
slicers = list(map(slicer, args, bdims))
return lambda i: [sl(i) for sl in slicers]
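# Small illustration of the batching helpers (values shown are just examples):
#   list(all_bdims((2, 3)))      -> [(0,), (1,), (2,)]   (None alone is skipped)
#   add_bdim(10, 1, (2, 3))      -> (2, 10, 3)
#   args_slicer(args, bdims)(i)  -> the i-th unbatched example of each arg,
#                                   passing through args whose bdim is None.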
class LaxVmapTest(jtu.JaxTestCase):
def _CheckBatching(self, op, bdim_size, bdims, shapes, dtypes, rng,
rtol=None, atol=None):
batched_shapes = list(map(partial(add_bdim, bdim_size), bdims, shapes))
args = [rng(shape, dtype)
for shape, dtype in jax.util.safe_zip(batched_shapes, dtypes)]
args_slice = args_slicer(args, bdims)
ans = api.vmap(op, bdims)(*args)
expected = onp.stack([op(*args_slice(i)) for i in range(bdim_size)])
self.assertAllClose(ans, expected, check_dtypes=True, rtol=rtol, atol=atol)
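# The check above asserts the defining property of vmap: applying
# api.vmap(op, bdims) to arguments batched along `bdims` must match stacking
# op applied to each example slice, schematically
#   vmap(op, bdims)(*batched) == onp.stack([op(*args_slice(i)) for i in range(n)])
# (the real comparison uses assertAllClose with tolerances).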
@parameterized.named_parameters(itertools.chain.from_iterable(
jtu.cases_from_list(
{"testcase_name": "{}_bdims={}".format(
jtu.format_test_name_suffix(rec.op, shapes,
itertools.repeat(dtype)), bdims),
"op_name": rec.op, "rng_factory": rec.rng_factory, "shapes": shapes,
"dtype": dtype, "bdims": bdims}
for shape_group in compatible_shapes
for shapes in CombosWithReplacement(shape_group, rec.nargs)
for bdims in all_bdims(*shapes)
for dtype in rec.dtypes)
for rec in LAX_OPS))
def testOp(self, op_name, rng_factory, shapes, dtype, bdims):
rng = rng_factory()
op = getattr(lax, op_name)
self._CheckBatching(op, 10, bdims, shapes, [dtype] * len(shapes), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_strides={}_padding={}_lhs_dilation={}_"
"rhs_dilation={}_dims={}_feature_group_count={}_lhs_bdim={}_rhs_bdim={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
strides, padding, lhs_dil, rhs_dil, ",".join(dim_nums),
feature_group_count, lhs_bdim, rhs_bdim),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"strides": strides, "padding": padding, "lhs_dil": lhs_dil,
"rhs_dil": rhs_dil, "rng_factory": rng_factory, "dimension_numbers": dim_nums,
"perms": perms, "lhs_bdim": lhs_bdim, "rhs_bdim": rhs_bdim,
"feature_group_count": feature_group_count}
for lhs_shape, rhs_shape, all_strides, all_pads, lhs_dils, rhs_dils in [
((b, i, 6, 7), # lhs_shape
(j, i, 1, 2), # rhs_shape
[(1, 1), (1, 2), (2, 1)], # strides
[((0, 0), (0, 0)), ((1, 0), (0, 1)), ((0, -1), (0, 0))], # pads
[(1, 1), (2, 1)], # lhs_dils
[(1, 1), (2, 2)]) # rhs_dils
for b, i, j in itertools.product([1, 2], repeat=3)]
for feature_group_count in [1, 2]
for strides in all_strides
for rhs_dil in rhs_dils
for lhs_dil in lhs_dils
for dtype in [onp.float32]
for padding in all_pads
for dim_nums, perms in [
(("NCHW", "OIHW", "NCHW"), ([0, 1, 2, 3], [0, 1, 2, 3])),
(("NHWC", "HWIO", "NHWC"), ([0, 2, 3, 1], [2, 3, 1, 0])),
(("NHWC", "OIHW", "NCHW"), ([0, 2, 3, 1], [0, 1, 2, 3]))]
for lhs_bdim in itertools.chain([None], range(len(lhs_shape) + 1))
for rhs_bdim in itertools.chain([None], range(len(rhs_shape) + 1))
if (lhs_bdim, rhs_bdim) != (None, None)
for rng_factory in [jtu.rand_default]
))
# TODO(mattjj): some cases fail on TPU just due to numerical tolerances
@jtu.skip_on_devices("tpu")
def testConvGeneralDilatedBatching(
self, lhs_shape, rhs_shape, dtype, strides, padding, lhs_dil, rhs_dil,
dimension_numbers, perms, feature_group_count, lhs_bdim, rhs_bdim, rng_factory):
rng = rng_factory()
tol = 1e-1 if dtypes.finfo(dtype).bits <= 32 else 1e-3
# permute shapes to match dim_spec, scale by feature_group_count
lhs_perm, rhs_perm = perms
lhs_shape = list(onp.take(lhs_shape, lhs_perm))
rhs_shape = list(onp.take(rhs_shape, rhs_perm))
dim_spec = lax.conv_dimension_numbers(lhs_shape, rhs_shape, dimension_numbers)
lhs_shape[dim_spec.lhs_spec[1]] *= feature_group_count
rhs_shape[dim_spec.rhs_spec[0]] *= feature_group_count
conv = partial(lax.conv_general_dilated, window_strides=strides,
padding=padding, lhs_dilation=lhs_dil, rhs_dilation=rhs_dil,
dimension_numbers=dimension_numbers,
feature_group_count=feature_group_count,
precision=lax.Precision.HIGHEST)
self._CheckBatching(conv, 5, (lhs_bdim, rhs_bdim), (lhs_shape, rhs_shape),
(dtype, dtype), rng, rtol=tol, atol=tol)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_from_dtype={}_to_dtype={}_bdims={}".format(
shape, from_dtype, to_dtype, bdims),
"shape": shape, "from_dtype": from_dtype, "to_dtype": to_dtype,
"bdims": bdims, "rng_factory": rng_factory}
for from_dtype, to_dtype in itertools.product(
[onp.float32, onp.int32, "float32", "int32"], repeat=2)
for shape in [(2, 3)]
for bdims in all_bdims(shape)
for rng_factory in [jtu.rand_default]))
def testConvertElementType(self, shape, from_dtype, to_dtype, bdims, rng_factory):
rng = rng_factory()
op = lambda x: lax.convert_element_type(x, to_dtype)
self._CheckBatching(op, 10, bdims, (shape,), (from_dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_from_dtype={}_to_dtype={}_bdims={}".format(
shape, from_dtype, to_dtype, bdims),
"shape": shape, "from_dtype": from_dtype, "to_dtype": to_dtype,
"bdims": bdims, "rng_factory": rng_factory}
for from_dtype, to_dtype in itertools.product(
[onp.float32, onp.int32, "float32", "int32"], repeat=2)
for shape in [(2, 3)]
for bdims in all_bdims(shape)
for rng_factory in [jtu.rand_default]))
def testBitcastElementType(self, shape, from_dtype, to_dtype, bdims, rng_factory):
rng = rng_factory()
op = lambda x: lax.bitcast_convert_type(x, to_dtype)
self._CheckBatching(op, 10, bdims, (shape,), (from_dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_min_shape={}_operand_shape={}_max_shape={}_bdims={}"
.format(jtu.format_shape_dtype_string(min_shape, dtype),
jtu.format_shape_dtype_string(operand_shape, dtype),
jtu.format_shape_dtype_string(max_shape, dtype),
bdims),
"min_shape": min_shape, "operand_shape": operand_shape,
"max_shape": max_shape, "dtype": dtype, "bdims": bdims, "rng_factory": rng_factory}
for min_shape, operand_shape, max_shape in [
[(), (2, 3), ()],
[(2, 3), (2, 3), ()],
[(), (2, 3), (2, 3)],
[(2, 3), (2, 3), (2, 3)],
]
for dtype in default_dtypes
for bdims in all_bdims(min_shape, operand_shape, max_shape)
for rng_factory in [jtu.rand_default]))
def testClamp(self, min_shape, operand_shape, max_shape, dtype, bdims, rng_factory):
rng = rng_factory()
raise SkipTest("batching rule for clamp not implemented") # TODO(mattj)
shapes = [min_shape, operand_shape, max_shape]
self._CheckBatching(lax.clamp, 10, bdims, shapes, [dtype] * 3, rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_lhs_shape={}_rhs_shape={}_bdims={}".format(
jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
bdims),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"bdims": bdims, "rng_factory": rng_factory}
for lhs_shape in [(3,), (4, 3)] for rhs_shape in [(3,), (3, 6)]
for bdims in all_bdims(lhs_shape, rhs_shape)
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testDot(self, lhs_shape, rhs_shape, dtype, bdims, rng_factory):
rng = rng_factory()
op = partial(lax.dot, precision=lax.Precision.HIGHEST)
self._CheckBatching(op, 5, bdims, (lhs_shape, rhs_shape), (dtype, dtype),
rng, rtol={onp.float16: 5e-2})
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_lhs_contracting={}_rhs_contracting={}_bdims={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
lhs_contracting, rhs_contracting, bdims),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"lhs_contracting": lhs_contracting, "rhs_contracting": rhs_contracting,
"bdims": bdims, "rng_factory": rng_factory}
for lhs_shape, rhs_shape, lhs_contracting, rhs_contracting in [
[(3, 5), (2, 5), [1], [1]],
[(5, 3), (5, 2), [0], [0]],
[(5, 3, 2), (5, 2, 4), [0], [0]],
[(5, 3, 2), (5, 2, 4), [0,2], [0,1]],
[(1, 2, 2, 3), (1, 2, 3, 1), [1], [1]],
[(3, 2), (2, 4), [1], [0]],
]
for bdims in all_bdims(lhs_shape, rhs_shape)
for dtype in default_dtypes
for rng_factory in [jtu.rand_small]))
def testDotGeneralContractOnly(self, lhs_shape, rhs_shape, dtype,
lhs_contracting, rhs_contracting, bdims, rng_factory):
rng = rng_factory()
dimension_numbers = ((lhs_contracting, rhs_contracting), ([], []))
dot = partial(lax.dot_general, dimension_numbers=dimension_numbers)
self._CheckBatching(dot, 5, bdims, (lhs_shape, rhs_shape), (dtype, dtype),
rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_lhs_shape={}_rhs_shape={}_dimension_numbers={}_bdims={}"
.format(jtu.format_shape_dtype_string(lhs_shape, dtype),
jtu.format_shape_dtype_string(rhs_shape, dtype),
dimension_numbers, bdims),
"lhs_shape": lhs_shape, "rhs_shape": rhs_shape, "dtype": dtype,
"dimension_numbers": dimension_numbers, "bdims": bdims, "rng_factory": rng_factory}
for lhs_shape, rhs_shape, dimension_numbers in [
((3, 3, 2), (3, 2, 4), (([2], [1]), ([0], [0]))),
((3, 4, 2, 4), (3, 4, 3, 2), (([2], [3]), ([0, 1], [0, 1]))),
]
for bdims in all_bdims(lhs_shape, rhs_shape)
for dtype in default_dtypes
for rng_factory in [jtu.rand_small]))
def testDotGeneralContractAndBatch(self, lhs_shape, rhs_shape, dtype,
dimension_numbers, bdims, rng_factory):
rng = rng_factory()
dot = partial(lax.dot_general, dimension_numbers=dimension_numbers)
self._CheckBatching(dot, 5, bdims, (lhs_shape, rhs_shape), (dtype, dtype),
rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_dtype={}_broadcast_sizes={}_bdims={}".format(
shape, onp.dtype(dtype).name, broadcast_sizes, bdims),
"shape": shape, "dtype": dtype, "broadcast_sizes": broadcast_sizes,
"bdims": bdims, "rng_factory": rng_factory}
for shape in [(), (2, 3)]
for dtype in default_dtypes
for broadcast_sizes in [(), (2,), (1, 2)]
for bdims in all_bdims(shape)
for rng_factory in [jtu.rand_default]))
def testBroadcast(self, shape, dtype, broadcast_sizes, bdims, rng_factory):
rng = rng_factory()
op = lambda x: lax.broadcast(x, broadcast_sizes)
self._CheckBatching(op, 5, bdims, (shape,), (dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_bcdims={}_bdims={}".format(
jtu.format_shape_dtype_string(inshape, dtype),
outshape, broadcast_dimensions, bdims),
"inshape": inshape, "dtype": dtype, "outshape": outshape,
"dimensions": broadcast_dimensions, "bdims": bdims,
"rng_factory": rng_factory}
for inshape, outshape, broadcast_dimensions in [
([2], [2, 2], [0]),
([2], [2, 2], [1]),
([2], [2, 3], [0]),
([], [2, 3], []),
]
for dtype in default_dtypes
for bdims in all_bdims(inshape)
for rng_factory in [jtu.rand_default]))
def testBroadcastInDim(self, inshape, dtype, outshape, dimensions, bdims, rng_factory):
rng = rng_factory()
raise SkipTest("this test has failures in some cases") # TODO(mattjj)
op = lambda x: lax.broadcast_in_dim(x, outshape, dimensions)
self._CheckBatching(op, 5, bdims, (inshape,), (dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_outshape={}_dims={}_bdims={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
jtu.format_shape_dtype_string(out_shape, dtype),
dimensions, bdims),
"arg_shape": arg_shape, "out_shape": out_shape, "dtype": dtype,
"dimensions": dimensions, "bdims": bdims, "rng_factory": rng_factory}
for dtype in default_dtypes
for arg_shape, dimensions, out_shape in [
[(3, 4), None, (12,)],
[(2, 1, 4), None, (8,)],
[(2, 2, 4), None, (2, 8)],
[(2, 2, 4), (0, 1, 2), (2, 8)],
[(2, 2, 4), (1, 0, 2), (8, 2)],
[(2, 2, 4), (2, 1, 0), (4, 2, 2)]
]
for bdims in all_bdims(arg_shape)
for rng_factory in [jtu.rand_default]))
def testReshape(self, arg_shape, out_shape, dtype, dimensions, bdims, rng_factory):
rng = rng_factory()
op = lambda x: lax.reshape(x, out_shape, dimensions=dimensions)
self._CheckBatching(op, 10, bdims, (arg_shape,), (dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_inshape={}_pads={}_bdims={}"
.format(jtu.format_shape_dtype_string(shape, dtype), pads, bdims),
"shape": shape, "dtype": dtype, "pads": pads,
"rng_factory": jtu.rand_small, "bdims": bdims}
for shape in [(2, 3)]
for bdims in all_bdims(shape)
for dtype in default_dtypes
for pads in [[(1, 2, 1), (0, 1, 0)]]))
def testPad(self, shape, dtype, pads, bdims, rng_factory):
rng = rng_factory()
fun = lambda operand: lax.pad(operand, onp.array(0, dtype), pads)
self._CheckBatching(fun, 5, bdims, (shape,), (dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_predshape={}_argshapes={}_bdims={}".format(
jtu.format_shape_dtype_string(pred_shape, onp.bool_),
jtu.format_shape_dtype_string(arg_shape, arg_dtype),
bdims),
"pred_shape": pred_shape, "arg_shape": arg_shape, "arg_dtype": arg_dtype,
"bdims": bdims, "rng_factory": rng_factory}
for arg_shape in [(), (3,), (2, 3)]
for pred_shape in ([(), arg_shape] if arg_shape else [()])
for bdims in all_bdims(pred_shape, arg_shape, arg_shape)
for arg_dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testSelect(self, pred_shape, arg_shape, arg_dtype, bdims, rng_factory):
rng = rng_factory()
op = lambda c, x, y: lax.select(c < 0, x, y)
self._CheckBatching(op, 5, bdims, (pred_shape, arg_shape, arg_shape,),
(onp.bool_, arg_dtype, arg_dtype), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name":
"_shape={}_start_indices={}_limit_indices={}_strides={}_bdims={}".format(
jtu.format_shape_dtype_string(shape, dtype),
start_indices, limit_indices, strides, bdims),
"shape": shape, "dtype": dtype, "starts": start_indices,
"limits": limit_indices, "strides": strides, "bdims": bdims, "rng_factory": rng_factory}
for shape, start_indices, limit_indices, strides in [
[(3,), (1,), (2,), None],
[(7,), (4,), (7,), None],
[(5,), (1,), (5,), (2,)],
[(8,), (1,), (6,), (2,)],
[(5, 3), (1, 1), (3, 2), None],
[(5, 3), (1, 1), (3, 1), None],
[(7, 5, 3), (4, 0, 1), (7, 1, 3), None],
[(5, 3), (1, 1), (2, 1), (1, 1)],
[(5, 3), (1, 1), (5, 3), (2, 1)],
]
for bdims in all_bdims(shape)
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testSlice(self, shape, dtype, starts, limits, strides, bdims, rng_factory):
rng = rng_factory()
op = lambda x: lax.slice(x, starts, limits, strides)
self._CheckBatching(op, 5, bdims, (shape,), (dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_perm={}_bdims={}".format(
jtu.format_shape_dtype_string(shape, dtype), perm, bdims),
"shape": shape, "dtype": dtype, "perm": perm, "bdims": bdims, "rng_factory": rng_factory}
for shape, perm in [
[(3, 4), (1, 0)],
[(3, 4), (0, 1)],
[(3, 4, 5), (2, 1, 0)],
[(3, 4, 5), (1, 0, 2)],
]
for bdims in all_bdims(shape)
for dtype in default_dtypes
for rng_factory in [jtu.rand_default]))
def testTranspose(self, shape, dtype, perm, bdims, rng_factory):
rng = rng_factory()
op = lambda x: lax.transpose(x, perm)
self._CheckBatching(op, 5, bdims, (shape,), (dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_inshape={}_reducedims={}_initval={}_bdims={}"
.format(op.__name__, jtu.format_shape_dtype_string(shape, dtype), dims,
init_val, bdims),
"op": op, "init_val": init_val, "shape": shape, "dtype": dtype,
"dims": dims, "bdims": bdims, "rng_factory": rng_factory}
for init_val, op, dtypes in [
(0, lax.add, default_dtypes),
(1, lax.mul, default_dtypes),
(0, lax.max, all_dtypes), # non-monoidal
(-onp.inf, lax.max, float_dtypes),
(dtypes.iinfo(onp.int32).min, lax.max, [onp.int32]),
(dtypes.iinfo(onp.int64).min, lax.max, [onp.int64]),
(dtypes.iinfo(onp.uint32).min, lax.max, [onp.uint32]),
(dtypes.iinfo(onp.uint64).min, lax.max, [onp.uint64]),
(onp.inf, lax.min, float_dtypes),
(dtypes.iinfo(onp.int32).max, lax.min, [onp.int32]),
(dtypes.iinfo(onp.int64).max, lax.min, [onp.int64]),
(dtypes.iinfo(onp.uint32).max, lax.min, [onp.uint32]),
(dtypes.iinfo(onp.uint64).max, lax.min, [onp.uint64]),
]
for dtype in dtypes
for shape, dims in [
[(3, 4, 5), (0,)], [(3, 4, 5), (1, 2)],
[(3, 4, 5), (0, 2)], [(3, 4, 5), (0, 1, 2)]
]
for bdims in all_bdims(shape)
for rng_factory in [jtu.rand_small]))
def testReduce(self, op, init_val, shape, dtype, dims, bdims, rng_factory):
rng = rng_factory()
init_val = onp.asarray(init_val, dtype=dtype)
fun = lambda operand: lax.reduce(operand, init_val, op, dims)
self._CheckBatching(fun, 5, bdims, (shape,), (dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_op={}_dtype={}_padding={}"
.format(op.__name__, onp.dtype(dtype).name, padding),
"op": op, "init_val": init_val, "dtype": dtype, "padding": padding,
"rng_factory": rng_factory}
for init_val, op, dtypes in [
(0, lax.add, [onp.float32]),
(-onp.inf, lax.max, [onp.float32]),
(onp.inf, lax.min, [onp.float32]),
]
for dtype in dtypes
for padding in ["VALID", "SAME"]
for rng_factory in [jtu.rand_small]))
def testReduceWindow(self, op, init_val, dtype, padding, rng_factory):
rng = rng_factory()
init_val = onp.asarray(init_val, dtype=dtype)
all_configs = itertools.chain(
itertools.product(
[(4, 6)],
[(2, 1), (1, 2)],
[(1, 1), (2, 1), (1, 2)]),
itertools.product(
[(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)],
[(1, 2, 2, 1), (1, 1, 1, 1)]))
def fun(operand):
return lax.reduce_window(operand, init_val, op, dims, strides, padding)
for shape, dims, strides in all_configs:
for bdims in all_bdims(shape):
self._CheckBatching(fun, 3, bdims, (shape,), (dtype,), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_dtype={}_padding={}".format(onp.dtype(dtype).name,
padding),
"dtype": dtype, "padding": padding, "rng_factory": rng_factory}
for dtype in float_dtypes
for padding in ["VALID", "SAME"]
for rng_factory in [jtu.rand_small]))
def testSelectAndGatherAdd(self, dtype, padding, rng_factory):
if jtu.device_under_test() == "tpu" and dtype == dtypes.bfloat16:
raise SkipTest("bfloat16 _select_and_gather_add doesn't work on tpu")
rng = rng_factory()
all_configs = itertools.chain(
itertools.product(
[(4, 6)],
[(2, 1), (1, 2)],
[(1, 1), (2, 1), (1, 2)]),
itertools.product(
[(3, 2, 4, 6)], [(1, 1, 2, 1), (2, 1, 2, 1)],
[(1, 2, 2, 1), (1, 1, 1, 1)]))
def fun(operand, tangents):
return lax._select_and_gather_add(operand, tangents, lax.ge_p, dims,
strides, padding)
for shape, dims, strides in all_configs:
for bdims in all_bdims(shape, shape):
self._CheckBatching(fun, 3, bdims, (shape, shape), (dtype, dtype), rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_bdims={}_fft_ndims={}"
.format(shape, bdims, fft_ndims),
"shape": shape, "bdims": bdims, "fft_ndims": fft_ndims, "rng_factory": rng_factory}
for shape in [(5,), (3, 4, 5), (2, 3, 4, 5)]
for bdims in all_bdims(shape)
for fft_ndims in range(0, min(3, len(shape)) + 1)
for rng_factory in [jtu.rand_default]))
@jtu.skip_on_devices("tpu") # TODO(b/137993701): unimplemented cases.
def testFft(self, fft_ndims, shape, bdims, rng_factory):
rng = rng_factory()
ndims = len(shape)
axes = range(ndims - fft_ndims, ndims)
fft_lengths = [shape[axis] for axis in axes]
op = lambda x: lax.fft(x, xla_client.FftType.FFT, fft_lengths)
self._CheckBatching(op, 5, bdims, [shape], [onp.complex64], rng)
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_dnums={}_slice_sizes={}_bdims={}"
.format(jtu.format_shape_dtype_string(shape, dtype), idxs, dnums,
slice_sizes, bdims),
"shape": shape, "dtype": dtype, "idxs": idxs, "dnums": dnums,
"slice_sizes": slice_sizes, "bdims": bdims}
for dtype in all_dtypes
for shape, idxs, dnums, slice_sizes in [
((5,), onp.array([[0], [2]]), lax.GatherDimensionNumbers(
offset_dims=(), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1,)),
((10,), onp.array([[0], [0], [0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(), start_index_map=(0,)),
(2,)),
((10, 5,), onp.array([[0], [2], [1]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0,)),
(1, 3)),
((10, 5), onp.array([[0, 2], [1, 0]]), lax.GatherDimensionNumbers(
offset_dims=(1,), collapsed_slice_dims=(0,), start_index_map=(0, 1)),
(1, 3)),
]
for bdims in all_bdims(shape, idxs.shape)))
def testGather(self, shape, dtype, idxs, dnums, slice_sizes, bdims):
fun = partial(lax.gather, dimension_numbers=dnums, slice_sizes=slice_sizes)
self._CheckBatching(fun, 5, bdims, [shape, idxs.shape], [dtype, idxs.dtype],
jtu.rand_default())
@parameterized.named_parameters(jtu.cases_from_list(
{"testcase_name": "_shape={}_idxs={}_update={}_dnums={}_bdims={}".format(
jtu.format_shape_dtype_string(arg_shape, dtype),
idxs, update_shape, dnums, bdims),
"arg_shape": arg_shape, "dtype": dtype, "idxs": idxs,
"update_shape": update_shape, "dnums": dnums, "bdims": bdims}
for dtype in float_dtypes
for arg_shape, idxs, update_shape, dnums in [
((5,), onp.array([[0], [2]]), (2,), lax.ScatterDimensionNumbers(
update_window_dims=(), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
((10,), onp.array([[0], [0], [0]]), (3, 2), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(),
scatter_dims_to_operand_dims=(0,))),
((10, 5,), onp.array([[0], [2], [1]]), (3, 3), lax.ScatterDimensionNumbers(
update_window_dims=(1,), inserted_window_dims=(0,),
scatter_dims_to_operand_dims=(0,))),
]
for bdims in all_bdims(arg_shape, idxs.shape, update_shape)))
def testScatterAdd(self, arg_shape, dtype, idxs, update_shape, dnums, bdims):
fun = partial(lax.scatter_add, dimension_numbers=dnums)
self._CheckBatching(fun, 5, bdims, [arg_shape, idxs.shape, update_shape],
[dtype, idxs.dtype, dtype], jtu.rand_default(),
rtol={onp.float16: 5e-3})
# TODO Concatenate
# TODO Reverse
# TODO DynamicSlice
# TODO DynamicUpdateSlice
# TODO Sort
# TODO SortKeyVal
# TODO Collapse
# TODO Scatter
if __name__ == '__main__':
absltest.main()
|
jax-master
|
tests/lax_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
import numpy as np
import six
import jax
from jax import numpy as jnp
from jax.interpreters import xla
_abstractify_args = [
3,
3.5,
np.int32(3),
np.uint32(7),
np.random.randn(3, 4, 5, 6),
np.arange(100, dtype=np.float32),
jnp.int64(-3),
jnp.array([1, 2, 3])
]
if six.PY3:
import enum
class AnEnum(enum.IntEnum):
A = 123
B = 456
_abstractify_args.append(AnEnum.B)
@pytest.mark.parametrize("arg", _abstractify_args)
def test_abstractify(benchmark, arg):
benchmark(xla.abstractify, arg)
|
jax-master
|
tests/benchmarks/xla.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('..'))
# -- Project information -----------------------------------------------------
project = 'JAX'
copyright = '2019, Google LLC. NumPy and SciPy documentation are copyright the respective authors.'
author = 'The JAX authors'
# The short X.Y version
version = ''
# The full version, including alpha/beta/rc tags
release = ''
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.intersphinx',
'sphinx.ext.mathjax',
'sphinx.ext.napoleon',
'sphinx.ext.viewcode',
'nbsphinx',
]
intersphinx_mapping = {
'python': ('https://docs.python.org/3/', None),
'numpy': ('https://docs.scipy.org/doc/numpy/', None),
'scipy': ('https://docs.scipy.org/doc/scipy/reference/', None),
}
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = [
# Slow notebook: long time to load tf.ds
'notebooks/neural_network_with_tfds_data.ipynb',
# Slow notebook
'notebooks/Neural_Network_and_Data_Loading.ipynb',
]
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = None
autosummary_generate = True
napoleon_use_rtype = False
# -- Options for nbsphinx -----------------------------------------------------
# Execute notebooks before conversion: 'always', 'never', 'auto' (default)
# We execute all notebooks, exclude the slow ones using 'exclude_patterns'
nbsphinx_execute = 'always'
# Use this kernel instead of the one stored in the notebook metadata:
#nbsphinx_kernel_name = 'python3'
# List of arguments to be passed to the kernel that executes the notebooks:
# nbsphinx_execute_arguments = []
# If True, the build process is continued even if an exception occurs:
#nbsphinx_allow_errors = True
# Controls when a cell will time out (defaults to 30; use -1 for no timeout):
nbsphinx_timeout = 90
# Default Pygments lexer for syntax highlighting in code cells:
#nbsphinx_codecell_lexer = 'ipython3'
# Width of input/output prompts used in CSS:
#nbsphinx_prompt_width = '8ex'
# If window is narrower than this, input/output prompts are on separate lines:
#nbsphinx_responsive_width = '700px'
# This is processed by Jinja2 and inserted before each notebook
nbsphinx_prolog = r"""
{% set docname = 'docs/' + env.doc2path(env.docname, base=None) %}
.. only:: html
.. role:: raw-html(raw)
:format: html
.. nbinfo::
Interactive online version:
:raw-html:`<a href="https://colab.research.google.com/github/google/jax/blob/master/{{ docname }}"><img alt="Open In Colab" src="https://colab.research.google.com/assets/colab-badge.svg" style="vertical-align:text-bottom"></a>`
__ https://github.com/google/jax/blob/
{{ env.config.release }}/{{ docname }}
"""
# This is processed by Jinja2 and inserted after each notebook
# nbsphinx_epilog = r"""
# """
# Input prompt for code cells. "%s" is replaced by the execution count.
#nbsphinx_input_prompt = 'In [%s]:'
# Output prompt for code cells. "%s" is replaced by the execution count.
#nbsphinx_output_prompt = 'Out[%s]:'
# Specify conversion functions for custom notebook formats:
#import jupytext
#nbsphinx_custom_formats = {
# '.Rmd': lambda s: jupytext.reads(s, '.Rmd'),
#}
# Link or path to require.js, set to empty string to disable
#nbsphinx_requirejs_path = ''
# Options for loading require.js
#nbsphinx_requirejs_options = {'async': 'async'}
# mathjax_config = {
# 'TeX': {'equationNumbers': {'autoNumber': 'AMS', 'useLabelIds': True}},
# }
# Additional files needed for generating LaTeX/PDF output:
# latex_additional_files = ['references.bib']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'sphinx_rtd_theme'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {
'logo_only': True,
}
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = '_static/jax_logo_250px.png'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'JAXdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'JAX.tex', 'JAX Documentation',
'The JAX authors', 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'jax', 'JAX Documentation',
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'JAX', 'JAX Documentation',
author, 'JAX', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output -------------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
# The unique identifier of the text. This can be a ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
# -- Extension configuration -------------------------------------------------
|
jax-master
|
docs/conf.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "0.1.37"
|
jax-master
|
jaxlib/version.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import operator
import numpy as np
from six.moves import reduce
from jaxlib import xla_client
try:
from jaxlib import cuda_prng_kernels
for _name, _value in cuda_prng_kernels.registrations().items():
xla_client.register_custom_call_target(_name, _value, platform="gpu")
except ImportError:
pass
_prod = lambda xs: reduce(operator.mul, xs, 1)
def threefry2x32(c, keys, data):
"""ThreeFry2x32 kernel for GPU."""
assert len(keys) == 2, keys
assert len(data) == 2, data
dims = c.GetShape(keys[0]).dimensions()
dtype = np.dtype(np.uint32)
for x in itertools.chain(keys, data):
x_shape = c.GetShape(x)
assert x_shape.element_type() == dtype
assert dims == x_shape.dimensions(), (dims, x_shape)
ndims = len(dims)
opaque = cuda_prng_kernels.cuda_threefry2x32_descriptor(_prod(dims))
layout = tuple(range(ndims - 1, -1, -1))
shape = xla_client.Shape.array_shape(dtype, dims, layout)
return c.CustomCall(
b"cuda_threefry2x32",
operands=(keys[0], keys[1], data[0], data[1]),
shape_with_layout=xla_client.Shape.tuple_shape([shape, shape]),
operand_shapes_with_layout=(shape,) * 4,
opaque=opaque)
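# Hedged note (added): the custom call above returns an XLA tuple of two uint32
# arrays, each with the same dimensions as the key/data operands, i.e. the two
# output words of the counter-based ThreeFry-2x32 generator.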
|
jax-master
|
jaxlib/cuda_prng.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import operator
import numpy as np
from six.moves import reduce
from jaxlib import xla_client
try:
from jaxlib import cublas_kernels
for _name, _value in cublas_kernels.registrations().items():
xla_client.register_custom_call_target(_name, _value, platform="gpu")
except ImportError:
pass
try:
from jaxlib import cusolver_kernels
for _name, _value in cusolver_kernels.registrations().items():
xla_client.register_custom_call_target(_name, _value, platform="gpu")
except ImportError:
pass
_Shape = xla_client.Shape
def _real_type(dtype):
"""Returns the real equivalent of 'dtype'."""
if dtype == np.float32:
return np.float32
elif dtype == np.float64:
return np.float64
elif dtype == np.complex64:
return np.float32
elif dtype == np.complex128:
return np.float64
else:
raise NotImplementedError("Unsupported dtype {}".format(dtype))
_prod = lambda xs: reduce(operator.mul, xs, 1)
def trsm(c, a, b, left_side=False, lower=False, trans_a=False, conj_a=False,
diag=False):
"""Batched triangular solve.
XLA implements unbatched triangular solve directly, so we need only implement
the batched case."""
b_shape = c.GetShape(b)
dtype = b_shape.element_type()
dims = b_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
k = m if left_side else n
a_shape = c.GetShape(a)
if (batch_dims + (k, k) != a_shape.dimensions() or
a_shape.element_type() != dtype):
raise ValueError("Argument mismatch for trsm, got {} and {}".format(
a_shape, b_shape))
if conj_a and not trans_a:
raise NotImplementedError("Conjugation without transposition not supported")
lwork, opaque = cublas_kernels.build_trsm_batched_descriptor(
np.dtype(dtype), batch, m, n, left_side, lower, trans_a, conj_a, diag)
layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))
out = c.CustomCall(
b"cublas_trsm_batched",
operands=(a, b),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, b_shape.dimensions(), layout),
_Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)),
_Shape.array_shape(np.dtype(np.int8), (lwork,), (0,)))),
operand_shapes_with_layout=(
_Shape.array_shape(dtype, a_shape.dimensions(), layout),
_Shape.array_shape(dtype, b_shape.dimensions(), layout),
),
opaque=opaque)
return c.GetTupleElement(out, 0)
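# Hedged usage sketch (added; names are illustrative, not from this module):
# given an XLA computation builder `c` with a triangular operand `a` of shape
# batch_dims + (m, m) and a right-hand side `b` of shape batch_dims + (m, n),
#
#   x = trsm(c, a, b, left_side=True, lower=True)
#
# emits the batched cublas kernel above and yields the solution of a @ x = b
# with the same shape and dtype as `b`.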
def getrf(c, a):
"""LU decomposition."""
a_shape = c.GetShape(a)
dtype = a_shape.element_type()
dims = a_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
if batch > 1 and m == n and m // batch <= 128:
lwork, opaque = cublas_kernels.build_getrf_batched_descriptor(
np.dtype(dtype), batch, m)
workspace = _Shape.array_shape(np.dtype(np.int8), (lwork,), (0,))
kernel = b"cublas_getrf_batched"
else:
lwork, opaque = cusolver_kernels.build_getrf_descriptor(
np.dtype(dtype), batch, m, n)
workspace = _Shape.array_shape(dtype, (lwork,), (0,))
kernel = b"cusolver_getrf"
out = c.CustomCall(
kernel,
operands=(a,),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(
dtype, batch_dims + (m, n),
(num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(
np.dtype(np.int32), batch_dims + (min(m, n),),
tuple(range(num_bd, -1, -1))),
_Shape.array_shape(
np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))),
workspace,
)),
operand_shapes_with_layout=(_Shape.array_shape(
dtype, batch_dims + (m, n),
(num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),),
opaque=opaque)
return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 1),
c.GetTupleElement(out, 2))
def geqrf(c, a):
"""QR decomposition."""
a_shape = c.GetShape(a)
dtype = a_shape.element_type()
dims = a_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
lwork, opaque = cusolver_kernels.build_geqrf_descriptor(
np.dtype(dtype), batch, m, n)
workspace = _Shape.array_shape(dtype, (lwork,), (0,))
kernel = b"cusolver_geqrf"
out = c.CustomCall(
kernel,
operands=(a,),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(
dtype, batch_dims + (m, n),
(num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(
dtype, batch_dims + (min(m, n),),
tuple(range(num_bd, -1, -1))),
_Shape.array_shape(
np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))),
workspace,
)),
operand_shapes_with_layout=(_Shape.array_shape(
dtype, batch_dims + (m, n),
(num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),),
opaque=opaque)
return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 1),
c.GetTupleElement(out, 2))
def orgqr(c, a, tau):
"""Product of elementary Householder reflections."""
a_shape = c.GetShape(a)
dtype = a_shape.element_type()
dims = a_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
tau_dims = c.GetShape(tau).dimensions()
assert tau_dims[:-1] == dims[:-2]
k = tau_dims[-1]
lwork, opaque = cusolver_kernels.build_orgqr_descriptor(
np.dtype(dtype), batch, m, n, k)
workspace = _Shape.array_shape(dtype, (lwork,), (0,))
kernel = b"cusolver_orgqr"
out = c.CustomCall(
kernel,
operands=(a, tau),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(
dtype, batch_dims + (m, n),
(num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(
np.dtype(np.int32), batch_dims, tuple(range(num_bd - 1, -1, -1))),
workspace,
)),
operand_shapes_with_layout=(
_Shape.array_shape(
dtype, batch_dims + (m, n),
(num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(
dtype, batch_dims + (k,),
tuple(range(num_bd, -1, -1))),
),
opaque=opaque)
return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 1))
def syevd(c, a, lower=False):
"""Symmetric (Hermitian) eigendecomposition."""
a_shape = c.GetShape(a)
dtype = a_shape.element_type()
dims = a_shape.dimensions()
assert len(dims) >= 2
m, n = dims[-2:]
assert m == n
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
batch = _prod(batch_dims)
layout = (num_bd, num_bd + 1) + tuple(range(num_bd - 1, -1, -1))
if n <= 32:
kernel = b"cusolver_syevj"
lwork, opaque = cusolver_kernels.build_syevj_descriptor(
np.dtype(dtype), lower, batch, n)
else:
kernel = b"cusolver_syevd"
lwork, opaque = cusolver_kernels.build_syevd_descriptor(
np.dtype(dtype), lower, batch, n)
eigvals_type = _real_type(dtype)
out = c.CustomCall(
kernel,
operands=(a,),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, dims, layout),
_Shape.array_shape(
np.dtype(eigvals_type), batch_dims + (n,),
tuple(range(num_bd, -1, -1))),
_Shape.array_shape(
np.dtype(np.int32), batch_dims,
tuple(range(num_bd - 1, -1, -1))),
_Shape.array_shape(dtype, (lwork,), (0,))
)),
operand_shapes_with_layout=(
_Shape.array_shape(dtype, dims, layout),
),
opaque=opaque)
return (c.GetTupleElement(out, 0), c.GetTupleElement(out, 1),
c.GetTupleElement(out, 2))
def gesvd(c, a, full_matrices=True, compute_uv=True):
"""Singular value decomposition."""
a_shape = c.GetShape(a)
dims = a_shape.dimensions()
dtype = a_shape.element_type()
assert len(dims) >= 2
m, n = dims[-2:]
batch_dims = tuple(dims[:-2])
num_bd = len(batch_dims)
b = _prod(batch_dims)
singular_vals_dtype = np.dtype(_real_type(dtype))
if m < 32 and n < 32:
lwork, opaque = cusolver_kernels.build_gesvdj_descriptor(
np.dtype(dtype), b, m, n, compute_uv)
scalar_layout = tuple(range(num_bd - 1, -1, -1))
vector_layout = (num_bd,) + scalar_layout
matrix_layout = (num_bd, num_bd + 1) + scalar_layout
out = c.CustomCall(
b"cusolver_gesvdj",
operands=(a,),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),
_Shape.array_shape(singular_vals_dtype, batch_dims + (min(m, n),),
vector_layout),
_Shape.array_shape(dtype, batch_dims + (m, m), matrix_layout),
_Shape.array_shape(dtype, batch_dims + (n, n), matrix_layout),
_Shape.array_shape(np.dtype(np.int32), batch_dims, scalar_layout),
_Shape.array_shape(dtype, (lwork,), (0,)),
)),
operand_shapes_with_layout=(
_Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),
),
opaque=opaque)
s = c.GetTupleElement(out, 1)
u = c.GetTupleElement(out, 2)
v = c.GetTupleElement(out, 3)
info = c.GetTupleElement(out, 4)
vt = c.Transpose(v, tuple(range(num_bd)) + (num_bd + 1, num_bd))
if np.issubdtype(dtype, np.complexfloating):
vt = c.Conj(vt)
elif m < n:
lwork, opaque = cusolver_kernels.build_gesvd_descriptor(
np.dtype(dtype), b, n, m, compute_uv, full_matrices)
scalar_layout = tuple(range(num_bd - 1, -1, -1))
vector_layout = (num_bd,) + scalar_layout
matrix_layout = (num_bd + 1, num_bd) + scalar_layout
out = c.CustomCall(
b"cusolver_gesvd",
operands=(a,),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),
_Shape.array_shape(singular_vals_dtype, batch_dims + (min(m, n),),
vector_layout),
_Shape.array_shape(dtype, batch_dims + (n, n), matrix_layout),
_Shape.array_shape(dtype, batch_dims + (m, m), matrix_layout),
_Shape.array_shape(np.dtype(np.int32), batch_dims, scalar_layout),
_Shape.array_shape(dtype, (lwork,), (0,)),
)),
operand_shapes_with_layout=(
_Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),
),
opaque=opaque)
s = c.GetTupleElement(out, 1)
vt = c.GetTupleElement(out, 2)
u = c.GetTupleElement(out, 3)
info = c.GetTupleElement(out, 4)
else:
lwork, opaque = cusolver_kernels.build_gesvd_descriptor(
np.dtype(dtype), b, m, n, compute_uv, full_matrices)
scalar_layout = tuple(range(num_bd - 1, -1, -1))
vector_layout = (num_bd,) + scalar_layout
matrix_layout = (num_bd, num_bd + 1) + scalar_layout
out = c.CustomCall(
b"cusolver_gesvd",
operands=(a,),
shape_with_layout=_Shape.tuple_shape((
_Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),
_Shape.array_shape(singular_vals_dtype, batch_dims + (min(m, n),),
vector_layout),
_Shape.array_shape(dtype, batch_dims + (m, m), matrix_layout),
_Shape.array_shape(dtype, batch_dims + (n, n), matrix_layout),
_Shape.array_shape(np.dtype(np.int32), batch_dims, scalar_layout),
_Shape.array_shape(dtype, (lwork,), (0,)),
)),
operand_shapes_with_layout=(
_Shape.array_shape(dtype, batch_dims + (m, n), matrix_layout),
),
opaque=opaque)
s = c.GetTupleElement(out, 1)
u = c.GetTupleElement(out, 2)
vt = c.GetTupleElement(out, 3)
info = c.GetTupleElement(out, 4)
if not full_matrices:
u = c.Slice(u, (0,) * len(dims), batch_dims + (m, min(m, n)))
vt = c.Slice(vt, (0,) * len(dims), batch_dims + (min(m, n), n))
return s, u, vt, info
|
jax-master
|
jaxlib/cusolver.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
from absl.testing import absltest
from absl.testing import parameterized
import numpy as onp
from jax import test_util as jtu
from jax import random
import jax.numpy as np
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from examples import kernel_lsq
from examples import resnet50
sys.path.pop()
from jax.config import config
config.parse_flags_with_absl()
FLAGS = config.FLAGS
def _CheckShapeAgreement(test_case, init_fun, apply_fun, input_shape):
jax_rng = random.PRNGKey(0)
result_shape, params = init_fun(jax_rng, input_shape)
rng = onp.random.RandomState(0)
result = apply_fun(params, rng.randn(*input_shape).astype(dtype="float32"))
test_case.assertEqual(result.shape, result_shape)
class ExamplesTest(jtu.JaxTestCase):
@parameterized.named_parameters(
{"testcase_name": "_input_shape={}".format(input_shape),
"input_shape": input_shape}
for input_shape in [(2, 20, 25, 2)])
@jtu.skip_on_flag('jax_enable_x64', True)
def testIdentityBlockShape(self, input_shape):
init_fun, apply_fun = resnet50.IdentityBlock(2, (4, 3))
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(
{"testcase_name": "_input_shape={}".format(input_shape),
"input_shape": input_shape}
for input_shape in [(2, 20, 25, 3)])
@jtu.skip_on_flag('jax_enable_x64', True)
def testConvBlockShape(self, input_shape):
init_fun, apply_fun = resnet50.ConvBlock(3, (2, 3, 4))
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
@parameterized.named_parameters(
{"testcase_name": "_num_classes={}_input_shape={}"
.format(num_classes, input_shape),
"num_classes": num_classes, "input_shape": input_shape}
for num_classes in [5, 10]
for input_shape in [(224, 224, 3, 2)])
@jtu.skip_on_flag('jax_enable_x64', True)
def testResNet50Shape(self, num_classes, input_shape):
init_fun, apply_fun = resnet50.ResNet50(num_classes)
_CheckShapeAgreement(self, init_fun, apply_fun, input_shape)
def testKernelRegressionGram(self):
n, d = 100, 20
rng = onp.random.RandomState(0)
truth = rng.randn(d)
xs = rng.randn(n, d)
ys = np.dot(xs, truth)
kernel = lambda x, y: np.dot(x, y)
self.assertAllClose(kernel_lsq.gram(kernel, xs), np.dot(xs, xs.T),
check_dtypes=False)
def testKernelRegressionTrainAndPredict(self):
n, d = 100, 20
rng = onp.random.RandomState(0)
truth = rng.randn(d)
xs = rng.randn(n, d)
ys = np.dot(xs, truth)
kernel = lambda x, y: np.dot(x, y)
predict = kernel_lsq.train(kernel, xs, ys)
self.assertAllClose(predict(xs), ys, atol=1e-3, rtol=1e-3,
check_dtypes=False)
if __name__ == "__main__":
absltest.main()
|
jax-master
|
examples/examples_test.py
|
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""JAX efficiently trains a differentially private conv net on MNIST.
This script contains a JAX implementation of Differentially Private Stochastic
Gradient Descent (https://arxiv.org/abs/1607.00133). DPSGD requires clipping
the per-example parameter gradients, which is non-trivial to implement
efficiently for convolutional neural networks. The JAX XLA compiler shines in
this setting by optimizing the minibatch-vectorized computation for
convolutional architectures. Train time takes a few seconds per epoch on a
commodity GPU.
This code depends on tensorflow_privacy (https://github.com/tensorflow/privacy)
Install instructions:
$ pip install tensorflow
$ git clone https://github.com/tensorflow/privacy
$ cd privacy
$ pip install .
The results match those in the reference TensorFlow baseline implementation:
https://github.com/tensorflow/privacy/tree/master/tutorials
Example invocations:
# this non-private baseline should get ~99% acc
python -m examples.differentially_private_sgd \
--dpsgd=False \
--learning_rate=.1 \
--epochs=20 \
# this private baseline should get ~95% acc
python -m examples.differentially_private_sgd \
--dpsgd=True \
--noise_multiplier=1.3 \
--l2_norm_clip=1.5 \
--epochs=15 \
--learning_rate=.25 \
# this private baseline should get ~96.6% acc
python -m examples.differentially_private_sgd \
--dpsgd=True \
--noise_multiplier=1.1 \
--l2_norm_clip=1.0 \
--epochs=60 \
--learning_rate=.15 \
# this private baseline should get ~97% acc
python -m examples.differentially_private_sgd \
--dpsgd=True \
--noise_multiplier=0.7 \
--l2_norm_clip=1.5 \
--epochs=45 \
--learning_rate=.25 \
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import time
import warnings
from absl import app
from absl import flags
from jax import grad
from jax import jit
from jax import partial
from jax import random
from jax import tree_util
from jax import vmap
from jax.experimental import optimizers
from jax.experimental import stax
from jax.lax import stop_gradient
import jax.numpy as np
from examples import datasets
import numpy.random as npr
# https://github.com/tensorflow/privacy
from privacy.analysis.rdp_accountant import compute_rdp
from privacy.analysis.rdp_accountant import get_privacy_spent
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
'dpsgd', True, 'If True, train with DP-SGD. If False, '
'train with vanilla SGD.')
flags.DEFINE_float('learning_rate', .15, 'Learning rate for training')
flags.DEFINE_float('noise_multiplier', 1.1,
'Ratio of the standard deviation to the clipping norm')
flags.DEFINE_float('l2_norm_clip', 1.0, 'Clipping norm')
flags.DEFINE_integer('batch_size', 256, 'Batch size')
flags.DEFINE_integer('epochs', 60, 'Number of epochs')
flags.DEFINE_integer('seed', 0, 'Seed for jax PRNG')
flags.DEFINE_integer(
'microbatches', None, 'Number of microbatches '
'(must evenly divide batch_size)')
flags.DEFINE_string('model_dir', None, 'Model directory')
init_random_params, predict = stax.serial(
stax.Conv(16, (8, 8), padding='SAME', strides=(2, 2)),
stax.Relu,
stax.MaxPool((2, 2), (1, 1)),
stax.Conv(32, (4, 4), padding='VALID', strides=(2, 2)),
stax.Relu,
stax.MaxPool((2, 2), (1, 1)),
stax.Flatten,
stax.Dense(32),
stax.Relu,
stax.Dense(10),
)
def loss(params, batch):
inputs, targets = batch
logits = predict(params, inputs)
logits = stax.logsoftmax(logits) # log normalize
return -np.mean(np.sum(logits * targets, axis=1)) # cross entropy loss
def accuracy(params, batch):
inputs, targets = batch
target_class = np.argmax(targets, axis=1)
predicted_class = np.argmax(predict(params, inputs), axis=1)
return np.mean(predicted_class == target_class)
def private_grad(params, batch, rng, l2_norm_clip, noise_multiplier,
batch_size):
"""Return differentially private gradients for params, evaluated on batch."""
def _clipped_grad(params, single_example_batch):
"""Evaluate gradient for a single-example batch and clip its grad norm."""
grads = grad(loss)(params, single_example_batch)
nonempty_grads, tree_def = tree_util.tree_flatten(grads)
total_grad_norm = np.linalg.norm(
[np.linalg.norm(neg.ravel()) for neg in nonempty_grads])
divisor = stop_gradient(np.amax((total_grad_norm / l2_norm_clip, 1.)))
normalized_nonempty_grads = [g / divisor for g in nonempty_grads]
return tree_util.tree_unflatten(tree_def, normalized_nonempty_grads)
px_clipped_grad_fn = vmap(partial(_clipped_grad, params))
std_dev = l2_norm_clip * noise_multiplier
noise_ = lambda n: n + std_dev * random.normal(rng, n.shape)
normalize_ = lambda n: n / float(batch_size)
tree_map = tree_util.tree_map
sum_ = lambda n: np.sum(n, 0) # aggregate
aggregated_clipped_grads = tree_map(sum_, px_clipped_grad_fn(batch))
noised_aggregated_clipped_grads = tree_map(noise_, aggregated_clipped_grads)
normalized_noised_aggregated_clipped_grads = (
tree_map(normalize_, noised_aggregated_clipped_grads)
)
return normalized_noised_aggregated_clipped_grads
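# Hedged usage note (added; `images`/`labels` are illustrative names): in
# `private_update` below, `private_grad` is called on a batch reshaped with a
# dummy per-example leading axis so the inner vmap clips each example's
# gradient before summing, noising, and normalizing, e.g.
#
#   grads = private_grad(params,
#                        shape_as_image(images, labels, dummy_dim=True),
#                        rng, FLAGS.l2_norm_clip, FLAGS.noise_multiplier,
#                        FLAGS.batch_size)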
def shape_as_image(images, labels, dummy_dim=False):
target_shape = (-1, 1, 28, 28, 1) if dummy_dim else (-1, 28, 28, 1)
return np.reshape(images, target_shape), labels
def compute_epsilon(steps, num_examples=60000, target_delta=1e-5):
if num_examples * target_delta > 1.:
warnings.warn('Your delta might be too high.')
q = FLAGS.batch_size / float(num_examples)
  orders = list(np.linspace(1.1, 10.9, 99)) + list(range(11, 64))
rdp_const = compute_rdp(q, FLAGS.noise_multiplier, steps, orders)
eps, _, _ = get_privacy_spent(orders, rdp_const, target_delta=target_delta)
return eps
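# Hedged example (added; exact values depend on the flags and are not asserted
# here): the training loop in main() below reports the privacy loss per epoch
# via
#   eps = compute_epsilon(epoch * steps_per_epoch, num_examples, delta)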
def main(_):
if FLAGS.microbatches:
raise NotImplementedError(
'Microbatches < batch size not currently supported'
)
train_images, train_labels, test_images, test_labels = datasets.mnist()
num_train = train_images.shape[0]
num_complete_batches, leftover = divmod(num_train, FLAGS.batch_size)
num_batches = num_complete_batches + bool(leftover)
key = random.PRNGKey(FLAGS.seed)
def data_stream():
rng = npr.RandomState(FLAGS.seed)
while True:
perm = rng.permutation(num_train)
for i in range(num_batches):
batch_idx = perm[i * FLAGS.batch_size:(i + 1) * FLAGS.batch_size]
yield train_images[batch_idx], train_labels[batch_idx]
batches = data_stream()
opt_init, opt_update, get_params = optimizers.sgd(FLAGS.learning_rate)
@jit
def update(_, i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(loss)(params, batch), opt_state)
@jit
def private_update(rng, i, opt_state, batch):
params = get_params(opt_state)
rng = random.fold_in(rng, i) # get new key for new random numbers
return opt_update(
i,
private_grad(params, batch, rng, FLAGS.l2_norm_clip,
FLAGS.noise_multiplier, FLAGS.batch_size), opt_state)
_, init_params = init_random_params(key, (-1, 28, 28, 1))
opt_state = opt_init(init_params)
itercount = itertools.count()
steps_per_epoch = 60000 // FLAGS.batch_size
print('\nStarting training...')
for epoch in range(1, FLAGS.epochs + 1):
start_time = time.time()
# pylint: disable=no-value-for-parameter
for _ in range(num_batches):
if FLAGS.dpsgd:
opt_state = \
private_update(
key, next(itercount), opt_state,
shape_as_image(*next(batches), dummy_dim=True))
else:
opt_state = update(
key, next(itercount), opt_state, shape_as_image(*next(batches)))
# pylint: enable=no-value-for-parameter
epoch_time = time.time() - start_time
print('Epoch {} in {:0.2f} sec'.format(epoch, epoch_time))
# evaluate test accuracy
params = get_params(opt_state)
test_acc = accuracy(params, shape_as_image(test_images, test_labels))
test_loss = loss(params, shape_as_image(test_images, test_labels))
print('Test set loss, accuracy (%): ({:.2f}, {:.2f})'.format(
test_loss, 100 * test_acc))
# determine privacy loss so far
if FLAGS.dpsgd:
delta = 1e-5
num_examples = 60000
eps = compute_epsilon(epoch * steps_per_epoch, num_examples, delta)
print(
'For delta={:.0e}, the current epsilon is: {:.2f}'.format(delta, eps))
else:
print('Trained with vanilla non-private SGD optimizer')
if __name__ == '__main__':
app.run(main)
|
jax-master
|
examples/differentially_private_sgd.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic MNIST example using JAX with the mini-libraries stax and optimizers.
The mini-library jax.experimental.stax is for neural network building, and
the mini-library jax.experimental.optimizers is for first-order stochastic
optimization.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import itertools
import numpy.random as npr
import jax.numpy as np
from jax.config import config
from jax import jit, grad, random
from jax.experimental import optimizers
from jax.experimental import stax
from jax.experimental.stax import Dense, Relu, LogSoftmax
from examples import datasets
def loss(params, batch):
inputs, targets = batch
preds = predict(params, inputs)
return -np.mean(np.sum(preds * targets, axis=1))
def accuracy(params, batch):
inputs, targets = batch
target_class = np.argmax(targets, axis=1)
predicted_class = np.argmax(predict(params, inputs), axis=1)
return np.mean(predicted_class == target_class)
init_random_params, predict = stax.serial(
Dense(1024), Relu,
Dense(1024), Relu,
Dense(10), LogSoftmax)
if __name__ == "__main__":
rng = random.PRNGKey(0)
step_size = 0.001
num_epochs = 10
batch_size = 128
momentum_mass = 0.9
train_images, train_labels, test_images, test_labels = datasets.mnist()
num_train = train_images.shape[0]
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
def data_stream():
rng = npr.RandomState(0)
while True:
perm = rng.permutation(num_train)
for i in range(num_batches):
batch_idx = perm[i * batch_size:(i + 1) * batch_size]
yield train_images[batch_idx], train_labels[batch_idx]
batches = data_stream()
opt_init, opt_update, get_params = optimizers.momentum(step_size, mass=momentum_mass)
@jit
def update(i, opt_state, batch):
params = get_params(opt_state)
return opt_update(i, grad(loss)(params, batch), opt_state)
_, init_params = init_random_params(rng, (-1, 28 * 28))
opt_state = opt_init(init_params)
itercount = itertools.count()
print("\nStarting training...")
for epoch in range(num_epochs):
start_time = time.time()
for _ in range(num_batches):
opt_state = update(next(itercount), opt_state, next(batches))
epoch_time = time.time() - start_time
params = get_params(opt_state)
train_acc = accuracy(params, (train_images, train_labels))
test_acc = accuracy(params, (test_images, test_labels))
print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time))
print("Training set accuracy {}".format(train_acc))
print("Test set accuracy {}".format(test_acc))
|
jax-master
|
examples/mnist_classifier.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic example demonstrating using JAX to do Gaussian process regression.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import app
from absl import flags
from functools import partial
from jax import grad
from jax import jit
from jax import vmap
from jax.config import config
import jax.numpy as np
import jax.random as random
import jax.scipy as scipy
import matplotlib.pyplot as plt
FLAGS = flags.FLAGS
def main(unused_argv):
numpts = 7
key = random.PRNGKey(0)
eye = np.eye(numpts)
def cov_map(cov_func, xs, xs2=None):
"""Compute a covariance matrix from a covariance function and data points.
Args:
cov_func: callable function, maps pairs of data points to scalars.
xs: array of data points, stacked along the leading dimension.
Returns:
A 2d array `a` such that `a[i, j] = cov_func(xs[i], xs[j])`.
"""
if xs2 is None:
return vmap(lambda x: vmap(lambda y: cov_func(x, y))(xs))(xs)
else:
return vmap(lambda x: vmap(lambda y: cov_func(x, y))(xs))(xs2).T
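  # Illustrative usage (added; see `gp` below): with the `exp_quadratic`
  # kernel defined next,
  #   cov_map(exp_quadratic, x)         # (numpts, numpts) training covariance
  #   cov_map(exp_quadratic, x, xtest)  # (numpts, len(xtest)) cross-covariance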
def softplus(x):
return np.logaddexp(x, 0.)
# Note, writing out the vectorized form of the identity
# ||x-y||^2 = <x-y,x-y> = ||x||^2 + ||y||^2 - 2<x,y>
# for computing squared distances would be more efficient (but less succinct).
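  # A minimal sketch of that vectorized form (added; unused helper, shapes
  # assumed (n, d) and (m, d)):
  #   def sq_dists(xs, ys):
  #     return (np.sum(xs**2, axis=1)[:, None] + np.sum(ys**2, axis=1)[None, :]
  #             - 2. * np.dot(xs, ys.T))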
def exp_quadratic(x1, x2):
return np.exp(-np.sum((x1 - x2)**2))
def gp(params, x, y, xtest=None, compute_marginal_likelihood=False):
noise = softplus(params['noise'])
amp = softplus(params['amplitude'])
ls = softplus(params['lengthscale'])
ymean = np.mean(y)
y = y - ymean
x = x / ls
train_cov = amp*cov_map(exp_quadratic, x) + eye * (noise + 1e-6)
chol = scipy.linalg.cholesky(train_cov, lower=True)
kinvy = scipy.linalg.solve_triangular(
chol.T, scipy.linalg.solve_triangular(chol, y, lower=True))
if compute_marginal_likelihood:
log2pi = np.log(2. * 3.1415)
ml = np.sum(
-0.5 * np.dot(y.T, kinvy) -
np.sum(np.log(np.diag(chol))) -
(numpts / 2.) * log2pi)
ml -= np.sum(-0.5 * np.log(2 * 3.1415) - np.log(amp)**2) # lognormal prior
return -ml
if xtest is not None:
xtest = xtest / ls
cross_cov = amp*cov_map(exp_quadratic, x, xtest)
mu = np.dot(cross_cov.T, kinvy) + ymean
v = scipy.linalg.solve_triangular(chol, cross_cov, lower=True)
var = (amp * cov_map(exp_quadratic, xtest) - np.dot(v.T, v))
return mu, var
marginal_likelihood = partial(gp, compute_marginal_likelihood=True)
predict = partial(gp, compute_marginal_likelihood=False)
grad_fun = jit(grad(marginal_likelihood))
# Covariance hyperparameters to be learned
params = {"amplitude": np.zeros((1, 1)),
"noise": np.zeros((1, 1)) - 5.,
"lengthscale": np.zeros((1, 1))}
momentums = dict([(k, p * 0.) for k, p in params.items()])
scales = dict([(k, p * 0. + 1.) for k, p in params.items()])
lr = 0.01 # Learning rate
def train_step(params, momentums, scales, x, y):
grads = grad_fun(params, x, y)
    for k in params:
momentums[k] = 0.9 * momentums[k] + 0.1 * grads[k][0]
scales[k] = 0.9 * scales[k] + 0.1 * grads[k][0]**2
params[k] -= lr * momentums[k]/np.sqrt(scales[k] + 1e-5)
return params, momentums, scales
# Create a really simple toy 1D function
y_fun = lambda x: np.sin(x) + 0.1 * random.normal(key, shape=(x.shape[0], 1))
x = (random.uniform(key, shape=(numpts, 1)) * 4.) + 1
y = y_fun(x)
xtest = np.linspace(0, 6., 200)[:, None]
ytest = y_fun(xtest)
for i in range(1000):
params, momentums, scales = train_step(params, momentums, scales, x, y)
if i % 50 == 0:
ml = marginal_likelihood(params, x, y)
print("Step: %d, neg marginal likelihood: %f" % (i, ml))
print(params)
mu, var = predict(params, x, y, xtest)
std = np.sqrt(np.diag(var))
plt.plot(x, y, "k.")
plt.plot(xtest, mu)
plt.fill_between(xtest.flatten(),
                   mu.flatten() - std * 2, mu.flatten() + std * 2)
  plt.show()  # display the figure when run as a script
if __name__ == "__main__":
config.config_with_absl()
app.run(main)
|
jax-master
|
examples/gaussian_process_regression.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
from six.moves import xrange
import numpy.random as npr
import jax.numpy as np
from jax.config import config
from jax.experimental import optimizers
from jax import grad, jit, make_jaxpr, vmap
def gram(kernel, xs):
'''Compute a Gram matrix from a kernel and an array of data points.
Args:
kernel: callable, maps pairs of data points to scalars.
xs: array of data points, stacked along the leading dimension.
Returns:
A 2d array `a` such that `a[i, j] = kernel(xs[i], xs[j])`.
'''
return vmap(lambda x: vmap(lambda y: kernel(x, y))(xs))(xs)
def minimize(f, x, num_steps=10000, step_size=0.000001, mass=0.9):
opt_init, opt_update, get_params = optimizers.momentum(step_size, mass)
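  # optimizers.momentum returns an (init_fun, update_fun, get_params) triple;
  # only the per-step update is jitted, and the loop runs in Python.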
@jit
def update(i, opt_state):
x = get_params(opt_state)
return opt_update(i, grad(f)(x), opt_state)
opt_state = opt_init(x)
for i in xrange(num_steps):
opt_state = update(i, opt_state)
return get_params(opt_state)
def train(kernel, xs, ys, regularization=0.01):
gram_ = jit(partial(gram, kernel))
gram_mat = gram_(xs)
n = xs.shape[0]
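  # Regularized kernel least squares: choose coefficients v to minimize
  #   0.5 * ||K v - ys||^2 + regularization * ||v||^2,
  # then predict with f(x) = sum_i v_i * kernel(x, xs[i]).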
def objective(v):
risk = .5 * np.sum((np.dot(gram_mat, v) - ys) ** 2.0)
reg = regularization * np.sum(v ** 2.0)
return risk + reg
v = minimize(objective, np.zeros(n))
def predict(x):
prods = vmap(lambda x_: kernel(x, x_))(xs)
return np.sum(v * prods)
return jit(vmap(predict))
if __name__ == "__main__":
n = 100
d = 20
# linear kernel
linear_kernel = lambda x, y: np.dot(x, y)
truth = npr.randn(d)
xs = npr.randn(n, d)
ys = np.dot(xs, truth)
predict = train(linear_kernel, xs, ys)
  print('MSE:', np.mean((predict(xs) - ys) ** 2.))
def gram_jaxpr(kernel):
return make_jaxpr(partial(gram, kernel))(xs)
rbf_kernel = lambda x, y: np.exp(-np.sum((x - y) ** 2))
print()
print('jaxpr of gram(linear_kernel):')
print(gram_jaxpr(linear_kernel))
print()
print('jaxpr of gram(rbf_kernel):')
print(gram_jaxpr(rbf_kernel))
|
jax-master
|
examples/kernel_lsq.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A basic MNIST example using Numpy and JAX.
The primary aim here is simplicity and minimal dependencies.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy.random as npr
from jax.api import jit, grad
from jax.config import config
from jax.scipy.special import logsumexp
import jax.numpy as np
from examples import datasets
def init_random_params(scale, layer_sizes, rng=npr.RandomState(0)):
return [(scale * rng.randn(m, n), scale * rng.randn(n))
          for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
def predict(params, inputs):
activations = inputs
for w, b in params[:-1]:
outputs = np.dot(activations, w) + b
activations = np.tanh(outputs)
final_w, final_b = params[-1]
logits = np.dot(activations, final_w) + final_b
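  # Subtracting logsumexp over the class axis yields log-softmax outputs, so
  # the cross entropy in `loss` below is numerically stable.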
return logits - logsumexp(logits, axis=1, keepdims=True)
def loss(params, batch):
inputs, targets = batch
preds = predict(params, inputs)
return -np.mean(np.sum(preds * targets, axis=1))
def accuracy(params, batch):
inputs, targets = batch
target_class = np.argmax(targets, axis=1)
predicted_class = np.argmax(predict(params, inputs), axis=1)
return np.mean(predicted_class == target_class)
if __name__ == "__main__":
layer_sizes = [784, 1024, 1024, 10]
param_scale = 0.1
step_size = 0.001
num_epochs = 10
batch_size = 128
train_images, train_labels, test_images, test_labels = datasets.mnist()
num_train = train_images.shape[0]
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
def data_stream():
rng = npr.RandomState(0)
while True:
perm = rng.permutation(num_train)
for i in range(num_batches):
batch_idx = perm[i * batch_size:(i + 1) * batch_size]
yield train_images[batch_idx], train_labels[batch_idx]
batches = data_stream()
@jit
def update(params, batch):
grads = grad(loss)(params, batch)
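    # Plain SGD: step each (weight, bias) pair along its negative gradient.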
return [(w - step_size * dw, b - step_size * db)
for (w, b), (dw, db) in zip(params, grads)]
params = init_random_params(param_scale, layer_sizes)
for epoch in range(num_epochs):
start_time = time.time()
for _ in range(num_batches):
params = update(params, next(batches))
epoch_time = time.time() - start_time
train_acc = accuracy(params, (train_images, train_labels))
test_acc = accuracy(params, (test_images, test_labels))
print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time))
print("Training set accuracy {}".format(train_acc))
print("Test set accuracy {}".format(test_acc))
|
jax-master
|
examples/mnist_classifier_fromscratch.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Datasets used in examples."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import array
import gzip
import os
from os import path
import struct
from six.moves.urllib.request import urlretrieve
import numpy as np
_DATA = "/tmp/jax_example_data/"
def _download(url, filename):
"""Download a url to a file in the JAX data temp directory."""
if not path.exists(_DATA):
os.makedirs(_DATA)
out_file = path.join(_DATA, filename)
if not path.isfile(out_file):
urlretrieve(url, out_file)
print("downloaded {} to {}".format(url, _DATA))
def _partial_flatten(x):
"""Flatten all but the first dimension of an ndarray."""
return np.reshape(x, (x.shape[0], -1))
def _one_hot(x, k, dtype=np.float32):
"""Create a one-hot encoding of x of size k."""
return np.array(x[:, None] == np.arange(k), dtype)
def mnist_raw():
"""Download and parse the raw MNIST dataset."""
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
base_url = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def parse_labels(filename):
with gzip.open(filename, "rb") as fh:
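      # IDX label files begin with an 8-byte header (magic number, item
      # count); skip it, then read the remaining bytes as uint8 labels.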
_ = struct.unpack(">II", fh.read(8))
return np.array(array.array("B", fh.read()), dtype=np.uint8)
def parse_images(filename):
with gzip.open(filename, "rb") as fh:
_, num_data, rows, cols = struct.unpack(">IIII", fh.read(16))
return np.array(array.array("B", fh.read()),
dtype=np.uint8).reshape(num_data, rows, cols)
for filename in ["train-images-idx3-ubyte.gz", "train-labels-idx1-ubyte.gz",
"t10k-images-idx3-ubyte.gz", "t10k-labels-idx1-ubyte.gz"]:
_download(base_url + filename, filename)
train_images = parse_images(path.join(_DATA, "train-images-idx3-ubyte.gz"))
train_labels = parse_labels(path.join(_DATA, "train-labels-idx1-ubyte.gz"))
test_images = parse_images(path.join(_DATA, "t10k-images-idx3-ubyte.gz"))
test_labels = parse_labels(path.join(_DATA, "t10k-labels-idx1-ubyte.gz"))
return train_images, train_labels, test_images, test_labels
def mnist(permute_train=False):
"""Download, parse and process MNIST data to unit scale and one-hot labels."""
train_images, train_labels, test_images, test_labels = mnist_raw()
train_images = _partial_flatten(train_images) / np.float32(255.)
test_images = _partial_flatten(test_images) / np.float32(255.)
train_labels = _one_hot(train_labels, 10)
test_labels = _one_hot(test_labels, 10)
if permute_train:
perm = np.random.RandomState(0).permutation(train_images.shape[0])
train_images = train_images[perm]
train_labels = train_labels[perm]
return train_images, train_labels, test_images, test_labels
|
jax-master
|
examples/datasets.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
|
jax-master
|
examples/__init__.py
|
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An MNIST example with single-program multiple-data (SPMD) data parallelism.
The aim here is to illustrate how to use JAX's `pmap` to express and execute
SPMD programs for data parallelism along a batch dimension, while also
minimizing dependencies by avoiding the use of higher-level layer and
optimizer libraries.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import time
import numpy as onp
import numpy.random as npr
from jax import jit, grad, pmap
from jax.config import config
from jax.scipy.special import logsumexp
from jax.lib import xla_bridge
from jax.tree_util import tree_map
from jax import lax
import jax.numpy as np
from examples import datasets
def init_random_params(scale, layer_sizes, rng=npr.RandomState(0)):
return [(scale * rng.randn(m, n), scale * rng.randn(n))
          for m, n in zip(layer_sizes[:-1], layer_sizes[1:])]
def predict(params, inputs):
activations = inputs
for w, b in params[:-1]:
outputs = np.dot(activations, w) + b
activations = np.tanh(outputs)
final_w, final_b = params[-1]
logits = np.dot(activations, final_w) + final_b
return logits - logsumexp(logits, axis=1, keepdims=True)
def loss(params, batch):
inputs, targets = batch
preds = predict(params, inputs)
return -np.mean(np.sum(preds * targets, axis=1))
@jit
def accuracy(params, batch):
inputs, targets = batch
target_class = np.argmax(targets, axis=1)
predicted_class = np.argmax(predict(params, inputs), axis=1)
return np.mean(predicted_class == target_class)
if __name__ == "__main__":
layer_sizes = [784, 1024, 1024, 10]
param_scale = 0.1
step_size = 0.001
num_epochs = 10
batch_size = 128
train_images, train_labels, test_images, test_labels = datasets.mnist()
num_train = train_images.shape[0]
num_complete_batches, leftover = divmod(num_train, batch_size)
num_batches = num_complete_batches + bool(leftover)
# For this manual SPMD example, we get the number of devices (e.g. GPUs or
# TPU cores) that we're using, and use it to reshape data minibatches.
num_devices = xla_bridge.device_count()
def data_stream():
rng = npr.RandomState(0)
while True:
perm = rng.permutation(num_train)
for i in range(num_batches):
batch_idx = perm[i * batch_size:(i + 1) * batch_size]
images, labels = train_images[batch_idx], train_labels[batch_idx]
# For this SPMD example, we reshape the data batch dimension into two
# batch dimensions, one of which is mapped over parallel devices.
batch_size_per_device, ragged = divmod(images.shape[0], num_devices)
if ragged:
msg = "batch size must be divisible by device count, got {} and {}."
          raise ValueError(msg.format(images.shape[0], num_devices))
shape_prefix = (num_devices, batch_size_per_device)
images = images.reshape(shape_prefix + images.shape[1:])
labels = labels.reshape(shape_prefix + labels.shape[1:])
yield images, labels
batches = data_stream()
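  # `pmap` compiles `spmd_update` for parallel execution: each device receives
  # one slice of the leading (num_devices) axis of its arguments, and
  # `axis_name='batch'` names that mapped axis so `lax.psum` below can
  # all-reduce gradients across devices.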
@partial(pmap, axis_name='batch')
def spmd_update(params, batch):
grads = grad(loss)(params, batch)
# We compute the total gradients, summing across the device-mapped axis,
# using the `lax.psum` SPMD primitive, which does a fast all-reduce-sum.
grads = [(lax.psum(dw, 'batch'), lax.psum(db, 'batch')) for dw, db in grads]
return [(w - step_size * dw, b - step_size * db)
for (w, b), (dw, db) in zip(params, grads)]
# We replicate the parameters so that the constituent arrays have a leading
# dimension of size equal to the number of devices we're pmapping over.
init_params = init_random_params(param_scale, layer_sizes)
replicate_array = lambda x: onp.broadcast_to(x, (num_devices,) + x.shape)
replicated_params = tree_map(replicate_array, init_params)
for epoch in range(num_epochs):
start_time = time.time()
for _ in range(num_batches):
replicated_params = spmd_update(replicated_params, next(batches))
epoch_time = time.time() - start_time
# We evaluate using the jitted `accuracy` function (not using pmap) by
# grabbing just one of the replicated parameter values.
params = tree_map(lambda x: x[0], replicated_params)
train_acc = accuracy(params, (train_images, train_labels))
test_acc = accuracy(params, (test_images, test_labels))
print("Epoch {} in {:0.2f} sec".format(epoch, epoch_time))
print("Training set accuracy {}".format(train_acc))
print("Test set accuracy {}".format(test_acc))
|
jax-master
|
examples/spmd_mnist_classifier_fromscratch.py
|