python_code (stringlengths 0–1.02M) | repo_name (stringlengths 9–48) | file_path (stringlengths 5–114) |
---|---|---|
import torch
def check_error(desc, fn, *required_substrings):
try:
fn()
except Exception as e:
error_message = e.args[0]
print('=' * 80)
print(desc)
print('-' * 80)
print(error_message)
print('')
for sub in required_substrings:
assert sub in error_message
return
raise AssertionError("given function ({}) didn't raise an error".format(desc))
check_error(
'Wrong argument types',
lambda: torch.FloatStorage(object()),
'object')
check_error('Unknown keyword argument',
lambda: torch.FloatStorage(content=1234.),
'keyword')
check_error('Invalid types inside a sequence',
lambda: torch.FloatStorage(['a', 'b']),
'list', 'str')
check_error('Invalid size type',
lambda: torch.FloatStorage(1.5),
'float')
check_error('Invalid offset',
lambda: torch.FloatStorage(torch.FloatStorage(2), 4),
'2', '4')
check_error('Negative offset',
lambda: torch.FloatStorage(torch.FloatStorage(2), -1),
'2', '-1')
check_error('Invalid size',
lambda: torch.FloatStorage(torch.FloatStorage(3), 1, 5),
'2', '1', '5')
check_error('Negative size',
lambda: torch.FloatStorage(torch.FloatStorage(3), 1, -5),
'2', '1', '-5')
check_error('Invalid index type',
lambda: torch.FloatStorage(10)['first item'],
'str')
def assign():
torch.FloatStorage(10)[1:-1] = '1'
check_error('Invalid value type',
assign,
'str')
check_error('resize_ with invalid type',
lambda: torch.FloatStorage(10).resize_(1.5),
'float')
check_error('fill_ with invalid type',
lambda: torch.IntStorage(10).fill_('asdf'),
'str')
# TODO: frombuffer
|
pytorch-master
|
test/error_messages/storage.py
|
# flake8: noqa
import torch
torch.tensor([3], dtype='int32') # E: expected "Optional[dtype]"
torch.ones(3, dtype='int32') # E: No overload variant of "ones" matches argument types "int", "str"
torch.zeros(3, dtype='int32') # E: No overload variant of "zeros" matches argument types "int", "str"
|
pytorch-master
|
test/typing/fail/creation_ops.py
|
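In the fail-mode typing test above, the # E: comments are the error fragments the type checker is expected to report; the calls are rejected because dtype takes a torch.dtype, not a string. For contrast, a minimal sketch (not part of the checked-in test) of the passing form:

import torch

torch.tensor([3], dtype=torch.int32)   # a real torch.dtype satisfies the overload
torch.ones(3, dtype=torch.int32)
torch.zeros(3, dtype=torch.int32)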
# flake8: noqa
import torch
torch.set_rng_state([1, 2, 3]) # E: Argument 1 to "set_rng_state" has incompatible type "List[int]"; expected "Tensor"
|
pytorch-master
|
test/typing/fail/random.py
|
# flake8: noqa
import torch
# binary ops: <<, >>, |, &, ~, ^
a = torch.ones(3, dtype=torch.float64)
i = int()
i | a # E: Unsupported operand types
|
pytorch-master
|
test/typing/fail/bitwise_ops.py
|
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
if TEST_NUMPY:
import numpy as np
# From the docs, there are quite a few ways to create a tensor:
# https://pytorch.org/docs/stable/tensors.html
# torch.tensor()
torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
torch.tensor([0, 1])
torch.tensor([[0.11111, 0.222222, 0.3333333]],
dtype=torch.float64,
device=torch.device('cuda:0'))
torch.tensor(3.14159)
# torch.sparse_coo_tensor
i = torch.tensor([[0, 1, 1],
[2, 0, 2]])
v = torch.tensor([3, 4, 5], dtype=torch.float32)
torch.sparse_coo_tensor(i, v, [2, 4])
torch.sparse_coo_tensor(i, v)
torch.sparse_coo_tensor(i, v, [2, 4],
dtype=torch.float64,
device=torch.device('cuda:0'))
torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])
torch.sparse_coo_tensor(torch.empty([1, 0]),
torch.empty([0, 2]), [1, 2])
# torch.as_tensor
a = [1, 2, 3]
torch.as_tensor(a)
torch.as_tensor(a, device=torch.device('cuda'))
# torch.as_strided
x = torch.randn(3, 3)
torch.as_strided(x, (2, 2), (1, 2))
torch.as_strided(x, (2, 2), (1, 2), 1)
# torch.from_numpy
if TEST_NUMPY:
torch.from_numpy(np.array([1, 2, 3]))
# torch.zeros/zeros_like
torch.zeros(2, 3)
torch.zeros((2, 3))
torch.zeros([2, 3])
torch.zeros(5)
torch.zeros_like(torch.empty(2, 3))
# torch.ones/ones_like
torch.ones(2, 3)
torch.ones((2, 3))
torch.ones([2, 3])
torch.ones(5)
torch.ones_like(torch.empty(2, 3))
# torch.arange
torch.arange(5)
torch.arange(1, 4)
torch.arange(1, 2.5, 0.5)
# torch.range
torch.range(1, 4)
torch.range(1, 4, 0.5)
# torch.linspace
torch.linspace(3, 10, steps=5)
torch.linspace(-10, 10, steps=5)
torch.linspace(start=-10, end=10, steps=5)
torch.linspace(start=-10, end=10, steps=1)
# torch.logspace
torch.logspace(start=-10, end=10, steps=5)
torch.logspace(start=0.1, end=1.0, steps=5)
torch.logspace(start=0.1, end=1.0, steps=1)
torch.logspace(start=2, end=2, steps=1, base=2)
# torch.eye
torch.eye(3)
# torch.empty/empty_like/empty_strided
torch.empty(2, 3)
torch.empty((2, 3))
torch.empty([2, 3])
torch.empty_like(torch.empty(2, 3), dtype=torch.int64)
torch.empty_strided((2, 3), (1, 2))
# torch.full/full_like
torch.full((2, 3), 3.141592)
torch.full_like(torch.full((2, 3), 3.141592), 2.71828)
# torch.quantize_per_tensor
torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)
# torch.quantize_per_channel
x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
quant = torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
# torch.dequantize
torch.dequantize(x)
# torch.complex
real = torch.tensor([1, 2], dtype=torch.float32)
imag = torch.tensor([3, 4], dtype=torch.float32)
torch.complex(real, imag)
# torch.polar
abs = torch.tensor([1, 2], dtype=torch.float64)
pi = torch.acos(torch.zeros(1)).item() * 2
angle = torch.tensor([pi / 2, 5 * pi / 4], dtype=torch.float64)
torch.polar(abs, angle)
# torch.heaviside
inp = torch.tensor([-1.5, 0, 2.0])
values = torch.tensor([0.5])
torch.heaviside(inp, values)
|
pytorch-master
|
test/typing/pass/creation_ops.py
|
# flake8: noqa
import torch
import math
a = torch.randn(4)
b = torch.randn(4)
t = torch.tensor([-1, -2, 3], dtype=torch.int8)
# abs/absolute
torch.abs(torch.tensor([-1, -2, 3]))
torch.absolute(torch.tensor([-1, -2, 3]))
# acos/arccos
torch.acos(a)
torch.arccos(a)
# acosh/arccosh
torch.acosh(a.uniform_(1, 2))
# add
torch.add(a, 20)
torch.add(a, torch.randn(4, 1), alpha=10)
# addcdiv
torch.addcdiv(torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1)
# addcmul
torch.addcmul(torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1)
# angle
torch.angle(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))*180/3.14159
# asin/arcsin
torch.asin(a)
torch.arcsin(a)
# asinh/arcsinh
torch.asinh(a)
torch.arcsinh(a)
# atan/arctan
torch.atan(a)
torch.arctan(a)
# atanh/arctanh
torch.atanh(a.uniform_(-1, 1))
torch.arctanh(a.uniform_(-1, 1))
# atan2
torch.atan2(a, a)
# bitwise_not
torch.bitwise_not(t)
# bitwise_and
torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8))
torch.bitwise_and(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
# bitwise_or
torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8))
torch.bitwise_or(torch.tensor([True, True, False]), torch.tensor([False, True, False]))
# bitwise_xor
torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8))
# ceil
torch.ceil(a)
# clamp/clip
torch.clamp(a, min=-0.5, max=0.5)
torch.clamp(a, min=0.5)
torch.clamp(a, max=0.5)
torch.clip(a, min=-0.5, max=0.5)
# conj
torch.conj(torch.tensor([-1 + 1j, -2 + 2j, 3 - 3j]))
# copysign
torch.copysign(a, 1)
torch.copysign(a, b)
# cos
torch.cos(a)
# cosh
torch.cosh(a)
# deg2rad
torch.deg2rad(torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]]))
# div/divide/true_divide
x = torch.tensor([ 0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
torch.div(x, 0.5)
p = torch.tensor([[-0.3711, -1.9353, -0.4605, -0.2917],
[ 0.1815, -1.0111, 0.9805, -1.5923],
[ 0.1062, 1.4581, 0.7759, -1.2344],
[-0.1830, -0.0313, 1.1908, -1.4757]])
q = torch.tensor([ 0.8032, 0.2930, -0.8113, -0.2308])
torch.div(p, q)
torch.divide(p, q, rounding_mode='trunc')
torch.divide(p, q, rounding_mode='floor')
# digamma
torch.digamma(torch.tensor([1, 0.5]))
# erf
torch.erf(torch.tensor([0, -1., 10.]))
# erfc
torch.erfc(torch.tensor([0, -1., 10.]))
# erfinv
torch.erfinv(torch.tensor([0, 0.5, -1.]))
# exp
torch.exp(torch.tensor([0, math.log(2.)]))
# exp2
torch.exp2(torch.tensor([0, math.log2(2.), 3, 4]))
# expm1
torch.expm1(torch.tensor([0, math.log(2.)]))
# fake_quantize_per_channel_affine
x = torch.randn(2, 2, 2)
scales = (torch.randn(2) + 1) * 0.05
zero_points = torch.zeros(2).to(torch.long)
torch.fake_quantize_per_channel_affine(x, scales, zero_points, 1, 0, 255)
# fake_quantize_per_tensor_affine
torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255)
# float_power
torch.float_power(torch.randint(10, (4,)), 2)
torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4, -5]))
# floor
torch.floor(a)
# floor_divide
torch.floor_divide(torch.tensor([4., 3.]), torch.tensor([2., 2.]))
torch.floor_divide(torch.tensor([4., 3.]), 1.4)
# fmod
torch.fmod(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5)
# frac
torch.frac(torch.tensor([1, 2.5, -3.2]))
# imag
torch.randn(4, dtype=torch.cfloat).imag
# ldexp
torch.ldexp(torch.tensor([1.]), torch.tensor([1]))
torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4]))
# lerp
start = torch.arange(1., 5.)
end = torch.empty(4).fill_(10)
torch.lerp(start, end, 0.5)
torch.lerp(start, end, torch.full_like(start, 0.5))
# lgamma
torch.lgamma(torch.arange(0.5, 2, 0.5))
# log
torch.log(torch.arange(5) + 10)
# log10
torch.log10(torch.rand(5))
# log1p
torch.log1p(torch.randn(5))
# log2
torch.log2(torch.rand(5))
# logaddexp
torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
torch.logaddexp(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
torch.logaddexp(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
# logaddexp2
torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1.0, -2, -3]))
torch.logaddexp2(torch.tensor([-100.0, -200, -300]), torch.tensor([-1.0, -2, -3]))
torch.logaddexp2(torch.tensor([1.0, 2000, 30000]), torch.tensor([-1.0, -2, -3]))
# logical_and
torch.logical_and(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
torch.logical_and(r, s)
torch.logical_and(r.double(), s.double())
torch.logical_and(r.double(), s)
torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool))
# logical_not
torch.logical_not(torch.tensor([True, False]))
torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8))
torch.logical_not(torch.tensor([0., 1.5, -10.], dtype=torch.double))
torch.logical_not(torch.tensor([0., 1., -10.], dtype=torch.double), out=torch.empty(3, dtype=torch.int16))
# logical_or
torch.logical_or(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
torch.logical_or(r, s)
torch.logical_or(r.double(), s.double())
torch.logical_or(r.double(), s)
torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool))
# logical_xor
torch.logical_xor(torch.tensor([True, False, True]), torch.tensor([True, False, False]))
torch.logical_xor(r, s)
torch.logical_xor(r.double(), s.double())
torch.logical_xor(r.double(), s)
torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool))
# logit
torch.logit(torch.rand(5), eps=1e-6)
# hypot
torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0]))
# i0
torch.i0(torch.arange(5, dtype=torch.float32))
# igamma/igammac
a1 = torch.tensor([4.0])
a2 = torch.tensor([3.0, 4.0, 5.0])
torch.igamma(a1, a2)
torch.igammac(a1, a2)
# mul/multiply
torch.mul(torch.randn(3), 100)
torch.multiply(torch.randn(4, 1), torch.randn(1, 4))
# mvlgamma
torch.mvlgamma(torch.empty(2, 3).uniform_(1, 2), 2)
# nan_to_num
w = torch.tensor([float('nan'), float('inf'), -float('inf'), 3.14])
torch.nan_to_num(w)
torch.nan_to_num(w, nan=2.0)
torch.nan_to_num(w, nan=2.0, posinf=1.0)
# neg/negative
torch.neg(torch.randn(5))
# nextafter
eps = torch.finfo(torch.float32).eps
torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps])
# polygamma
torch.polygamma(1, torch.tensor([1, 0.5]))
torch.polygamma(2, torch.tensor([1, 0.5]))
torch.polygamma(3, torch.tensor([1, 0.5]))
torch.polygamma(4, torch.tensor([1, 0.5]))
# pow
torch.pow(a, 2)
torch.pow(torch.arange(1., 5.), torch.arange(1., 5.))
# rad2deg
torch.rad2deg(torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]]))
# real
torch.randn(4, dtype=torch.cfloat).real
# reciprocal
torch.reciprocal(a)
# remainder
torch.remainder(torch.tensor([-3., -2, -1, 1, 2, 3]), 2)
torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5)
# round
torch.round(a)
# rsqrt
torch.rsqrt(a)
# sigmoid
torch.sigmoid(a)
# sign
torch.sign(torch.tensor([0.7, -1.2, 0., 2.3]))
# sgn
torch.tensor([3+4j, 7-24j, 0, 1+2j]).sgn()
# signbit
torch.signbit(torch.tensor([0.7, -1.2, 0., 2.3]))
# sin
torch.sin(a)
# sinc
torch.sinc(a)
# sinh
torch.sinh(a)
# sqrt
torch.sqrt(a)
# square
torch.square(a)
# sub/subtract
torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2)
# tan
torch.tan(a)
# tanh
torch.tanh(a)
# trunc/fix
torch.trunc(a)
# xlogy
f = torch.zeros(5,)
g = torch.tensor([-1, 0, 1, float('inf'), float('nan')])
torch.xlogy(f, g)
f = torch.tensor([1, 2, 3])
g = torch.tensor([3, 2, 1])
torch.xlogy(f, g)
torch.xlogy(f, 4)
torch.xlogy(2, g)
|
pytorch-master
|
test/typing/pass/math_ops.py
|
import torch
avg_pool1 = torch.nn.AdaptiveAvgPool2d((1, None))
reveal_type(avg_pool1) # E: {AdaptiveAvgPool2d}
avg_pool2 = torch.nn.AdaptiveAvgPool2d((None, 1))
reveal_type(avg_pool2) # E: {AdaptiveAvgPool2d}
max_pool1 = torch.nn.AdaptiveMaxPool2d((1, None))
reveal_type(max_pool1) # E: {AdaptiveMaxPool2d}
max_pool2 = torch.nn.AdaptiveMaxPool2d((None, 1))
reveal_type(max_pool2) # E: {AdaptiveMaxPool2d}
|
pytorch-master
|
test/typing/reveal/opt_size.py
|
import torch
t = torch.randn(2, 3)
reveal_type(t) # E: {Tensor}
u = torch.randn(2, 3)
reveal_type(u) # E: {Tensor}
t.copy_(u)
reveal_type(t) # E: {Tensor}
r = (t == u).all()
reveal_type(r) # E: {Tensor}
|
pytorch-master
|
test/typing/reveal/tensor_copy.py
|
import torch
input = []
input.append(torch.tensor([1.0, 2.0, 3.0, 4.0]))
input.append(torch.tensor([[1.0, 2.0, 3.0, 4.0]]))
input.append(torch.tensor([[[1.0, 2.0, 3.0, 4.0]]]))
reveal_type(input[0].shape[0]) # E: int
reveal_type(input[1].shape[1]) # E: int
reveal_type(input[2].shape[2]) # E: int
|
pytorch-master
|
test/typing/reveal/size.py
|
import torch
t = torch.tensor([[3.0, 1.5], [2.0, 1.5]])
t_sort = t.sort()
t_sort[0][0, 0] == 1.5 # noqa: B015
t_sort.indices[0, 0] == 1 # noqa: B015
t_sort.values[0, 0] == 1.5 # noqa: B015
reveal_type(t_sort) # E: Tuple[{Tensor}, {Tensor}, fallback=torch.return_types.sort]
t_qr = torch.linalg.qr(t)
t_qr[0].shape == [2, 2] # noqa: B015
t_qr.Q.shape == [2, 2] # noqa: B015
reveal_type(t_qr) # E: Tuple[{Tensor}, {Tensor}, fallback=torch.return_types.qr]
|
pytorch-master
|
test/typing/reveal/namedtuple.py
|
import torch
def foo(opt: torch.optim.Optimizer) -> None:
opt.zero_grad()
opt_adagrad = torch.optim.Adagrad([torch.tensor(0.0)])
reveal_type(opt_adagrad) # E: {Adagrad}
foo(opt_adagrad)
opt_adam = torch.optim.Adam([torch.tensor(0.0)], lr=1e-2, eps=1e-6)
reveal_type(opt_adam) # E: {Adam}
foo(opt_adam)
|
pytorch-master
|
test/typing/reveal/torch_optim.py
|
# flake8: noqa
import torch
from torch.testing._internal.common_utils import TEST_NUMPY
if TEST_NUMPY:
import numpy as np
# From the docs, there are quite a few ways to create a tensor:
# https://pytorch.org/docs/stable/tensors.html
# torch.tensor()
reveal_type(torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])) # E: {Tensor}
reveal_type(torch.tensor([0, 1])) # E: {Tensor}
reveal_type(torch.tensor([[0.11111, 0.222222, 0.3333333]],
dtype=torch.float64,
device=torch.device('cuda:0'))) # E: {Tensor}
reveal_type(torch.tensor(3.14159)) # E: {Tensor}
# torch.sparse_coo_tensor
i = torch.tensor([[0, 1, 1],
[2, 0, 2]]) # E: {Tensor}
v = torch.tensor([3, 4, 5], dtype=torch.float32) # E: {Tensor}
reveal_type(torch.sparse_coo_tensor(i, v, [2, 4])) # E: {Tensor}
reveal_type(torch.sparse_coo_tensor(i, v)) # E: {Tensor}
reveal_type(torch.sparse_coo_tensor(i, v, [2, 4],
dtype=torch.float64,
device=torch.device('cuda:0'))) # E: {Tensor}
reveal_type(torch.sparse_coo_tensor(torch.empty([1, 0]), [], [1])) # E: {Tensor}
reveal_type(torch.sparse_coo_tensor(torch.empty([1, 0]),
torch.empty([0, 2]), [1, 2])) # E: {Tensor}
# torch.as_tensor
if TEST_NUMPY:
a = np.array([1, 2, 3])
reveal_type(torch.as_tensor(a)) # E: {Tensor}
reveal_type(torch.as_tensor(a, device=torch.device('cuda'))) # E: {Tensor}
# torch.as_strided
x = torch.randn(3, 3)
reveal_type(torch.as_strided(x, (2, 2), (1, 2))) # E: {Tensor}
reveal_type(torch.as_strided(x, (2, 2), (1, 2), 1)) # E: {Tensor}
# torch.from_numpy
if TEST_NUMPY:
a = np.array([1, 2, 3])
reveal_type(torch.from_numpy(a)) # E: {Tensor}
# torch.zeros/zeros_like
reveal_type(torch.zeros(2, 3)) # E: {Tensor}
reveal_type(torch.zeros(5)) # E: {Tensor}
reveal_type(torch.zeros_like(torch.empty(2, 3))) # E: {Tensor}
# torch.ones/ones_like
reveal_type(torch.ones(2, 3)) # E: {Tensor}
reveal_type(torch.ones(5)) # E: {Tensor}
reveal_type(torch.ones_like(torch.empty(2, 3))) # E: {Tensor}
# torch.arange
reveal_type(torch.arange(5)) # E: {Tensor}
reveal_type(torch.arange(1, 4)) # E: {Tensor}
reveal_type(torch.arange(1, 2.5, 0.5)) # E: {Tensor}
# torch.range
reveal_type(torch.range(1, 4)) # E: {Tensor}
reveal_type(torch.range(1, 4, 0.5)) # E: {Tensor}
# torch.linspace
reveal_type(torch.linspace(3, 10, steps=5)) # E: {Tensor}
reveal_type(torch.linspace(-10, 10, steps=5)) # E: {Tensor}
reveal_type(torch.linspace(start=-10, end=10, steps=5)) # E: {Tensor}
reveal_type(torch.linspace(start=-10, end=10, steps=1)) # E: {Tensor}
# torch.logspace
reveal_type(torch.logspace(start=-10, end=10, steps=5)) # E: {Tensor}
reveal_type(torch.logspace(start=0.1, end=1.0, steps=5)) # E: {Tensor}
reveal_type(torch.logspace(start=0.1, end=1.0, steps=1)) # E: {Tensor}
reveal_type(torch.logspace(start=2, end=2, steps=1, base=2)) # E: {Tensor}
# torch.eye
reveal_type(torch.eye(3)) # E: {Tensor}
# torch.empty/empty_like/empty_strided
reveal_type(torch.empty(2, 3)) # E: {Tensor}
reveal_type(torch.empty_like(torch.empty(2, 3), dtype=torch.int64)) # E: {Tensor}
reveal_type(torch.empty_strided((2, 3), (1, 2))) # E: {Tensor}
# torch.full/full_like
reveal_type(torch.full((2, 3), 3.141592)) # E: {Tensor}
reveal_type(torch.full_like(torch.full((2, 3), 3.141592), 2.71828)) # E: {Tensor}
# torch.quantize_per_tensor
reveal_type(torch.quantize_per_tensor(torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8)) # E: {Tensor}
# torch.quantize_per_channel
x = torch.tensor([[-1.0, 0.0], [1.0, 2.0]])
quant = torch.quantize_per_channel(x, torch.tensor([0.1, 0.01]), torch.tensor([10, 0]), 0, torch.quint8)
reveal_type(x) # E: {Tensor}
# torch.dequantize
reveal_type(torch.dequantize(x)) # E: {Tensor}
# torch.complex
real = torch.tensor([1, 2], dtype=torch.float32)
imag = torch.tensor([3, 4], dtype=torch.float32)
reveal_type(torch.complex(real, imag)) # E: {Tensor}
# torch.polar
abs = torch.tensor([1, 2], dtype=torch.float64)
pi = torch.acos(torch.zeros(1)).item() * 2
angle = torch.tensor([pi / 2, 5 * pi / 4], dtype=torch.float64)
reveal_type(torch.polar(abs, angle)) # E: {Tensor}
# torch.heaviside
inp = torch.tensor([-1.5, 0, 2.0])
values = torch.tensor([0.5])
reveal_type(torch.heaviside(inp, values)) # E: {Tensor}
|
pytorch-master
|
test/typing/reveal/tensor_constructors.py
|
# flake8: noqa
import torch
# seed
reveal_type(torch.seed()) # E: int
# manual_seed
reveal_type(torch.manual_seed(3)) # E: torch._C.Generator
# initial_seed
reveal_type(torch.initial_seed()) # E: int
# get_rng_state
reveal_type(torch.get_rng_state()) # E: {Tensor}
# bernoulli
reveal_type(torch.bernoulli(torch.empty(3, 3).uniform_(0, 1))) # E: {Tensor}
# multinomial
weights = torch.tensor([0, 10, 3, 0], dtype=torch.float)
reveal_type(torch.multinomial(weights, 2)) # E: {Tensor}
# normal
reveal_type(torch.normal(2, 3, size=(1, 4))) # E: {Tensor}
# poisson
reveal_type(torch.poisson(torch.rand(4, 4) * 5)) # E: {Tensor}
# rand
reveal_type(torch.rand(4)) # E: {Tensor}
reveal_type(torch.rand(2, 3)) # E: {Tensor}
# rand_like
a = torch.rand(4)
reveal_type(torch.rand_like(a)) # E: {Tensor}
# randint
reveal_type(torch.randint(3, 5, (3,))) # E: {Tensor}
reveal_type(torch.randint(10, (2, 2))) # E: {Tensor}
reveal_type(torch.randint(3, 10, (2, 2))) # E: {Tensor}
# randint_like
b = torch.randint(3, 50, (3, 4))
reveal_type(torch.randint_like(b, 3, 10)) # E: {Tensor}
# randn
reveal_type(torch.randn(4)) # E: {Tensor}
reveal_type(torch.randn(2, 3)) # E: {Tensor}
# randn_like
c = torch.randn(2, 3)
reveal_type(torch.randn_like(c)) # E: {Tensor}
# randperm
reveal_type(torch.randperm(4)) # E: {Tensor}
# soboleng
d = torch.quasirandom.SobolEngine(dimension=5)
reveal_type(d) # E: torch.quasirandom.SobolEngine
reveal_type(d.draw()) # E: {Tensor}
|
pytorch-master
|
test/typing/reveal/tensor_sampling.py
|
import torch
# ModuleList with elements of type Module
class FooModule(torch.nn.Module):
pass
class BarModule(torch.nn.Module):
pass
ml: torch.nn.ModuleList = torch.nn.ModuleList([FooModule(), BarModule()])
ml[0].children() == [] # noqa: B015
reveal_type(ml) # E: {ModuleList}
|
pytorch-master
|
test/typing/reveal/module_list.py
|
# Owner(s): ["oncall: mobile"]
import torch
import torch.utils.bundled_inputs
import io
from torch.jit.mobile import _load_for_lite_interpreter
from torch.testing._internal.common_utils import TestCase, run_tests
from pathlib import Path
from itertools import product
pytorch_test_dir = Path(__file__).resolve().parents[1]
class TestLiteScriptModule(TestCase):
def _save_load_mobile_module(self, script_module: torch.jit.ScriptModule):
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter(_save_mobile_debug_info=True))
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
return mobile_module
def _try_fn(self, fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
return e
def test_versioned_div_tensor(self):
def div_tensor_0_3(self, other):
if self.is_floating_point() or other.is_floating_point():
return self.true_divide(other)
return self.divide(other, rounding_mode='trunc')
model_path = pytorch_test_dir / "cpp" / "jit" / "upgrader_models" / "test_versioned_div_tensor_v2.ptl"
mobile_module_v2 = _load_for_lite_interpreter(str(model_path))
jit_module_v2 = torch.jit.load(str(model_path))
current_mobile_module = self._save_load_mobile_module(jit_module_v2)
vals = (2., 3., 2, 3)
for val_a, val_b in product(vals, vals):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
def _helper(m, fn):
m_results = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_results, Exception):
self.assertTrue(isinstance(fn_result, Exception))
else:
for result in m_results:
print("result: ", result)
print("fn_result: ", fn_result)
print(result == fn_result)
self.assertTrue(result.eq(fn_result))
# self.assertEqual(result, fn_result)
# old operator should produce the same result as applying upgrader of torch.div op
# _helper(mobile_module_v2, div_tensor_0_3)
# latest operator should produce the same result as applying torch.div op
# _helper(current_mobile_module, torch.div)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/mobile/test_upgraders.py
|
# Owner(s): ["oncall: mobile"]
import fnmatch
import io
import shutil
import tempfile
import torch
import torch.utils.show_pickle
# from torch.utils.mobile_optimizer import optimize_for_mobile
from torch.jit.mobile import (
_load_for_lite_interpreter,
_get_mobile_model_contained_types,
_get_model_bytecode_version,
_get_model_ops_and_info,
_backport_for_mobile_to_buffer,
_backport_for_mobile)
from torch.testing._internal.common_utils import TestCase, run_tests
from pathlib import Path
pytorch_test_dir = Path(__file__).resolve().parents[1]
# script_module_v4.ptl and script_module_v5.ptl source code
# class TestModule(torch.nn.Module):
# def __init__(self, v):
# super().__init__()
# self.x = v
# def forward(self, y: int):
# increment = torch.ones([2, 4], dtype=torch.float64)
# return self.x + y + increment
# output_model_path = Path(tmpdirname, "script_module_v5.ptl")
# script_module = torch.jit.script(TestModule(1))
# optimized_scripted_module = optimize_for_mobile(script_module)
# exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter(
# str(output_model_path))
SCRIPT_MODULE_V4_BYTECODE_PKL = '''
(4,
('__torch__.*.TestModule.forward',
(('instructions',
(('STOREN', 1, 2),
('DROPR', 1, 0),
('LOADC', 0, 0),
('LOADC', 1, 0),
('MOVE', 2, 0),
('OP', 0, 0),
('LOADC', 1, 0),
('OP', 1, 0),
('RET', 0, 0))),
('operators', (('aten::add', 'int'), ('aten::add', 'Scalar'))),
('constants',
(torch._utils._rebuild_tensor_v2(pers.obj(('storage', torch.DoubleStorage, '0', 'cpu', 8),),
0,
(2, 4),
(4, 1),
False,
collections.OrderedDict()),
1)),
('types', ()),
('register_size', 2)),
(('arguments',
((('name', 'self'),
('type', '__torch__.*.TestModule'),
('default_value', None)),
(('name', 'y'), ('type', 'int'), ('default_value', None)))),
('returns',
((('name', ''), ('type', 'Tensor'), ('default_value', None)),)))))
'''
SCRIPT_MODULE_V5_BYTECODE_PKL = '''
(5,
('__torch__.*.TestModule.forward',
(('instructions',
(('STOREN', 1, 2),
('DROPR', 1, 0),
('LOADC', 0, 0),
('LOADC', 1, 0),
('MOVE', 2, 0),
('OP', 0, 0),
('LOADC', 1, 0),
('OP', 1, 0),
('RET', 0, 0))),
('operators', (('aten::add', 'int'), ('aten::add', 'Scalar'))),
('constants',
(torch._utils._rebuild_tensor_v2(pers.obj(('storage', torch.DoubleStorage, 'constants/0', 'cpu', 8),),
0,
(2, 4),
(4, 1),
False,
collections.OrderedDict()),
1)),
('types', ()),
('register_size', 2)),
(('arguments',
((('name', 'self'),
('type', '__torch__.*.TestModule'),
('default_value', None)),
(('name', 'y'), ('type', 'int'), ('default_value', None)))),
('returns',
((('name', ''), ('type', 'Tensor'), ('default_value', None)),)))))
'''
SCRIPT_MODULE_V6_BYTECODE_PKL = '''
(6,
('__torch__.*.TestModule.forward',
(('instructions',
(('STOREN', 1, 2),
('DROPR', 1, 0),
('LOADC', 0, 0),
('LOADC', 1, 0),
('MOVE', 2, 0),
('OP', 0, 0),
('OP', 1, 0),
('RET', 0, 0))),
('operators', (('aten::add', 'int', 2), ('aten::add', 'Scalar', 2))),
('constants',
(torch._utils._rebuild_tensor_v2(pers.obj(('storage', torch.DoubleStorage, '0', 'cpu', 8),),
0,
(2, 4),
(4, 1),
False,
collections.OrderedDict()),
1)),
('types', ()),
('register_size', 2)),
(('arguments',
((('name', 'self'),
('type', '__torch__.*.TestModule'),
('default_value', None)),
(('name', 'y'), ('type', 'int'), ('default_value', None)))),
('returns',
((('name', ''), ('type', 'Tensor'), ('default_value', None)),)))))
'''
SCRIPT_MODULE_BYTECODE_PKL = {
4: {
"bytecode_pkl": SCRIPT_MODULE_V4_BYTECODE_PKL,
"model_name": "script_module_v4.ptl"},
}
# The minimum version a model can be backported to
# Need to be updated when a bytecode version is completely retired
MINIMUM_TO_VERSION = 4
class testVariousModelVersions(TestCase):
def test_get_model_bytecode_version(self):
def check_model_version(model_path, expect_version):
actual_version = _get_model_bytecode_version(model_path)
assert(actual_version == expect_version)
for version, model_info in SCRIPT_MODULE_BYTECODE_PKL.items():
model_path = pytorch_test_dir / "cpp" / "jit" / model_info["model_name"]
check_model_version(model_path, version)
def test_bytecode_values_for_all_backport_functions(self):
# Find the maximum version of the checked-in models, backport step by step down to the
# minimum supported version, and compare the bytecode.pkl content at each step.
# It can't be merged into the test `test_all_backport_functions`, because optimization is
# dynamic and the content might change when the optimization function changes. This test
# focuses on bytecode.pkl content validation. The validation is not a byte-for-byte check
# but wildcard (fnmatch) matching, so content that varies can be skipped with a wildcard
# (a standalone sketch of this matching step follows this file entry).
maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
current_from_version = maximum_checked_in_model_version
with tempfile.TemporaryDirectory() as tmpdirname:
while current_from_version > MINIMUM_TO_VERSION:
# Load model v5 and run forward method
model_name = SCRIPT_MODULE_BYTECODE_PKL[current_from_version]["model_name"]
input_model_path = pytorch_test_dir / "cpp" / "jit" / model_name
# A temporary model file will be exported to this path and run through the bytecode.pkl
# content check.
tmp_output_model_path_backport = Path(tmpdirname, "tmp_script_module_backport.ptl")
current_to_version = current_from_version - 1
backport_success = _backport_for_mobile(input_model_path, tmp_output_model_path_backport, current_to_version)
assert(backport_success)
expect_bytecode_pkl = SCRIPT_MODULE_BYTECODE_PKL[current_to_version]["bytecode_pkl"]
buf = io.StringIO()
torch.utils.show_pickle.main(
["", tmpdirname + "/" + tmp_output_model_path_backport.name + "@*/bytecode.pkl"],
output_stream=buf)
output = buf.getvalue()
actual_result_clean = "".join(output.split())
expect_result_clean = "".join(expect_bytecode_pkl.split())
isMatch = fnmatch.fnmatch(actual_result_clean, expect_result_clean)
assert(isMatch)
current_from_version -= 1
shutil.rmtree(tmpdirname)
# Please run this test manually when working on backport.
# This test passes in OSS, but fails internally, likely due to a missing step in the build.
# def test_all_backport_functions(self):
# # Backport from the latest bytecode version to the minimum support version
# # Load, run the backport model, and check version
# class TestModule(torch.nn.Module):
# def __init__(self, v):
# super().__init__()
# self.x = v
# def forward(self, y: int):
# increment = torch.ones([2, 4], dtype=torch.float64)
# return self.x + y + increment
# module_input = 1
# expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
# # temporary input model file and output model file will be exported in the temporary folder
# with tempfile.TemporaryDirectory() as tmpdirname:
# tmp_input_model_path = Path(tmpdirname, "tmp_script_module.ptl")
# script_module = torch.jit.script(TestModule(1))
# optimized_scripted_module = optimize_for_mobile(script_module)
# exported_optimized_scripted_module = optimized_scripted_module._save_for_lite_interpreter(str(tmp_input_model_path))
# current_from_version = _get_model_bytecode_version(tmp_input_model_path)
# current_to_version = current_from_version - 1
# tmp_output_model_path = Path(tmpdirname, "tmp_script_module_backport.ptl")
# while current_to_version >= MINIMUM_TO_VERSION:
# # Backport the latest model to `to_version` to a tmp file "tmp_script_module_backport"
# backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, current_to_version)
# assert(backport_success)
# backport_version = _get_model_bytecode_version(tmp_output_model_path)
# assert(backport_version == current_to_version)
# # Load model and run forward method
# mobile_module = _load_for_lite_interpreter(str(tmp_input_model_path))
# mobile_module_result = mobile_module(module_input)
# torch.testing.assert_close(mobile_module_result, expected_mobile_module_result)
# current_to_version -= 1
# # Check backport failure case
# backport_success = _backport_for_mobile(tmp_input_model_path, tmp_output_model_path, MINIMUM_TO_VERSION - 1)
# assert(not backport_success)
# # need to clean the folder before it closes, otherwise will run into git not clean error
# shutil.rmtree(tmpdirname)
# Check just the _backport_for_mobile (file-to-file) mechanism but not the function implementations
def test_backport_bytecode_from_file_to_file(self):
maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
script_module_v5_path = pytorch_test_dir / "cpp" / "jit" / SCRIPT_MODULE_BYTECODE_PKL[
maximum_checked_in_model_version]["model_name"]
if (maximum_checked_in_model_version > MINIMUM_TO_VERSION):
with tempfile.TemporaryDirectory() as tmpdirname:
tmp_backport_model_path = Path(tmpdirname, "tmp_script_module_v5_backported_to_v4.ptl")
# backport from file
success = _backport_for_mobile(
script_module_v5_path,
tmp_backport_model_path,
maximum_checked_in_model_version - 1)
assert(success)
buf = io.StringIO()
torch.utils.show_pickle.main(
["", tmpdirname + "/" + tmp_backport_model_path.name + "@*/bytecode.pkl"],
output_stream=buf)
output = buf.getvalue()
expected_result = SCRIPT_MODULE_V4_BYTECODE_PKL
actual_result_clean = "".join(output.split())
expect_result_clean = "".join(expected_result.split())
isMatch = fnmatch.fnmatch(actual_result_clean, expect_result_clean)
assert(isMatch)
# Load model v4 and run forward method
mobile_module = _load_for_lite_interpreter(str(tmp_backport_model_path))
module_input = 1
mobile_module_result = mobile_module(module_input)
expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
torch.testing.assert_close(mobile_module_result, expected_mobile_module_result)
shutil.rmtree(tmpdirname)
# Check just the _backport_for_mobile_to_buffer mechanism but not the function implementations
def test_backport_bytecode_from_file_to_buffer(self):
maximum_checked_in_model_version = max(SCRIPT_MODULE_BYTECODE_PKL.keys())
script_module_v5_path = pytorch_test_dir / "cpp" / "jit" / SCRIPT_MODULE_BYTECODE_PKL[
maximum_checked_in_model_version]["model_name"]
if (maximum_checked_in_model_version > MINIMUM_TO_VERSION):
# Backport model to v4
script_module_v4_buffer = _backport_for_mobile_to_buffer(
script_module_v5_path, maximum_checked_in_model_version - 1)
buf = io.StringIO()
# Check version of the model v4 from backport
bytesio = io.BytesIO(script_module_v4_buffer)
backport_version = _get_model_bytecode_version(bytesio)
assert(backport_version == maximum_checked_in_model_version - 1)
# Load model v4 from backport and run forward method
bytesio = io.BytesIO(script_module_v4_buffer)
mobile_module = _load_for_lite_interpreter(bytesio)
module_input = 1
mobile_module_result = mobile_module(module_input)
expected_mobile_module_result = 3 * torch.ones([2, 4], dtype=torch.float64)
torch.testing.assert_close(mobile_module_result, expected_mobile_module_result)
def test_get_model_ops_and_info(self):
# TODO update this to be more in the style of the above tests after a backport from 6 -> 5 exists
script_module_v6 = pytorch_test_dir / "cpp" / "jit" / "script_module_v6.ptl"
ops_v6 = _get_model_ops_and_info(script_module_v6)
assert(ops_v6["aten::add.int"].num_schema_args == 2)
assert(ops_v6["aten::add.Scalar"].num_schema_args == 2)
def test_get_mobile_model_contained_types(self):
class MyTestModule(torch.nn.Module):
def __init__(self):
super(MyTestModule, self).__init__()
def forward(self, x):
return x + 10
sample_input = torch.tensor([1])
script_module = torch.jit.script(MyTestModule())
script_module_result = script_module(sample_input)
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
type_list = _get_mobile_model_contained_types(buffer)
assert(len(type_list) >= 0)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/mobile/test_bytecode.py
|
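The backport tests in test_bytecode.py above validate bytecode.pkl dumps by stripping all whitespace and applying fnmatch wildcard matching, so a pattern such as __torch__.*.TestModule.forward tolerates mangled module names. A standalone sketch of that matching step, using hypothetical dump strings:

import fnmatch

def bytecode_matches(actual_dump: str, expected_pattern: str) -> bool:
    # Remove all whitespace from both sides, then compare with fnmatch so that
    # '*' in the expected pattern can absorb variable content such as mangled names.
    actual_clean = "".join(actual_dump.split())
    expected_clean = "".join(expected_pattern.split())
    return fnmatch.fnmatch(actual_clean, expected_clean)

# Hypothetical example: the mangle suffix differs, but the wildcard absorbs it.
assert bytecode_matches("('__torch__.___torch_mangle_7.TestModule.forward', 4)",
                        "('__torch__.*.TestModule.forward', 4)")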
# Owner(s): ["oncall: mobile"]
import torch
import torch.nn as nn
import torch.nn.quantized as nnq
import torch.utils.bundled_inputs
from torch.ao.quantization import (
default_qconfig,
float_qparams_weight_only_qconfig,
)
# graph mode quantization based on fx
from torch.ao.quantization.quantize_fx import (
prepare_fx,
convert_fx,
)
from torch.testing._internal.common_utils import run_tests
from torch.testing._internal.common_quantization import NodeSpec as ns
from torch.testing._internal.common_quantization import (
QuantizationLiteTestCase,
LinearModelWithSubmodule,
)
class TestLiteFuseFx(QuantizationLiteTestCase):
# Tests from:
# ./caffe2/test/quantization/fx/test_quantize_fx.py
def test_embedding(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.emb = torch.nn.Embedding(num_embeddings=10, embedding_dim=12)
def forward(self, indices):
return self.emb(indices)
model = M().eval()
indices = torch.randint(low=0, high=10, size=(20,))
quantized_node = ns.call_module(nnq.Embedding)
configs = [
(float_qparams_weight_only_qconfig, ns.call_module(nnq.Embedding)),
(None, ns.call_module(nn.Embedding)),
(default_qconfig, ns.call_module(nn.Embedding)),
]
for qconfig, node in configs:
qconfig_dict = {"": qconfig}
m = prepare_fx(model, qconfig_dict)
m = convert_fx(m)
self._compare_script_and_mobile(m, input=indices)
def test_conv2d(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.conv1 = nn.Conv2d(1, 1, 1)
self.conv2 = nn.Conv2d(1, 1, 1)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
return x
m = M().eval()
qconfig_dict = {"": default_qconfig, "module_name": [("conv1", None)]}
m = prepare_fx(m, qconfig_dict)
data = torch.randn(1, 1, 1, 1)
m = convert_fx(m)
# first conv is quantized, second conv is not quantized
self._compare_script_and_mobile(m, input=data)
def test_submodule(self):
# test quantizing complete module, submodule and linear layer
configs = [
{},
{"module_name": [("subm", None)]},
{"module_name": [("fc", None)]},
]
for config in configs:
model = LinearModelWithSubmodule().eval()
qconfig_dict = {
"": torch.ao.quantization.get_default_qconfig("qnnpack"),
**config,
}
model = prepare_fx(model, qconfig_dict)
quant = convert_fx(model)
x = torch.randn(5, 5)
self._compare_script_and_mobile(quant, input=x)
if __name__ == "__main__":
run_tests()
|
pytorch-master
|
test/mobile/test_quantize_fx_lite_script_module.py
|
# Owner(s): ["oncall: mobile"]
from torch.testing._internal.common_utils import TestCase, run_tests
from torchgen.operator_versions.gen_mobile_upgraders import (
sort_upgrader,
write_cpp,
)
from pathlib import Path
import tempfile
import os
from torch.jit.generate_bytecode import generate_upgraders_bytecode
pytorch_caffe2_dir = Path(__file__).resolve().parents[2]
class TestLiteScriptModule(TestCase):
def test_generate_bytecode(self):
upgrader_list = generate_upgraders_bytecode()
sorted_upgrader_list = sort_upgrader(upgrader_list)
upgrader_mobile_cpp_path = pytorch_caffe2_dir / "torch" / "csrc" / "jit" / "mobile" / "upgrader_mobile.cpp"
with tempfile.TemporaryDirectory() as tmpdirname:
write_cpp(tmpdirname, sorted_upgrader_list)
with open(os.path.join(tmpdirname, 'upgrader_mobile.cpp'), 'r') as file_name:
actual_output = [line.strip() for line in file_name.readlines() if line]
with open(str(upgrader_mobile_cpp_path), 'r') as file_name:
expect_output = [line.strip() for line in file_name.readlines() if line]
actual_output_filtered = list(filter(lambda token: len(token) != 0, actual_output))
expect_output_filtered = list(filter(lambda token: len(token) != 0, expect_output))
self.assertEqual(actual_output_filtered, expect_output_filtered)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/mobile/test_upgrader_codegen.py
|
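test_upgrader_codegen.py above regenerates upgrader_mobile.cpp into a temporary directory and compares it against the checked-in copy line by line, ignoring blank lines and surrounding whitespace. A small sketch of that normalization, with hypothetical file paths:

def normalized_lines(path):
    # Strip each line and drop empty ones, mirroring the comparison in the test above.
    with open(path, 'r') as f:
        lines = [line.strip() for line in f.readlines()]
    return [line for line in lines if len(line) != 0]

# Usage sketch (paths are placeholders):
# assert normalized_lines("generated/upgrader_mobile.cpp") == \
#     normalized_lines("torch/csrc/jit/mobile/upgrader_mobile.cpp")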
# Owner(s): ["oncall: mobile"]
import torch
import torch.utils.bundled_inputs
import io
from typing import Dict, List, NamedTuple
from torch.jit.mobile import _load_for_lite_interpreter
from torch.testing._internal.common_utils import TestCase, run_tests
from collections import namedtuple
class TestLiteScriptModule(TestCase):
def test_typing_namedtuple(self):
myNamedTuple = NamedTuple('myNamedTuple', [('a', List[torch.Tensor])])
class MyTestModule(torch.nn.Module):
def forward(self, a: torch.Tensor):
p = myNamedTuple([a])
return p
sample_input = torch.tensor(5)
script_module = torch.jit.script(MyTestModule())
script_module_result = script_module(sample_input).a
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter(_save_mobile_debug_info=True))
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer) # Error here
mobile_module_result = mobile_module(sample_input).a
torch.testing.assert_allclose(
script_module_result,
mobile_module_result
)
def test_typing_dict_with_namedtuple(self):
class Foo(NamedTuple):
id: torch.Tensor
class Bar(torch.nn.Module):
def __init__(self):
super(Bar, self).__init__()
self.foo = Foo(torch.tensor(1))
def forward(self, a: torch.Tensor):
self.foo = Foo(a)
re: Dict[str, Foo] = dict()
re["test"] = Foo(a)
return self.foo, re["test"]
# The corresponding bytecode is
# (8,
# ('__torch__.___torch_mangle_2.Bar.forward',
# (('instructions',
# (('STOREN', 1, 2),
# ('DROPR', 1, 0),
# ('DICT_CONSTRUCT', 0, 0),
# ('STORE', 3, 0),
# ('LOAD', 3, 0),
# ('LOADC', 1, 0),
# ('MOVE', 2, 0),
# ('NAMED_TUPLE_CONSTRUCT', 1, 1),
# ('OP', 0, 0),
# ('MOVE', 3, 0),
# ('LOADC', 1, 0),
# ('DICT_INDEX', 0, 0),
# ('LOADC', 0, 0),
# ('TUPLE_INDEX', 0, 0),
# ('RET', 0, 0))),
# ('operators', (('aten::_set_item', 'str', 3),)),
# ('constants', (0, 'test')),
# ('types',
# ('Dict[str,__torch__.Foo[NamedTuple, [[id, Tensor]]]]',
# '__torch__.Foo[NamedTuple, [[id, Tensor]]]')),
# ('register_size', 3)),
# (('arguments',
# ((('name', 'self'),
# ('type', '__torch__.___torch_mangle_2.Bar'),
# ('default_value', None)),
# (('name', 'a'), ('type', 'Tensor'), ('default_value', None)))),
# ('returns',
# ((('name', ''), ('type', 'Tensor'), ('default_value', None)),)))))
sample_input = torch.tensor(5)
script_module = torch.jit.script(Bar())
script_module_result = script_module(sample_input)
buffer_mobile = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer_mobile.seek(0)
mobile_module = _load_for_lite_interpreter(buffer_mobile)
mobile_module_result = mobile_module(sample_input)
torch.testing.assert_allclose(
script_module_result,
mobile_module_result
)
def test_typing_namedtuple_custom_classtype(self):
class Foo(NamedTuple):
id: torch.Tensor
class Bar(torch.nn.Module):
def __init__(self):
super(Bar, self).__init__()
self.foo = Foo(torch.tensor(1))
def forward(self, a: torch.Tensor):
self.foo = Foo(a)
return self.foo
sample_input = torch.tensor(5)
script_module = torch.jit.script(Bar())
script_module_result = script_module(sample_input)
buffer_mobile = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer_mobile.seek(0)
mobile_module = _load_for_lite_interpreter(buffer_mobile)
mobile_module_result = mobile_module(sample_input)
torch.testing.assert_allclose(
script_module_result,
mobile_module_result
)
def test_return_collections_namedtuple(self):
myNamedTuple = namedtuple('myNamedTuple', [('a')])
class MyTestModule(torch.nn.Module):
def forward(self, a: torch.Tensor):
return myNamedTuple(a)
sample_input = torch.Tensor(1)
script_module = torch.jit.script(MyTestModule())
script_module_result = script_module(sample_input)
buffer_mobile = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer_mobile.seek(0)
mobile_module = _load_for_lite_interpreter(buffer_mobile)
mobile_module_result = mobile_module(sample_input)
torch.testing.assert_allclose(
script_module_result,
mobile_module_result
)
def test_nest_typing_namedtuple_custom_classtype(self):
class Baz(NamedTuple):
di: torch.Tensor
class Foo(NamedTuple):
id: torch.Tensor
baz: Baz
class Bar(torch.nn.Module):
def __init__(self):
super(Bar, self).__init__()
self.foo = Foo(torch.tensor(1), Baz(torch.tensor(1)))
def forward(self, a: torch.Tensor):
self.foo = Foo(a, Baz(torch.tensor(1)))
return self.foo
sample_input = torch.tensor(5)
script_module = torch.jit.script(Bar())
script_module_result = script_module(sample_input)
buffer_mobile = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer_mobile.seek(0)
mobile_module = _load_for_lite_interpreter(buffer_mobile)
mobile_module_result = mobile_module(sample_input)
torch.testing.assert_allclose(
script_module_result.baz.di,
mobile_module_result.baz.di
)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/mobile/test_lite_script_type.py
|
# Owner(s): ["oncall: mobile"]
import torch
import torch.utils.bundled_inputs
import io
from typing import Dict, List
import inspect
from torch.testing import FileCheck
from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
from torch.testing._internal.common_utils import TestCase, run_tests, TemporaryFileName
from torch.testing._internal.common_quantization import (
AnnotatedSingleLayerLinearModel,
TwoLayerLinearModel,
AnnotatedNestedModel
)
from torch.testing._internal.common_quantization import QuantizationLiteTestCase
class TestLiteScriptModule(TestCase):
def getScriptExportImportCopy(self, m, save_mobile_debug_info=True, also_test_file=False):
m_scripted = torch.jit.script(m)
if not also_test_file:
buffer = io.BytesIO(m_scripted._save_to_buffer_for_lite_interpreter(_save_mobile_debug_info=save_mobile_debug_info))
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
return mobile_module
with TemporaryFileName() as fname:
m_scripted._save_for_lite_interpreter(fname, _save_mobile_debug_info=save_mobile_debug_info)
mobile_module = _load_for_lite_interpreter(fname)
return mobile_module
def test_load_mobile_module(self):
class MyTestModule(torch.nn.Module):
def __init__(self):
super(MyTestModule, self).__init__()
def forward(self, x):
return x + 10
input = torch.tensor([1])
script_module = torch.jit.script(MyTestModule())
script_module_result = script_module(input)
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
mobile_module_result = mobile_module(input)
torch.testing.assert_close(script_module_result, mobile_module_result)
mobile_module_forward_result = mobile_module.forward(input)
torch.testing.assert_close(script_module_result, mobile_module_forward_result)
mobile_module_run_method_result = mobile_module.run_method("forward", input)
torch.testing.assert_close(script_module_result, mobile_module_run_method_result)
def test_save_mobile_module_with_debug_info_with_trace(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
def forward(self, x, y):
return x * y
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.A0 = A()
self.A1 = A()
def forward(self, x, y, z):
return self.A0(x, y) + self.A1(y, z)
for export_method in ['trace', 'script']:
x = torch.rand((2, 3))
y = torch.rand((2, 3))
z = torch.rand((2, 3))
if export_method == 'trace':
trace_module = torch.jit.trace(B(), [x, y, z])
else:
trace_module = torch.jit.script(B())
exported_module = trace_module._save_to_buffer_for_lite_interpreter(_save_mobile_debug_info=True)
buffer = io.BytesIO(exported_module)
buffer.seek(0)
assert(b"callstack_debug_map.pkl" in exported_module)
mobile_module = _load_for_lite_interpreter(buffer)
with self.assertRaisesRegex(RuntimeError, r"Module hierarchy:top\(B\)::<unknown>.A0\(A\)::forward.aten::mul"):
x = torch.rand((2, 3))
y = torch.rand((8, 10))
z = torch.rand((8, 10))
mobile_module(x, y, z)
with self.assertRaisesRegex(RuntimeError, r"Module hierarchy:top\(B\)::<unknown>.A1\(A\)::forward.aten::mul"):
x = torch.rand((2, 3))
y = torch.rand((2, 3))
z = torch.rand((8, 10))
mobile_module(x, y, z)
def test_load_mobile_module_with_debug_info(self):
class MyTestModule(torch.nn.Module):
def __init__(self):
super(MyTestModule, self).__init__()
def forward(self, x):
return x + 5
input = torch.tensor([3])
script_module = torch.jit.script(MyTestModule())
script_module_result = script_module(input)
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter(_save_mobile_debug_info=True))
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
mobile_module_result = mobile_module(input)
torch.testing.assert_close(script_module_result, mobile_module_result)
mobile_module_forward_result = mobile_module.forward(input)
torch.testing.assert_close(script_module_result, mobile_module_forward_result)
mobile_module_run_method_result = mobile_module.run_method("forward", input)
torch.testing.assert_close(script_module_result, mobile_module_run_method_result)
def test_find_and_run_method(self):
class MyTestModule(torch.nn.Module):
def forward(self, arg):
return arg
input = (torch.tensor([1]), )
script_module = torch.jit.script(MyTestModule())
script_module_result = script_module(*input)
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
has_bundled_inputs = mobile_module.find_method("get_all_bundled_inputs")
self.assertFalse(has_bundled_inputs)
torch.utils.bundled_inputs.augment_model_with_bundled_inputs(
script_module, [input], [])
buffer = io.BytesIO(script_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
has_bundled_inputs = mobile_module.find_method("get_all_bundled_inputs")
self.assertTrue(has_bundled_inputs)
bundled_inputs = mobile_module.run_method("get_all_bundled_inputs")
mobile_module_result = mobile_module.forward(*bundled_inputs[0])
torch.testing.assert_close(script_module_result, mobile_module_result)
def test_method_calls_with_optional_arg(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
# opt arg in script-to-script invocation
def forward(self, x, two: int = 2):
return x + two
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
self.A0 = A()
# opt arg in Python-to-script invocation
def forward(self, x, one: int = 1):
return self.A0(x) + one
script_module = torch.jit.script(B())
buffer = io.BytesIO(
script_module._save_to_buffer_for_lite_interpreter()
)
mobile_module = _load_for_lite_interpreter(buffer)
input = torch.tensor([5])
script_module_forward_result = script_module.forward(input)
mobile_module_forward_result = mobile_module.forward(input)
torch.testing.assert_close(
script_module_forward_result,
mobile_module_forward_result
)
# change ref only
script_module_forward_result = script_module.forward(input, 2)
self.assertFalse(
(script_module_forward_result == mobile_module_forward_result)
.all()
.item()
)
# now both match again
mobile_module_forward_result = mobile_module.forward(input, 2)
torch.testing.assert_close(
script_module_forward_result,
mobile_module_forward_result
)
def test_unsupported_classtype(self):
class Foo():
def __init__(self):
return
def func(self, x: int, y: int):
return x + y
class MyTestModule(torch.nn.Module):
def forward(self, arg):
f = Foo()
return f.func(1, 2)
script_module = torch.jit.script(MyTestModule())
with self.assertRaisesRegex(RuntimeError,
r"Workaround: instead of using arbitrary class type \(class Foo\(\)\), "
r"define a pytorch class \(class Foo\(torch\.nn\.Module\)\)\. "
r"The problematic type is: "):
script_module._save_to_buffer_for_lite_interpreter()
def test_unsupported_return_list_with_module_class(self):
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
class MyTestModuleForListWithModuleClass(torch.nn.Module):
def __init__(self):
super(MyTestModuleForListWithModuleClass, self).__init__()
self.foo = Foo()
def forward(self):
my_list: List[Foo] = [self.foo]
return my_list
script_module = torch.jit.script(MyTestModuleForListWithModuleClass())
with self.assertRaisesRegex(RuntimeError,
r"^Returining a list or dictionary with pytorch class type "
r"is not supported in mobile module "
r"\(List\[Foo\] or Dict\[int\, Foo\] for class Foo\(torch\.nn\.Module\)\)\. "
r"Workaround\: instead of using pytorch class as their element type\, "
r"use a combination of list\, dictionary\, and single types\.$"):
script_module._save_to_buffer_for_lite_interpreter()
def test_unsupported_return_dict_with_module_class(self):
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
class MyTestModuleForDictWithModuleClass(torch.nn.Module):
def __init__(self):
super(MyTestModuleForDictWithModuleClass, self).__init__()
self.foo = Foo()
def forward(self):
my_dict: Dict[int, Foo] = {1: self.foo}
return my_dict
script_module = torch.jit.script(MyTestModuleForDictWithModuleClass())
with self.assertRaisesRegex(RuntimeError,
r"^Returining a list or dictionary with pytorch class type "
r"is not supported in mobile module "
r"\(List\[Foo\] or Dict\[int\, Foo\] for class Foo\(torch\.nn\.Module\)\)\. "
r"Workaround\: instead of using pytorch class as their element type\, "
r"use a combination of list\, dictionary\, and single types\.$"):
script_module._save_to_buffer_for_lite_interpreter()
def test_module_export_operator_list(self):
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.weight = torch.ones((20, 1, 5, 5))
self.bias = torch.ones(20)
def forward(self, input):
x1 = torch.zeros(2, 2)
x2 = torch.empty_like(torch.empty(2, 2))
x3 = torch._convolution(
input,
self.weight,
self.bias,
[1, 1],
[0, 0],
[1, 1],
False,
[0, 0],
1,
False,
False,
True,
True,
)
return (x1, x2, x3)
m = torch.jit.script(Foo())
buffer = io.BytesIO(m._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
expected_ops = {
"aten::_convolution",
"aten::empty.memory_format",
"aten::empty_like",
"aten::zeros",
}
actual_ops = _export_operator_list(mobile_module)
self.assertEqual(actual_ops, expected_ops)
def test_source_range_simple(self):
class FooTest(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, w):
return torch.mm(x, w.t())
ft = FooTest()
loaded = self.getScriptExportImportCopy(ft)
_, lineno = inspect.getsourcelines(FooTest)
with self.assertRaisesRegex(RuntimeError, 'test_lite_script_module.py\", line {}'.format(lineno + 3)):
loaded(torch.rand(3, 4), torch.rand(30, 40))
def test_source_range_raise_exception(self):
class FooTest2(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self):
raise RuntimeError('foo')
_, lineno = inspect.getsourcelines(FooTest2)
# In C++ code, the type of exception thrown is torch::jit::JITException
# which does not extend c10::Error, and hence it isn't possible to add
# additional context to the exception message and preserve the correct
# C++ stack trace for symbolication. i.e. it isn't possible to add
# the debug handle string to show where in the Python code the exception
# occurred w/o first changing
# torch::jit::JITException to extend c10::Error.
with self.assertRaisesRegex(torch.jit.Error, 'foo'):
ft = FooTest2()
loaded = self.getScriptExportImportCopy(ft)
loaded()
def test_source_range_function_call(self):
class FooTest3(torch.jit.ScriptModule):
@torch.jit.script_method
def add_method(self, x, w):
return x + w
@torch.jit.script_method
def forward(self, x, y, w):
x = x * y
x = x + 2
return self.add_method(x, w)
ft = FooTest3()
loaded = self.getScriptExportImportCopy(ft)
_, lineno = inspect.getsourcelines(FooTest3)
try:
loaded(torch.rand(3, 4), torch.rand(3, 4), torch.rand(30, 40))
except RuntimeError as e:
error_message = f"{e}"
self.assertTrue('test_lite_script_module.py\", line {}'.format(lineno + 3) in error_message)
self.assertTrue('test_lite_script_module.py\", line {}'.format(lineno + 9) in error_message)
self.assertTrue('top(FooTest3)' in error_message)
def test_source_range_no_debug_info(self):
class FooTest4(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x, w):
return torch.mm(x, w.t())
ft = FooTest4()
loaded = self.getScriptExportImportCopy(ft, save_mobile_debug_info=False)
try:
loaded(torch.rand(3, 4), torch.rand(30, 40))
except RuntimeError as e:
error_message = f"{e}"
self.assertTrue("test_lite_script_module.py" not in error_message)
def test_source_range_raise_exc(self):
class FooTest5(torch.jit.ScriptModule):
def __init__(self, val: int):
super(FooTest5, self).__init__()
self.val = val
@torch.jit.script_method
def add_method(self, val: int, x, w):
if (val == self.val):
raise RuntimeError('self.val and val are same')
return x + w
@torch.jit.script_method
def forward(self, val: int, x, y, w):
x = x * y
x = x + 2
return self.add_method(val, x, w)
ft = FooTest5(42)
loaded = self.getScriptExportImportCopy(ft)
_, lineno = inspect.getsourcelines(FooTest5)
try:
loaded(42, torch.rand(3, 4), torch.rand(3, 4), torch.rand(30, 40))
except torch.jit.Error as e:
error_message = f"{e}"
# In C++ code, the type of exception thrown is torch::jit::JITException
# which does not extend c10::Error, and hence it isn't possible to add
# additional context to the exception message and preserve the correct
# C++ stack trace for symbolication. i.e. it isn't possible to add
# the debug handle string to show where in the Python code the exception
# occurred w/o first changing
# torch::jit::JITException to extend c10::Error.
self.assertTrue('self.val and val are same' in error_message)
def test_stacktrace_interface_call(self):
@torch.jit.interface
class Forward(torch.nn.Module):
def forward(self, x) -> torch.Tensor:
pass
def forwardError(self, x) -> torch.Tensor:
pass
class B(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
def forwardError(self, x):
return self.call() + x
def call(self):
return torch.ones(-1)
class A(torch.nn.Module):
b : Forward
def __init__(self):
super().__init__()
self.b = B()
def forward(self):
self.b.forward(torch.ones(1))
self.b.forwardError(torch.ones(1))
a = torch.jit.script(A())
torch._C._enable_mobile_interface_call_export()
buffer = io.BytesIO(a._save_to_buffer_for_lite_interpreter(_save_mobile_debug_info=True))
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
try:
mobile_module()
self.assertTrue(False)
except RuntimeError as exp:
FileCheck().check("Trying to create tensor with negative dimension") \
.check("Traceback of TorchScript") \
.check("self.b.forwardError").check_next("~~~~~~~~~~~~~~~~~~~ <--- HERE") \
.check("return self.call").check_next("~~~~~~~~~ <--- HERE") \
.check("return torch.ones").check_next("~~~~~~~~~~ <--- HERE").run(str(exp))
class TestLiteScriptQuantizedModule(QuantizationLiteTestCase):
def test_single_layer(self):
input = torch.rand(2, 5, dtype=torch.float)
quantized_model = self._create_quantized_model(model_class=AnnotatedSingleLayerLinearModel, qengine="qnnpack")
self._compare_script_and_mobile(model=quantized_model, input=input)
def test_two_layer(self):
input = torch.rand(2, 5, dtype=torch.float)
quantized_model = self._create_quantized_model(model_class=TwoLayerLinearModel)
self._compare_script_and_mobile(model=quantized_model, input=input)
def test_annotated_nested(self):
input = torch.rand(2, 5, dtype=torch.float)
quantized_model = self._create_quantized_model(model_class=AnnotatedNestedModel, qengine="qnnpack")
self._compare_script_and_mobile(model=quantized_model, input=input)
def test_quantization_example(self):
# From the example in Static Quantization section of https://pytorch.org/docs/stable/quantization.html
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.quant = torch.ao.quantization.QuantStub()
self.conv = torch.nn.Conv2d(1, 1, 1)
self.relu = torch.nn.ReLU()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv(x)
x = self.relu(x)
x = self.dequant(x)
return x
model_fp32 = M()
model_fp32.eval()
model_fp32.qconfig = torch.ao.quantization.get_default_qconfig('qnnpack')
model_fp32_fused = torch.ao.quantization.fuse_modules(model_fp32, [['conv', 'relu']])
model_fp32_prepared = torch.ao.quantization.prepare(model_fp32_fused)
input_fp32 = torch.randn(4, 1, 4, 4)
model_fp32_prepared(input_fp32)
model_int8 = torch.ao.quantization.convert(model_fp32_prepared)
input = torch.randn(4, 1, 4, 4)
self._compare_script_and_mobile(model=model_int8, input=input)
def test_bundled_input_with_dynamic_type(self):
class Model(torch.nn.Module):
def __init__(self):
super(Model, self).__init__()
def forward(
self,
x: Dict[int, torch.Tensor],
y: Dict[int, torch.Tensor],
z: Dict[int, torch.Tensor],
):
return x
model = Model()
script_module = torch.jit.script(model)
sample_input = {
script_module.forward: [
(
{0: torch.ones(1)},
{1: torch.ones(1)},
{2: torch.ones(1)},
)
]
}
bundled_model = torch.utils.bundled_inputs.bundle_inputs(
script_module, sample_input
)
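# Note: bundle_inputs() returns an augmented copy of the script module with helper
# methods such as get_all_bundled_inputs(), which is what the lite interpreter
# module exposes via run_method() below.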
buf = bundled_model._save_to_buffer_for_lite_interpreter()
mobile_module = _load_for_lite_interpreter(io.BytesIO(buf))
i = mobile_module.run_method("get_all_bundled_inputs")
self.assertEqual(
i[0],
(
{0: torch.ones(1)},
{1: torch.ones(1)},
{2: torch.ones(1)},
),
)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/mobile/test_lite_script_module.py
|
import torch
from torch import nn
class NeuralNetwork(nn.Module):
def __init__(self):
super(NeuralNetwork, self).__init__()
def forward(self, x):
return torch.add(x, 10)
model = NeuralNetwork()
script = torch.jit.script(model)
torch.jit.save(script, "aot_test_model.pt")
|
pytorch-master
|
test/mobile/nnc/aot_test_model.py
|
import functools
import os
from io import BytesIO
import shutil
import sys
import torch
from torch.jit.mobile import _load_for_lite_interpreter, _export_operator_list
_OPERATORS = set()
_FILENAMES = []
_MODELS = []
def save_model(cls):
"""Save a model and dump all the ops"""
@functools.wraps(cls)
def wrapper_save():
_MODELS.append(cls)
model = cls()
scripted = torch.jit.script(model)
buffer = BytesIO(scripted._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
mobile_module = _load_for_lite_interpreter(buffer)
ops = _export_operator_list(mobile_module)
_OPERATORS.update(ops)
path = f"./{cls.__name__}.ptl"
_FILENAMES.append(path)
scripted._save_for_lite_interpreter(path)
return wrapper_save
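# Note: @save_model replaces each decorated class name below with the zero-argument
# wrapper_save function, so instantiating the name (as the "setup" branch does) scripts
# the model, records its root ops in _OPERATORS, and writes a .ptl file for the lite
# interpreter.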
@save_model
class ModelWithDTypeDeviceLayoutPinMemory(torch.nn.Module):
def forward(self, x: int):
a = torch.ones(size=[3, x], dtype=torch.int64, layout=torch.strided, device="cpu", pin_memory=False)
return a
@save_model
class ModelWithTensorOptional(torch.nn.Module):
def forward(self, index):
a = torch.zeros(2, 2)
a[0][1] = 1
a[1][0] = 2
a[1][1] = 3
return a[index]
# gradient.scalarrayint(Tensor self, *, Scalar[] spacing, int? dim=None, int edge_order=1) -> Tensor[]
@save_model
class ModelWithScalarList(torch.nn.Module):
def forward(self, a: int):
values = torch.tensor([4., 1., 1., 16.], )
if a == 0:
return torch.gradient(values, spacing=torch.scalar_tensor(2., dtype=torch.float64))
elif a == 1:
return torch.gradient(values, spacing=[torch.tensor(1.).item()])
# upsample_linear1d.vec(Tensor input, int[]? output_size, bool align_corners, float[]? scale_factors) -> Tensor
@save_model
class ModelWithFloatList(torch.nn.Upsample):
def __init__(self):
super().__init__(scale_factor=(2.0,), mode="linear", align_corners=False, recompute_scale_factor=True)
# index.Tensor(Tensor self, Tensor?[] indices) -> Tensor
@save_model
class ModelWithListOfOptionalTensors(torch.nn.Module):
def forward(self, index):
values = torch.tensor([[4., 1., 1., 16.]])
return values[torch.tensor(0), index]
# conv2d(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, int[2] padding=0, int[2] dilation=1,
# int groups=1) -> Tensor
@save_model
class ModelWithArrayOfInt(torch.nn.Conv2d):
def __init__(self):
super().__init__(1, 2, (2, 2), stride=(1, 1), padding=(1, 1))
# add.Tensor(Tensor self, Tensor other, *, Scalar alpha=1) -> Tensor
# ones_like(Tensor self, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None,
# MemoryFormat? memory_format=None) -> Tensor
@save_model
class ModelWithTensors(torch.nn.Module):
def forward(self, a):
b = torch.ones_like(a)
return a + b
@save_model
class ModelWithStringOptional(torch.nn.Module):
def forward(self, b):
a = torch.tensor(3, dtype=torch.int64)
out = torch.empty(size=[1], dtype=torch.float)
torch.div(b, a, out=out)
return [torch.div(b, a, rounding_mode='trunc'), out]
@save_model
class ModelWithMultipleOps(torch.nn.Module):
def __init__(self):
super().__init__()
self.ops = torch.nn.Sequential(
torch.nn.ReLU(),
torch.nn.Flatten(),
)
def forward(self, x):
x[1] = -2
return self.ops(x)
if __name__ == "__main__":
command = sys.argv[1]
ops_yaml = sys.argv[2]
backup = ops_yaml + ".bak"
if command == "setup":
tests = [
ModelWithDTypeDeviceLayoutPinMemory(),
ModelWithTensorOptional(),
ModelWithScalarList(),
ModelWithFloatList(),
ModelWithListOfOptionalTensors(),
ModelWithArrayOfInt(),
ModelWithTensors(),
ModelWithStringOptional(),
ModelWithMultipleOps(),
]
shutil.copyfile(ops_yaml, backup)
with open(ops_yaml, 'a') as f:
for op in _OPERATORS:
f.write(f"- {op}\n")
elif command == "shutdown":
for file in _FILENAMES:  # remove the .ptl files written during setup
if os.path.isfile(file):
os.remove(file)
shutil.move(backup, ops_yaml)
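# Illustrative invocation (the yaml path is whatever selected-ops file the build uses):
#   python tests_setup.py setup <path/to/selected_ops.yaml>
#   python tests_setup.py shutdown <path/to/selected_ops.yaml>
# "setup" appends the exported ops to the yaml (keeping a .bak copy); "shutdown" restores it.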
|
pytorch-master
|
test/mobile/lightweight_dispatch/tests_setup.py
|
"""
This is a script for the end-to-end mobile custom build test. It prepares a
MobileNetV2 TorchScript model and dumps the root ops used by the model, so the custom
build script can create a tailored build that contains only these used ops.
"""
import torch
import torchvision
import yaml
# Download and trace the model.
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.rand(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
# Save traced TorchScript model.
traced_script_module.save("MobileNetV2.pt")
# Dump root ops used by the model (for custom build optimization).
ops = torch.jit.export_opnames(traced_script_module)
# Besides the ops used by the model, custom c++ client code might use some extra
# ops, too. For example, the dummy predictor.cpp driver in this test suite calls
# `aten::ones` to create an all-ones tensor for testing purposes, which is not used
# by the MobileNetV2 model itself.
#
# This is less a problem for Android, where we expect users to use the limited
# set of Java APIs. To actually solve this problem, we probably need to ask users
# to run code analyzer against their client code to dump these extra root ops.
# So it will require more work to switch to custom build with dynamic dispatch -
# in static dispatch case these extra ops will be kept by linker automatically.
#
# For CI purpose this one-off hack is probably fine? :)
EXTRA_CI_ROOT_OPS = ['aten::ones']
ops.extend(EXTRA_CI_ROOT_OPS)
with open('MobileNetV2.yaml', 'w') as output:
yaml.dump(ops, output)
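# The dumped MobileNetV2.yaml is a plain YAML list of root operator names (whatever the
# traced model uses, plus the 'aten::ones' appended above). The custom build scripts are
# expected to consume it, e.g. as a selected-operator list, when tailoring the libtorch build.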
|
pytorch-master
|
test/mobile/custom_build/prepare_model.py
|
"""
This is a script to aggregate production ops from xplat/pytorch_models/build/all_mobile_model_configs.yaml.
Specify the file path in the first argument. The results will be dumped to model_ops.yaml.
"""
import sys
import yaml
root_operators = {}
traced_operators = {}
kernel_metadata = {}
with open(sys.argv[1]) as input_yaml_file:
model_infos = yaml.safe_load(input_yaml_file)
for info in model_infos:
for op in info["root_operators"]:
# aggregate occurrence count per op
root_operators[op] = 1 + (root_operators[op] if op in root_operators else 0)
for op in info["traced_operators"]:
# aggregate occurrence count per op
traced_operators[op] = 1 + (traced_operators[op] if op in traced_operators else 0)
# merge dtypes for each kernel
for kernel, dtypes in info["kernel_metadata"].items():
new_dtypes = dtypes + (kernel_metadata[kernel] if kernel in kernel_metadata else [])
kernel_metadata[kernel] = list(set(new_dtypes))
# Only test these built-in ops. No custom ops or non-CPU ops.
namespaces = ["aten", "prepacked", "prim", "quantized"]
root_operators = {x: root_operators[x] for x in root_operators if x.split("::")[0] in namespaces}
traced_operators = {x: traced_operators[x] for x in traced_operators if x.split("::")[0] in namespaces}
out_path = "test/mobile/model_test/model_ops.yaml"
with open(out_path, "w") as f:
yaml.safe_dump({"root_operators": root_operators}, f)
|
pytorch-master
|
test/mobile/model_test/update_production_ops.py
|
from typing import Dict, List, Tuple, Optional
import torch
from torch import Tensor
class AndroidAPIModule(torch.jit.ScriptModule):
def __init__(self):
super(AndroidAPIModule, self).__init__()
@torch.jit.script_method
def forward(self, input):
return None
@torch.jit.script_method
def eqBool(self, input: bool) -> bool:
return input
@torch.jit.script_method
def eqInt(self, input: int) -> int:
return input
@torch.jit.script_method
def eqFloat(self, input: float) -> float:
return input
@torch.jit.script_method
def eqStr(self, input: str) -> str:
return input
@torch.jit.script_method
def eqTensor(self, input: Tensor) -> Tensor:
return input
@torch.jit.script_method
def eqDictStrKeyIntValue(self, input: Dict[str, int]) -> Dict[str, int]:
return input
@torch.jit.script_method
def eqDictIntKeyIntValue(self, input: Dict[int, int]) -> Dict[int, int]:
return input
@torch.jit.script_method
def eqDictFloatKeyIntValue(self, input: Dict[float, int]) -> Dict[float, int]:
return input
@torch.jit.script_method
def listIntSumReturnTuple(self, input: List[int]) -> Tuple[List[int], int]:
sum = 0
for x in input:
sum += x
return (input, sum)
@torch.jit.script_method
def listBoolConjunction(self, input: List[bool]) -> bool:
res = True
for x in input:
res = res and x
return res
@torch.jit.script_method
def listBoolDisjunction(self, input: List[bool]) -> bool:
res = False
for x in input:
res = res or x
return res
@torch.jit.script_method
def tupleIntSumReturnTuple(
self, input: Tuple[int, int, int]
) -> Tuple[Tuple[int, int, int], int]:
sum = 0
for x in input:
sum += x
return (input, sum)
@torch.jit.script_method
def optionalIntIsNone(self, input: Optional[int]) -> bool:
return input is None
@torch.jit.script_method
def intEq0None(self, input: int) -> Optional[int]:
if input == 0:
return None
return input
@torch.jit.script_method
def str3Concat(self, input: str) -> str:
return input + input + input
@torch.jit.script_method
def newEmptyShapeWithItem(self, input):
return torch.tensor([int(input.item())])[0]
@torch.jit.script_method
def testAliasWithOffset(self) -> List[Tensor]:
x = torch.tensor([100, 200])
a = [x[0], x[1]]
return a
@torch.jit.script_method
def testNonContiguous(self):
x = torch.tensor([100, 200, 300])[::2]
assert not x.is_contiguous()
assert x[0] == 100
assert x[1] == 300
return x
@torch.jit.script_method
def conv2d(self, x: Tensor, w: Tensor, toChannelsLast: bool) -> Tensor:
r = torch.nn.functional.conv2d(x, w)
if toChannelsLast:
r = r.contiguous(memory_format=torch.channels_last)
else:
r = r.contiguous()
return r
@torch.jit.script_method
def contiguous(self, x: Tensor) -> Tensor:
return x.contiguous()
@torch.jit.script_method
def contiguousChannelsLast(self, x: Tensor) -> Tensor:
return x.contiguous(memory_format=torch.channels_last)
@torch.jit.script_method
def contiguousChannelsLast3d(self, x: Tensor) -> Tensor:
return x.contiguous(memory_format=torch.channels_last_3d)
|
pytorch-master
|
test/mobile/model_test/android_api_module.py
|
import torch
# https://pytorch.org/docs/stable/jit_builtin_functions.html#builtin-functions
class TSBuiltinOpsModule(torch.nn.Module):
def __init__(self):
super(TSBuiltinOpsModule, self).__init__()
def forward(self):
x = torch.tensor(1)
y = torch.tensor(0.5)
b = float(1)
s = "abcde"
l = ["1", "2", "test", "a{}b"]
d = {"key": 1}
d2 = {0: 100}
return len(
# type
bool(x),
bool(x.item()),
int(y),
int(y.item()),
float(x),
float(x.item()),
# math
x & x,
bool(x) & bool(x),
int(x) & int(x),
x | x,
bool(x) | bool(x),
int(x) | int(x),
x << x,
int(x) << int(x),
x >> x,
int(x) >> int(x),
x ^ x,
bool(x) ^ bool(x),
int(x) ^ int(x),
b * float(x),
b * int(x),
b + float(x),
b - float(x),
x.item() + y.item(),
x.item() - y.item(),
x.item() * y.item(),
x.item() / y.item(),
float(x) < float(y),
float(x) <= float(y),
float(x) > float(y),
float(x) > int(y),
float(x) >= float(y),
float(x) >= int(y),
float(x) == float(y),
float(x) == int(y),
float(x) != float(y),
int(x) != float(y),
float(x) / float(y),
int(x) / int(y),
max(x),
max(x.item(), y.item()),
max(int(x), int(y)),
max(float(x), float(y)),
min(x),
min(x.item(), y.item()),
min(int(x), int(y)),
min(float(x), float(y)),
int(l[0]),
float(l[0]),
# string
str(torch.tensor(1)),
l[2].find("t"),
l[2].replace("t", "x"),
l[2].lower(),
l[2].startswith("t"),
l[2].split("t"),
l[2].strip(),
l[2].rstrip(),
l[2].lstrip(),
l[2][slice(2)],
l[3].format("x"),
ord(l[2][0]),
len(torch.randn(3)),
len(l),
len(l[2]),
len(d),
len(d2),
)
class TSCollectionOpsModule(torch.nn.Module):
def __init__(self):
super(TSCollectionOpsModule, self).__init__()
def forward(self):
s = "abcde"
# list
l = ["1", "2", "test"]
l.reverse()
l.reverse()
l[1] = "3"
l.extend(["4"])
# str dict
d = {"key": 1}
d.clear()
d.update({"key": 0})
if "key" in d:
d["key"] = 2
# int dict
d2 = {0: 100}
if 0 in d2:
d2.clear()
d2[0] = 100
return len(
s[torch.tensor(1)],
d["key"],
d2[0],
d.keys(),
d.items(),
d.values(),
d2.values(),
l.pop(),
)
|
pytorch-master
|
test/mobile/model_test/builtin_ops.py
|
import torch
class TensorOpsModule(torch.nn.Module):
def __init__(self):
super(TensorOpsModule, self).__init__()
def forward(self):
return self.tensor_general_ops()
def tensor_general_ops(self):
a = torch.randn(4)
b = torch.tensor([1.5])
x = torch.ones((2,))
c = torch.randn(4, dtype=torch.cfloat)
w = torch.rand(4, 4, 4, 4)
v = torch.rand(4, 4, 4, 4)
return len(
# torch.is_tensor(a),
# torch.is_storage(a),
torch.is_complex(a),
torch.is_conj(a),
torch.is_floating_point(a),
torch.is_nonzero(b),
# torch.set_default_dtype(torch.float32),
# torch.get_default_dtype(),
# torch.set_default_tensor_type(torch.DoubleTensor),
torch.numel(a),
# torch.set_printoptions(),
# torch.set_flush_denormal(False),
# https://pytorch.org/docs/stable/tensors.html#tensor-class-reference
# x.new_tensor([[0, 1], [2, 3]]),
x.new_full((3, 4), 3.141592),
x.new_empty((2, 3)),
x.new_ones((2, 3)),
x.new_zeros((2, 3)),
x.is_cuda,
x.is_quantized,
x.is_meta,
x.device,
x.dim(),
c.real,
c.imag,
# x.backward(),
x.clone(),
w.contiguous(),
w.contiguous(memory_format=torch.channels_last),
w.copy_(v),
w.copy_(1),
w.copy_(0.5),
x.cpu(),
# x.cuda(),
# x.data_ptr(),
x.dense_dim(),
w.fill_diagonal_(0),
w.element_size(),
w.exponential_(),
w.fill_(0),
w.geometric_(0.5),
a.index_fill(0, torch.tensor([0, 2]), 1),
a.index_put_([torch.argmax(a)], torch.tensor(1.0)),
a.index_put([torch.argmax(a)], torch.tensor(1.0)),
w.is_contiguous(),
c.is_complex(),
w.is_conj(),
w.is_floating_point(),
w.is_leaf,
w.is_pinned(),
w.is_set_to(w),
# w.is_shared,
w.is_coalesced(),
w.coalesce(),
w.is_signed(),
w.is_sparse,
torch.tensor([1]).item(),
x.log_normal_(),
# x.masked_scatter_(),
# x.masked_scatter(),
# w.normal(),
w.numel(),
# w.pin_memory(),
# w.put_(0, torch.tensor([0, 1], w)),
x.repeat(4, 2),
a.clamp_(0),
a.clamp(0),
a.clamp_min(0),
a.hardsigmoid_(),
a.hardsigmoid(),
a.hardswish_(),
a.hardswish(),
a.hardtanh_(),
a.hardtanh(),
a.leaky_relu_(),
a.leaky_relu(),
a.relu_(),
a.relu(),
a.resize_as_(a),
a.type_as(a),
a._shape_as_tensor(),
a.requires_grad_(False),
)
class TensorCreationOpsModule(torch.nn.Module):
def __init__(self):
super(TensorCreationOpsModule, self).__init__()
def forward(self):
return self.tensor_creation_ops()
def tensor_creation_ops(self):
i = torch.tensor([[0, 1, 1], [2, 0, 2]])
v = torch.tensor([3, 4, 5], dtype=torch.float32)
real = torch.tensor([1, 2], dtype=torch.float32)
imag = torch.tensor([3, 4], dtype=torch.float32)
inp = torch.tensor([-1.5, 0.0, 2.0])
values = torch.tensor([0.5])
quantized = torch.quantize_per_channel(
torch.tensor([[-1.0, 0.0], [1.0, 2.0]]),
torch.tensor([0.1, 0.01]),
torch.tensor([10, 0]),
0,
torch.quint8,
)
return len(
torch.tensor([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]]),
# torch.sparse_coo_tensor(i, v, [2, 3]), # does not work on iOS
torch.as_tensor([1, 2, 3]),
torch.as_strided(torch.randn(3, 3), (2, 2), (1, 2)),
torch.zeros(2, 3),
torch.zeros((2, 3)),
torch.zeros([2, 3], out=i),
torch.zeros(5),
torch.zeros_like(torch.empty(2, 3)),
torch.ones(2, 3),
torch.ones((2, 3)),
torch.ones([2, 3]),
torch.ones(5),
torch.ones_like(torch.empty(2, 3)),
torch.arange(5),
torch.arange(1, 4),
torch.arange(1, 2.5, 0.5),
torch.range(1, 4),
torch.range(1, 4, 0.5),
torch.linspace(3.0, 3.0, steps=1),
torch.logspace(start=2, end=2, steps=1, base=2.0),
torch.eye(3),
torch.empty(2, 3),
torch.empty_like(torch.empty(2, 3), dtype=torch.int64),
torch.empty_strided((2, 3), (1, 2)),
torch.full((2, 3), 3.141592),
torch.full_like(torch.full((2, 3), 3.141592), 2.71828),
torch.quantize_per_tensor(
torch.tensor([-1.0, 0.0, 1.0, 2.0]), 0.1, 10, torch.quint8
),
torch.dequantize(quantized),
torch.complex(real, imag),
torch.polar(real, imag),
torch.heaviside(inp, values),
)
class TensorIndexingOpsModule(torch.nn.Module):
def __init__(self):
super(TensorIndexingOpsModule, self).__init__()
def forward(self):
return self.tensor_indexing_ops()
def tensor_indexing_ops(self):
x = torch.randn(2, 4)
y = torch.randn(4, 4)
t = torch.tensor([[0, 0], [1, 0]])
mask = x.ge(0.5)
i = [0, 1]
return len(
torch.cat((x, x, x), 0),
torch.concat((x, x, x), 0),
torch.conj(x),
torch.chunk(x, 2),
torch.dsplit(torch.randn(2, 2, 4), i),
torch.column_stack((x, x)),
torch.dstack((x, x)),
torch.gather(x, 0, t),
torch.hsplit(x, i),
torch.hstack((x, x)),
torch.index_select(x, 0, torch.tensor([0, 1])),
x.index(t),
torch.masked_select(x, mask),
torch.movedim(x, 1, 0),
torch.moveaxis(x, 1, 0),
torch.narrow(x, 0, 0, 2),
torch.nonzero(x),
torch.permute(x, (0, 1)),
torch.reshape(x, (-1,)),
torch.row_stack((x, x)),
torch.select(x, 0, 0),
torch.scatter(x, 0, t, x),
x.scatter(0, t, x.clone()),
torch.diagonal_scatter(y, torch.ones(4)),
torch.select_scatter(y, torch.ones(4), 0, 0),
torch.slice_scatter(x, x),
torch.scatter_add(x, 0, t, x),
x.scatter_(0, t, y),
x.scatter_add_(0, t, y),
# torch.scatter_reduce(x, 0, t, reduce="sum"),
torch.split(x, 1),
torch.squeeze(x, 0),
torch.stack([x, x]),
torch.swapaxes(x, 0, 1),
torch.swapdims(x, 0, 1),
torch.t(x),
torch.take(x, t),
torch.take_along_dim(x, torch.argmax(x)),
torch.tensor_split(x, 1),
torch.tensor_split(x, [0, 1]),
torch.tile(x, (2, 2)),
torch.transpose(x, 0, 1),
torch.unbind(x),
torch.unsqueeze(x, -1),
torch.vsplit(x, i),
torch.vstack((x, x)),
torch.where(x),
torch.where(t > 0, t, 0),
torch.where(t > 0, t, t),
)
class TensorTypingOpsModule(torch.nn.Module):
def __init__(self):
super(TensorTypingOpsModule, self).__init__()
def forward(self):
return self.tensor_typing_ops()
def tensor_typing_ops(self):
x = torch.randn(1, 3, 4, 4)
return len(
x.to(torch.float),
x.to(torch.double),
x.to(torch.cfloat),
x.to(torch.cdouble),
x.to(torch.half),
x.to(torch.bfloat16),
x.to(torch.uint8),
x.to(torch.int8),
x.to(torch.short),
x.to(torch.int),
x.to(torch.long),
x.to(torch.bool),
x.to(torch.device("cpu")),
x.to(device="cpu", dtype=torch.float),
x.to(memory_format=torch.channels_last),
)
class TensorViewOpsModule(torch.nn.Module):
def __init__(self):
super(TensorViewOpsModule, self).__init__()
def forward(self):
return self.tensor_view_ops()
def tensor_view_ops(self):
x = torch.randn(4, 4, 1)
y = torch.randn(4, 4, 2)
return len(
x[0, 2:],
x.detach(),
x.detach_(),
x.diagonal(),
x.expand(-1, -1, 3),
x.expand_as(y),
x.select(0, 1),
x.unflatten(1, (2, 2)),
x.unfold(1, 2, 2),
x.view(16),
x.view_as(torch.randn(16)),
)
|
pytorch-master
|
test/mobile/model_test/tensor_ops.py
|
import torch
import torch.nn as nn
import torch.nn.functional as F
# https://pytorch.org/docs/stable/nn.html
class NNConvolutionModule(torch.nn.Module):
def __init__(self):
super(NNConvolutionModule, self).__init__()
self.input1d = torch.randn(1, 4, 36)
self.input2d = torch.randn(1, 4, 30, 10)
self.input3d = torch.randn(1, 4, 10, 4, 4)
self.module1d = nn.ModuleList(
[
nn.Conv1d(4, 33, 3),
nn.ConvTranspose1d(4, 33, 3),
nn.Fold(output_size=(5, 10), kernel_size=(2, 2)),
]
)
self.module2d = nn.ModuleList(
[
nn.Conv2d(4, 33, 3),
nn.ConvTranspose2d(4, 33, 3),
nn.Unfold(kernel_size=3),
]
)
self.module3d = nn.ModuleList(
[
nn.Conv3d(4, 33, 2),
nn.ConvTranspose3d(4, 33, 3),
]
)
def forward(self):
return len((
[module(self.input1d) for i, module in enumerate(self.module1d)],
[module(self.input2d) for i, module in enumerate(self.module2d)],
[module(self.input3d) for i, module in enumerate(self.module3d)],
))
class NNPoolingModule(torch.nn.Module):
def __init__(self):
super(NNPoolingModule, self).__init__()
self.input1d = torch.randn(1, 16, 50)
self.module1d = nn.ModuleList(
[
nn.MaxPool1d(3, stride=2),
nn.AvgPool1d(3, stride=2),
nn.LPPool1d(2, 3, stride=2),
nn.AdaptiveMaxPool1d(3),
nn.AdaptiveAvgPool1d(3),
]
)
self.input2d = torch.randn(1, 16, 30, 10)
self.module2d = nn.ModuleList(
[
nn.MaxPool2d((3, 2), stride=(2, 1)),
nn.AvgPool2d((3, 2), stride=(2, 1)),
nn.FractionalMaxPool2d(3, output_ratio=(0.5, 0.5)),
nn.LPPool2d(2, 3, stride=(2, 1)),
nn.AdaptiveMaxPool2d((5, 7)),
nn.AdaptiveAvgPool2d((7)),
]
)
self.input3d = torch.randn(1, 16, 20, 4, 4)
self.module3d = nn.ModuleList(
[
nn.MaxPool3d(2),
nn.AvgPool3d(2),
nn.FractionalMaxPool3d(2, output_ratio=(0.5, 0.5, 0.5)),
nn.AdaptiveMaxPool3d((5, 7, 9)),
nn.AdaptiveAvgPool3d((5, 7, 9)),
]
)
# TODO max_unpool
def forward(self):
return len((
[module(self.input1d) for i, module in enumerate(self.module1d)],
[module(self.input2d) for i, module in enumerate(self.module2d)],
[module(self.input3d) for i, module in enumerate(self.module3d)],
))
class NNPaddingModule(torch.nn.Module):
def __init__(self):
super(NNPaddingModule, self).__init__()
self.input1d = torch.randn(1, 4, 50)
self.module1d = nn.ModuleList(
[
nn.ReflectionPad1d(2),
nn.ReplicationPad1d(2),
nn.ConstantPad1d(2, 3.5),
]
)
self.input2d = torch.randn(1, 4, 30, 10)
self.module2d = nn.ModuleList(
[
nn.ReflectionPad2d(2),
nn.ReplicationPad2d(2),
nn.ZeroPad2d(2),
nn.ConstantPad2d(2, 3.5),
]
)
self.input3d = torch.randn(1, 4, 10, 4, 4)
self.module3d = nn.ModuleList(
[
nn.ReflectionPad3d(1),
nn.ReplicationPad3d(3),
nn.ConstantPad3d(3, 3.5),
]
)
def forward(self):
return len((
[module(self.input1d) for i, module in enumerate(self.module1d)],
[module(self.input2d) for i, module in enumerate(self.module2d)],
[module(self.input3d) for i, module in enumerate(self.module3d)],
))
class NNNormalizationModule(torch.nn.Module):
def __init__(self):
super(NNNormalizationModule, self).__init__()
self.input1d = torch.randn(1, 4, 50)
self.module1d = nn.ModuleList(
[
nn.BatchNorm1d(4),
nn.InstanceNorm1d(4),
]
)
self.input2d = torch.randn(1, 4, 30, 10)
self.module2d = nn.ModuleList(
[
nn.BatchNorm2d(4),
nn.GroupNorm(4, 4),
nn.InstanceNorm2d(4),
nn.LayerNorm([4, 30, 10]),
nn.LocalResponseNorm(2),
]
)
self.input3d = torch.randn(1, 4, 10, 4, 4)
self.module3d = nn.ModuleList(
[
nn.BatchNorm3d(4),
nn.InstanceNorm3d(4),
nn.ChannelShuffle(2),
]
)
def forward(self):
return len((
[module(self.input1d) for i, module in enumerate(self.module1d)],
[module(self.input2d) for i, module in enumerate(self.module2d)],
[module(self.input3d) for i, module in enumerate(self.module3d)],
))
class NNActivationModule(torch.nn.Module):
def __init__(self):
super(NNActivationModule, self).__init__()
self.activations = nn.ModuleList(
[
nn.ELU(),
nn.Hardshrink(),
nn.Hardsigmoid(),
nn.Hardtanh(),
nn.Hardswish(),
nn.LeakyReLU(),
nn.LogSigmoid(),
# nn.MultiheadAttention(),
nn.PReLU(),
nn.ReLU(),
nn.ReLU6(),
nn.RReLU(),
nn.SELU(),
nn.CELU(),
nn.GELU(),
nn.Sigmoid(),
nn.SiLU(),
nn.Mish(),
nn.Softplus(),
nn.Softshrink(),
nn.Softsign(),
nn.Tanh(),
nn.Tanhshrink(),
# nn.Threshold(0.1, 20),
nn.GLU(),
nn.Softmin(),
nn.Softmax(),
nn.Softmax2d(),
nn.LogSoftmax(),
# nn.AdaptiveLogSoftmaxWithLoss(),
]
)
def forward(self):
input = torch.randn(2, 3, 4)
return len((
[module(input) for i, module in enumerate(self.activations)],
))
class NNRecurrentModule(torch.nn.Module):
def __init__(self):
super(NNRecurrentModule, self).__init__()
self.rnn = nn.ModuleList(
[
nn.RNN(4, 8, 2),
nn.RNNCell(4, 8),
]
)
self.gru = nn.ModuleList([nn.GRU(4, 8, 2), nn.GRUCell(4, 8)])
self.lstm = nn.ModuleList(
[
nn.LSTM(4, 8, 2),
nn.LSTMCell(4, 8),
]
)
def forward(self):
input = torch.randn(5, 3, 4)
h = torch.randn(2, 3, 8)
c = torch.randn(2, 3, 8)
r = self.rnn[0](input, h)
r = self.rnn[1](input[0], h[0])
r = self.gru[0](input, h)
r = self.gru[1](input[0], h[0])
r = self.lstm[0](input, (h, c))
r = self.lstm[1](input[0], (h[0], c[0]))
return len(r)
class NNTransformerModule(torch.nn.Module):
def __init__(self):
super(NNTransformerModule, self).__init__()
self.transformers = nn.ModuleList(
[
nn.Transformer(
d_model=2, nhead=2, num_encoder_layers=1, num_decoder_layers=1
),
nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=2, nhead=2), num_layers=1
),
nn.TransformerDecoder(
nn.TransformerDecoderLayer(d_model=2, nhead=2), num_layers=1
),
]
)
def forward(self):
input = torch.rand(1, 16, 2)
tgt = torch.rand((1, 16, 2))
r = self.transformers[0](input, tgt)
r = self.transformers[1](input)
r = self.transformers[2](input, tgt)
return len(r)
class NNLinearModule(torch.nn.Module):
def __init__(self):
super(NNLinearModule, self).__init__()
self.linears = nn.ModuleList(
[
nn.Identity(54),
nn.Linear(20, 20),
nn.Bilinear(20, 20, 40),
# nn.LazyLinear(20, 30),
]
)
def forward(self):
input = torch.randn(32, 20)
r = self.linears[0](input)
r = self.linears[1](input)
r = self.linears[2](input, input)
return len(r)
class NNDropoutModule(torch.nn.Module):
def __init__(self):
super(NNDropoutModule, self).__init__()
def forward(self):
a = torch.randn(8, 4)
b = torch.randn(8, 4, 4, 4)
c = torch.randn(8, 4, 4, 4, 4)
return len(
F.dropout(a),
F.dropout2d(b),
F.dropout3d(c),
F.alpha_dropout(a),
F.feature_alpha_dropout(c),
)
class NNSparseModule(torch.nn.Module):
def __init__(self):
super(NNSparseModule, self).__init__()
def forward(self):
input = torch.tensor([[1, 2, 4, 5], [4, 3, 2, 9]])
input2 = torch.tensor([1, 2, 4, 5, 4, 3, 2, 9])
embedding_matrix = torch.rand(10, 3)
offsets = torch.tensor([0, 4])
return len(
F.embedding(input, embedding_matrix),
F.embedding_bag(input2, embedding_matrix, offsets),
F.one_hot(torch.arange(0, 5) % 3, num_classes=5),
)
class NNDistanceModule(torch.nn.Module):
def __init__(self):
super(NNDistanceModule, self).__init__()
def forward(self):
a = torch.randn(8, 4)
b = torch.randn(8, 4)
return len(
F.pairwise_distance(a, b),
F.cosine_similarity(a, b),
F.pdist(a),
)
class NNLossFunctionModule(torch.nn.Module):
def __init__(self):
super(NNLossFunctionModule, self).__init__()
self.x = torch.FloatTensor([[0.1, 0.2, 0.4, 0.8]])
self.y = torch.LongTensor([[3, 0, -1, 1]])
def forward(self):
a = torch.randn(3, 2)
b = torch.rand(3, 2)
c = torch.rand(3)
log_probs = torch.randn(50, 16, 20).log_softmax(2).detach()
targets = torch.randint(1, 20, (16, 30), dtype=torch.long)
input_lengths = torch.full((16,), 50, dtype=torch.long)
target_lengths = torch.randint(10, 30, (16,), dtype=torch.long)
return len(
F.binary_cross_entropy(torch.sigmoid(a), b),
F.binary_cross_entropy_with_logits(torch.sigmoid(a), b),
F.poisson_nll_loss(a, b),
F.cosine_embedding_loss(a, b, c),
F.cross_entropy(a, b),
F.ctc_loss(log_probs, targets, input_lengths, target_lengths),
# F.gaussian_nll_loss(a, b, torch.ones(5, 1)), # ENTER is not supported in mobile module
F.hinge_embedding_loss(a, b),
F.kl_div(a, b),
F.l1_loss(a, b),
F.mse_loss(a, b),
F.margin_ranking_loss(c, c, c),
F.multilabel_margin_loss(self.x, self.y),
F.multilabel_soft_margin_loss(self.x, self.y),
F.multi_margin_loss(self.x, torch.tensor([3])),
F.nll_loss(a, torch.tensor([1, 0, 1])),
F.huber_loss(a, b),
F.smooth_l1_loss(a, b),
F.soft_margin_loss(a, b),
F.triplet_margin_loss(a, b, -b),
# F.triplet_margin_with_distance_loss(a, b, -b), # can't take variable number of arguments
)
class NNVisionModule(torch.nn.Module):
def __init__(self):
super(NNVisionModule, self).__init__()
self.input = torch.randn(1, 4, 9, 9)
self.vision_modules = nn.ModuleList(
[
nn.PixelShuffle(2),
nn.PixelUnshuffle(3),
nn.Upsample(scale_factor=2, mode="nearest"),
nn.Upsample(scale_factor=2, mode="bilinear"),
nn.Upsample(scale_factor=2, mode="bicubic"),
nn.UpsamplingNearest2d(scale_factor=2),
nn.UpsamplingBilinear2d(scale_factor=2),
]
)
self.linear_sample = nn.Upsample(scale_factor=2, mode="linear")
self.trilinear_sample = nn.Upsample(scale_factor=2, mode="trilinear")
def forward(self):
input = torch.randn(1, 3, 16, 16)
for i, module in enumerate(self.vision_modules):
r = module(self.input)
return len(
r,
self.linear_sample(torch.randn(4, 9, 9)),
self.trilinear_sample(torch.randn(1, 3, 4, 9, 9)),
F.grid_sample(input, torch.ones(1, 4, 4, 2)),
)
class NNShuffleModule(torch.nn.Module):
def __init__(self):
super(NNShuffleModule, self).__init__()
self.shuffle = nn.ChannelShuffle(2)
def forward(self):
return len(self.shuffle(torch.randn(1, 4, 2, 2)),)
class NNUtilsModule(torch.nn.Module):
def __init__(self):
super(NNUtilsModule, self).__init__()
self.flatten = nn.Sequential(
nn.Linear(50, 50),
nn.Unflatten(1, (2, 5, 5))
)
def forward(self):
a = [torch.tensor([1, 2, 3]), torch.tensor([3, 4])]
b = nn.utils.rnn.pad_sequence(a, batch_first=True)
# c = nn.utils.rnn.pack_padded_sequence(b, batch_first=True, lengths=torch.tensor([3, 2]))
input = torch.randn(2, 50)
return len(
self.flatten(input),
b,
)
|
pytorch-master
|
test/mobile/model_test/nn_ops.py
|
import torch
import torchvision
from torch.utils.bundled_inputs import augment_model_with_bundled_inputs
from torch.utils.mobile_optimizer import optimize_for_mobile
class MobileNetV2Module:
def __init__(self):
super(MobileNetV2Module, self).__init__()
def getModule(self):
model = torchvision.models.mobilenet_v2(pretrained=True)
model.eval()
example = torch.zeros(1, 3, 224, 224)
traced_script_module = torch.jit.trace(model, example)
optimized_module = optimize_for_mobile(traced_script_module)
augment_model_with_bundled_inputs(
optimized_module,
[
(example, ),
],
)
optimized_module(example)
return optimized_module
|
pytorch-master
|
test/mobile/model_test/torchvision_models.py
|
import torch
# https://pytorch.org/docs/stable/torch.html#random-sampling
class SamplingOpsModule(torch.nn.Module):
def __init__(self):
super(SamplingOpsModule, self).__init__()
def forward(self):
a = torch.empty(3, 3).uniform_(0.0, 1.0)
size = (1, 4)
weights = torch.tensor([0, 10, 3, 0], dtype=torch.float)
return len(
# torch.seed(),
# torch.manual_seed(0),
torch.bernoulli(a),
# torch.initial_seed(),
torch.multinomial(weights, 2),
torch.normal(2.0, 3.0, size),
torch.poisson(a),
torch.rand(2, 3),
torch.rand_like(a),
torch.randint(10, size),
torch.randint_like(a, 4),
torch.rand(4),
torch.randn_like(a),
torch.randperm(4),
a.bernoulli_(),
a.cauchy_(),
a.exponential_(),
a.geometric_(0.5),
a.log_normal_(),
a.normal_(),
a.random_(),
a.uniform_(),
)
|
pytorch-master
|
test/mobile/model_test/sampling_ops.py
|
import torch
import torch.nn as nn
class GeneralQuantModule(torch.nn.Module):
def __init__(self):
super(GeneralQuantModule, self).__init__()
self.embedding = torch.nn.quantized.Embedding(
num_embeddings=10, embedding_dim=12
)
self.embedding_input = torch.tensor([9, 6, 5, 7, 8, 8, 9, 2, 8])
self.func = torch.nn.quantized.QFunctional()
self.conv1 = torch.nn.quantized.ConvTranspose1d(16, 33, 3, stride=2)
self.conv2 = torch.nn.quantized.ConvTranspose2d(16, 33, 3, stride=2)
self.conv3 = torch.nn.quantized.ConvTranspose3d(16, 33, 3, stride=2)
def forward(self):
a = torch.quantize_per_tensor(torch.tensor([3.0]), 1.0, 0, torch.qint32)
b = torch.quantize_per_tensor(torch.tensor(4.0), 1.0, 0, torch.qint32)
c = torch.quantize_per_tensor(
torch.tensor([3.0]), torch.tensor(1.0), torch.tensor(0), torch.qint32
)
input1 = torch.randn(1, 16, 4)
input2 = torch.randn(1, 16, 4, 4)
input3 = torch.randn(1, 16, 4, 4, 4)
return len(
self.func.add(a, b),
self.func.cat((a, a), 0),
self.func.mul(a, b),
self.func.add_relu(a, b),
self.func.add_scalar(a, b),
self.func.mul_scalar(a, b),
self.embedding(self.embedding_input),
self.conv1(
torch.quantize_per_tensor(
input1, scale=1.0, zero_point=0, dtype=torch.quint8
)
),
self.conv2(
torch.quantize_per_tensor(
input2, scale=1.0, zero_point=0, dtype=torch.quint8
)
),
c,
# self.conv3(torch.quantize_per_tensor(input3, scale=1.0, zero_point=0, dtype=torch.quint8)), # failed on iOS
)
class DynamicQuantModule:
def __init__(self):
super(DynamicQuantModule, self).__init__()
self.module = self.M()
def getModule(self):
return torch.quantization.quantize_dynamic(self.module, dtype=torch.qint8)
class M(torch.nn.Module):
def __init__(self):
super(DynamicQuantModule.M, self).__init__()
self.rnn = nn.RNN(4, 8, 2)
self.rnncell = nn.RNNCell(4, 8)
self.gru = nn.GRU(4, 8, 2)
self.grucell = nn.GRUCell(4, 8)
self.lstm = nn.LSTM(4, 8, 2)
self.lstmcell = nn.LSTMCell(4, 8)
self.linears = nn.ModuleList(
[
nn.Identity(54),
nn.Linear(20, 20),
nn.Bilinear(20, 20, 40),
]
)
self.transformers = nn.ModuleList(
[
nn.Transformer(
d_model=2, nhead=2, num_encoder_layers=1, num_decoder_layers=1
),
nn.TransformerEncoder(
nn.TransformerEncoderLayer(d_model=2, nhead=2), num_layers=1
),
nn.TransformerDecoder(
nn.TransformerDecoderLayer(d_model=2, nhead=2), num_layers=1
),
]
)
# self.a = torch.nn.utils.rnn.pad_sequence([torch.tensor([1,2,3]), torch.tensor([3,4])], batch_first=True)
def forward(self):
input = torch.randn(5, 3, 4)
h = torch.randn(2, 3, 8)
c = torch.randn(2, 3, 8)
linear_input = torch.randn(32, 20)
trans_input = torch.randn(1, 16, 2)
tgt = torch.rand(1, 16, 2)
return len((
self.rnn(input, h),
self.rnncell(input[0], h[0]),
self.gru(input, h),
self.grucell(input[0], h[0]),
self.lstm(input, (h, c)),
# self.lstm(torch.nn.utils.rnn.pack_padded_sequence(self.a, lengths=torch.tensor([3,2,1])), (h, c)),
self.lstmcell(input[0], (h[0], c[0])),
self.transformers[0](trans_input, tgt),
self.transformers[1](trans_input),
self.transformers[2](trans_input, tgt),
self.linears[0](linear_input),
self.linears[1](linear_input),
self.linears[2](linear_input, linear_input),
))
class StaticQuantModule:
def __init__(self):
super(StaticQuantModule, self).__init__()
def getModule(self):
model_fp32 = self.M()
model_fp32.eval()
model_fp32.qconfig = torch.quantization.get_default_qconfig("qnnpack")
model_fp32_prepared = torch.quantization.prepare(model_fp32)
model_int8 = torch.quantization.convert(model_fp32_prepared)
return model_int8
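# Sketch of the eager-mode static quantization flow used here: prepare() inserts observers
# into the float model and convert() swaps the observed modules for quantized ones.
# Calibration runs with representative inputs would normally sit between the two calls;
# this test module omits them.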
class M(torch.nn.Module):
def __init__(self):
super(StaticQuantModule.M, self).__init__()
self.quant = torch.quantization.QuantStub()
self.input1d = torch.randn(4, 2, 2)
self.input2d = torch.randn((4, 2, 4, 4))
self.input3d = torch.randn(4, 2, 2, 4, 4)
self.linear_input = torch.randn(32, 20)
self.layer1 = nn.Sequential(
nn.Conv1d(2, 2, 1), nn.InstanceNorm1d(1), nn.Hardswish()
)
self.layer2 = nn.Sequential(
nn.Conv2d(2, 2, 1),
nn.BatchNorm2d(2),
nn.InstanceNorm2d(1),
nn.LeakyReLU(),
)
self.layer3 = nn.Sequential(
nn.Conv3d(2, 2, 1), nn.BatchNorm3d(2), nn.InstanceNorm3d(1), nn.ReLU()
)
self.layer4 = nn.Sequential(nn.Linear(4, 3))
self.dequant = torch.quantization.DeQuantStub()
def forward(self):
x = self.quant(self.input1d)
x = self.layer1(x)
x = self.dequant(x)
y = self.input2d
y = self.quant(y)
y = self.layer2(y)
y = self.layer4(y)
y = self.dequant(y)
z = self.quant(self.input3d)
z = self.layer3(z)
z = self.dequant(z)
return (x, y, z)
class FusedQuantModule:
def __init__(self):
super(FusedQuantModule, self).__init__()
def getModule(self):
model_fp32 = self.M()
model_fp32.eval()
model_fp32.qconfig = torch.quantization.get_default_qconfig("qnnpack")
model_fp32_fused = torch.quantization.fuse_modules(
model_fp32,
[
["conv1d", "relu1"],
["conv2d", "relu2"],
["conv3d", "relu3"],
["linear", "relu4"],
],
)
model_fp32_prepared = torch.quantization.prepare(model_fp32_fused)
model_int8 = torch.quantization.convert(model_fp32_prepared)
return model_int8
class M(torch.nn.Module):
def __init__(self):
super(FusedQuantModule.M, self).__init__()
self.quant = torch.quantization.QuantStub()
self.input1d = torch.randn(4, 2, 2)
self.input2d = torch.randn((4, 2, 4, 4))
self.input3d = torch.randn(4, 2, 2, 4, 4)
self.conv1d = nn.Conv1d(2, 2, 1)
self.conv2d = nn.Conv2d(2, 2, 1)
self.conv3d = nn.Conv3d(2, 2, 1)
self.linear = nn.Linear(4, 2)
self.relu1 = nn.ReLU()
self.relu2 = nn.ReLU()
self.relu3 = nn.ReLU()
self.relu4 = nn.ReLU()
self.dequant = torch.quantization.DeQuantStub()
def forward(self):
x = self.input1d
y = self.input2d
z = self.input3d
x = self.quant(x)
x = self.conv1d(x)
x = self.relu1(x)
x = self.dequant(x)
y = self.quant(y)
y = self.conv2d(y)
y = self.relu2(y)
y = self.dequant(y)
z = self.quant(z)
z = self.conv3d(z)
z = self.relu3(z)
z = self.linear(z)
z = self.relu4(z)
z = self.dequant(z)
return (x, y, z)
|
pytorch-master
|
test/mobile/model_test/quantization_ops.py
|
# https://pytorch.org/docs/stable/torch.html#math-operations
import math
import torch
class PointwiseOpsModule(torch.nn.Module):
def __init__(self):
super(PointwiseOpsModule, self).__init__()
def forward(self):
return self.pointwise_ops()
def pointwise_ops(self):
a = torch.randn(4)
b = torch.randn(4)
t = torch.tensor([-1, -2, 3], dtype=torch.int8)
r = torch.tensor([0, 1, 10, 0], dtype=torch.int8)
t = torch.tensor([-1, -2, 3], dtype=torch.int8)
s = torch.tensor([4, 0, 1, 0], dtype=torch.int8)
f = torch.zeros(3)
g = torch.tensor([-1, 0, 1])
w = torch.tensor([0.3810, 1.2774, -0.2972, -0.3719, 0.4637])
return len(
torch.abs(torch.tensor([-1, -2, 3])),
torch.absolute(torch.tensor([-1, -2, 3])),
torch.acos(a),
torch.arccos(a),
torch.acosh(a.uniform_(1.0, 2.0)),
torch.add(a, 20),
torch.add(a, b, out=a),
b.add(a),
b.add(a, out=b),
b.add_(a),
b.add(1),
torch.add(a, torch.randn(4, 1), alpha=10),
torch.addcdiv(
torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1
),
torch.addcmul(
torch.randn(1, 3), torch.randn(3, 1), torch.randn(1, 3), value=0.1
),
torch.angle(a),
torch.asin(a),
torch.arcsin(a),
torch.asinh(a),
torch.arcsinh(a),
torch.atan(a),
torch.arctan(a),
torch.atanh(a.uniform_(-1.0, 1.0)),
torch.arctanh(a.uniform_(-1.0, 1.0)),
torch.atan2(a, a),
torch.bitwise_not(t),
torch.bitwise_and(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
torch.bitwise_or(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
torch.bitwise_xor(t, torch.tensor([1, 0, 3], dtype=torch.int8)),
torch.ceil(a),
torch.ceil(float(torch.tensor(0.5))),
torch.ceil(torch.tensor(0.5).item()),
torch.clamp(a, min=-0.5, max=0.5),
torch.clamp(a, min=0.5),
torch.clamp(a, max=0.5),
torch.clip(a, min=-0.5, max=0.5),
torch.conj(a),
torch.copysign(a, 1),
torch.copysign(a, b),
torch.cos(a),
torch.cosh(a),
torch.deg2rad(
torch.tensor([[180.0, -180.0], [360.0, -360.0], [90.0, -90.0]])
),
torch.div(a, b),
a.div(b),
a.div(1),
a.div_(b),
torch.divide(a, b, rounding_mode="trunc"),
torch.divide(a, b, rounding_mode="floor"),
torch.digamma(torch.tensor([1.0, 0.5])),
torch.erf(torch.tensor([0.0, -1.0, 10.0])),
torch.erfc(torch.tensor([0.0, -1.0, 10.0])),
torch.erfinv(torch.tensor([0.0, 0.5, -1.0])),
torch.exp(torch.tensor([0.0, math.log(2.0)])),
torch.exp(float(torch.tensor(1))),
torch.exp2(torch.tensor([0.0, math.log(2.0), 3.0, 4.0])),
torch.expm1(torch.tensor([0.0, math.log(2.0)])),
torch.fake_quantize_per_channel_affine(
torch.randn(2, 2, 2),
(torch.randn(2) + 1) * 0.05,
torch.zeros(2),
1,
0,
255,
),
torch.fake_quantize_per_tensor_affine(a, 0.1, 0, 0, 255),
torch.float_power(torch.randint(10, (4,)), 2),
torch.float_power(torch.arange(1, 5), torch.tensor([2, -3, 4, -5])),
torch.floor(a),
torch.floor(float(torch.tensor(1))),
torch.floor_divide(torch.tensor([4.0, 3.0]), torch.tensor([2.0, 2.0])),
torch.floor_divide(torch.tensor([4.0, 3.0]), 1.4),
torch.fmod(torch.tensor([-3, -2, -1, 1, 2, 3]), 2),
torch.fmod(torch.tensor([1, 2, 3, 4, 5]), 1.5),
torch.frac(torch.tensor([1.0, 2.5, -3.2])),
torch.randn(4, dtype=torch.cfloat).imag,
torch.ldexp(torch.tensor([1.0]), torch.tensor([1])),
torch.ldexp(torch.tensor([1.0]), torch.tensor([1, 2, 3, 4])),
torch.lerp(torch.arange(1.0, 5.0), torch.empty(4).fill_(10), 0.5),
torch.lerp(
torch.arange(1.0, 5.0),
torch.empty(4).fill_(10),
torch.full_like(torch.arange(1.0, 5.0), 0.5),
),
torch.lgamma(torch.arange(0.5, 2, 0.5)),
torch.log(torch.arange(5) + 10),
torch.log10(torch.rand(5)),
torch.log1p(torch.randn(5)),
torch.log2(torch.rand(5)),
torch.logaddexp(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
torch.logaddexp(
torch.tensor([-100.0, -200.0, -300.0]), torch.tensor([-1, -2, -3])
),
torch.logaddexp(
torch.tensor([1.0, 2000.0, 30000.0]), torch.tensor([-1, -2, -3])
),
torch.logaddexp2(torch.tensor([-1.0]), torch.tensor([-1, -2, -3])),
torch.logaddexp2(
torch.tensor([-100.0, -200.0, -300.0]), torch.tensor([-1, -2, -3])
),
torch.logaddexp2(
torch.tensor([1.0, 2000.0, 30000.0]), torch.tensor([-1, -2, -3])
),
torch.logical_and(r, s),
torch.logical_and(r.double(), s.double()),
torch.logical_and(r.double(), s),
torch.logical_and(r, s, out=torch.empty(4, dtype=torch.bool)),
torch.logical_not(torch.tensor([0, 1, -10], dtype=torch.int8)),
torch.logical_not(torch.tensor([0.0, 1.5, -10.0], dtype=torch.double)),
torch.logical_not(
torch.tensor([0.0, 1.0, -10.0], dtype=torch.double),
out=torch.empty(3, dtype=torch.int16),
),
torch.logical_or(r, s),
torch.logical_or(r.double(), s.double()),
torch.logical_or(r.double(), s),
torch.logical_or(r, s, out=torch.empty(4, dtype=torch.bool)),
torch.logical_xor(r, s),
torch.logical_xor(r.double(), s.double()),
torch.logical_xor(r.double(), s),
torch.logical_xor(r, s, out=torch.empty(4, dtype=torch.bool)),
torch.logit(torch.rand(5), eps=1e-6),
torch.hypot(torch.tensor([4.0]), torch.tensor([3.0, 4.0, 5.0])),
torch.i0(torch.arange(5, dtype=torch.float32)),
torch.igamma(a, b),
torch.igammac(a, b),
torch.mul(torch.randn(3), 100),
b.mul(a),
b.mul(5),
b.mul(a, out=b),
b.mul_(a),
b.mul_(5),
torch.multiply(torch.randn(4, 1), torch.randn(1, 4)),
torch.mvlgamma(torch.empty(2, 3).uniform_(1.0, 2.0), 2),
torch.tensor([float("nan"), float("inf"), -float("inf"), 3.14]),
torch.nan_to_num(w),
torch.nan_to_num_(w),
torch.nan_to_num(w, nan=2.0),
torch.nan_to_num(w, nan=2.0, posinf=1.0),
torch.neg(torch.randn(5)),
# torch.nextafter(torch.tensor([1, 2]), torch.tensor([2, 1])) == torch.tensor([eps + 1, 2 - eps]),
torch.polygamma(1, torch.tensor([1.0, 0.5])),
torch.polygamma(2, torch.tensor([1.0, 0.5])),
torch.polygamma(3, torch.tensor([1.0, 0.5])),
torch.polygamma(4, torch.tensor([1.0, 0.5])),
torch.pow(a, 2),
torch.pow(2, float(torch.tensor(0.5))),
torch.pow(torch.arange(1.0, 5.0), torch.arange(1.0, 5.0)),
torch.rad2deg(
torch.tensor([[3.142, -3.142], [6.283, -6.283], [1.570, -1.570]])
),
torch.randn(4, dtype=torch.cfloat).real,
torch.reciprocal(a),
torch.remainder(torch.tensor([-3.0, -2.0]), 2),
torch.remainder(torch.tensor([1, 2, 3, 4, 5]), 1.5),
torch.round(a),
torch.round(torch.tensor(0.5).item()),
torch.rsqrt(a),
torch.sigmoid(a),
torch.sign(torch.tensor([0.7, -1.2, 0.0, 2.3])),
torch.sgn(a),
torch.signbit(torch.tensor([0.7, -1.2, 0.0, 2.3])),
torch.sin(a),
torch.sinc(a),
torch.sinh(a),
torch.sqrt(a),
torch.square(a),
torch.sub(torch.tensor((1, 2)), torch.tensor((0, 1)), alpha=2),
b.sub(a),
b.sub_(a),
b.sub(5),
torch.sum(5),
torch.tan(a),
torch.tanh(a),
torch.true_divide(a, a),
torch.trunc(a),
torch.trunc_(a),
torch.xlogy(f, g),
torch.xlogy(f, g),
torch.xlogy(f, 4),
torch.xlogy(2, g),
)
class ReductionOpsModule(torch.nn.Module):
def __init__(self):
super(ReductionOpsModule, self).__init__()
def forward(self):
return self.reduction_ops()
def reduction_ops(self):
a = torch.randn(4)
b = torch.randn(4)
c = torch.tensor(0.5)
return len(
torch.argmax(a),
torch.argmin(a),
torch.amax(a),
torch.amin(a),
torch.aminmax(a),
torch.all(a),
torch.any(a),
torch.max(a),
a.max(a),
torch.max(a, 0),
torch.min(a),
a.min(a),
torch.min(a, 0),
torch.dist(a, b),
torch.logsumexp(a, 0),
torch.mean(a),
torch.mean(a, 0),
torch.nanmean(a),
torch.median(a),
torch.nanmedian(a),
torch.mode(a),
torch.norm(a),
a.norm(2),
torch.norm(a, dim=0),
torch.norm(c, torch.tensor(2)),
torch.nansum(a),
torch.prod(a),
torch.quantile(a, torch.tensor([0.25, 0.5, 0.75])),
torch.quantile(a, 0.5),
torch.nanquantile(a, torch.tensor([0.25, 0.5, 0.75])),
torch.std(a),
torch.std_mean(a),
torch.sum(a),
torch.unique(a),
torch.unique_consecutive(a),
torch.var(a),
torch.var_mean(a),
torch.count_nonzero(a),
)
class ComparisonOpsModule(torch.nn.Module):
def __init__(self):
super(ComparisonOpsModule, self).__init__()
def forward(self):
a = torch.tensor(0)
b = torch.tensor(1)
return len(
torch.allclose(a, b),
torch.argsort(a),
torch.eq(a, b),
torch.eq(a, 1),
torch.equal(a, b),
torch.ge(a, b),
torch.ge(a, 1),
torch.greater_equal(a, b),
torch.greater_equal(a, 1),
torch.gt(a, b),
torch.gt(a, 1),
torch.greater(a, b),
torch.isclose(a, b),
torch.isfinite(a),
torch.isin(a, b),
torch.isinf(a),
torch.isposinf(a),
torch.isneginf(a),
torch.isnan(a),
torch.isreal(a),
torch.kthvalue(a, 1),
torch.le(a, b),
torch.le(a, 1),
torch.less_equal(a, b),
torch.lt(a, b),
torch.lt(a, 1),
torch.less(a, b),
torch.maximum(a, b),
torch.minimum(a, b),
torch.fmax(a, b),
torch.fmin(a, b),
torch.ne(a, b),
torch.ne(a, 1),
torch.not_equal(a, b),
torch.sort(a),
torch.topk(a, 1),
torch.msort(a),
)
class OtherMathOpsModule(torch.nn.Module):
def __init__(self):
super(OtherMathOpsModule, self).__init__()
def forward(self):
return self.other_ops()
def other_ops(self):
a = torch.randn(4)
b = torch.randn(4)
c = torch.randint(0, 8, (5,), dtype=torch.int64)
e = torch.randn(4, 3)
f = torch.randn(4, 4, 4)
size = [0, 1]
dims = [0, 1]
return len(
torch.atleast_1d(a),
torch.atleast_2d(a),
torch.atleast_3d(a),
torch.bincount(c),
torch.block_diag(a),
torch.broadcast_tensors(a),
torch.broadcast_to(a, (4)),
# torch.broadcast_shapes(a),
torch.bucketize(a, b),
torch.cartesian_prod(a),
torch.cdist(e, e),
torch.clone(a),
torch.combinations(a),
torch.corrcoef(a),
# torch.cov(a),
torch.cross(e, e),
torch.cummax(a, 0),
torch.cummin(a, 0),
torch.cumprod(a, 0),
torch.cumsum(a, 0),
torch.diag(a),
torch.diag_embed(a),
torch.diagflat(a),
torch.diagonal(e),
torch.diff(a),
torch.einsum("iii", f),
torch.flatten(a),
torch.flip(e, dims),
torch.fliplr(e),
torch.flipud(e),
torch.kron(a, b),
torch.rot90(e),
torch.gcd(c, c),
torch.histc(a),
torch.histogram(a),
torch.meshgrid(a),
torch.meshgrid(a, indexing="xy"),
torch.lcm(c, c),
torch.logcumsumexp(a, 0),
torch.ravel(a),
torch.renorm(e, 1, 0, 5),
torch.repeat_interleave(c),
torch.roll(a, 1, 0),
torch.searchsorted(a, b),
torch.tensordot(e, e),
torch.trace(e),
torch.tril(e),
torch.tril_indices(3, 3),
torch.triu(e),
torch.triu_indices(3, 3),
torch.vander(a),
torch.view_as_real(torch.randn(4, dtype=torch.cfloat)),
torch.view_as_complex(torch.randn(4, 2)).real,
torch.resolve_conj(a),
torch.resolve_neg(a),
)
class SpectralOpsModule(torch.nn.Module):
def __init__(self):
super(SpectralOpsModule, self).__init__()
def forward(self):
return self.spectral_ops()
def spectral_ops(self):
a = torch.randn(10)
b = torch.randn(10, 8, 4, 2)
return len(
torch.stft(a, 8),
torch.stft(a, torch.tensor(8)),
torch.istft(b, 8),
torch.bartlett_window(2, dtype=torch.float),
torch.blackman_window(2, dtype=torch.float),
torch.hamming_window(4, dtype=torch.float),
torch.hann_window(4, dtype=torch.float),
torch.kaiser_window(4, dtype=torch.float),
)
class BlasLapackOpsModule(torch.nn.Module):
def __init__(self):
super(BlasLapackOpsModule, self).__init__()
def forward(self):
return self.blas_lapack_ops()
def blas_lapack_ops(self):
m = torch.randn(3, 3)
a = torch.randn(10, 3, 4)
b = torch.randn(10, 4, 3)
v = torch.randn(3)
return len(
torch.addbmm(m, a, b),
torch.addmm(torch.randn(2, 3), torch.randn(2, 3), torch.randn(3, 3)),
torch.addmv(torch.randn(2), torch.randn(2, 3), torch.randn(3)),
torch.addr(torch.zeros(3, 3), v, v),
torch.baddbmm(m, a, b),
torch.bmm(a, b),
torch.chain_matmul(torch.randn(3, 3), torch.randn(3, 3), torch.randn(3, 3)),
# torch.cholesky(a), # deprecated
# torch.cholesky_inverse(torch.randn(3, 3)), # had some error
# torch.cholesky_solve(torch.randn(3, 3), torch.randn(3, 3)),
torch.dot(v, v),
# torch.linalg.eig(m), # not built with LAPACK
# torch.geqrf(a),
torch.ger(v, v),
torch.inner(m, m),
# torch.inverse(m),
# torch.det(m),
# torch.logdet(m),
# torch.slogdet(m),
# torch.lstsq(m, m),
# torch.linalg.lu_factor(m),
# torch.lu_solve(m, *torch.linalg.lu_factor(m)),
# torch.lu_unpack(*torch.linalg.lu_factor(m)),
torch.matmul(m, m),
torch.matrix_power(m, 2),
# torch.matrix_rank(m),
torch.matrix_exp(m),
torch.mm(m, m),
torch.mv(m, v),
# torch.orgqr(a, m),
# torch.ormqr(a, m, v),
torch.outer(v, v),
# torch.pinverse(m),
# torch.qr(a),
# torch.solve(m, m),
# torch.svd(a),
# torch.svd_lowrank(a),
# torch.pca_lowrank(a),
# torch.symeig(a), # deprecated
# torch.lobpcg(a, b), # not supported
torch.trapz(m, m),
torch.trapezoid(m, m),
torch.cumulative_trapezoid(m, m),
# torch.triangular_solve(m, m),
torch.vdot(v, v),
)
|
pytorch-master
|
test/mobile/model_test/math_ops.py
|
import io
import sys
import torch
import yaml
from android_api_module import AndroidAPIModule
from builtin_ops import (
TSBuiltinOpsModule,
TSCollectionOpsModule,
)
from math_ops import (
PointwiseOpsModule,
ReductionOpsModule,
ComparisonOpsModule,
OtherMathOpsModule,
SpectralOpsModule,
BlasLapackOpsModule,
)
from nn_ops import (
NNConvolutionModule,
NNPoolingModule,
NNPaddingModule,
NNNormalizationModule,
NNActivationModule,
NNRecurrentModule,
NNTransformerModule,
NNLinearModule,
NNDropoutModule,
NNSparseModule,
NNDistanceModule,
NNLossFunctionModule,
NNVisionModule,
NNShuffleModule,
NNUtilsModule,
)
from quantization_ops import (
GeneralQuantModule,
# DynamicQuantModule,
StaticQuantModule,
FusedQuantModule,
)
from sampling_ops import SamplingOpsModule
from tensor_ops import (
TensorOpsModule,
TensorCreationOpsModule,
TensorIndexingOpsModule,
TensorTypingOpsModule,
TensorViewOpsModule,
)
from torch.jit.mobile import _load_for_lite_interpreter
from torchvision_models import MobileNetV2Module
test_path_ios = "ios/TestApp/models/"
test_path_android = "android/pytorch_android/src/androidTest/assets/"
production_ops_path = "test/mobile/model_test/model_ops.yaml"
coverage_out_path = "test/mobile/model_test/coverage.yaml"
all_modules = {
# math ops
"pointwise_ops": PointwiseOpsModule(),
"reduction_ops": ReductionOpsModule(),
"comparison_ops": ComparisonOpsModule(),
"spectral_ops": SpectralOpsModule(),
"other_math_ops": OtherMathOpsModule(),
"blas_lapack_ops": BlasLapackOpsModule(),
# sampling
"sampling_ops": SamplingOpsModule(),
# tensor ops
"tensor_general_ops": TensorOpsModule(),
"tensor_creation_ops": TensorCreationOpsModule(),
"tensor_indexing_ops": TensorIndexingOpsModule(),
"tensor_typing_ops": TensorTypingOpsModule(),
"tensor_view_ops": TensorViewOpsModule(),
# nn ops
"convolution_ops": NNConvolutionModule(),
"pooling_ops": NNPoolingModule(),
"padding_ops": NNPaddingModule(),
"activation_ops": NNActivationModule(),
"normalization_ops": NNNormalizationModule(),
"recurrent_ops": NNRecurrentModule(),
"transformer_ops": NNTransformerModule(),
"linear_ops": NNLinearModule(),
"dropout_ops": NNDropoutModule(),
"sparse_ops": NNSparseModule(),
"distance_function_ops": NNDistanceModule(),
"loss_function_ops": NNLossFunctionModule(),
"vision_function_ops": NNVisionModule(),
"shuffle_ops": NNShuffleModule(),
"nn_utils_ops": NNUtilsModule(),
# quantization ops
"general_quant_ops": GeneralQuantModule(),
# TODO(sdym@fb.com): fix and re-enable dynamic_quant_ops
# "dynamic_quant_ops": DynamicQuantModule(),
"static_quant_ops": StaticQuantModule(),
"fused_quant_ops": FusedQuantModule(),
# TorchScript builtin ops
"torchscript_builtin_ops": TSBuiltinOpsModule(),
"torchscript_collection_ops": TSCollectionOpsModule(),
# vision
"mobilenet_v2": MobileNetV2Module(),
# android api module
"android_api_module": AndroidAPIModule(),
}
models_need_trace = [
"static_quant_ops",
]
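# "static_quant_ops" is traced rather than scripted in getModuleFromName below, presumably
# because scripting that converted quantized module is not supported; tracing with an empty
# input list works since its forward() takes no arguments.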
def calcOpsCoverage(ops):
with open(production_ops_path) as input_yaml_file:
production_ops_dict = yaml.safe_load(input_yaml_file)
production_ops = set(production_ops_dict["root_operators"].keys())
all_generated_ops = set(ops)
covered_ops = production_ops.intersection(all_generated_ops)
uncovered_ops = production_ops - covered_ops
coverage = round(100 * len(covered_ops) / len(production_ops), 2)
# weighted coverage (takes op occurrence counts into account)
total_occurrences = sum(production_ops_dict["root_operators"].values())
covered_ops_dict = {op: production_ops_dict["root_operators"][op] for op in covered_ops}
uncovered_ops_dict = {op: production_ops_dict["root_operators"][op] for op in uncovered_ops}
covered_occurrences = sum(covered_ops_dict.values())
occurrences_coverage = round(100 * covered_occurrences / total_occurrences, 2)
print(f"\n{len(uncovered_ops)} uncovered ops: {uncovered_ops}\n")
print(f"Generated {len(all_generated_ops)} ops")
print(f"Covered {len(covered_ops)}/{len(production_ops)} ({coverage}%) production ops")
print(f"Covered {covered_occurances}/{total_occurances} ({occurances_coverage}%) occurances")
print(f"pytorch ver {torch.__version__}\n")
with open(coverage_out_path, "w") as f:
yaml.safe_dump(
{
"_covered_ops": len(covered_ops),
"_production_ops": len(production_ops),
"_generated_ops": len(all_generated_ops),
"_uncovered_ops": len(uncovered_ops),
"_coverage": round(coverage, 2),
"uncovered_ops": uncovered_ops_dict,
"covered_ops": covered_ops_dict,
"all_generated_ops": sorted(list(all_generated_ops)),
},
f,
)
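# Coverage here means: of the root ops seen in production models (model_ops.yaml), how many
# are exercised by the generated test models. The occurrence-weighted variant weights each
# op by its count in that yaml, i.e. the number of production models using it.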
def getModuleFromName(model_name):
if model_name not in all_modules:
print("Cannot find test model for " + model_name)
return None, []
module = all_modules[model_name]
if not isinstance(module, torch.nn.Module):
module = module.getModule()
has_bundled_inputs = False # module.find_method("get_all_bundled_inputs")
if model_name in models_need_trace:
module = torch.jit.trace(module, [])
else:
module = torch.jit.script(module)
ops = torch.jit.export_opnames(module)
print(ops)
# try to run the model
runModule(module)
return module, ops
def runModule(module):
buffer = io.BytesIO(module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
lite_module = _load_for_lite_interpreter(buffer)
if lite_module.find_method("get_all_bundled_inputs"):
# run with the first bundled input
input = lite_module.run_method("get_all_bundled_inputs")[0]
lite_module.forward(*input)
else:
# assuming model has no input
lite_module()
# Generate all models in the given folder.
# In "on-the-fly" mode, a "_temp" suffix is added to the model file name.
def generateAllModels(folder, on_the_fly=False):
all_ops = []
for name in all_modules:
module, ops = getModuleFromName(name)
all_ops = all_ops + ops
path = folder + name + ("_temp.ptl" if on_the_fly else ".ptl")
module._save_for_lite_interpreter(path)
print("model saved to " + path)
calcOpsCoverage(all_ops)
# generate/update a given model for storage
def generateModel(name):
module, ops = getModuleFromName(name)
if module is None:
return
path_ios = test_path_ios + name + ".ptl"
path_android = test_path_android + name + ".ptl"
module._save_for_lite_interpreter(path_ios)
module._save_for_lite_interpreter(path_android)
print("model saved to " + path_ios + " and " + path_android)
def main(argv):
if argv is None or len(argv) != 1:
print(
"""
This script generates models for mobile tests. For each model we have a "storage" version
and an "on-the-fly" version. The "on-the-fly" version will be generated during the test, and
should not be committed to the repo.
The "storage" version is for backward-compatibility testing (a model generated today should
run on the master branch in the next 6 months). We can use this script to update a model that
is no longer supported.
- use 'python gen_test_model.py android-test' to generate on-the-fly models for android
- use 'python gen_test_model.py ios-test' to generate on-the-fly models for ios
- use 'python gen_test_model.py android' to generate checked-in models for android
- use 'python gen_test_model.py ios' to generate checked-in models for ios
- use 'python gen_test_model.py <model_name_no_suffix>' to update the given storage model
"""
)
return
if argv[0] == "android":
generateAllModels(test_path_android, on_the_fly=False)
elif argv[0] == "ios":
generateAllModels(test_path_ios, on_the_fly=False)
elif argv[0] == "android-test":
generateAllModels(test_path_android, on_the_fly=True)
elif argv[0] == "ios-test":
generateAllModels(test_path_ios, on_the_fly=True)
else:
generateModel(argv[0])
if __name__ == "__main__":
main(sys.argv[1:])
|
pytorch-master
|
test/mobile/model_test/gen_test_model.py
|
pytorch-master
|
test/cpp/__init__.py
|
|
import sys
import os
import torch
class Setup(object):
def setup(self):
raise NotImplementedError()
def shutdown(self):
raise NotImplementedError()
class FileSetup(object):
path = None
def shutdown(self):
if os.path.exists(self.path):
os.remove(self.path)
pass
class EvalModeForLoadedModule(FileSetup):
path = 'dropout_model.pt'
def setup(self):
class Model(torch.jit.ScriptModule):
def __init__(self):
super(Model, self).__init__()
self.dropout = torch.nn.Dropout(0.1)
@torch.jit.script_method
def forward(self, x):
x = self.dropout(x)
return x
model = Model()
model = model.train()
model.save(self.path)
class SerializationInterop(FileSetup):
path = 'ivalue.pt'
def setup(self):
ones = torch.ones(2, 2)
twos = torch.ones(3, 5) * 2
value = (ones, twos)
torch.save(value, self.path, _use_new_zipfile_serialization=True)
# See testTorchSaveError in test/cpp/jit/tests.h for usage
class TorchSaveError(FileSetup):
path = 'eager_value.pt'
def setup(self):
ones = torch.ones(2, 2)
twos = torch.ones(3, 5) * 2
value = (ones, twos)
torch.save(value, self.path, _use_new_zipfile_serialization=False)
class TorchSaveJitStream_CUDA(FileSetup):
path = 'saved_stream_model.pt'
def setup(self):
if not torch.cuda.is_available():
return
class Model(torch.nn.Module):
def forward(self):
s = torch.cuda.Stream()
a = torch.rand(3, 4, device="cuda")
b = torch.rand(3, 4, device="cuda")
with torch.cuda.stream(s):
is_stream_s = torch.cuda.current_stream(s.device_index()).id() == s.id()
c = torch.cat((a, b), 0).to("cuda")
s.synchronize()
return is_stream_s, a, b, c
model = Model()
# Script the model and save
script_model = torch.jit.script(model)
torch.jit.save(script_model, self.path)
tests = [
EvalModeForLoadedModule(),
SerializationInterop(),
TorchSaveError(),
TorchSaveJitStream_CUDA()
]
def setup():
for test in tests:
test.setup()
def shutdown():
for test in tests:
test.shutdown()
if __name__ == "__main__":
command = sys.argv[1]
if command == "setup":
setup()
elif command == "shutdown":
shutdown()
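# Typical usage (assumed from the argv handling above):
#   python tests_setup.py setup      # create the fixture files before the C++ JIT tests run
#   python tests_setup.py shutdown   # remove them afterwards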
|
pytorch-master
|
test/cpp/jit/tests_setup.py
|
pytorch-master
|
test/cpp/jit/__init__.py
|
|
"""Script to generate baseline values from PyTorch initialization algorithms"""
import sys
import torch
HEADER = """
#include <torch/types.h>
#include <vector>
namespace expected_parameters {
"""
FOOTER = "} // namespace expected_parameters"
PARAMETERS = "inline std::vector<std::vector<torch::Tensor>> {}() {{"
INITIALIZERS = {
"Xavier_Uniform": lambda w: torch.nn.init.xavier_uniform(w),
"Xavier_Normal": lambda w: torch.nn.init.xavier_normal(w),
"Kaiming_Normal": lambda w: torch.nn.init.kaiming_normal(w),
"Kaiming_Uniform": lambda w: torch.nn.init.kaiming_uniform(w)
}
def emit(initializer_parameter_map):
# Don't write '@generated' literally here, or this file itself would be recognized as generated.
print("// @{} from {}".format('generated', __file__))
print(HEADER)
for initializer_name, weights in initializer_parameter_map.items():
print(PARAMETERS.format(initializer_name))
print(" return {")
for sample in weights:
print(" {")
for parameter in sample:
parameter_values = "{{{}}}".format(", ".join(map(str, parameter)))
print(" torch::tensor({}),".format(parameter_values))
print(" },")
print(" };")
print("}\n")
print(FOOTER)
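# Rough shape of the emitted output (illustrative; pieced together from the
# HEADER/PARAMETERS/FOOTER templates above, with the tensor values elided):
#
#   // @generated from <this file>
#   #include <torch/types.h>
#   #include <vector>
#   namespace expected_parameters {
#   inline std::vector<std::vector<torch::Tensor>> Xavier_Uniform() {
#     return {
#       {
#         torch::tensor({...}),
#       },
#     };
#   }
#   } // namespace expected_parameters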
def run(initializer):
torch.manual_seed(0)
layer1 = torch.nn.Linear(7, 15)
INITIALIZERS[initializer](layer1.weight)
layer2 = torch.nn.Linear(15, 15)
INITIALIZERS[initializer](layer2.weight)
layer3 = torch.nn.Linear(15, 2)
INITIALIZERS[initializer](layer3.weight)
weight1 = layer1.weight.data.numpy()
weight2 = layer2.weight.data.numpy()
weight3 = layer3.weight.data.numpy()
return [weight1, weight2, weight3]
def main():
initializer_parameter_map = {}
for initializer in INITIALIZERS.keys():
sys.stderr.write('Evaluating {} ...\n'.format(initializer))
initializer_parameter_map[initializer] = run(initializer)
emit(initializer_parameter_map)
if __name__ == "__main__":
main()
|
pytorch-master
|
test/cpp/api/init_baseline.py
|
"""Script to generate baseline values from PyTorch optimization algorithms"""
import argparse
import math
import sys
import torch
import torch.optim
HEADER = """
#include <torch/types.h>
#include <vector>
namespace expected_parameters {
"""
FOOTER = "} // namespace expected_parameters"
PARAMETERS = "inline std::vector<std::vector<torch::Tensor>> {}() {{"
OPTIMIZERS = {
"LBFGS" : lambda p: torch.optim.LBFGS(p, 1.0),
"LBFGS_with_line_search" : lambda p: torch.optim.LBFGS(p, 1.0, line_search_fn="strong_wolfe"),
"Adam": lambda p: torch.optim.Adam(p, 1.0),
"Adam_with_weight_decay": lambda p: torch.optim.Adam(p, 1.0, weight_decay=1e-2),
"Adam_with_weight_decay_and_amsgrad": lambda p: torch.optim.Adam(p, 1.0, weight_decay=1e-6, amsgrad=True),
"AdamW": lambda p: torch.optim.AdamW(p, 1.0),
"AdamW_without_weight_decay": lambda p: torch.optim.AdamW(p, 1.0, weight_decay=0),
"AdamW_with_amsgrad": lambda p: torch.optim.AdamW(p, 1.0, amsgrad=True),
"Adagrad": lambda p: torch.optim.Adagrad(p, 1.0),
"Adagrad_with_weight_decay": lambda p: torch.optim.Adagrad(p, 1.0, weight_decay=1e-2),
"Adagrad_with_weight_decay_and_lr_decay": lambda p: torch.optim.Adagrad(p, 1.0, weight_decay=1e-6, lr_decay=1e-3),
"RMSprop": lambda p: torch.optim.RMSprop(p, 0.1),
"RMSprop_with_weight_decay": lambda p: torch.optim.RMSprop(p, 0.1, weight_decay=1e-2),
"RMSprop_with_weight_decay_and_centered": lambda p: torch.optim.RMSprop(p, 0.1, weight_decay=1e-6, centered=True),
"RMSprop_with_weight_decay_and_centered_and_momentum":
lambda p: torch.optim.RMSprop(p, 0.1, weight_decay=1e-6, centered=True, momentum=0.9),
"SGD": lambda p: torch.optim.SGD(p, 0.1),
"SGD_with_weight_decay": lambda p: torch.optim.SGD(p, 0.1, weight_decay=1e-2),
"SGD_with_weight_decay_and_momentum": lambda p: torch.optim.SGD(p, 0.1, momentum=0.9, weight_decay=1e-2),
"SGD_with_weight_decay_and_nesterov_momentum":
lambda p: torch.optim.SGD(p, 0.1, momentum=0.9, weight_decay=1e-6, nesterov=True),
}
def weight_init(module):
if isinstance(module, torch.nn.Linear):
stdev = 1.0 / math.sqrt(module.weight.size(1))
for p in module.parameters():
p.data.uniform_(-stdev, stdev)
def run(optimizer_name, iterations, sample_every):
torch.manual_seed(0)
model = torch.nn.Sequential(
torch.nn.Linear(2, 3),
torch.nn.Sigmoid(),
torch.nn.Linear(3, 1),
torch.nn.Sigmoid(),
)
model = model.to(torch.float64).apply(weight_init)
optimizer = OPTIMIZERS[optimizer_name](model.parameters())
input = torch.tensor([[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]], dtype=torch.float64)
values = []
for i in range(iterations):
optimizer.zero_grad()
output = model.forward(input)
loss = output.sum()
loss.backward()
def closure():
return torch.tensor([10.])
optimizer.step(closure)
if i % sample_every == 0:
values.append(
[p.clone().flatten().data.numpy() for p in model.parameters()]
)
return values
def emit(optimizer_parameter_map):
# Don't write '@generated' literally here, or this file itself would be recognized as generated.
print("// @{} from {}".format('generated', __file__))
print(HEADER)
for optimizer_name, parameters in optimizer_parameter_map.items():
print(PARAMETERS.format(optimizer_name))
print(" return {")
for sample in parameters:
print(" {")
for parameter in sample:
parameter_values = "{{{}}}".format(", ".join(map(str, parameter)))
print(" torch::tensor({}),".format(parameter_values))
print(" },")
print(" };")
print("}\n")
print(FOOTER)
def main():
parser = argparse.ArgumentParser(
"Produce optimization output baseline from PyTorch"
)
parser.add_argument("-i", "--iterations", default=1001, type=int)
parser.add_argument("-s", "--sample-every", default=100, type=int)
options = parser.parse_args()
optimizer_parameter_map = {}
for optimizer in OPTIMIZERS.keys():
sys.stderr.write('Evaluating {} ...\n'.format(optimizer))
optimizer_parameter_map[optimizer] = run(
optimizer, options.iterations, options.sample_every
)
emit(optimizer_parameter_map)
if __name__ == "__main__":
main()
|
pytorch-master
|
test/cpp/api/optim_baseline.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import tempfile
import random
from textwrap import dedent
import torch
from torch.testing._internal.jit_utils import JitTestCase, execWrapper
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
def get_fn(file_name, script_path):
import importlib.util
spec = importlib.util.spec_from_file_location(file_name, script_path)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)
fn = module.fn
return fn
class TestPythonBuiltinOP(JitTestCase):
def test_add(self):
def func(a, b):
c = a + b
c += a
return c
a = torch.rand(1, requires_grad=True)
b = torch.rand(1, requires_grad=True)
self.checkScript(func, (a, b), optimize=True)
def test_mul(self):
def func(a, b):
return a * b
a = torch.rand(1, requires_grad=True)
b = torch.rand(1, requires_grad=True)
self.checkScript(func, (a, b), optimize=True)
def test_matmul_py3(self):
code = dedent("""
def fn(a, b):
return a @ b
""")
with tempfile.TemporaryDirectory() as tmp_dir:
script_path = os.path.join(tmp_dir, 'script.py')
with open(script_path, 'w') as f:
f.write(code)
fn = get_fn('test_matmul_py3', script_path)
a = torch.rand(4, 3, requires_grad=True)
b = torch.rand(3, 2, requires_grad=True)
self.checkScript(fn, (a, b), optimize=True)
def test_pow(self):
def func(a, b):
return a ** b
def func2(a, b, c, d):
return c + a ** b ** d
def func3(a, b):
# type: (int, float) -> float
return a ** b
def func4():
# type: () -> float
return 2 ** -2
def func5(x, y):
return x.item() ** y.item()
a = torch.rand(1, requires_grad=True)
b = torch.rand(1, requires_grad=True)
c = torch.rand(1, requires_grad=True)
d = torch.rand(1, requires_grad=True)
self.checkScript(func, (a, b), optimize=True)
self.checkScript(func2, (a, b, c, d), optimize=True)
self.checkScript(func3, (4, -0.5), optimize=True)
self.checkScript(func4, ())
inputs = [torch.tensor(2), torch.tensor(-2), torch.tensor(.5), torch.tensor(.2)]
for x in inputs:
for y in inputs:
if x < 0:
continue
else:
self.checkScript(func5, (x, y))
def test_triple(self):
def func(x):
return 3. * x
x = torch.rand(1, dtype=torch.float, requires_grad=True)
self.checkScript(func, [x], optimize=True)
def test_slice(self):
def func(x):
return x[:5]
x = torch.rand(10, dtype=torch.float, requires_grad=True)
self.checkScript(func, [x], optimize=True)
def func2(x):
return x[5:]
self.checkScript(func2, [x], optimize=True)
def func3(x):
return x[:8:2]
self.checkScript(func3, [x], optimize=True)
def func4(x):
return x[1::4]
self.checkScript(func4, [x], optimize=True)
def test_gather(self):
def func(x):
return x[0]
x = torch.rand(10, dtype=torch.float, requires_grad=True)
self.checkScript(func, [x], optimize=True)
def test_random(self):
@torch.jit.script
def f(mean, std):
return torch.normal(mean, std)
mean, std = torch.zeros(5, 5), torch.ones(5, 5)
with torch.random.fork_rng(devices=[]):
output = torch.normal(mean, std)
with torch.random.fork_rng(devices=[]):
script_output = f(mean, std)
self.assertEqual(output, script_output)
def _check_code(self, code_str, fn_name, inputs):
scope = {}
exec(code_str, globals(), scope)
cu = torch.jit.CompilationUnit(code_str)
self.assertEqual(cu.func(*inputs), scope[fn_name](*inputs))
def test_stepped_tuple_slicing(self):
def check_slicing_tuple(slicing, tuple_type, tuple):
template = dedent("""
def func(x):
# type: ({}) -> Any
return x{}
""")
self._check_code(template.format(tuple_type, slicing), "func", [tuple])
check_slicing_tuple("[-3:3:2]", "Tuple[int, int, int]", (0, 1, 2))
check_slicing_tuple("[::55]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4))
check_slicing_tuple("[:4:4]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4))
check_slicing_tuple("[::-1]", "Tuple[int, int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5, 6))
check_slicing_tuple("[7:5:2]", "Tuple[int, int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5, 6))
check_slicing_tuple("[5:7:-2]", "Tuple[int, int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5, 6))
check_slicing_tuple("[::-2]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4))
check_slicing_tuple("[:4:-3]", "Tuple[int, int, int, int, int, int]", (0, 1, 2, 3, 4, 5))
check_slicing_tuple("[3::-2]", "Tuple[int, int, int, int, int]", (0, 1, 2, 3, 4))
def test_index(self):
def consec(size, start=0):
numel = torch.tensor(size).prod().item()
return torch.arange(numel).view(size)
def check_indexing(indexing, tensor):
template = dedent("""
def func(x):
return x{}
""")
self._check_code(template.format(indexing), "func", [tensor])
def check_dynamic_indexing(indexing, tensor, value1, value2):
value1 = torch.tensor(value1)
value2 = torch.tensor(value2)
template = dedent("""
def func(x, value1, value2):
i = int(value1)
j = int(value2)
return x{}
""")
self._check_code(template.format(indexing), "func", [tensor, value1, value2])
# basic slices
check_indexing('[0]', consec((3, 3)))
check_indexing('[1]', consec((3, 3), 10))
check_indexing('[2]', consec((3, 3), 19))
check_indexing('[2]', consec((3,)))
check_indexing('[-1]', consec((3, 3), 19))
check_indexing('[0:2]', consec((3, 3, 3)))
check_indexing('[1:-1]', consec((3, 3, 3)))
check_indexing('[-3:-1]', consec((6, 3)))
check_indexing('[1:]', consec((3, 3)))
check_indexing('[:1]', consec((3, 3)))
check_indexing('[:]', consec((3, 2)))
# multi-dim: indexes
check_indexing('[0, 1]', consec((3, 3)))
check_indexing('[0, 1]', consec((3, 3, 2)))
check_indexing('[1, 0, 2]', consec((3, 3, 3)))
check_indexing('[2, -1]', consec((3, 3)))
# multi-dim: mixed slicing and indexing
check_indexing('[0, 1:2]', consec((3, 3)))
check_indexing('[0, :1]', consec((3, 3, 2)))
check_indexing('[1, 2:]', consec((3, 3, 3)))
check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
check_indexing('[1:, -1, 0]', consec((3, 3, 3, 3)))
check_indexing('[-1, 2:, 1:2]', consec((3, 3, 3, 3)))
check_indexing('[-1, 1:, 0]', consec((3, 3, 3, 3)))
check_indexing('[-1, :, 0, 2]', consec((3, 3, 3, 3)))
# zero-sized slices
check_indexing('[0:0]', consec((2, 2)))
check_indexing('[0:0, 1]', consec((3, 3)))
# trivial expression usage
check_indexing('[1+1]', consec((3, 3)))
check_indexing('[1:(0 + 2)]', consec((3, 3, 3)))
# None for new dimensions
check_indexing('[None, 0]', consec((3, 3)))
check_indexing('[1, None]', consec((3, 3), 10))
check_indexing('[None, None, 2]', consec((3, 3), 19))
check_indexing('[None, 2, None]', consec((3,)))
check_indexing('[0:2, None]', consec((3, 3, 3)))
check_indexing('[None, 1:-1]', consec((3, 3, 3)))
check_indexing('[None, -3:-1, None]', consec((6, 3)))
check_indexing('[-1, None, 2:, None, 1:2]', consec((3, 3, 3, 3)))
check_indexing('[None, -1, None, 2:, None, 1:2, None]', consec((3, 3, 3, 3)))
# dynamic expression usage
check_dynamic_indexing("[i + j]", consec((3, 3)), 0, 1)
check_dynamic_indexing("[i:j, i]", consec((3, 3, 2)), 0, 2)
def test_advancedindex(self):
def consec(size, start=0):
numel = torch.tensor(size).prod().item()
return torch.arange(numel).view(size)
def check_indexing(indexing, tensor, **kwargs):
indices_dict = kwargs
template = dedent("""
def func(x{formals}):
return x{expr}
""")
formals = []
values = []
for formal, value in indices_dict.items():
formals.append(formal)
values.append(value)
formals = ''.join(map(', {}'.format, formals))
inputs = [tensor] + values
self._check_code(template.format(formals=formals, expr=indexing),
"func", inputs)
# Indexing with tensor (basic)
check_indexing('[i]', consec((3, 3)), i=torch.tensor([0]))
check_indexing('[i]', consec((3, 3)), i=torch.tensor(1))
check_indexing('[i]', consec((3, 3)), i=torch.tensor([-2]))
check_indexing('[i]', consec((3, 3), 2), i=torch.tensor([0, 0]))
check_indexing('[i]', consec((3, 3, 2, 2)), i=torch.tensor([0, -2, 1]))
# NB: indexing with tensors and indexing with sequences can be implemented
# in a very similar way (sequences are converted to tensors), so only one
# case needs to be tested extensively.
# XXX: When we can index with sequences, replace these cases with
# sequence indexing expressions; those are much easier to read.
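# For example, x[[0, 2]] selects the same rows as x[torch.tensor([0, 2])],
# which is why the sequence cases below are exercised via tensor indices.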
# Misc sequence advanced indexing
inp = consec((4, 8, 5))
to_check = [
# [[0, 1, 3]]
['[i]', {'i': [0, 1, 3]}],
# [[0, 2], [1, 3]]
['[i, j]', {'i': [0, 2], 'j': [1, 3]}],
# [[[0, 1], [0, 1]], [[0, 1], [0, 1]]]
['[i, j]', {'i': [[0, 1], [0, 1]], 'j': [[0, 1], [0, 1]]}],
# [[0, 2], [1, 3], [1, 1]]
['[i, j, k]', {'i': [0, 2], 'j': [1, 3], 'k': [1, 1]}],
# [[0, 2], 1, [1, 1]]
['[i, j, k]', {'i': [0, 2], 'j': 1, 'k': [1, 1]}],
# [:, :, [0, 3, 4]]
['[:, :, i]', {'i': [0, 3, 4]}],
# [:, [2, 4, 5, 7], 2:4]
['[:, i, 2:4]', {'i': [0, 2, 3]}],
# [[2, 3], :, :]
['[i, :, :]', {'i': [2, 3]}],
# [:, [0, 2, 3], [1, 3, 4]]
['[:, i, j]', {'i': [0, 2, 3], 'j': [1, 3, 4]}],
# [:, [0], [1, 2, 4]]
['[:, i, j]', {'i': [0], 'j': [1, 2, 4]}],
# [:, [0, 1, 3], [4]]
['[:, i, j]', {'i': [0, 1, 3], 'j': [4]}],
# [:, [[0, 1], [1, 0]], [[2, 3]]]
['[:, i, j]', {'i': [[0, 1], [1, 0]], 'j': [[2, 3]]}],
# [:, [[0, 1], [2, 3]], [[0]]]
['[:, i, j]', {'i': [[0, 1], [2, 3]], 'j': [[0]]}],
# [:, [[5, 6]], [[0, 3], [4, 4]]]
['[:, i, j]', {'i': [[5, 6]], 'j': [[0, 3], [4, 4]]}],
# [[0, 2, 3], [1, 3, 4], :]
['[i, j, :]', {'i': [0, 2, 3], 'j': [1, 3, 4]}],
# [0, [1, 2, 4], :]
['[i, j, :]', {'i': 0, 'j': [1, 2, 4]}],
# [[0, 1, 3], 4, :]
['[i, j, :]', {'i': [0, 1, 3], 'j': 4}],
# [[[0, 1], [1, 0]], [[2, 1], [3, 5]], :]
['[i, j, :]', {'i': [[0, 1], [1, 0]], 'j': [[2, 1], [3, 5]]}],
# [[[0, 1], [1, 0]], [[2, 3]], :]
['[i, j, :]', {'i': [[0, 1], [1, 0]], 'j': [[2, 3]]}],
# [[[0, 1], [2, 3]], [[0]], :]
['[i, j, :]', {'i': [[0, 1], [2, 3]], 'j': [[0]]}],
# [[[2, 1]], [[0, 3], [4, 4]], :]
['[i, j, :]', {'i': [[2, 1]], 'j': [[0, 3], [4, 4]]}],
# [[[2]], [[0, 3], [4, 1]], 0:2]
['[i, j, 0:2]', {'i': [[2]], 'j': [[0, 3], [4, 1]]}],
]
for expr, argdict in to_check:
tensordict = {k: torch.tensor(v) for (k, v) in argdict.items()}
check_indexing(expr, inp, **tensordict)
def test_adv_indexing_list(self):
# indexing with list is equivalent to indexing with tensor
def func1(x):
return x[[0, 1, 5]]
def func2(x):
return x[[0, 1], [0, 1]]
def func3(x):
return x[[[0, 1], [0, 1]], [[0, 1], [0, 1]]]
def func4(x):
ls = [0]
ls.append(1)
ls.append(2)
return x[ls]
def func5(x):
ls = [0.1, 1.2, 2.3]
return x[ls]
input = torch.rand((6, 2))
self.checkScript(func1, (input,))
self.checkScript(func2, (input,))
self.checkScript(func3, (input,))
self.checkScript(func4, (input,))
self.checkScript(func5, (input,))
def test_index_ellipses(self):
vals = [":", 1, None]
for _ in range(100):
indices = [random.choice(vals) for _ in range(4)]
indices[random.randint(0, len(indices) - 1)] = "..."
test_str = dedent("""
def f():
x = torch.ones(10, 9, 8, 7, 6)
return x{indices}.shape
""".format(indices=indices))
test_str = test_str.replace(r"'", r'')
scope = {}
execWrapper(test_str, globals(), scope)
cu = torch.jit.CompilationUnit(test_str)
res1 = cu.f()
res2 = scope['f']()
self.assertEqual(res1, res2)
def test_inf(self):
@torch.jit.script
def foo(a):
return a < float('inf')
s = torch.rand(1)
self.assertTrue(foo(s))
@torch.jit.script
def bar(a):
return a > float('-inf')
s = torch.rand(1)
self.assertTrue(bar(s))
# test re-assignment on imported source
str = """
def foo(x):
# type: (bool)
a = float("-inf")
if not x:
a = float(torch.tensor([5]))
return a < 4
"""
cu = torch.jit.CompilationUnit(str)
self.assertTrue(cu.foo(True))
self.assertFalse(cu.foo(False))
def test_str_to_float(self):
@torch.jit.script
def foo(a):
return 0.5 == float('0.5 hello')
s = torch.rand(1)
with self.assertRaisesRegex(RuntimeError, "could not convert string to float"):
self.assertTrue(foo(s))
@torch.jit.script
def foo(a):
return 0.5 == float('0.5')
s = torch.rand(1)
self.assertTrue(foo(s))
@torch.jit.script
def foo(a):
return 0. == float('0')
s = torch.rand(1)
self.assertTrue(foo(s))
|
pytorch-master
|
test/jit/test_python_builtins.py
|
# Owner(s): ["oncall: jit"]
import torch
import os
import sys
from torch.testing._internal.jit_utils import JitTestCase
from typing import Dict, Any, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestModuleAPIs(JitTestCase):
def test_default_state_dict_methods(self):
"""Tests that default state dict methods are automatically available"""
class DefaultStateDictModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(6, 16, 5)
self.fc = torch.nn.Linear(16 * 5 * 5, 120)
def forward(self, x):
x = self.conv(x)
x = self.fc(x)
return x
m1 = torch.jit.script(DefaultStateDictModule())
m2 = torch.jit.script(DefaultStateDictModule())
state_dict = m1.state_dict()
m2.load_state_dict(state_dict)
def test_customized_state_dict_methods(self):
"""Tests that customized state dict methods are in effect"""
class CustomStateDictModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(6, 16, 5)
self.fc = torch.nn.Linear(16 * 5 * 5, 120)
self.customized_save_state_dict_called: bool = False
self.customized_load_state_dict_called: bool = False
def forward(self, x):
x = self.conv(x)
x = self.fc(x)
return x
@torch.jit.export
def _save_to_state_dict(self, destination: Dict[str, torch.Tensor],
prefix: str, keep_vars: bool):
self.customized_save_state_dict_called = True
return {"dummy": torch.ones(1)}
@torch.jit.export
def _load_from_state_dict(self,
state_dict: Dict[str, torch.Tensor],
prefix: str, local_metadata: Any,
strict: bool, missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str]):
self.customized_load_state_dict_called = True
return
m1 = torch.jit.script(CustomStateDictModule())
self.assertFalse(m1.customized_save_state_dict_called)
state_dict = m1.state_dict()
self.assertTrue(m1.customized_save_state_dict_called)
m2 = torch.jit.script(CustomStateDictModule())
self.assertFalse(m2.customized_load_state_dict_called)
m2.load_state_dict(state_dict)
self.assertTrue(m2.customized_load_state_dict_called)
def test_submodule_customized_state_dict_methods(self):
"""Tests that customized state dict methods on submodules are in effect"""
class CustomStateDictModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(6, 16, 5)
self.fc = torch.nn.Linear(16 * 5 * 5, 120)
self.customized_save_state_dict_called: bool = False
self.customized_load_state_dict_called: bool = False
def forward(self, x):
x = self.conv(x)
x = self.fc(x)
return x
@torch.jit.export
def _save_to_state_dict(self, destination: Dict[str, torch.Tensor],
prefix: str, keep_vars: bool):
self.customized_save_state_dict_called = True
return {"dummy": torch.ones(1)}
@torch.jit.export
def _load_from_state_dict(self,
state_dict: Dict[str, torch.Tensor],
prefix: str, local_metadata: Any,
strict: bool, missing_keys: List[str],
unexpected_keys: List[str],
error_msgs: List[str]):
self.customized_load_state_dict_called = True
return
class ParentModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.sub = CustomStateDictModule()
def forward(self, x):
return self.sub(x)
m1 = torch.jit.script(ParentModule())
self.assertFalse(m1.sub.customized_save_state_dict_called)
state_dict = m1.state_dict()
self.assertTrue(m1.sub.customized_save_state_dict_called)
m2 = torch.jit.script(ParentModule())
self.assertFalse(m2.sub.customized_load_state_dict_called)
m2.load_state_dict(state_dict)
self.assertTrue(m2.sub.customized_load_state_dict_called)
|
pytorch-master
|
test/jit/test_module_apis.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import unittest
import torch
import torch._C
from pathlib import Path
from test_nnapi import TestNNAPI
from torch.testing._internal.common_utils import TEST_WITH_ASAN
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
"""
Unit Tests for Nnapi backend with delegate
Inherits most tests from TestNNAPI, which loads Android NNAPI models
without the delegate API.
"""
# First skip is needed for IS_WINDOWS or IS_MACOS to skip the tests.
# Second skip is because ASAN is currently causing an error.
# It is still unclear how to resolve this. T95764916
torch_root = Path(__file__).resolve().parent.parent.parent
lib_path = torch_root / 'build' / 'lib' / 'libnnapi_backend.so'
@unittest.skipIf(not os.path.exists(lib_path),
"Skipping the test as libnnapi_backend.so was not found")
@unittest.skipIf(TEST_WITH_ASAN, "Unresolved bug with ASAN")
class TestNnapiBackend(TestNNAPI):
def setUp(self):
super().setUp()
# Save default dtype
module = torch.nn.PReLU()
self.default_dtype = module.weight.dtype
# Change dtype to float32 (since a different unit test changed dtype to float64,
# which is not supported by the Android NNAPI delegate)
# Float32 should typically be the default in other files.
torch.set_default_dtype(torch.float32)
# Load nnapi delegate library
torch.ops.load_library(str(lib_path))
# Override
def call_lowering_to_nnapi(self, traced_module, args):
compile_spec = {"forward": {"inputs": args}}
return torch._C._jit_to_backend("nnapi", traced_module, compile_spec)
def test_tensor_input(self):
# Lower a simple module
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
# Argument input is a single Tensor
self.call_lowering_to_nnapi(traced, args)
# Argument input is a Tensor in a list
self.call_lowering_to_nnapi(traced, [args])
# Test exceptions for incorrect compile specs
def test_compile_spec_sanity(self):
args = torch.tensor([[1.0, -1.0, 2.0, -2.0]]).unsqueeze(-1).unsqueeze(-1)
module = torch.nn.PReLU()
traced = torch.jit.trace(module, args)
errorMsgTail = r"""
method_compile_spec should contain a Tensor or Tensor List which bundles input parameters: shape, dtype, quantization, and dimorder.
For input shapes, use 0 for run/load time flexible input.
method_compile_spec must use the following format:
{"forward": {"inputs": at::Tensor}} OR {"forward": {"inputs": c10::List<at::Tensor>}}"""
# No forward key
compile_spec = {"backward": {"inputs": args}}
with self.assertRaisesRegex(RuntimeError, "method_compile_spec does not contain the \"forward\" key." + errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No dictionary under the forward key
compile_spec = {"forward": 1}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No inputs key (in the dictionary under the forward key)
compile_spec = {"forward": {"not inputs": args}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain a dictionary with an \"inputs\" key, "
"under it's \"forward\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
# No Tensor or TensorList under the inputs key
compile_spec = {"forward": {"inputs": 1}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
compile_spec = {"forward": {"inputs": [1]}}
with self.assertRaisesRegex(RuntimeError,
"method_compile_spec does not contain either a Tensor or TensorList, under it's \"inputs\" key."
+ errorMsgTail):
torch._C._jit_to_backend("nnapi", traced, compile_spec)
def tearDown(self):
# Change dtype back to default (Otherwise, other unit tests will complain)
torch.set_default_dtype(self.default_dtype)
|
pytorch-master
|
test/jit/test_backend_nnapi.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import inspect
import unittest
from typing import Dict, List
import torch
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestBuiltins(JitTestCase):
"""
Tests for TorchScript support of Python builtin functions.
"""
def test_has_attr(self):
class HasA(torch.nn.Module):
def __init__(self):
super(HasA, self).__init__()
self.a = 0
class HasB(torch.nn.Module):
def __init__(self):
super(HasB, self).__init__()
self.b = 1
class Mod(torch.nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.mods = torch.nn.ModuleList([HasA(), HasB()])
def forward(self):
# use a list to encode hasattr results
l = torch.jit.annotate(List[int], [])
for mod in self.mods:
l.append(int(hasattr(mod, "a")))
l.append(int(hasattr(mod, "b")))
# actually retrieve the attr to test static refinement
if hasattr(mod, "a"):
l.append(mod.a)
if hasattr(mod, "b"):
l.append(mod.b)
return l
self.checkModule(Mod(), ())
def test_has_attr_invalid_args(self):
class Mod(torch.nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.mod = torch.nn.Linear(1, 1)
def forward(self, name):
# not allowed, `name` must be static.
return hasattr(self.mod, name)
with self.assertRaisesRegexWithHighlight(RuntimeError, "hasattr", "name"):
torch.jit.script(Mod())
class Mod(torch.nn.Module):
def __init__(self):
super(Mod, self).__init__()
def forward(self, name):
# not allowed, `torch.rand` is not a class type
return hasattr(torch.rand(2, 3), name)
with self.assertRaisesRegexWithHighlight(RuntimeError, "hasattr", "name"):
torch.jit.script(Mod())
def test_del(self):
def fn(x: List[int]) -> List[int]:
a = x * 2
del a
return x
self.checkScript(fn, ([1, 2, 3],))
with self.assertRaisesRegexWithHighlight(RuntimeError, "undefined value", "a"):
@torch.jit.script
def fn(x):
a = x ** 2
del a
return a
with self.assertRaisesRegexWithHighlight(RuntimeError, "undefined value", "a"):
@torch.jit.script
def fn(x):
a = x ** 2
if a:
del a
return a
with self.assertRaisesRegexWithHighlight(RuntimeError, "undefined value", "b"):
@torch.jit.script
def fn(x):
a = x ** 2
del b
return a
def test_del_multiple_operands(self):
def fn(x: List[int]) -> List[int]:
a, b, c = x[0], x[1], x[2]
del a, b, c
return x
self.checkScript(fn, ([1, 2, 3],))
def del_list_multiple_operands(x: List[int]) -> List[int]:
del x[0], x[1]
return x
py_out = del_list_multiple_operands([0, 1, 2])
jit_out = torch.jit.script(del_list_multiple_operands)([0, 1, 2])
self.assertEqual(py_out, jit_out)
def del_dict_multiple_operands(x: Dict[str, int]) -> Dict[str, int]:
del x['hi'], x['there']
return x
py_out = del_dict_multiple_operands({"hi": 5, "there": 6})
jit_out = torch.jit.script(del_dict_multiple_operands)({"hi": 5, "there": 6})
self.assertEqual(py_out, jit_out)
class TestTensorBuiltins(JitTestCase):
def test_tensor_properties(self):
def should_keep(tensor, name):
if inspect.isroutine(getattr(tensor, name)):
return False
if name.startswith('_'):
return False
return True
tensor = torch.arange(4, dtype=torch.float).view(2, 2)
keys = dir(tensor)
# real and imag are only implemented for complex tensors.
self.assertRaises(RuntimeError, lambda: should_keep(tensor, 'imag'))
keys.remove('imag')
properties = [p for p in keys if should_keep(tensor, p)]
code_template = """
def fn(x):
return x.{}
"""
EQUALITY_MISMATCH = set([
# TorchScript doesn't have real enums so they return an int instead
# of the actual value
'dtype',
'layout',
])
MISSING_PROPERTIES = set([
'grad_fn',
# This is an undocumented property so it's not included
"output_nr",
# This has a longer implementation, maybe not worth copying to
# TorchScript if named tensors don't work there anyways
'names',
])
for p in properties:
if p in MISSING_PROPERTIES:
continue
code = code_template.format(p)
cu = torch.jit.CompilationUnit()
cu.define(code)
if p in EQUALITY_MISMATCH:
continue
self.assertEqual(getattr(tensor, p), cu.fn(tensor))
def test_tensor_subscript_assign(self):
def fn1(x):
a = torch.zeros_like(x, dtype=torch.uint8)
a[torch.tensor(0)] = torch.tensor(2, dtype=torch.uint8)
return a
def fn2(x):
a = torch.zeros_like(x, dtype=torch.uint8)
a[0] = 2
return a
def fn3(x):
a = torch.zeros_like(x, dtype=torch.uint8)
a[torch.tensor(0)] = 2
return a
def fn4(x):
a = torch.zeros_like(x, dtype=torch.uint8)
a[0] = torch.tensor(2, dtype=torch.uint8)
return a
def fn5(x):
a = torch.zeros_like(x, dtype=torch.float32)
a[torch.tensor(0)] = 2
return a
for fn in (fn1, fn2, fn3, fn4, fn5):
self.checkScript(fn, (torch.zeros(2, dtype=torch.uint8),))
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_tensor_subscript_assign_device(self):
def fn6(x):
a = torch.zeros_like(x, dtype=torch.float32, device="cuda")
a[torch.tensor(0)] = 2
return a
self.checkScript(fn6, (torch.zeros(2, dtype=torch.float32, device="cuda"),))
def test_tensor_item(self):
def test_scalar_cast(x):
scalar = x.item()
return int(scalar), float(scalar)
graph = torch.jit.script(test_scalar_cast).graph
FileCheck().check("(int, float) = prim::TupleConstruct").run(graph)
self.checkScript(test_scalar_cast, (torch.tensor(1.0),))
self.checkScript(test_scalar_cast, (torch.tensor(1),))
def test_method_on_number(self):
def func():
c = 1
return c.add(1)
with self.assertRaisesRegex(RuntimeError, 'object has no attribute or method'):
torch.jit.script(func)
# testing implicit conversion of tensors to scalars to match function arguments
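# e.g. x.unsqueeze(torch.tensor(0)) needs the zero-dim tensor argument converted to an
# int dim; in a scripted graph that conversion appears as an ImplicitTensorToNum node.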
def test_scalar_to_num_conversions(self):
@torch.jit.script
def multiple_defs(x):
c = 1
x = x + c
return x
self.assertTrue("ImplicitTensorToNum" not in str(multiple_defs.graph))
@torch.jit.script
def tensor_to_int_script(x, tensor):
return x.unsqueeze(tensor)
# location present in error message
with self.assertRaisesRegex(RuntimeError, "x.unsqueeze"):
tensor_to_int_script(torch.tensor([2]), torch.tensor([2, 2]))
def tensor_to_int(x, tensor):
return x.unsqueeze(tensor)
@torch.jit.script
def tensor_to_float_script(x, tensor):
return x.addcmul(tensor, tensor, value=tensor)
def tensor_to_float(x, tensor):
return x.addcmul(tensor, tensor, value=tensor)
x = torch.zeros(10)
# float tensor, float tensor with grad, int tensor (can't set grad on int tensor)
tensors = [torch.tensor(1.1),
torch.tensor(1.1, requires_grad=True),
torch.tensor(0),
torch.tensor([2])]
script_funs = [tensor_to_int_script, tensor_to_float_script]
funs = [tensor_to_int, tensor_to_float]
# return the result, or whether exception was thrown
def test_func(func, x, tensor):
try:
result = func(x, tensor)
except RuntimeError as e:
result = True
except TypeError as e:
result = True
return result
# assert result or exception equal for each (function, inputs)
for tensor in tensors:
for i in range(len(script_funs)):
self.assertEqual(test_func(script_funs[i], x, tensor), test_func(funs[i], x, tensor))
|
pytorch-master
|
test/jit/test_builtins.py
|
r"""
Define exceptions used in test_exception.py. We define them in a
separate file on purpose to make sure the fully qualified exception class name
is captured correctly in such cases.
"""
class MyKeyError(KeyError):
def __init__(self, msg):
super(KeyError, self).__init__(msg)
|
pytorch-master
|
test/jit/myexception.py
|
# Owner(s): ["oncall: jit"]
import torch
from typing import List, Tuple
class SubmoduleNoForwardInputs(torch.nn.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self):
assert self.name == "inner_mod_name"
class ModuleNoForwardInputs(torch.nn.Module):
def __init__(self, name: str, submodule_name: str):
super().__init__()
self.name = name
self.submodule = SubmoduleNoForwardInputs(submodule_name)
def forward(self):
self.submodule()
class SubmoduleForwardSingleInput(torch.nn.Module):
def __init__(self, name):
super().__init__()
self.name = name
def foo(self, input: str):
return input
def forward(self, input: str):
input = input + "_inner_mod"
input = self.foo(input)
return input
class ModuleForwardSingleInput(torch.nn.Module):
def __init__(self, name: str, submodule_name: str):
super().__init__()
self.name = name
self.submodule = SubmoduleForwardSingleInput(submodule_name)
def forward(self, input: str):
input = input + "_outermod"
return self.submodule(input)
class ModuleDirectforwardSubmodCall(torch.nn.Module):
def __init__(self, name: str, submodule_name: str):
super().__init__()
self.name = name
self.submodule = SubmoduleForwardSingleInput(submodule_name)
def forward(self, input: str):
input = input + "_outermod"
return self.submodule.forward(input)
class SubmoduleForwardMultipleInputs(torch.nn.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, input1: List[str], input2: str):
input1.append(self.name)
output2 = input2 + "_"
return input1, output2
class ModuleForwardMultipleInputs(torch.nn.Module):
def __init__(self, name: str, submodule_name: str):
super().__init__()
self.name = name
self.submodule = SubmoduleForwardMultipleInputs(submodule_name)
def forward(self, input1: List[str], input2: str):
input1.append(self.name)
return self.submodule(input1, input2)
class SubmoduleForwardTupleInput(torch.nn.Module):
def __init__(self, name):
super().__init__()
self.name = name
def forward(self, input: Tuple[int]):
input_access = input[0]
return (1,)
class ModuleForwardTupleInput(torch.nn.Module):
def __init__(self, name: str, submodule_name: str):
super().__init__()
self.name = name
self.submodule = SubmoduleForwardTupleInput(submodule_name)
def forward(self, input: Tuple[int]):
input_access = input[0]
return self.submodule((1,))
# Modules for the JIT forward hook and pre-hook Python and C++ tests
def create_module_no_forward_input():
# Use to test module level hooks with no forward input
m = ModuleNoForwardInputs("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[()]) -> None:
assert self.name == "outer_mod_name"
def forward_hook(self, input: Tuple[()], output: None):
assert self.name == "outer_mod_name"
m.register_forward_pre_hook(pre_hook)
m.register_forward_hook(forward_hook)
return m
def create_submodule_no_forward_input():
# Use to test submodule level hooks with no forward input
m = ModuleNoForwardInputs("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[()]) -> None:
assert self.name == "inner_mod_name"
def forward_hook(self, input: Tuple[()], output: None):
assert self.name == "inner_mod_name"
m.submodule.register_forward_pre_hook(pre_hook)
m.submodule.register_forward_hook(forward_hook)
return m
def create_module_forward_multiple_inputs():
# Use to test module level hooks with forward having multiple
# inputs and returns
m = ModuleForwardMultipleInputs("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[List[str], str]) -> Tuple[List[str], str]:
assert self.name == "outer_mod_name"
assert input[0][0] == "a"
return ["pre_hook_override_name"], "pre_hook_override"
def forward_hook(self, input: Tuple[List[str], str], output: Tuple[List[str], str]):
assert self.name == "outer_mod_name"
assert input[0][0] == "pre_hook_override_name"
output2 = output[1] + "fh"
return output[0], output2
m.register_forward_pre_hook(pre_hook)
m.register_forward_hook(forward_hook)
return m
def create_module_multiple_hooks_multiple_inputs():
# Use to test that module level hooks with multiple inputs execute
# in correct order and pass correct information between each other
m = ModuleForwardMultipleInputs("outer_mod_name", "inner_mod_name")
def pre_hook1(self, input: Tuple[List[str], str]) -> Tuple[List[str], str]:
assert self.name == "outer_mod_name"
assert input[0][0] == "a"
return ["pre_hook_override_name"], "pre_hook_override"
def pre_hook2(self, input: Tuple[List[str], str]) -> Tuple[List[str], str]:
assert self.name == "outer_mod_name"
assert input[0][0] == "pre_hook_override_name"
return ["pre_hook_override_name2"], "pre_hook_override"
def forward_hook1(
self, input: Tuple[List[str], str], output: Tuple[List[str], str]
):
assert self.name == "outer_mod_name"
assert input[0][0] == "pre_hook_override_name2"
output2 = output[1] + "fh1"
return output[0], output2
def forward_hook2(
self, input: Tuple[List[str], str], output: Tuple[List[str], str]
):
assert self.name == "outer_mod_name"
assert input[0][0] == "pre_hook_override_name2"
assert output[1] == "pre_hook_override_fh1"
output2 = output[1] + "_fh2"
return output[0], output2
m.register_forward_pre_hook(pre_hook1)
m.register_forward_pre_hook(pre_hook2)
m.register_forward_hook(forward_hook1)
m.register_forward_hook(forward_hook2)
return m
def create_module_forward_single_input():
# Use to test module level hooks for forward with single input
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "outer_mod_name"
assert input[0] == "a"
return ("pre_hook_override_name",)
def forward_hook(self, input: Tuple[str], output: str):
assert self.name == "outer_mod_name"
assert input == ("pre_hook_override_name",)
output = output + "_fh"
return output
m.register_forward_pre_hook(pre_hook)
m.register_forward_hook(forward_hook)
return m
def create_module_same_hook_repeated():
# Use to test module can run same hook multiple times
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "outer_mod_name"
input_change = input[0] + "_ph"
return (input_change,)
def forward_hook(self, input: Tuple[str], output: str):
assert self.name == "outer_mod_name"
assert input == ("a_ph_ph",)
output = output + "_fh"
return output
m.register_forward_pre_hook(pre_hook)
m.register_forward_pre_hook(pre_hook)
m.register_forward_hook(forward_hook)
m.register_forward_hook(forward_hook)
return m
def create_module_hook_return_nothing():
# Use to test module level hooks that return nothing
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> None:
assert self.name == "outer_mod_name"
assert input[0] == "a"
def forward_hook(self, input: Tuple[str], output: str):
assert self.name == "outer_mod_name"
assert input == ("a",)
m.register_forward_pre_hook(pre_hook)
m.register_forward_hook(forward_hook)
return m
def create_module_multiple_hooks_single_input():
# Use to test that modules can run multiple hooks with single input
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook1(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "outer_mod_name"
assert input[0] == "a"
return ("pre_hook_override_name1",)
def pre_hook2(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "outer_mod_name"
assert input[0] == "pre_hook_override_name1"
return ("pre_hook_override_name2",)
def forward_hook1(self, input: Tuple[str], output: str):
assert self.name == "outer_mod_name"
assert input == ("pre_hook_override_name2",)
assert output == "pre_hook_override_name2_outermod_inner_mod"
output = output + "_fh1"
return output, output
def forward_hook2(self, input: Tuple[str], output: Tuple[str, str]):
assert self.name == "outer_mod_name"
assert input == ("pre_hook_override_name2",)
assert output[0] == "pre_hook_override_name2_outermod_inner_mod_fh1"
output = output[0] + "_fh2"
return output
m.register_forward_pre_hook(pre_hook1)
m.register_forward_pre_hook(pre_hook2)
m.register_forward_hook(forward_hook1)
m.register_forward_hook(forward_hook2)
return m
def create_submodule_forward_multiple_inputs():
# Use to test that submodules can run hooks that have multiple forward inputs
m = ModuleForwardMultipleInputs("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[List[str], str]) -> Tuple[List[str], str]:
assert self.name == "inner_mod_name"
assert input[0][1] == "outer_mod_name"
return ["pre_hook_override_name"], "pre_hook_override"
def forward_hook(self, input: Tuple[List[str], str], output: Tuple[List[str], str]):
assert self.name == "inner_mod_name"
assert input[0][0] == "pre_hook_override_name"
output2 = output[1] + "fh"
return output[0], output2
m.submodule.register_forward_pre_hook(pre_hook)
m.submodule.register_forward_hook(forward_hook)
return m
def create_submodule_multiple_hooks_multiple_inputs():
# Use to test that submodules can run multiple hooks with multiple
# forward inputs
m = ModuleForwardMultipleInputs("outer_mod_name", "inner_mod_name")
def pre_hook1(self, input: Tuple[List[str], str]) -> Tuple[List[str], str]:
assert self.name == "inner_mod_name"
assert input[1] == "no_pre_hook"
return ["pre_hook_override_name"], "pre_hook_override1"
def pre_hook2(self, input: Tuple[List[str], str]) -> Tuple[List[str], str]:
assert self.name == "inner_mod_name"
assert input[1] == "pre_hook_override1"
return ["pre_hook_override_name"], "pre_hook_override2"
def forward_hook1(
self, input: Tuple[List[str], str], output: Tuple[List[str], str]
):
assert self.name == "inner_mod_name"
assert input[1] == "pre_hook_override2"
assert output[1] == "pre_hook_override2_"
output2 = output[1] + "fh1"
return output[0], output2, output2
def forward_hook2(
self, input: Tuple[List[str], str], output: Tuple[List[str], str, str]
):
assert self.name == "inner_mod_name"
assert input[1] == "pre_hook_override2"
assert output[1] == "pre_hook_override2_fh1"
output2 = output[1] + "_fh2"
return output[0], output2
m.submodule.register_forward_pre_hook(pre_hook1)
m.submodule.register_forward_pre_hook(pre_hook2)
m.submodule.register_forward_hook(forward_hook1)
m.submodule.register_forward_hook(forward_hook2)
return m
def create_submodule_forward_single_input():
# Use to test that submodules can run hooks with a single argument
# passed to forward
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "inner_mod_name"
assert input[0] == "a_outermod"
return ("pre_hook_override_name",)
def forward_hook(self, input: Tuple[str], output: str):
assert self.name == "inner_mod_name"
assert input == ("pre_hook_override_name",)
return output
m.submodule.register_forward_pre_hook(pre_hook)
m.submodule.register_forward_hook(forward_hook)
return m
def create_submodule_to_call_directly_with_hooks():
# Use to test that submodules have their hooks invoked when called
# directly
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "inner_mod_name"
return ("pre_hook_override_name",)
def forward_hook(self, input: Tuple[str], output: str):
assert self.name == "inner_mod_name"
assert input == ("pre_hook_override_name",)
return output + "_fh"
m.submodule.register_forward_pre_hook(pre_hook)
m.submodule.register_forward_hook(forward_hook)
return m
def create_submodule_same_hook_repeated():
# Use to test that submodules can run same hooks multiple times
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "inner_mod_name"
changed = input[0] + "_ph"
return (changed,)
def forward_hook(self, input: Tuple[str], output: str):
assert self.name == "inner_mod_name"
assert input == ("a_outermod_ph_ph",)
return output + "_fh"
m.submodule.register_forward_pre_hook(pre_hook)
m.submodule.register_forward_pre_hook(pre_hook)
m.submodule.register_forward_hook(forward_hook)
m.submodule.register_forward_hook(forward_hook)
return m
def create_submodule_hook_return_nothing():
# Use to test that submodules can run hooks that return nothing
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> None:
assert self.name == "inner_mod_name"
assert input[0] == "a_outermod"
def forward_hook(self, input: Tuple[str], output: str):
assert self.name == "inner_mod_name"
assert input == ("a_outermod",)
m.submodule.register_forward_pre_hook(pre_hook)
m.submodule.register_forward_hook(forward_hook)
return m
def create_submodule_multiple_hooks_single_input():
# Use to test that submodules can run multiple hooks that have a single input
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook1(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "inner_mod_name"
assert input[0] == "a_outermod"
return ("pre_hook_override_name",)
def pre_hook2(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "inner_mod_name"
assert input[0] == "pre_hook_override_name"
return ("pre_hook_override_name2",)
def forward_hook1(self, input: Tuple[str], output: str):
assert self.name == "inner_mod_name"
assert input == ("pre_hook_override_name2",)
assert output == "pre_hook_override_name2_inner_mod"
return output + "_fwh1"
def forward_hook2(self, input: Tuple[str], output: str):
assert self.name == "inner_mod_name"
assert input == ("pre_hook_override_name2",)
assert output == "pre_hook_override_name2_inner_mod_fwh1"
return output
m.submodule.register_forward_pre_hook(pre_hook1)
m.submodule.register_forward_pre_hook(pre_hook2)
m.submodule.register_forward_hook(forward_hook1)
m.submodule.register_forward_hook(forward_hook2)
return m
def create_forward_tuple_input():
# Use to test the case where forward is passed a single tuple as input.
# This case is special because eager mode always wraps a pre-hook's return
# value in a tuple when that return value isn't already a tuple
# (so the result can be passed on to another pre-hook if needed),
# but it does not re-wrap the return value when the forward input is itself
# a single tuple. To get consistent behavior between single tuple inputs and
# all other possible forward inputs, pre-hooks for single tuple inputs need to
# wrap their returns in another tuple. This is enforced by the schema checker.
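# For example, with forward(self, input: Tuple[int]) a pre-hook that replaces the
# input must return ((new_value,),) rather than (new_value,), as the
# pre_hook_outermod / pre_hook_innermod helpers below do.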
m = ModuleForwardTupleInput("outer_mod_name", "inner_mod_name")
def pre_hook_outermod(self, input: Tuple[Tuple[int]]) -> Tuple[Tuple[int]]:
# 'return (11,)' doesn't work with eager, inner tuple lost
return ((11,),)
def pre_hook_innermod(self, input: Tuple[Tuple[int]]) -> Tuple[Tuple[int]]:
# 'return (22,)' doesn't work with eager, inner tuple lost
return ((22,),)
def forward_hook_outermod(self, input: Tuple[Tuple[int]], output: int):
return (11,)
def forward_hook_innermod(self, input: Tuple[Tuple[int]], output: Tuple[int]):
return 22
m.register_forward_pre_hook(pre_hook_outermod)
m.submodule.register_forward_pre_hook(pre_hook_innermod)
m.register_forward_hook(forward_hook_outermod)
m.submodule.register_forward_hook(forward_hook_innermod)
return m
def create_submodule_forward_single_input_return_not_tupled():
# Use to check that submodules can return modified inputs
# that aren't wrapped in a tuple (to match eager behavior)
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> str:
assert self.name == "inner_mod_name"
assert input[0] == "a_outermod"
# return is wrapped in tuple in other test cases
return "pre_hook_override_name"
def forward_hook(self, input: Tuple[str], output: str):
assert self.name == "inner_mod_name"
assert input == ("pre_hook_override_name",)
output = output + "_fh"
return output
m.submodule.register_forward_pre_hook(pre_hook)
m.submodule.register_forward_hook(forward_hook)
return m
|
pytorch-master
|
test/jit/test_hooks_modules.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
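# These tests exercise TorchScript's "batch_mm" graph pass which, per the FileCheck
# expectations below, merges trees of aten::mm sums into a single prim::MMTreeReduce
# node (or prim::MMBatchSide when the mms share one operand) and leaves the graph
# untouched when an input may be mutated.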
class TestBatchMM(JitTestCase):
@staticmethod
def _get_test_tensors(n: int):
return [
torch.tensor([[1 + x, 2 + x, 3 + x], [4 + x, 5 + x, 6 + x]])
if x % 2 == 0
else torch.tensor([[1 + x, 2 + x], [3 + x, 4 + x], [5 + x, 6 + x]])
for x in range(n)
]
def test_batch_mm_no_mutation(self):
def test_batch_mm(
T1: torch.Tensor,
T2: torch.Tensor,
T3: torch.Tensor,
T4: torch.Tensor,
T5: torch.Tensor,
T6: torch.Tensor,
T7: torch.Tensor,
T8: torch.Tensor,
):
return (
torch.mm(T1, T2)
+ torch.mm(T3, T4)
+ torch.mm(T5, T6)
+ torch.mm(T7, T8)
)
test_batch_mm_scripted = torch.jit.script(test_batch_mm)
tensors = TestBatchMM._get_test_tensors(8)
expected = test_batch_mm(*tensors)
FileCheck().check_count("aten::mm", 4, exactly=True).run(
test_batch_mm_scripted.graph
)
self.run_pass("batch_mm", test_batch_mm_scripted.graph)
FileCheck().check_count("prim::MMTreeReduce", 1, exactly=True).run(
test_batch_mm_scripted.graph
)
actual = test_batch_mm_scripted(*tensors)
self.assertEqual(expected, actual, atol=1e-9, rtol=1e-9)
def test_batch_mm_permitted_mutation(self):
def test_batch_mm(
T1: torch.Tensor,
T2: torch.Tensor,
T3: torch.Tensor,
T4: torch.Tensor,
T5: torch.Tensor,
T6: torch.Tensor,
T7: torch.Tensor,
T8: torch.Tensor,
):
result = {}
result["product"] = (
torch.mm(T1, T2)
+ torch.mm(T3, T4)
+ torch.mm(T5, T6)
+ torch.mm(T7, T8)
)
result["constant"] = torch.tensor([42.0])
return result
test_batch_mm_scripted = torch.jit.script(test_batch_mm)
tensors = TestBatchMM._get_test_tensors(8)
expected = test_batch_mm(*tensors)
FileCheck().check_count("aten::mm", 4, exactly=True).run(
test_batch_mm_scripted.graph
)
self.run_pass("batch_mm", test_batch_mm_scripted.graph)
FileCheck().check_count("prim::MMTreeReduce", 1, exactly=True).run(
test_batch_mm_scripted.graph
)
actual = test_batch_mm_scripted(*tensors)
self.assertEqual(expected, actual, atol=1e-9, rtol=1e-9)
def test_batch_mm_prohibited_mutation(self):
@torch.jit.script
def test_batch_mm(n: int):
T1 = torch.zeros((n, n))
T2 = torch.zeros((n, n))
T3 = torch.zeros((n, n))
T4 = torch.zeros((n, n))
T5 = torch.zeros((n, n))
T6 = torch.zeros((n, n))
T7 = torch.zeros((n, n))
T8 = torch.zeros((n, n))
torch.relu_(T1)
result = (
torch.mm(T1, T2)
+ torch.mm(T3, T4)
+ torch.mm(T5, T6)
+ torch.mm(T7, T8)
)
return result
FileCheck().check_count("aten::mm", 4, exactly=True).run(test_batch_mm.graph)
self.run_pass("batch_mm", test_batch_mm.graph)
FileCheck().check_count("aten::mm", 4, exactly=True).check_not(
"prim::MMTreeReduce"
).run(test_batch_mm.graph)
def test_batch_mm_prohibited_mutation_multiple_adds(self):
@torch.jit.script
def test_batch_mm(n: int):
T1 = torch.zeros((n, n))
T2 = torch.zeros((n, n))
T3 = torch.zeros((n, n))
T4 = torch.zeros((n, n))
T5 = torch.zeros((n, n))
T6 = torch.zeros((n, n))
T7 = torch.zeros((n, n))
T8 = torch.zeros((n, n))
T9 = torch.zeros((n, n))
T10 = torch.zeros((n, n))
torch.relu_(T1)
result = {}
result["no_mutated_parameters"] = (
torch.mm(T2, T3)
+ torch.mm(T4, T5)
+ torch.mm(T6, T7)
+ torch.mm(T8, T9)
)
result["all_parameters"] = (
torch.mm(T1, T2)
+ torch.mm(T3, T4)
+ torch.mm(T5, T6)
+ torch.mm(T7, T8)
+ torch.mm(T9, T10)
)
return result
self.run_pass("batch_mm", test_batch_mm.graph)
FileCheck().check_count("prim::MMTreeReduce", 1, exactly=True).check_count(
"aten::mm", 5, exactly=True
).run(test_batch_mm.graph)
def test_batch_mm_prohibited_mutation_if_node(self):
@torch.jit.script
def test_batch_mm(n: int, use_t1: bool):
T1 = torch.zeros((n, n))
T2 = torch.zeros((n, n))
T3 = torch.zeros((n, n))
T4 = torch.zeros((n, n))
T5 = torch.zeros((n, n))
T6 = torch.zeros((n, n))
T7 = torch.zeros((n, n))
T8 = torch.zeros((n, n))
T9 = torch.zeros((n, n))
T10 = torch.zeros((n, n))
if use_t1:
torch.relu_(T1)
return (
torch.mm(T1, T2)
+ torch.mm(T3, T4)
+ torch.mm(T5, T6)
+ torch.mm(T7, T8)
+ torch.mm(T9, T10)
)
else:
return (
torch.mm(T2, T3)
+ torch.mm(T4, T5)
+ torch.mm(T6, T7)
+ torch.mm(T8, T9)
)
self.run_pass("batch_mm", test_batch_mm.graph)
FileCheck().check_count("aten::mm", 5, exactly=True).check_count(
"prim::MMTreeReduce", 1, exactly=True
).run(test_batch_mm.graph)
def test_batch_mm_side_permitted_mutation(self):
@torch.jit.script
def test_batch_mm(n: int):
result = {}
A = torch.zeros((n, n))
T1 = torch.zeros((n, n))
T2 = torch.zeros((n, n))
T3 = torch.zeros((n, n))
T4 = torch.zeros((n, n))
T5 = torch.zeros((n, n))
T6 = torch.zeros((n, n))
T7 = torch.zeros((n, n))
T8 = torch.zeros((n, n))
result["T1"] = torch.mm(A, T1)
result["T2"] = torch.mm(A, T2)
result["T3"] = torch.mm(A, T3)
result["T4"] = torch.mm(A, T4)
result["T5"] = torch.mm(A, T5)
result["T6"] = torch.mm(A, T6)
result["T7"] = torch.mm(A, T7)
result["T8"] = torch.mm(A, T8)
return result
FileCheck().check_count("aten::mm", 8, exactly=True).run(test_batch_mm.graph)
self.run_pass("batch_mm", test_batch_mm.graph)
FileCheck().check_count("prim::MMBatchSide", 1, exactly=True).check_not(
"aten::mm"
).run(test_batch_mm.graph)
def test_batch_mm_side_prohibited_mutation_uncommon_side(self):
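# Mutating one of the non-shared operands (T1) should only exclude that
# matmul from the batched rewrite; the remaining mms are expected to be
# folded into prim::MMBatchSide.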
@torch.jit.script
def test_batch_mm(n: int):
A = torch.zeros((n, n))
T1 = torch.zeros((n, n))
T2 = torch.zeros((n, n))
T3 = torch.zeros((n, n))
T4 = torch.zeros((n, n))
T5 = torch.zeros((n, n))
T6 = torch.zeros((n, n))
T7 = torch.zeros((n, n))
T8 = torch.zeros((n, n))
T9 = torch.zeros((n, n))
T10 = torch.zeros((n, n))
torch.relu_(T1)
result = {}
result["T1"] = torch.mm(A, T1)
result["T2"] = torch.mm(A, T2)
result["T3"] = torch.mm(A, T3)
result["T4"] = torch.mm(A, T4)
result["T5"] = torch.mm(A, T5)
result["T6"] = torch.mm(A, T6)
result["T7"] = torch.mm(A, T7)
result["T8"] = torch.mm(A, T8)
result["T9"] = torch.mm(A, T9)
result["T10"] = torch.mm(A, T10)
return result
FileCheck().check_count("aten::mm", 10, exactly=True).run(test_batch_mm.graph)
self.run_pass("batch_mm", test_batch_mm.graph)
FileCheck().check_count("aten::mm", 1, exactly=True).run(test_batch_mm.graph)
FileCheck().check_count("prim::MMBatchSide", 1, exactly=True).run(
test_batch_mm.graph
)
def test_batch_mm_side_prohibited_mutation_common_side(self):
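# Mutating the shared operand A is expected to block the side-batching
# rewrite entirely, leaving all ten aten::mm nodes in place.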
@torch.jit.script
def test_batch_mm(n: int):
A = torch.zeros((n, n))
T1 = torch.zeros((n, n))
T2 = torch.zeros((n, n))
T3 = torch.zeros((n, n))
T4 = torch.zeros((n, n))
T5 = torch.zeros((n, n))
T6 = torch.zeros((n, n))
T7 = torch.zeros((n, n))
T8 = torch.zeros((n, n))
T9 = torch.zeros((n, n))
T10 = torch.zeros((n, n))
torch.relu_(A)
result = {}
result["T1"] = torch.mm(A, T1)
result["T2"] = torch.mm(A, T2)
result["T3"] = torch.mm(A, T3)
result["T4"] = torch.mm(A, T4)
result["T5"] = torch.mm(A, T5)
result["T6"] = torch.mm(A, T6)
result["T7"] = torch.mm(A, T7)
result["T8"] = torch.mm(A, T8)
result["T9"] = torch.mm(A, T9)
result["T10"] = torch.mm(A, T10)
return result
FileCheck().check_count("aten::mm", 10, exactly=True).run(test_batch_mm.graph)
self.run_pass("batch_mm", test_batch_mm.graph)
FileCheck().check_count("aten::mm", 10, exactly=True).check_not(
"prim::MMBatchSide"
).run(test_batch_mm.graph)
|
pytorch-master
|
test/jit/test_batch_mm.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import io
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import suppress_warnings
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestTypeSharing(JitTestCase):
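# Helpers: script the inputs if necessary and compare the modules' underlying
# JIT class types for equality / inequality.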
def assertSameType(self, m1, m2):
if not isinstance(m1, torch.jit.ScriptModule):
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
m2 = torch.jit.script(m2)
self.assertEqual(m1._c._type(), m2._c._type())
def assertDifferentType(self, m1, m2):
if not isinstance(m1, torch.jit.ScriptModule):
m1 = torch.jit.script(m1)
if not isinstance(m2, torch.jit.ScriptModule):
m2 = torch.jit.script(m2)
self.assertNotEqual(m1._c._type(), m2._c._type())
def test_basic(self):
class M(torch.nn.Module):
def __init__(self, a, b, c):
super(M, self).__init__()
self.a = a
self.b = b
self.c = c
def forward(self, x):
return x
a = torch.rand(2, 3)
b = torch.rand(2, 3)
c = torch.rand(2, 3)
m1 = M(a, b, c)
m2 = M(a, b, c)
self.assertSameType(m1, m2)
def test_diff_attr_values(self):
"""
Types should be shared even if attribute values differ
"""
class M(torch.nn.Module):
def __init__(self, a, b, c):
super(M, self).__init__()
self.a = a
self.b = b
self.c = c
def forward(self, x):
return x
a = torch.rand(2, 3)
b = torch.rand(2, 3)
c = torch.rand(2, 3)
m1 = M(a, b, c)
m2 = M(a * 2, b * 3, c * 4)
self.assertSameType(m1, m2)
def test_constants(self):
"""
Types should be shared for identical constant values, and different for different constant values
"""
class M(torch.nn.Module):
__constants__ = ["const"]
def __init__(self, attr, const):
super(M, self).__init__()
self.attr = attr
self.const = const
def forward(self):
return self.const
attr = torch.rand(2, 3)
m1 = M(attr, 1)
m2 = M(attr, 1)
self.assertSameType(m1, m2)
# a different constant value
m3 = M(attr, 2)
self.assertDifferentType(m1, m3)
def test_linear(self):
"""
Simple example with a real nn Module
"""
a = torch.nn.Linear(5, 5)
b = torch.nn.Linear(5, 5)
c = torch.nn.Linear(10, 10)
a = torch.jit.script(a)
b = torch.jit.script(b)
c = torch.jit.script(c)
self.assertSameType(a, b)
self.assertDifferentType(a, c)
def test_submodules(self):
"""
If submodules differ, the types should differ.
"""
class M(torch.nn.Module):
def __init__(self, in1, out1, in2, out2):
super(M, self).__init__()
self.submod1 = torch.nn.Linear(in1, out1)
self.submod2 = torch.nn.Linear(in2, out2)
def forward(self, x):
x = self.submod1(x)
x = self.submod2(x)
return x
a = M(1, 1, 2, 2)
b = M(1, 1, 2, 2)
self.assertSameType(a, b)
self.assertSameType(a.submod1, b.submod1)
c = M(2, 2, 2, 2)
self.assertDifferentType(a, c)
self.assertSameType(b.submod2, c.submod1)
self.assertDifferentType(a.submod1, b.submod2)
def test_param_vs_attribute(self):
"""
The same module with `foo` held as a parameter vs. as a plain attribute
shouldn't share types.
"""
class M(torch.nn.Module):
def __init__(self, foo):
super(M, self).__init__()
self.foo = foo
def forward(self, x):
return x + self.foo
as_param = torch.nn.Parameter(torch.ones(2, 2))
as_attr = torch.ones(2, 2)
param_mod = M(as_param)
attr_mod = M(as_attr)
self.assertDifferentType(attr_mod, param_mod)
def test_same_but_different_classes(self):
"""
Even if everything about the module is the same, different originating
classes should prevent type sharing.
"""
class A(torch.nn.Module):
__constants__ = ["const"]
def __init__(self, in1, out1, in2, out2):
super(A, self).__init__()
self.submod1 = torch.nn.Linear(in1, out1)
self.submod2 = torch.nn.Linear(in2, out2)
self.const = 5
def forward(self, x):
x = self.submod1(x)
x = self.submod2(x)
return x * self.const
class B(torch.nn.Module):
__constants__ = ["const"]
def __init__(self, in1, out1, in2, out2):
super(B, self).__init__()
self.submod1 = torch.nn.Linear(in1, out1)
self.submod2 = torch.nn.Linear(in2, out2)
self.const = 5
def forward(self, x):
x = self.submod1(x)
x = self.submod2(x)
return x * self.const
a = A(1, 1, 2, 2)
b = B(1, 1, 2, 2)
self.assertDifferentType(a, b)
def test_mutate_attr_value(self):
"""
Mutating the value of an attribute should not change type sharing
"""
class M(torch.nn.Module):
def __init__(self, in1, out1, in2, out2):
super(M, self).__init__()
self.submod1 = torch.nn.Linear(in1, out1)
self.submod2 = torch.nn.Linear(in2, out2)
self.foo = torch.ones(in1, in1)
def forward(self, x):
x = self.submod1(x)
x = self.submod2(x)
return x + self.foo
a = M(1, 1, 2, 2)
b = M(1, 1, 2, 2)
a.foo = torch.ones(2, 2)
b.foo = torch.rand(2, 2)
self.assertSameType(a, b)
def test_assign_python_attr(self):
"""
Assigning a new (python-only) attribute should not change type sharing
"""
class M(torch.nn.Module):
def __init__(self, in1, out1, in2, out2):
super(M, self).__init__()
self.submod1 = torch.nn.Linear(in1, out1)
self.submod2 = torch.nn.Linear(in2, out2)
self.foo = torch.ones(in1, in1)
def forward(self, x):
x = self.submod1(x)
x = self.submod2(x)
return x + self.foo
# explicitly call script() to freeze the type
a = torch.jit.script(M(1, 1, 2, 2))
b = torch.jit.script(M(1, 1, 2, 2))
a.new_attr = "foo bar baz"
self.assertSameType(a, b)
# but if we assign attributes *before* calling script(), the types
# should be different, since `new_attr` should be turned into a Script
# attribute
a = M(1, 1, 2, 2)
b = M(1, 1, 2, 2)
a.new_attr = "foo bar baz"
self.assertDifferentType(a, b)
def test_failed_attribute_compilation(self):
"""
Attributes whose type cannot be inferred should fail cleanly with nice hints
"""
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
# assign a type we know can't be converted to TorchScript
self.foo = object
def forward(self):
# try to use it in forward
return self.foo
m = M()
with self.assertRaisesRegexWithHighlight(RuntimeError,
"failed to convert Python type",
"self.foo"):
torch.jit.script(m)
def test_script_function_attribute_different(self):
"""
Different functions passed in should lead to different types
"""
@torch.jit.script
def fn1(x):
return x + x
@torch.jit.script
def fn2(x):
return x - x
class M(torch.nn.Module):
def __init__(self, fn):
super(M, self).__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
fn1_mod = M(fn1)
fn2_mod = M(fn2)
self.assertDifferentType(fn1_mod, fn2_mod)
def test_builtin_function_same(self):
class Caller(torch.nn.Module):
def __init__(self, fn):
super(Caller, self).__init__()
self.fn = fn
def forward(self, input):
return self.fn(input, input)
c1 = Caller(torch.add)
c2 = Caller(torch.add)
self.assertSameType(c1, c2)
def test_builtin_function_different(self):
class Caller(torch.nn.Module):
def __init__(self, fn):
super(Caller, self).__init__()
self.fn = fn
def forward(self, input):
return self.fn(input, input)
c1 = Caller(torch.add)
c2 = Caller(torch.sub)
self.assertDifferentType(c1, c2)
def test_script_function_attribute_same(self):
"""
Same functions passed in should lead to same types
"""
@torch.jit.script
def fn(x):
return x + x
class M(torch.nn.Module):
def __init__(self, fn):
super(M, self).__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
fn1_mod = M(fn)
fn2_mod = M(fn)
self.assertSameType(fn1_mod, fn2_mod)
def test_python_function_attribute_different(self):
"""
Different functions passed in should lead to different types
"""
def fn1(x):
return x + x
def fn2(x):
return x - x
class M(torch.nn.Module):
def __init__(self, fn):
super(M, self).__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
fn1_mod = M(fn1)
fn2_mod = M(fn2)
self.assertDifferentType(fn1_mod, fn2_mod)
def test_python_function_attribute_same(self):
"""
Same functions passed in should lead to same types
"""
def fn(x):
return x + x
class M(torch.nn.Module):
def __init__(self, fn):
super(M, self).__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
fn1_mod = M(fn)
fn2_mod = M(fn)
self.assertSameType(fn1_mod, fn2_mod)
@suppress_warnings
def test_tracing_gives_different_types(self):
"""
Since we can't guarantee that methods are the same between different
trace runs, tracing must always generate a unique type.
"""
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, x, y):
if x.sum() > y.sum():
return x
else:
return y
a = torch.jit.trace(M(), (torch.zeros(1, 1), torch.ones(1, 1)))
b = torch.jit.trace(M(), (torch.ones(1, 1), torch.zeros(1, 1)))
self.assertDifferentType(a, b)
def test_ignored_fns(self):
class M(torch.nn.Module):
def __init__(self, foo):
super(M, self).__init__()
self.foo = foo
@torch.jit.ignore
def ignored(self):
return self.foo
def forward(self):
return self.ignored()
a = torch.jit.script(M(torch.ones(1)))
b = torch.jit.script(M(torch.ones(2)))
self.assertSameType(a, b)
self.assertNotEqual(a(), b())
@suppress_warnings
def test_script_module_containing_traced_module(self):
class Traced(torch.nn.Module):
def __init__(self):
super(Traced, self).__init__()
def forward(self, x):
if x.sum() > 0:
return x
else:
return x + x
class M(torch.nn.Module):
def __init__(self, input):
super(M, self).__init__()
self.traced = torch.jit.trace(Traced(), input)
def forward(self, x):
return self.traced(x)
a = M((torch.ones(1), ))
b = M((torch.zeros(1), ))
self.assertDifferentType(a, b)
def test_loaded_modules_work(self):
class AB(torch.nn.Module):
def __init__(self):
super(AB, self).__init__()
self.a = 1
self.b = 1
def forward(self):
return self.a + self.b
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
self.a = 1
def forward(self):
return self.a
class Wrapper(torch.nn.Module):
def __init__(self, sub):
super(Wrapper, self).__init__()
self.sub = sub
def forward(self):
return self.sub()
def package(x):
buffer = io.BytesIO()
torch.jit.save(torch.jit.script(x), buffer)
buffer.seek(0)
return torch.jit.script(Wrapper(torch.jit.load(buffer)))
a = package(AB())
a()
b = package(A())
b()
def test_module_dict_same_type_different_name(self):
"""
We should be able to differentiate between two ModuleDict instances
that have different keys but the same value types.
"""
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
def forward(self, x):
return x
class Foo(torch.nn.Module):
def __init__(self, s):
super(Foo, self).__init__()
self.dict = torch.nn.ModuleDict(s)
def forward(self, x):
return x
a = Foo({'foo': A()})
b = Foo({'bar': A()})
c = Foo({'bar': A()})
self.assertDifferentType(a, b)
self.assertSameType(b, c)
def test_type_sharing_define_in_init(self):
"""
Tests that instances of a ScriptModule subclass that defines
methods in its __init__ do not share types.
"""
class A(torch.jit.ScriptModule):
def __init__(self, val):
super().__init__()
self.define(f"""
def forward(self) -> int:
return {val}
""")
one = A(1)
two = A(2)
self.assertEqual(one(), 1)
self.assertEqual(two(), 2)
def test_type_sharing_disabled(self):
"""
Test that type sharing can be disabled.
"""
class A(torch.nn.Module):
def __init__(self, sub):
super().__init__()
self.sub = sub
def forward(self, x):
return x
class B(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return x
top1 = A(A(B()))
top2 = A(A(B()))
top1_s = torch.jit._recursive.create_script_module(
top1,
torch.jit._recursive.infer_methods_to_compile,
share_types=False,
)
top2_s = torch.jit._recursive.create_script_module(
top2,
torch.jit._recursive.infer_methods_to_compile,
share_types=False,
)
self.assertDifferentType(top1_s, top2_s)
self.assertDifferentType(top1_s, top1_s.sub)
self.assertDifferentType(top1_s, top2_s.sub)
self.assertDifferentType(top2_s, top2_s.sub)
self.assertDifferentType(top2_s, top1_s.sub)
def test_type_shared_ignored_attributes(self):
"""
Test that types are shared if the exclusion of their
ignored attributes makes them equal.
"""
class A(torch.nn.Module):
__jit_ignored_attributes__ = ["a"]
def __init__(self, a, b):
super().__init__()
self.a = a
self.b = b
def forward(self, x):
return x
a_with_linear = A(torch.nn.Linear(5, 5), 5)
a_with_string = A("string", 10)
# Both should have the same type because the attribute
# that differs in type is ignored and the common attribute
# has the same type.
self.assertSameType(a_with_linear, a_with_string)
def test_type_not_shared_ignored_attributes(self):
"""
Test that types are not shared if the exclusion of their
ignored attributes makes them not equal.
"""
class A(torch.nn.Module):
__jit_ignored_attributes__ = ["a"]
def __init__(self, a, b, c):
super().__init__()
self.a = a
self.b = b
self.c = c
def forward(self, x):
return x
mod = A(torch.nn.Linear(5, 5), 5, "string")
s1 = torch.jit.script(mod)
A.__jit_ignored_attributes__ = ["a", "b"]
s2 = torch.jit.script(mod)
# The types of s1 and s2 should differ. Although they are instances
# of A, __jit_ignored_attributes__ was modified before scripting s2,
# so the set of ignored attributes is different between s1 and s2.
self.assertDifferentType(s1, s2)
|
pytorch-master
|
test/jit/test_type_sharing.py
|
# Owner(s): ["oncall: mobile"]
import torch
import torch._C
import torch.nn.functional as F
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfNoXNNPACK
class TestOptimizeForMobilePreserveDebugInfo(JitTestCase):
def check_replacement(
self,
model,
replacements,
jit_pass,
):
"""
model: Model on which the optimization is performed
replacements: Dict mapping from nodes' kinds in the optimized model
to the kinds of nodes they replaced in the original model
jit_pass: Function to perform optimization
"""
original_kinds = set(replacements.values())
original_source_ranges = {
node.kind(): node.sourceRange()
for node in model.graph.nodes()
if node.kind() in original_kinds
}
jit_pass(model._c)
for node in model.graph.nodes():
if node.kind() in replacements:
self.assertEqual(
node.sourceRange(),
original_source_ranges[replacements[node.kind()]],
)
@skipIfNoXNNPACK
def test_replace_conv1d_with_conv2d(self):
class TestConv1d(torch.nn.Module):
def __init__(self, weight, bias):
super(TestConv1d, self).__init__()
self.weight = weight
self.bias = bias
def forward(self, x):
return F.conv1d(x, self.weight, self.bias)
self.check_replacement(
model=torch.jit.script(
TestConv1d(
weight=torch.rand(3, 3, 3),
bias=torch.rand(3),
),
),
replacements={
"prim::ListUnpack": "aten::conv1d",
"prim::ListConstruct": "aten::conv1d",
"aten::unsqueeze": "aten::conv1d",
"aten::conv2d": "aten::conv1d",
"aten::squeeze": "aten::conv1d",
},
jit_pass=torch._C._jit_pass_transform_conv1d_to_conv2d,
)
@skipIfNoXNNPACK
def test_insert_pre_packed_linear_before_inline_and_conv_2d_op(self):
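# Each prepacked:: op inserted by the pass should report the source range of
# the aten op it replaced, so debug info survives the rewrite.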
class TestPrepackedLinearBeforeInlineAndConv2dOp(torch.nn.Module):
def __init__(
self,
linear_weight,
linear_bias,
conv2d_weight,
conv2d_bias,
conv_transpose2d_weight,
conv_transpose2d_bias,
):
super(
TestPrepackedLinearBeforeInlineAndConv2dOp,
self,
).__init__()
self.linear_weight = linear_weight.float()
self.linear_bias = linear_bias.float()
self.conv2d_weight = conv2d_weight.float()
self.conv2d_bias = conv2d_bias.float()
self.conv_transpose2d_weight = conv_transpose2d_weight.float()
self.conv_transpose2d_bias = conv_transpose2d_bias.float()
def forward(self, x):
linear_res = F.linear(
x.float(),
self.linear_weight,
self.linear_bias,
)
conv2d_res = F.conv2d(
input=linear_res.unsqueeze(dim=0).float(),
weight=self.conv2d_weight,
bias=self.conv2d_bias,
)
return F.conv_transpose2d(
input=conv2d_res,
weight=self.conv_transpose2d_weight,
bias=self.conv_transpose2d_bias,
)
minibatch = 1
in_channels = 6
iH = 4
iW = 5
out_channels = 6
kH = 2
kW = 3
self.check_replacement(
model=torch.jit.script(
TestPrepackedLinearBeforeInlineAndConv2dOp(
linear_weight=torch.rand(iW, 3),
linear_bias=torch.rand(iW),
conv2d_weight=torch.rand(out_channels, in_channels, kH, kW),
conv2d_bias=torch.rand(out_channels),
conv_transpose2d_weight=torch.rand(
out_channels,
in_channels,
kH,
kW,
),
conv_transpose2d_bias=torch.rand(out_channels),
),
),
replacements={
"prepacked::linear_clamp_prepack": "aten::linear",
"prepacked::linear_clamp_run": "aten::linear",
"prepacked::conv2d_clamp_prepack": "aten::conv2d",
"prepacked::conv2d_clamp_run": "aten::conv2d",
"prepacked::conv2d_transpose_clamp_prepack":
"aten::conv_transpose2d",
"prepacked::conv2d_transpose_clamp_run":
"aten::conv_transpose2d",
},
jit_pass=torch._C._jit_pass_insert_prepacked_ops,
)
@skipIfNoXNNPACK
def test_insert_pre_packed_linear_op(self):
self.check_replacement(
model=torch.jit.trace(torch.nn.Linear(5, 4), torch.rand(3, 2, 5)),
replacements={
"prepacked::linear_clamp_prepack": "aten::linear",
"prepacked::linear_clamp_run": "aten::linear"
},
jit_pass=torch._C._jit_pass_insert_prepacked_ops,
)
def run_test_fuse_activation_with_pack_ops_linear_conv2d(
self,
linear_activation,
linear_activation_kind,
conv2d_activation,
conv2d_activation_kind,
):
class TestFuseActivationLinearConv2d(torch.nn.Module):
def __init__(
self,
linear_weight,
linear_bias,
conv2d_weight,
conv2d_bias,
):
super(TestFuseActivationLinearConv2d, self).__init__()
self.linear_weight = linear_weight
self.linear_bias = linear_bias
self.conv2d_weight = conv2d_weight
self.conv2d_bias = conv2d_bias
def forward(self, x):
x = F.linear(
input=x,
weight=self.linear_weight,
bias=self.linear_bias,
)
x = linear_activation(x)
x = F.conv2d(
input=x.unsqueeze(dim=0),
weight=self.conv2d_weight,
bias=self.conv2d_bias,
)
return conv2d_activation(x)
linear_in_features = 5
linear_out_features = 4
conv2d_in_channels = 3
conv2d_out_channels = 4
conv2d_kernel = 2
x_shape = (3, 2, 5)
model = torch.jit.trace(
TestFuseActivationLinearConv2d(
linear_weight=torch.nn.Parameter(
data=torch.rand(
linear_out_features,
linear_in_features,
),
requires_grad=False,
),
linear_bias=torch.nn.Parameter(
data=torch.rand(linear_out_features),
requires_grad=False,
),
conv2d_weight=torch.rand(
conv2d_out_channels,
conv2d_in_channels,
conv2d_kernel,
conv2d_kernel,
),
conv2d_bias=torch.rand(conv2d_out_channels),
),
torch.rand(x_shape),
)
torch._C._jit_pass_insert_prepacked_ops(model._c)
self.check_replacement(
model=model,
replacements={
"prepacked::linear_clamp_prepack":
"prepacked::linear_clamp_prepack",
"prepacked::linear_clamp_run": linear_activation_kind,
"prepacked::conv2d_clamp_prepack":
"prepacked::conv2d_clamp_prepack",
"prepacked::conv2d_clamp_run": conv2d_activation_kind,
},
jit_pass=torch._C._jit_pass_fuse_clamp_w_prepacked_linear_conv,
)
@skipIfNoXNNPACK
def test_fuse_activation_with_pack_ops_linear_conv2d_1(self):
self.run_test_fuse_activation_with_pack_ops_linear_conv2d(
linear_activation=F.hardtanh,
linear_activation_kind="aten::hardtanh",
conv2d_activation=F.hardtanh_,
conv2d_activation_kind="aten::hardtanh_"
)
@skipIfNoXNNPACK
def test_fuse_activation_with_pack_ops_linear_conv2d_2(self):
self.run_test_fuse_activation_with_pack_ops_linear_conv2d(
linear_activation=F.hardtanh_,
linear_activation_kind="aten::hardtanh_",
conv2d_activation=F.hardtanh,
conv2d_activation_kind="aten::hardtanh"
)
@skipIfNoXNNPACK
def test_fuse_activation_with_pack_ops_linear_conv2d_3(self):
self.run_test_fuse_activation_with_pack_ops_linear_conv2d(
linear_activation=F.relu,
linear_activation_kind="aten::relu",
conv2d_activation=F.relu_,
conv2d_activation_kind="aten::relu_"
)
@skipIfNoXNNPACK
def test_fuse_activation_with_pack_ops_linear_conv2d_4(self):
self.run_test_fuse_activation_with_pack_ops_linear_conv2d(
linear_activation=F.relu_,
linear_activation_kind="aten::relu_",
conv2d_activation=F.relu,
conv2d_activation_kind="aten::relu"
)
|
pytorch-master
|
test/jit/test_optimize_for_mobile_preserve_debug_info.py
|
# Owner(s): ["oncall: jit"]
from torch.testing._internal.jit_utils import JitTestCase
from torch._C import parse_ir
import torch
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestAliasAnalysis(JitTestCase):
def test_becomes_wildcard_annotations(self):
graph_str = """
graph(%a.1 : Tensor, %b.1 : Tensor):
%11 : NoneType = prim::Constant()
%8 : int = prim::Constant[value=0]()
%7 : int = prim::Constant[value=1]()
%x.1 : Tensor = aten::add(%a.1, %b.1, %7)
%y.1 : Tensor[] = aten::split(%x.1, %7, %8)
return ()
"""
graph = parse_ir(graph_str)
alias_db = graph.alias_db()
split_node = graph.findNode("aten::split")
# split input enters wildcard set, list initialized as containing wildcard set
self.assertTrue(alias_db.may_contain_alias(next(split_node.inputs()), split_node.output()))
# because %x.1 enters wildcard set, it now aliases other members of wildcard set (graph inputs)
self.assertTrue(alias_db.may_contain_alias(next(split_node.inputs()), next(graph.inputs())))
def test_nested_list_construct_not_wildcard(self):
@torch.jit.script
def foo(x):
y = torch.rand([2, 2])
return [y]
graph = foo.graph
graph.alias_db()
alias_db = graph.alias_db()
ten_construct = graph.findNode("aten::rand").output()
output = next(graph.outputs())
self.assertTrue(alias_db.may_contain_alias(ten_construct, output))
self.assertFalse(alias_db.may_contain_alias(next(graph.inputs()), ten_construct))
def test_recursive_calls(self):
@torch.jit.script
def foo(x, y):
x.add_(1)
return x + y
@torch.jit.script
def caller():
a = torch.rand([2, 2])
b = torch.ones([2, 2])
out1 = foo(a, b)
c = torch.rand([1])
d = torch.ones([2])
out2 = foo(d, c)
return out1, out2
isFrozen = False
descend_function_calls = True
alias_db = caller.graph.alias_db(isFrozen, descend_function_calls)
func_calls = caller.graph.findAllNodes("prim::CallFunction")
self.assertEqual(len(func_calls), 2)
for node in func_calls:
inps = list(node.inputs())
self.assertTrue(alias_db.has_writers(inps[1]))
self.assertFalse(alias_db.has_writers(inps[2]))
class Mod(torch.nn.Module):
def forward(self):
a = torch.rand([2, 2])
b = torch.ones([2, 2])
out1 = self.foo2(a, b)
c = torch.rand([1])
d = torch.ones([2])
out2 = self.foo2(d, c)
return out1, out2
def foo2(self, x, y):
x.add_(1)
return x + y
mod = torch.jit.script(Mod())
alias_db = mod.graph.alias_db(isFrozen, descend_function_calls)
func_calls = mod.graph.findAllNodes("prim::CallMethod")
self.assertEqual(len(func_calls), 2)
for node in func_calls:
inps = list(node.inputs())
self.assertTrue(alias_db.has_writers(inps[1]))
self.assertFalse(alias_db.has_writers(inps[2]))
|
pytorch-master
|
test/jit/test_alias_analysis.py
|
# Owner(s): ["oncall: jit"]
from typing import Any, Dict, List, Optional, Tuple
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.testing import FileCheck
from torch import jit
from jit.test_module_interface import TestModuleInterface # noqa: F401
import os
import sys
import torch
import torch.testing._internal.jit_utils
import torch.nn as nn
from torch.testing._internal.common_utils import freeze_rng_state
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestMisc(JitTestCase):
def test_joined_str(self):
def func(x):
hello, test = "Hello", "test"
print(f"{hello + ' ' + test}, I'm a {test}")
print("format blank")
hi = 'hi'
print(f"stuff before {hi}")
print(f"{hi} stuff after")
return x + 1
x = torch.arange(4., requires_grad=True)
# TODO: Add support for f-strings in string parser frontend
# self.checkScript(func, [x], optimize=True, capture_output=True)
with self.capture_stdout() as captured:
out = func(x)
scripted = torch.jit.script(func)
with self.capture_stdout() as captured_script:
# Compare the scripted function's output against the eager output.
out_script = scripted(x)
self.assertEqual(out, out_script)
self.assertEqual(captured, captured_script)
def test_kwarg_support(self):
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError, "variable number of arguments"):
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str = 2):
pass
torch.jit.script(M())
class M(torch.nn.Module):
def forward(self, *, n_tokens: int, device_name: str):
return n_tokens, device_name
sm = torch.jit.script(M())
with self.assertRaisesRegex(RuntimeError, "missing value for argument 'n_tokens'"):
sm()
with self.assertRaisesRegex(RuntimeError, "positional arg"):
sm(3, 'hello')
self.assertEqual(sm(n_tokens=3, device_name='hello'), (3, 'hello'))
def test_tuple_subscripted_assign(self):
with self.assertRaisesRegex(RuntimeError, "subscripted assignment"):
@torch.jit.script
def foo(a: Tuple[int, int]) -> None:
a[0] = a[1]
with self.assertRaisesRegex(RuntimeError, "augmented assignment"):
@torch.jit.script
def bar(a: Tuple[int, int]) -> None:
a[0] += a[1]
def test_subexpression_List_Future(self):
@torch.jit.script
def fn(x: List[torch.jit.Future[int]]) -> torch.jit.Future[int]:
return x[0]
FileCheck().check('Future[int]').check('Future[int]').run(fn.graph)
def test_subexpression_Future_annotate(self):
@torch.jit.script
def fn() -> torch.jit.Future[int]:
x: List[torch.jit.Future[int]] = []
return x[0]
FileCheck().check("Future[int][]").run(fn.graph)
def test_future_isinstance(self):
@torch.jit.script
def fn(x: Any) -> torch.jit.Future[int]:
assert isinstance(x, jit.Future[int])
return x
FileCheck().check("Future[int]").run(fn.graph)
def test_str_refine_any(self):
def forward(x: Any) -> str:
if isinstance(x, str):
return x
return "foo"
forward = torch.jit.script(forward)
self.assertEqual(forward(1), "foo")
self.assertEqual(forward("bar"), "bar")
def test_subexpression_Tuple_int_int_Future(self):
@torch.jit.script
def fn(x: Tuple[int, int, torch.jit.Future[int]]) -> Tuple[int, torch.jit.Future[int]]:
return x[0], x[2]
FileCheck().check('(int, int, Future[int])').check('(int, Future[int])').run(fn.graph)
def test_subexpression_Dict_int_Future(self):
@torch.jit.script
def fn(x: Dict[int, torch.jit.Future[int]], y: int) -> torch.jit.Future[int]:
return x[y]
FileCheck().check('Dict(int, Future(int))').check('Future[int]').run(fn.graph)
def test_subexpression_Optional(self):
@torch.jit.script
def fn(x: Optional[Dict[int, torch.jit.Future[int]]]) -> Optional[torch.jit.Future[int]]:
if x is not None:
return x[0]
else:
return None
FileCheck().check('Dict(int, Future(int))?').run(fn.graph)
def test_if_returning_any(self):
"""
Check that an if statement can return different
types early from each branch when the return
type of the function is Any.
"""
def if_function(inp: torch.Tensor) -> Any:
if inp.shape[0] == 1:
return inp * inp
else:
return "str"
self.checkScript(if_function, (torch.randn(5),))
def test_hacked_twin(self):
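# index_put.hacked_twin appears to be an overload that takes a plain
# List[Tensor] of indices (rather than List[Optional[Tensor]]); it is
# expected to match torch.index_put numerically.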
def gen_data():
with freeze_rng_state():
return torch.randn(10), torch.randint(10, (20,)), torch.randn(20)
input, index, value, = gen_data()
input1, index1, value1, = gen_data()
out1 = torch.ops.aten.index_put.hacked_twin(input, [index], value, accumulate=False)
out2 = torch.index_put(input1, [index1], value1, accumulate=False)
self.assertEqual(out1, out2)
torch.ops.aten.index_put_.hacked_twin(input, [index], value, accumulate=False)
torch.index_put_(input1, [index1], value1, accumulate=False)
self.assertEqual(input, input1)
def test_export_opnames_interface(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
pass
def two(self, x: torch.Tensor) -> torch.Tensor:
pass
def forward(self, x: torch.Tensor) -> torch.Tensor:
pass
class FooMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x + y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 * x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
return x * y
def two(self, x: torch.Tensor) -> torch.Tensor:
return 2 / x
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.two(self.one(x, x))
make_global(OneTwoModule)
class M(nn.Module):
sub : OneTwoModule
def __init__(self):
super(M, self).__init__()
self.sub = BarMod()
def forward(self, x: torch.Tensor) -> torch.Tensor:
return self.sub.forward(x)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
torch._C._enable_mobile_interface_call_export()
scripted_M_mod = torch.jit.script(M())
self.assertTrue(set(['aten::mul.Scalar', 'aten::mul.Tensor', 'aten::reciprocal']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
scripted_M_mod.sub = torch.jit.script(FooMod())
self.assertTrue(set(['aten::add.Tensor', 'aten::mul.Scalar']).issubset(
set(torch.jit.export_opnames(scripted_M_mod))))
def test_math_inf(self):
from math import inf
def foo():
return inf
self.checkScript(foo, ())
def test_list_literal_infer(self):
def expects_intlist(x: List[int]):
x.append(3)
return x
def foo():
return expects_intlist([])
self.checkScript(foo, ())
def annotated_list_fail():
return expects_intlist(torch.jit.annotate([], List[Tensor]))
with self.assertRaises(RuntimeError):
torch.jit.script(annotated_list_fail)
def non_temporary_fail():
a = []
return expects_intlist(a)
with self.assertRaises(RuntimeError):
torch.jit.script(non_temporary_fail)
@torch.jit.script
def test_return():
return []
FileCheck().check("Tensor[] = prim::ListConstruct").run(test_return.graph)
def test_legacy_tensor_constructor(self):
# testing PyObject overload
def test_all_dtypes():
return (
torch.BoolTensor([2]),
torch.LongTensor([3]),
torch.ByteTensor([4]),
torch.CharTensor([5]),
torch.DoubleTensor([6]),
torch.FloatTensor([7]),
torch.IntTensor([8]),
torch.ShortTensor([1]),
torch.HalfTensor([1]),
)
self.checkScript(test_all_dtypes, ())
# now test empty overload
def empty_overload():
return torch.LongTensor(2, 3, 4)
eager = empty_overload()
jit = torch.jit.script(empty_overload)()
eager[:] = 1
jit[:] = 1
self.assertEqual(eager, jit)
def no_inputs():
return torch.DoubleTensor()
self.checkScript(no_inputs, ())
# bad schema
def multiple_args():
return torch.LongTensor(1, [2])
with self.assertRaisesRegex(RuntimeError, "multiple positional arguments that were not all integers"):
torch.jit.script(multiple_args)
# kwarg bad schema
def bad_kwarg():
return torch.LongTensor(hello="1")
with self.assertRaisesRegex(RuntimeError, "hello"):
torch.jit.script(bad_kwarg)
def test_broadcasting_list(self):
"""
Test BroadcastingList and torch.nn._size_N_t alias
"""
from torch._jit_internal import BroadcastingList2
from torch.nn.common_types import _size_2_t
def sum_i(x: _size_2_t) -> int:
return x[0] + x[1]
def sum_f(x: BroadcastingList2[float]) -> float:
return x[0] + x[1]
self.assertTrue(torch.jit.script(sum_i)(4) == 8)
self.assertTrue(torch.jit.script(sum_f)(4.5) == 9.)
def test_parse_ir_annotate(self):
ir = """
graph():
%3 : int[] = prim::Constant[value=annotate(List[int], [])]()
return (%3)
"""
graph = torch._C.parse_ir(ir, True)
func = torch._C._create_function_from_graph("forward", graph)
ret = func()
self.assertTrue(ret == [])
def test_parse_ir_single_element_tensor_positive(self):
ir = """
graph():
%7 : Long(1, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value={0}]()
return (%7)
"""
graph = torch._C.parse_ir(ir, True)
func = torch._C._create_function_from_graph("forward", graph)
ret = func()
self.assertTrue(ret.numel() == 1)
self.assertTrue(len(ret.size()) == 1)
def test_parse_ir_single_element_tensor_negative(self):
ir = """
graph():
%7 : Long(1, strides=[1], requires_grad=0, device=cpu) = prim::Constant[value={-17}]()
return (%7)
"""
graph = torch._C.parse_ir(ir, True)
func = torch._C._create_function_from_graph("forward", graph)
ret = func()
self.assertTrue(ret.numel() == 1)
self.assertTrue(len(ret.size()) == 1)
|
pytorch-master
|
test/jit/test_misc.py
|
# Owner(s): ["oncall: jit"]
import io
import os
import shutil
import sys
import tempfile
import torch
import torch.nn as nn
from torch.onnx import OperatorExportTypes
from torch.autograd import Variable
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfNoLapack, skipIfCaffe2, skipIfNoCaffe2
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
# Smoke tests for export methods
class TestExportModes(JitTestCase):
class MyModel(nn.Module):
def __init__(self):
super(TestExportModes.MyModel, self).__init__()
def forward(self, x):
return x.transpose(0, 1)
def test_protobuf(self):
torch_model = TestExportModes.MyModel()
fake_input = Variable(torch.randn(1, 1, 224, 224), requires_grad=True)
f = io.BytesIO()
torch.onnx._export(torch_model, (fake_input), f, verbose=False,
export_type=torch.onnx.ExportTypes.PROTOBUF_FILE)
def test_zipfile(self):
torch_model = TestExportModes.MyModel()
fake_input = Variable(torch.randn(1, 1, 224, 224), requires_grad=True)
f = io.BytesIO()
torch.onnx._export(torch_model, (fake_input), f, verbose=False,
export_type=torch.onnx.ExportTypes.ZIP_ARCHIVE)
def test_compressed_zipfile(self):
torch_model = TestExportModes.MyModel()
fake_input = Variable(torch.randn(1, 1, 224, 224), requires_grad=True)
f = io.BytesIO()
torch.onnx._export(torch_model, (fake_input), f, verbose=False,
export_type=torch.onnx.ExportTypes.COMPRESSED_ZIP_ARCHIVE)
def test_directory(self):
torch_model = TestExportModes.MyModel()
fake_input = Variable(torch.randn(1, 1, 224, 224), requires_grad=True)
d = tempfile.mkdtemp()
torch.onnx._export(torch_model, (fake_input), d, verbose=False,
export_type=torch.onnx.ExportTypes.DIRECTORY)
shutil.rmtree(d)
def test_onnx_multiple_return(self):
@torch.jit.script
def foo(a):
return (a, a)
f = io.BytesIO()
x = torch.ones(3)
torch.onnx._export(foo, (x,), f)
@skipIfNoCaffe2
@skipIfNoLapack
def test_caffe2_aten_fallback(self):
class ModelWithAtenNotONNXOp(nn.Module):
def forward(self, x, y):
abcd = x + y
defg = torch.linalg.qr(abcd)
return defg
x = torch.rand(3, 4)
y = torch.rand(3, 4)
torch.onnx.export_to_pretty_string(
ModelWithAtenNotONNXOp(), (x, y),
add_node_names=False,
do_constant_folding=False,
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK)
@skipIfCaffe2
@skipIfNoLapack
def test_aten_fallback(self):
class ModelWithAtenNotONNXOp(nn.Module):
def forward(self, x, y):
abcd = x + y
defg = torch.linalg.qr(abcd)
return defg
x = torch.rand(3, 4)
y = torch.rand(3, 4)
torch.onnx.export_to_pretty_string(
ModelWithAtenNotONNXOp(), (x, y),
add_node_names=False,
do_constant_folding=False,
operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
# support for linalg.qr was added in later op set versions.
opset_version=9)
# torch.fmod is used to test ONNX_ATEN.
# If you plan to remove fmod from aten, or find that this test fails,
# please contact @Rui.
def test_onnx_aten(self):
class ModelWithAtenFmod(nn.Module):
def forward(self, x, y):
return torch.fmod(x, y)
x = torch.randn(3, 4, dtype=torch.float32)
y = torch.randn(3, 4, dtype=torch.float32)
torch.onnx.export_to_pretty_string(
ModelWithAtenFmod(), (x, y),
add_node_names=False,
do_constant_folding=False,
operator_export_type=OperatorExportTypes.ONNX_ATEN)
|
pytorch-master
|
test/jit/test_export_modes.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestLogging(JitTestCase):
def test_bump_numeric_counter(self):
class ModuleThatLogs(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
for i in range(x.size(0)):
x += 1.0
torch.jit._logging.add_stat_value('foo', 1)
if bool(x.sum() > 0.0):
torch.jit._logging.add_stat_value('positive', 1)
else:
torch.jit._logging.add_stat_value('negative', 1)
return x
logger = torch.jit._logging.LockingLogger()
old_logger = torch.jit._logging.set_logger(logger)
try:
mtl = ModuleThatLogs()
for i in range(5):
mtl(torch.rand(3, 4, 5))
self.assertEqual(logger.get_counter_val('foo'), 15)
self.assertEqual(logger.get_counter_val('positive'), 5)
finally:
torch.jit._logging.set_logger(old_logger)
def test_trace_numeric_counter(self):
def foo(x):
torch.jit._logging.add_stat_value('foo', 1)
return x + 1.0
traced = torch.jit.trace(foo, torch.rand(3, 4))
logger = torch.jit._logging.LockingLogger()
old_logger = torch.jit._logging.set_logger(logger)
try:
traced(torch.rand(3, 4))
self.assertEqual(logger.get_counter_val('foo'), 1)
finally:
torch.jit._logging.set_logger(old_logger)
def test_time_measurement_counter(self):
class ModuleThatTimes(torch.jit.ScriptModule):
def forward(self, x):
tp_start = torch.jit._logging.time_point()
for i in range(30):
x += 1.0
tp_end = torch.jit._logging.time_point()
torch.jit._logging.add_stat_value('mytimer', tp_end - tp_start)
return x
mtm = ModuleThatTimes()
logger = torch.jit._logging.LockingLogger()
old_logger = torch.jit._logging.set_logger(logger)
try:
mtm(torch.rand(3, 4))
self.assertGreater(logger.get_counter_val('mytimer'), 0)
finally:
torch.jit._logging.set_logger(old_logger)
def test_time_measurement_counter_script(self):
class ModuleThatTimes(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, x):
tp_start = torch.jit._logging.time_point()
for i in range(30):
x += 1.0
tp_end = torch.jit._logging.time_point()
torch.jit._logging.add_stat_value('mytimer', tp_end - tp_start)
return x
mtm = ModuleThatTimes()
logger = torch.jit._logging.LockingLogger()
old_logger = torch.jit._logging.set_logger(logger)
try:
mtm(torch.rand(3, 4))
self.assertGreater(logger.get_counter_val('mytimer'), 0)
finally:
torch.jit._logging.set_logger(old_logger)
def test_counter_aggregation(self):
def foo(x):
for i in range(3):
torch.jit._logging.add_stat_value('foo', 1)
return x + 1.0
traced = torch.jit.trace(foo, torch.rand(3, 4))
logger = torch.jit._logging.LockingLogger()
logger.set_aggregation_type('foo', torch.jit._logging.AggregationType.AVG)
old_logger = torch.jit._logging.set_logger(logger)
try:
traced(torch.rand(3, 4))
self.assertEqual(logger.get_counter_val('foo'), 1)
finally:
torch.jit._logging.set_logger(old_logger)
def test_logging_levels_set(self):
torch._C._jit_set_logging_option('foo')
self.assertEqual('foo', torch._C._jit_get_logging_option())
|
pytorch-master
|
test/jit/test_logging.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
from typing import Any, List
import torch
from torch.testing._internal.jit_utils import JitTestCase, make_global
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
class TestWith(JitTestCase):
"""
A suite of tests for with statements.
"""
def test_with_as(self):
"""
Check that with statements that use the 'as' keyword to bind expressions
to targets work as expected.
"""
@torch.jit.script
class Context(object):
"""
This class implements a basic context manager interface for use in
the unit tests. The stateful part of this class
is a Tensor that is mutated in-place so that modifications made in the
JIT interpreter are visible outside of it.
"""
def __init__(self, start: int):
self.count = torch.tensor([start], dtype=torch.double)
def __enter__(self):
self.count.add_(0.3)
return self.count
def __exit__(self, type: Any, value: Any, tb: Any) -> bool:
self.count.sub_(0.3)
return True
make_global(Context)
def test_basic(x: torch.Tensor) -> torch.Tensor:
"""Basic test with one with-statement."""
c = Context(1)
with c as mult:
y = x + mult
y *= c.count
return y
def test_pass(x: torch.Tensor) -> torch.Tensor:
"""
Test with a pass statement inside a with-statement. Although
the body of the with is empty, __enter__ and __exit__ should
still be called.
"""
c = Context(1)
with c as mult:
pass
x *= c.count
return x
def test_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that returning early from inside a with-statement works
as expected.
"""
with c as mult:
y = x + mult
return y
x = y + y
return x
def test_conditional_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that conditionally returning early from inside a with-statement works
as expected.
"""
with c as mult:
y = x + mult
if mult > 0:
return y
x = y + y
return x
def test_break(x: torch.Tensor, c: Context, l: List[int]) -> torch.Tensor:
"""
Test that breaking early from inside a with-statement works
as expected.
"""
with c as mult:
for a in l:
if a == 0:
break
x += a * mult
return x
def test_continue(x: torch.Tensor, c: Context, l: List[int]) -> torch.Tensor:
"""
Test that using continue inside a with-statement works
as expected.
"""
with c as mult:
for a in l:
if a == 0:
continue
x += a * mult
return x
def test_serial(x: torch.Tensor) -> torch.Tensor:
"""
Test two with-statements in a row.
"""
c = Context(1)
with c as mult:
y = x + mult
with c as mult:
y *= mult
return y
def test_nested(x: torch.Tensor) -> torch.Tensor:
"""
Test nested with-statements.
"""
c = Context(1)
with c as m:
with c as n:
y = x + n
y *= m
return y
def test_combined(x: torch.Tensor) -> torch.Tensor:
"""
Test a with-statement with multiple with items.
"""
c = Context(1)
d = Context(2)
with c as m, d as n:
y = x + (m + n)
return y
test_input = torch.randn(2, 2)
test_context = Context(2)
test_list = [2, 0, 1, 3, 0, 2]
self.checkScript(test_basic, (test_input,))
self.checkScript(test_pass, (test_input,))
self.checkScript(test_early_return, (test_input, test_context))
self.checkScript(test_break, (test_input, test_context, test_list))
self.checkScript(test_continue, (test_input, test_context, test_list))
self.assertEqual(test_context.count, 2)
self.checkScript(test_serial, (test_input,))
self.checkScript(test_nested, (test_input,))
self.checkScript(test_combined, (test_input,))
def test_with_no_as(self):
"""
Check that with statements that do not use the 'as' keyword to bind expressions
to targets work as expected.
"""
@torch.jit.script
class Context(object):
"""
This class implements a basic context manager interface for use in
the unit tests. The stateful part of this class
is a Tensor that is mutated in-place so that modifications made in the
JIT interpreter are visible outside of it.
"""
def __init__(self, start: int):
self.count = torch.tensor([start], dtype=torch.double)
def __enter__(self):
self.count.add_(0.3)
return self.count
def __exit__(self, type: Any, value: Any, tb: Any):
self.count.sub_(0.3)
make_global(Context)
def test_basic(x: torch.Tensor) -> torch.Tensor:
"""Basic test with one with-statement."""
c = Context(1)
with c:
y = x + c.count
y *= c.count
return y
def test_pass(x: torch.Tensor) -> torch.Tensor:
"""
Test with a pass statement inside a with-statement. Although
the body of the with is empty, __enter__ and __exit__ should
still be called.
"""
c = Context(1)
with c:
pass
x *= c.count
return x
def test_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that returning early from inside a with-statement works
as expected.
"""
with c:
y = x + c.count
return y
x = y + y
return x
def test_conditional_early_return(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test that conditionally returning early from inside a with-statement works
as expected.
"""
with c:
y = x + c.count
if c.count > 0:
return y
x = y + y
return x
def test_break(x: torch.Tensor, c: Context, l: List[int]) -> torch.Tensor:
"""
Test that breaking early from inside a with-statement works
as expected.
"""
with c:
for a in l:
if a == 0:
break
x += a * c.count
return x
def test_continue(x: torch.Tensor, c: Context, l: List[int]) -> torch.Tensor:
"""
Test that using continue inside a with-statement works
as expected.
"""
with c:
for a in l:
if a == 0:
continue
x += a * c.count
return x
def test_serial(x: torch.Tensor) -> torch.Tensor:
"""
Test two with-statements in a row.
"""
c = Context(1)
with c:
y = x + c.count
with c:
y *= c.count
return y
def test_nested(x: torch.Tensor) -> torch.Tensor:
"""
Test nested with-statements.
"""
c = Context(1)
with c:
with c:
y = x + c.count
y *= c.count
return y
def test_combined(x: torch.Tensor) -> torch.Tensor:
"""
Test a with-statement with multiple with items.
"""
c = Context(1)
d = Context(2)
with c, d:
y = x + (c.count + d.count)
return y
test_input = torch.randn(2, 2)
test_context = Context(2)
test_list = [2, 0, 1, 3, 0, 2]
self.checkScript(test_basic, (test_input,))
self.checkScript(test_pass, (test_input,))
self.checkScript(test_early_return, (test_input, test_context))
self.checkScript(test_break, (test_input, test_context, test_list))
self.checkScript(test_continue, (test_input, test_context, test_list))
self.assertEqual(test_context.count, 2)
self.checkScript(test_serial, (test_input,))
self.checkScript(test_nested, (test_input,))
self.checkScript(test_combined, (test_input,))
def test_with_exceptions(self):
"""
Check that exceptions thrown in the bodies of with-statements are
handled correctly.
"""
@torch.jit.script
class Context(object):
"""
This class implements a basic context manager interface for use in
the unit tests. The stateful part of this class
is a Tensor that is mutated in-place so that modifications made in the
JIT interpreter are visible outside of it.
"""
def __init__(self, start: int):
self.count = torch.tensor([start], dtype=torch.double)
def __enter__(self):
self.count.add_(0.3)
return self.count
def __exit__(self, type: Any, value: Any, tb: Any):
self.count.sub_(0.3)
make_global(Context)
@torch.jit.script
def method_that_raises() -> torch.Tensor:
raise Exception("raised exception")
@torch.jit.script
def test_exception(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test the case in which an exception is thrown while executing the body of a with-statement.
"""
with c as _:
x += method_that_raises()
return x
@torch.jit.script
def test_exception_nested(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test the case in which an exception is thrown while executing the body of a nested with-statement.
"""
with c as _:
with c as _:
x += method_that_raises()
return x
@torch.jit.script
def with_that_raises(c: Context) -> torch.Tensor:
a = torch.tensor([1])
with c as _:
a += method_that_raises()
return a
@torch.jit.script
def test_exception_fn_call(x: torch.Tensor, c: Context) -> torch.Tensor:
"""
Test the case in which an exception is thrown while there are active with-statements in two different
frames.
"""
with c as _:
x += with_that_raises(c)
return x
c = Context(1)
# checkScript and checkScriptRaisesRegex cannot be used because the string frontend will
# not compile class types (of which Context, the context manager being used for this test,
# is one).
with self.assertRaisesRegexWithHighlight(Exception, r"raised exception", "raise Exception(\"raised exception"):
test_exception(torch.randn(2), c)
self.assertEqual(c.count, 1)
with self.assertRaisesRegexWithHighlight(Exception, r"raised exception", "raise Exception(\"raised exception"):
test_exception_nested(torch.randn(2), c)
self.assertEqual(c.count, 1)
with self.assertRaisesRegexWithHighlight(Exception, r"raised exception", "raise Exception(\"raised exception"):
test_exception_fn_call(torch.randn(2), c)
self.assertEqual(c.count, 1)
def test_with_errors(self):
"""
Check that errors related to with-statements are detected and reported correctly.
"""
@torch.jit.script
class NoEnterNoExit(object):
"""
This class is missing __enter__ and __exit__ methods.
"""
def __init__(self):
self.count = 1
@torch.jit.script
class BadEnter(object):
"""
This class has an __enter__ method with an incorrect signature.
"""
def __init__(self):
self.count = 1
def __enter__(self, incr: int):
self.count += incr
def __exit__(self, type: Any, value: Any, tb: Any):
pass
@torch.jit.script
class BadExit(object):
"""
This class has an __exit__ method with an incorrect signature.
"""
def __init__(self):
self.count = 1
def __enter__(self):
self.count += 1
def __exit__(self, type: Any, value: Any):
pass
@torch.jit.script
class ExitIncorrectTypes(object):
"""
This class has an __exit__ method with unsupported argument types.
"""
def __init__(self):
self.count = 1
def __enter__(self):
self.count += 1
def __exit__(self, type: Any, value: int, tb: int):
pass
def test_no_enter_no_exit(x: torch.Tensor, cm: NoEnterNoExit) -> torch.Tensor:
with cm as _:
pass
return x
def test_bad_enter(x: torch.Tensor, cm: BadEnter) -> torch.Tensor:
with cm as _:
pass
return x
def test_bad_exit(x: torch.Tensor, cm: BadExit) -> torch.Tensor:
with cm as _:
pass
return x
def test_exit_incorrect_types(x: torch.Tensor, cm: ExitIncorrectTypes) -> torch.Tensor:
with cm as _:
pass
return x
def test_enter_without_object():
with "not_object" as obj:
pass
test_tensor = torch.randn(5, dtype=torch.double)
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"does not define __enter__ and __exit__ methods", "cm"
):
self.checkScript(test_no_enter_no_exit, (test_tensor, NoEnterNoExit()))
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"__enter__ must have only one argument and one return value", "cm"
):
self.checkScript(test_bad_enter, (test_tensor, BadEnter()))
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"__exit__ must have four arguments", "cm"
):
self.checkScript(test_bad_exit, (test_tensor, BadExit()))
with self.assertRaisesRegexWithHighlight(
RuntimeError, r"argument 2 of __exit__ must have Any type", "cm"
):
self.checkScript(
test_exit_incorrect_types, (test_tensor, ExitIncorrectTypes())
)
with self.assertRaisesRegexWithHighlight(RuntimeError, r"must return an object", "\"not_object\""):
self.checkScript(test_enter_without_object, ())
def test_with_no_grad(self):
"""
Check that torch.no_grad() works. Most of these are adapted from
corresponding tests for eager-mode no_grad.
"""
# Basic no_grad test.
def test_no_grad(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
w = x + y
return w
s = torch.jit.script(test_no_grad)
x = torch.ones(5, 5, requires_grad=True)
y = torch.ones(5, 5) * 4
w = s(x, y)
self.assertFalse(w.requires_grad)
self.assertRaises(RuntimeError, lambda: w.backward(torch.ones(5, 5)))
self.assertIsNone(w.grad_fn)
# Test assignment of a grad-less Tensor to a Tensor with gradients
# in a no_grad block.
def test_no_grad_assignment(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
x[0] = y
return x
s = torch.jit.script(test_no_grad_assignment)
z = torch.randn(5)
w = s(x, z)
self.assertTrue(w.requires_grad)
self.assertIsNone(w.grad_fn)
# Check that functions decorated with @torch.jit.ignore respect no_grad
# when they are called in JIT mode.
class NoGradModule(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.ignore
def adder(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
w = x + y
return w
def forward(self, x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
with torch.no_grad():
w = self.adder(x, y)
return w
s = torch.jit.script(NoGradModule())
w = s(x, y)
self.assertFalse(w.requires_grad)
def test_with_record_function(self):
"""
Check that torch.autograd.profiler.record_function context manager is
torchscriptable.
"""
def with_rf(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
with torch.autograd.profiler.record_function("foo"):
# Nested record_function.
with torch.autograd.profiler.record_function("nested"):
a = x + y
return a
scripted = torch.jit.script(with_rf)
x, y = torch.ones(2), torch.ones(2)
with torch.autograd.profiler.profile() as p:
scripted(x, y)
# Need to call below to populate CPU children.
p.key_averages()
function_events = p.function_events
# Event with name "foo" should be recorded.
rf_events = [evt for evt in function_events if evt.name == "foo"]
self.assertEqual(len(rf_events), 1)
rf_event = rf_events[0]
child_events = rf_event.cpu_children
# Ensure we find nested record_function event
self.assertTrue("nested" in (child.name for child in child_events))
nested_function_event = [
evt for evt in function_events if evt.name == "nested"
][0]
# Nested record function should have child "aten::add"
nested_child_events = nested_function_event.cpu_children
self.assertTrue("aten::add" in (child.name for child in nested_child_events))
|
pytorch-master
|
test/jit/test_with.py
|
# Owner(s): ["oncall: jit"]
from itertools import product
from typing import Tuple
from unittest.case import expectedFailure
import torch
from torch import complex32, float32, float64, int32, int64
from torch.jit._passes import _property_propagation
from torch.testing._internal.common_methods_invocations import (
SampleInput,
sample_inputs_adaptive_avg_pool2d,
sample_inputs_conv2d,
)
from torch.testing._internal.common_utils import set_default_dtype, first_sample
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.jit_metaprogramming_utils import create_traced_fn
from torch.testing._internal.common_device_type import (
ops,
instantiate_device_type_tests,
)
from torch.testing._internal.common_methods_invocations import op_db
"""
Dtype Analysis relies on symbolic shape analysis, which is still in beta
"""
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
custom_rules_works_list = {
"nn.functional.adaptive_avg_pool1d",
"nn.functional.adaptive_avg_pool2d",
"nn.functional.adaptive_avg_pool3d",
"nn.functional.adaptive_max_pool1d",
"nn.functional.adaptive_max_pool2d",
"avg_pool1d",
"avg_pool3d",
"conv_transpose2d",
"conv1d",
"conv2d",
"hardswish",
"avg_pool2d",
"max_pool1d",
"max_pool2d",
"max_pool3d",
"nn.functional.prelu",
"batch_norm",
"batch_norm",
}
custom_rules_expected_failure_list = {
# create_traced_fn generates prim::NumToTensor nodes in graph (not supported yet)
"nn.functional.adaptive_max_pool3d",
}
# These ops seem to not be in opinfos
custom_rules_not_tested_list = [
"conv3d",
"conv_tbc",
"conv_transpose1d",
"conv_transpose3d",
"convolution",
"_convolution",
"max_unpool2d",
"max_unpool3d",
"reflection_pad1d",
"reflection_pad2d",
"reflection_pad3d",
"replication_pad1d",
"replication_pad2d",
"replication_pad3d",
"upsample_bilinear2d",
"upsample_linear1d",
"upsample_nearest1d",
"upsample_nearest2d",
"upsample_nearest3d",
"upsample_trilinear3d",
"flatten",
]
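# Standalone sketch (not part of the original test suite): the eager-mode dtype
# promotion behavior that the dtype analysis below is validated against. Only
# public torch APIs are used; the helper name is illustrative.
def _example_eager_dtype_promotion():
    int_t = torch.ones(2, 2, dtype=torch.int32)
    float_t = torch.ones(2, 2, dtype=torch.float32)
    # Binary ops promote to the wider dtype; result_type reports it without
    # running the op.
    assert torch.result_type(int_t, float_t) == torch.float32
    assert (int_t + float_t).dtype == torch.float32
    # Some unary ops implicitly convert integral inputs to the default float dtype.
    assert torch.log(int_t).dtype == torch.get_default_dtype()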
class TestDtypeBase(JitTestCase):
SCALAR = "SCALAR" # To mark unary vs 0 dim tensor
def setUp(self):
self.prev_symbolic_shapes_test_enabled = (
torch._C._jit_symbolic_shapes_test_mode_enabled()
)
torch._C._jit_set_symbolic_shapes_test_mode(True)
def tearDown(self):
torch._C._jit_set_symbolic_shapes_test_mode(
self.prev_symbolic_shapes_test_enabled
)
@staticmethod
def node_output_dtypes(graph):
dtypes = []
for out in graph.outputs():
if isinstance(out.type(), torch._C.TensorType):
dtypes.append(out.type().dtype())
else:
dtypes.append(None)
return dtypes
@staticmethod
def node_output_dtype_single(graph):
dtypes = TestDtypeBase.node_output_dtypes(graph)
assert len(dtypes) == 1
return dtypes[0]
def prop_dtype_on_graph(self, graph, example_inputs):
# We need to clear shape information because torch.jit.script
# will return a cached graph if the function is scripted twice.
torch._C._jit_pass_erase_shape_information(graph)
_property_propagation.apply_input_props_using_example(graph, example_inputs)
torch._C._jit_pass_propagate_shapes_on_graph(graph)
torch._C._jit_pass_propagate_dtype(graph)
def assert_dtype_equal(self, fn, in_shapes, in_dtypes):
inputs = [self.get_rand_tensor(s, d) for s, d in zip(in_shapes, in_dtypes)]
try:
self.assert_dtype_equal_custom_args(fn, inputs)
except Exception as e:
fail_text = f"Failed for shapes {in_shapes} and dtypes {in_dtypes}"
raise AssertionError(fail_text) from e
def assert_dtype_equal_custom_args(self, fn, args):
try:
# Eager execution
expected_res = fn(*args)
except RuntimeError as e:
return
expected_dtype = expected_res.dtype
# Run the Dtype Analysis
graph = torch.jit.script(fn).graph # Note this is a cached graph
self.prop_dtype_on_graph(graph, args)
actual_dtype = self.node_output_dtype_single(graph)
self.assertEqual(actual_dtype, expected_dtype, "Failed Verification")
def get_rand_tensor(self, shape, dtype):
if shape is self.SCALAR:
if dtype is float32:
return 1.1
elif dtype is int64:
return 2
else:
raise RuntimeError(
"Testing of scalars only supported for fp32 and int64"
)
if dtype in (int32, int64):
rand_tensor = torch.randint(0, 10, shape, dtype=dtype)
else:
rand_tensor = torch.rand(shape, dtype=dtype)
# Sanity check!
self.assertEqual(rand_tensor.dtype, dtype)
return rand_tensor
class TestDtypeAnalysis(TestDtypeBase):
def test_unary(self):
# Testing the Unary Implementation that uses metatensors
def relu_inplace(x):
return x.relu_()
def log(x):
return torch.log(x)
functions = [relu_inplace, log]
input_shapes = [
((2, 2),), # Simple Case
((0, 2),), # Size 0 Tensor
((),), # zerodim
]
input_dtypes = [
(float32,), # Simple Case
(int64,), # Test how some unary ops implicitly convert to float
(complex32,), # Show we can handle complex vals as well
]
for fn, in_shapes, in_dtypes in product(functions, input_shapes, input_dtypes):
self.assert_dtype_equal(fn, in_shapes, in_dtypes)
def test_binary_tensors(self):
# Testing using Metatensors
def add(x, y):
return x + y
def div(x, y):
return x / y
functions = [add, div]
input_shapes = [
((1, 1, 2), (1, 2)), # Different Dim, non-zerodim
((), (1, 2)), # One zerodim
((1, 2), ()), # Other zerodim
((2, 0, 3), (1, 3)), # Test a tensor with a dim of 0
((), ()), # both zerodim
]
input_dtypes = [
(float32, float32), # Simple Case
(int32, int64), # Size promotion (complicated case for 0-dim tensors)
(float32, int32), # Type promotion
(int64, float32), # Type promotion with size change
(float64, complex32), # Show we can handle complex vals as well
]
for fn, in_shapes, in_dtypes in product(functions, input_shapes, input_dtypes):
self.assert_dtype_equal(fn, in_shapes, in_dtypes)
def test_binary_scalar(self):
# Test the mixing of scalar and non-scalar args
input_shapes = [
((2, 2), self.SCALAR), # Non-Zerodim vs scalar
((), self.SCALAR), # Zerodim vs scalar
# Scalar vs Scalar is automatically inferred.
]
input_dtypes = [
(float32, float32), # Simple Case
(int32, int64), # Size promotion (complicated case for 0-dim tensors)
(int32, float32), # Type promotion
]
with set_default_dtype(float32):
for in_shapes, in_dtypes in product(input_shapes, input_dtypes):
scalar_type = in_dtypes[1]
if scalar_type == float32:
def add(x, y: float):
return x + y
else:
def add(x, y: int):
return x + y
self.assert_dtype_equal(add, in_shapes, in_dtypes)
def test_custom_rules(self):
# Test some of the ops that are not covered by Metatensors
# Note that unlike the Conv2d module, the function conv2d
# does not take dtype/device arguments.
def conv2d_fn(input, weight, bias):
return torch.nn.functional.conv2d(input, weight, bias)
def adaptive_avg_pool2d_fn(input, output_size: Tuple[int]):
return torch._C._nn.adaptive_avg_pool2d(input, output_size)
for fn, inputs_fn in (
(conv2d_fn, sample_inputs_conv2d),
(adaptive_avg_pool2d_fn, sample_inputs_adaptive_avg_pool2d),
):
for dtype in (torch.int8, torch.float64):
# Gets default version for conv2d
sample_input: SampleInput = list(inputs_fn(None, "cpu", dtype, False))[-1]
input_args = [sample_input.input, *sample_input.args]
self.assert_dtype_equal_custom_args(fn, input_args)
def test_conv_no_mixed_args(self):
def conv2d_fn(input, weight, bias):
return torch.nn.functional.conv2d(input, weight, bias)
# Now make sure that conv2d doesn't support mixed args
conv_ins = sample_inputs_conv2d(None, "cpu", torch.float, False)
conv_in = list(conv_ins)[-1]
weight, bias = conv_in.args
weight = weight.type(torch.long)
with self.assertRaises(RuntimeError):
conv2d_fn(conv_in.input, weight, bias)
# Check that we also don't propagate
graph = torch.jit.script(conv2d_fn).graph # Note this is a cached graph
self.prop_dtype_on_graph(graph, [conv_in.input, weight, bias])
actual_dtype = self.node_output_dtype_single(graph)
self.assertEqual(actual_dtype, None)
def test_combined(self):
# Test a case with both custom rules and metatensors
def func(input, weight, bias, y):
conv_out = torch.nn.functional.conv2d(input, weight, bias)
conv_2 = conv_out + y
flattened = torch.flatten(conv_2, start_dim=2)
add_res = flattened + y
return add_res
conv_ins = sample_inputs_conv2d(None, "cpu", torch.int8, False)
conv_in = list(conv_ins)[-1]
y_val = torch.rand((1,), dtype=torch.float32)
input_args = [conv_in.input, *conv_in.args, y_val]
self.assert_dtype_equal_custom_args(func, input_args)
class TestDtypeCustomRules(TestDtypeBase):
def assert_output_dtype_equal(self, expected_res, prop_graph):
actual_dtype = self.node_output_dtypes(prop_graph)
if len(actual_dtype) == 1:
# For len=1, there is no tuple packing for expected_res.
self.assert_tensor_dtype_equal(expected_res, actual_dtype[0])
else:
self.assertEqual(len(expected_res), len(actual_dtype))
for expected, actual in zip(expected_res, actual_dtype):
self.assert_tensor_dtype_equal(expected, actual)
def assert_tensor_dtype_equal(self, tensor_output, graph_dtype):
if not isinstance(tensor_output, torch.Tensor):
return
self.assertEqual(tensor_output.dtype, graph_dtype)
def custom_rules_test_base(self, device, dtype, op, allow_eager_fail=False):
try:
samples = op.sample_inputs(device, dtype, requires_grad=False)
sample_input = first_sample(self, samples)
input_args = [sample_input.input, *sample_input.args]
expected_res = op(*input_args, **sample_input.kwargs)
except Exception as e:
if allow_eager_fail:
return
else:
raise e
func = op.get_op()
traced_fn = create_traced_fn(self, func)
# Have to run the traced function to actually generate the trace
traced_fn(sample_input.input, *sample_input.args, **sample_input.kwargs)
# Run the Dtype Analysis
graph = traced_fn.graph # Note this is a cached graph
input_tensors = [t for t in input_args if isinstance(t, torch.Tensor)]
input_tensors += [v for v in sample_input.kwargs.values() if isinstance(v, torch.Tensor)]
self.prop_dtype_on_graph(graph, input_tensors)
self.assert_output_dtype_equal(expected_res, graph)
@ops([op for op in op_db if op.aten_name in custom_rules_works_list])
def test_custom_rules(self, device, dtype, op):
self.custom_rules_test_base(device, dtype, op)
@ops([op for op in op_db if op.aten_name in custom_rules_works_list])
def test_custom_rules_ints(self, device, dtype, op):
# OpInfo samples currently only cover floating-point dtypes, so rerun the
# same ops here with integer dtypes.
if dtype == torch.float32:
dtype = torch.int32
else:
dtype = torch.int64
# Because integer dtypes are not always implemented, allow eager mode to fail
self.custom_rules_test_base(device, dtype, op, allow_eager_fail=True)
@expectedFailure
@ops([op for op in op_db if op.aten_name in custom_rules_expected_failure_list])
def test_custom_rules_expected_failure(self, device, dtype, op):
self.custom_rules_test_base(device, dtype, op)
TestDtypeCustomRulesCPU = None
# This creates TestDtypeCustomRulesCPU
instantiate_device_type_tests(TestDtypeCustomRules, globals(), only_for=("cpu",))
|
pytorch-master
|
test/jit/test_dtype_analysis.py
|
# Owner(s): ["oncall: jit"]
import io
import torch
import unittest
from torch.testing._internal.common_utils import IS_WINDOWS, TEST_MKL
from torch.testing._internal.jit_utils import JitTestCase
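# Standalone sketch (not one of the original tests): the dense <-> sparse COO
# round trip that the freezing and serialization tests below rely on.
def _example_sparse_coo_roundtrip():
    dense = torch.rand(3, 4)
    sparse = dense.to_sparse()  # COO layout
    assert sparse.is_sparse
    # Converting back recovers the original values.
    assert torch.equal(sparse.to_dense(), dense)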
class TestSparse(JitTestCase):
def test_freeze_sparse_coo(self):
class SparseTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.rand(3, 4).to_sparse()
self.b = torch.rand(3, 4).to_sparse()
def forward(self, x):
return x + self.a + self.b
x = torch.rand(3, 4).to_sparse()
m = SparseTensorModule()
unfrozen_result = m.forward(x)
m.eval()
frozen = torch.jit.freeze(torch.jit.script(m))
frozen_result = frozen.forward(x)
self.assertEqual(unfrozen_result, frozen_result)
buffer = io.BytesIO()
torch.jit.save(frozen, buffer)
buffer.seek(0)
loaded_model = torch.jit.load(buffer)
loaded_result = loaded_model.forward(x)
self.assertEqual(unfrozen_result, loaded_result)
def test_serialize_sparse_coo(self):
class SparseTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.rand(3, 4).to_sparse()
self.b = torch.rand(3, 4).to_sparse()
def forward(self, x):
return x + self.a + self.b
x = torch.rand(3, 4).to_sparse()
m = SparseTensorModule()
expected_result = m.forward(x)
buffer = io.BytesIO()
torch.jit.save(torch.jit.script(m), buffer)
buffer.seek(0)
loaded_model = torch.jit.load(buffer)
loaded_result = loaded_model.forward(x)
self.assertEqual(expected_result, loaded_result)
@unittest.skipIf(IS_WINDOWS or not TEST_MKL, "Need MKL to run CSR matmul")
def test_freeze_sparse_csr(self):
class SparseTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.rand(4, 4).to_sparse_csr()
self.b = torch.rand(4, 4).to_sparse_csr()
def forward(self, x):
return x.matmul(self.a).matmul(self.b)
x = torch.rand(4, 4).to_sparse_csr()
m = SparseTensorModule()
unfrozen_result = m.forward(x)
m.eval()
frozen = torch.jit.freeze(torch.jit.script(m))
frozen_result = frozen.forward(x)
self.assertEqual(unfrozen_result.to_dense(), frozen_result.to_dense())
buffer = io.BytesIO()
torch.jit.save(frozen, buffer)
buffer.seek(0)
loaded_model = torch.jit.load(buffer)
loaded_result = loaded_model.forward(x)
self.assertEqual(unfrozen_result.to_dense(), loaded_result.to_dense())
@unittest.skipIf(IS_WINDOWS or not TEST_MKL, "Need MKL to run CSR matmul")
def test_serialize_sparse_csr(self):
class SparseTensorModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = torch.rand(4, 4).to_sparse_csr()
self.b = torch.rand(4, 4).to_sparse_csr()
def forward(self, x):
return x.matmul(self.a).matmul(self.b)
x = torch.rand(4, 4).to_sparse_csr()
m = SparseTensorModule()
expected_result = m.forward(x)
buffer = io.BytesIO()
torch.jit.save(torch.jit.script(m), buffer)
buffer.seek(0)
loaded_model = torch.jit.load(buffer)
loaded_result = loaded_model.forward(x)
self.assertEqual(expected_result.to_dense(), loaded_result.to_dense())
|
pytorch-master
|
test/jit/test_sparse.py
|
# Owner(s): ["oncall: jit"]
from itertools import product
import unittest
import torch
from torch.testing._internal.common_utils import TEST_CUDA
from torch.testing._internal.jit_utils import JitTestCase
from torch.jit._passes._property_propagation import apply_input_props_using_example
try:
from torchvision import models
except ImportError:
models = None
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
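# Standalone sketch (not one of the original tests): the torch.device objects
# this analysis reasons about, and the eager-mode fact (results stay on their
# inputs' device) that the pass tries to infer statically.
def _example_device_objects():
    cpu = torch.device("cpu")
    assert cpu.type == "cpu"
    t = torch.ones(2, device=cpu)
    assert (t + t).device.type == "cpu"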
class TestDeviceAnalysis(JitTestCase):
@classmethod
def setUpClass(cls):
cls.cpu = torch.device("cpu")
cls.cuda = torch.device("cuda")
cls.vulkan = torch.device("vulkan")
cls.mkldnn = torch.device(
"mkldnn"
) # MKLDNN can't mix with other device types at all
cls.device_types = [cls.cpu, cls.cuda, cls.vulkan]
@staticmethod
def node_output_device(graph):
graph_out = list(graph.outputs())
assert len(graph_out) == 1
return graph_out[0].type().device()
def prop_device_on_graph(self, graph, example_devices, in_shapes=None):
graph_inputs = list(graph.inputs())
torch._C._jit_pass_erase_shape_information(graph)
self.assertEqual(len(graph_inputs), len(example_devices))
for graph_i, device_i in zip(graph_inputs, example_devices):
if device_i is not None:
graph_i.setType(graph_i.type().with_device(device_i))
if in_shapes:
for graph_i, shapes_i in zip(graph_inputs, in_shapes):
if shapes_i is not None:
graph_i.setType(graph_i.type().with_sizes(shapes_i))
torch._C._jit_pass_propagate_shapes_on_graph(graph)
torch._C._jit_pass_propagate_device(graph)
def assert_device_equal(
self, fn, in_devices, expected_device, in_shapes=None, subtest_str=""
):
with self.subTest(
f"In device: {in_devices}, expected: {expected_device}, \n {subtest_str}"
):
graph = torch.jit.script(fn).graph
self.prop_device_on_graph(graph, in_devices, in_shapes)
actual_device = self.node_output_device(graph)
if expected_device is None or actual_device is None:
self.assertEqual(actual_device, expected_device)
else:
self.assertEqual(
actual_device.type, expected_device.type, "Failed Verification"
)
def test_device_apply(self):
# Test if the device is properly applied to the input
def add_self(x):
return x + x
graph = torch.jit.script(add_self).graph
graph_input = next(graph.inputs())
graph_input.setType(graph_input.type().with_device(self.cpu))
# self.prop_device_on_graph(graph, [self.cpu])
self.assertEqual(graph_input.type().device(), self.cpu)
@unittest.skipIf(models is None, "Requires torchvision")
def test_mobilenet(self):
in_cpu = torch.randn(1, 3, 224, 224, device=self.cpu)
in_example = in_cpu
expected_device = self.cpu
m = torch.jit.script(models.mobilenet_v3_small())
m.eval()
graph = torch.jit.freeze(m).graph
# torch._C._jit_pass_erase_shape_information(graph)
apply_input_props_using_example(graph, in_example)
torch._C._jit_pass_propagate_shapes_on_graph(graph)
torch._C._jit_pass_propagate_device(graph)
actual_device = self.node_output_device(graph)
if expected_device is None or actual_device is None:
self.assertEqual(actual_device, expected_device)
else:
self.assertEqual(
actual_device.type, expected_device.type, "Failed Verification"
)
def test_simple(self):
def add_self(x):
return x + x
def relu_(x):
return torch.nn.functional.relu_(x)
functions = [add_self, relu_]
for in_device, fn in product(self.device_types, functions):
self.assert_device_equal(fn, [in_device], in_device)
def test_set_device(self):
def set_device(x):
return x.to("cpu")
for in_device in self.device_types:
self.assert_device_equal(set_device, [in_device], self.cpu)
def test_device_arg(self):
# Test that no device gets propagated when arg is passed in
def set_device(x, device_name: torch.device):
return x.to(device=device_name)
for in_device in self.device_types:
self.assert_device_equal(set_device, [in_device, None], None)
def test_tensor_as_fns(self):
def view_as_fn(x, y):
return x.view_as(y)
def expand_as_fn(x, y):
return x.expand_as(y)
def reshape_as_fn(x, y):
return x.reshape_as(y)
for test_fn in [view_as_fn, expand_as_fn, reshape_as_fn]:
self.assert_device_equal(test_fn, [self.cpu, self.cpu], self.cpu)
self.assert_device_equal(test_fn, [self.cuda, None], self.cuda)
self.assert_device_equal(test_fn, [None, self.mkldnn], None)
def type_as_fn(x, y):
return x.type_as(y)
self.assert_device_equal(type_as_fn, [self.cpu, self.cpu], self.cpu)
self.assert_device_equal(type_as_fn, [self.cuda, None], None)
self.assert_device_equal(type_as_fn, [None, self.mkldnn], self.mkldnn)
def zerodim_test_core(self, device_pairs):
# Test support for mixing zero-dim tensors with non-zero-dim tensors
def mul(x, y):
return x * y
def add(x, y):
return x + y
fns = [mul, add]
input_shapes = [
((1, 2, 2), (2, 2)), # Different dim, non-zerodim
((1, 2, 2), ()), # one zerodim
((), ()), # both zerodim
]
for fn, shapes, devices in product(fns, input_shapes, device_pairs):
subtest_str = f"{fn.__name__} \n shapes: {shapes}, \n devices: {devices}"
in0 = torch.rand(shapes[0], device=devices[0])
in1 = torch.rand(shapes[1], device=devices[1])
try:
out = fn(in0, in1)
except Exception as e:
# Don't expect eager failures for CPU zerodim tensors
for i in range(len(devices)):
if shapes[i] == () and devices[i] == self.cpu:
raise e
# only expect eager failures on different devices
if devices[0] == devices[1]:
raise e
# Expect result device to be None for the failure cases.
self.assert_device_equal(fn, devices, None, shapes, subtest_str)
continue
self.assert_device_equal(fn, devices, out.device, shapes, subtest_str)
# Test that without shapes, we either get the same device or None for the device,
# i.e., that the analysis is conservative about tensor shapes.
graph = torch.jit.script(fn).graph
self.prop_device_on_graph(graph, devices)
actual_device = self.node_output_device(graph)
self.assertTrue(
(actual_device is None) or (actual_device.type == out.device.type)
)
def test_zerodim_cpu(self):
# Allow for minimal testing locally
self.zerodim_test_core([(self.cpu, self.cpu)])
def test_zerodim_no_device(self):
# If device is missing, you should never be able to infer device type.
def mul(x, y):
return x * y
def add(x, y):
return x + y
fns = [mul, add]
device_pairs = [
(self.cpu, None),
(None, self.cpu),
(None, None),
]
input_shapes = [
((1, 2, 2), (2, 2)), # Different dim, non-zerodim
((1, 2, 2), ()), # one zerodim
((), ()), # both zerodim
]
for fn, shapes, devices in product(fns, input_shapes, device_pairs):
self.assert_device_equal(fn, devices, None, shapes)
@unittest.skipIf(not TEST_CUDA, "No CUDA")
def test_zerodim_gpu(self):
device_pairs = [
(self.cpu, self.cuda),
(self.cuda, self.cpu),
(self.cuda, self.cuda),
]
self.zerodim_test_core(device_pairs)
def test_custom_device_op(self):
# Test each of the custom device-setting functions and check that the device
# type is correctly applied
def set_cuda(x):
return x.cuda()
def set_cpu(x):
return x.cpu()
def set_mkldnn(x):
return x.to_mkldnn()
device_pairs = (
(set_cuda, self.cuda),
(set_cpu, self.cpu),
(set_mkldnn, self.mkldnn),
)
for fn, out_device in device_pairs:
for in_device in self.device_types:
self.assert_device_equal(fn, [in_device], out_device)
def test_device_if_propagation(self):
def test_fn(x, y, z: bool):
if z:
return x + 3
else:
return y * 2
self.assert_device_equal(test_fn, [self.cpu, self.cpu, None], self.cpu)
self.assert_device_equal(test_fn, [self.mkldnn, self.mkldnn, None], self.mkldnn)
self.assert_device_equal(test_fn, [self.cpu, self.cuda, None], None)
def test_loop_simple(self):
def test_fn(x, y, z: int):
for _ in range(z):
y = x
return y
self.assert_device_equal(test_fn, [self.cpu, self.cpu, None], self.cpu)
self.assert_device_equal(test_fn, [self.cpu, self.cuda, None], None)
self.assert_device_equal(test_fn, [self.cpu, None, None], None)
def test_loop_device_change(self):
def test_fn(x, z: int):
for _ in range(z):
x = x.cuda()
return x
self.assert_device_equal(test_fn, [self.cpu, None], None)
self.assert_device_equal(test_fn, [self.cuda, None], self.cuda)
self.assert_device_equal(test_fn, [None, None], None)
def test_while_change(self):
def test_fn(x, z: int):
while z > 0:
x = x.cuda()
z = 0
return x
self.assert_device_equal(test_fn, [self.cpu, None], None)
self.assert_device_equal(test_fn, [self.cuda, None], self.cuda)
self.assert_device_equal(test_fn, [None, None], None)
def test_nested_loops(self):
def test_fn(x, z: int):
for i in range(z):
x = x.cpu()
for _ in range(i):
x = x + 1
return x
self.assert_device_equal(test_fn, [self.cpu, None], self.cpu)
self.assert_device_equal(test_fn, [self.cuda, None], None)
self.assert_device_equal(test_fn, [None, None], None)
def test_if_loop_mix(self):
def test_fn(x, y, z: bool, a: bool):
c = x
while a:
if z:
c = x + 3
else:
c = y * 2
a = False
return c
self.assert_device_equal(test_fn, [self.cpu, self.cpu, None, None], self.cpu)
self.assert_device_equal(
test_fn, [self.mkldnn, self.mkldnn, None, None], self.mkldnn
)
self.assert_device_equal(test_fn, [self.cpu, self.cuda, None, None], None)
|
pytorch-master
|
test/jit/test_device_analysis.py
|
# Owner(s): ["oncall: jit"]
import io
import os
import sys
import torch
import torch.nn as nn
from typing import Any, Tuple
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, _inline_everything
from typing import List
from torch import Tensor
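# Standalone sketch (not one of the original tests): the fork/wait pattern
# exercised throughout this file. The helper name is illustrative only.
def _example_fork_wait():
    def double(t: Tensor) -> Tensor:
        return t * 2
    fut = torch.jit.fork(double, torch.ones(2))
    # fork returns a Future immediately; wait blocks until the result is ready.
    result = torch.jit.wait(fut)
    assert torch.equal(result, torch.full((2,), 2.0))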
class TestAsync(JitTestCase):
def test_async_python(self):
@torch.jit.script
def foo(x):
return torch.neg(x)
x = torch.rand(3, 4)
fut = torch.jit.fork(foo, x)
y_hat = foo(x)
y = torch.jit.wait(fut)
# assert nothing; only to make sure the eager Python fallback path works
def test_async_future_type_python(self):
def foo(inp):
futures = torch.jit.annotate(List[torch.jit.Future[torch.Tensor]], [])
for i in range(5):
futures.append(torch.jit.fork(lambda x: x, inp))
all_outputs = []
for future in futures:
all_outputs.append(torch.jit.wait(future))
return all_outputs
# assert nothing, just to make sure python type parsing works
foo(torch.randn(3, 4))
def test_async_parsing(self):
@torch.jit.script
def foo(x: Tensor) -> List[Tensor]:
return [torch.neg(x), x.t()]
@torch.jit.script
def bar(x):
futures = torch.jit.annotate(List[Future[List[Tensor]]], [])
for _ in range(3):
future = torch.jit.annotate(
Future[List[Tensor]],
torch.jit.fork(foo, x)
)
futures.append(future)
output = torch.jit.annotate(List[List[Tensor]], [])
for i in range(3):
output.append(torch.jit.wait(futures[i]))
return output
x = torch.rand(3, 3)
result = bar(x)
self.assertEqual(len(result), 3)
def test_async_script(self):
@torch.jit.script
def foo(x):
return torch.neg(x), x
x = torch.rand(3, 4)
@torch.jit.script
def wait_script(x):
fut = torch.jit.fork(foo, x)
y_hat = foo(x)
y = torch.jit.wait(fut)
return y, y_hat
y, y_hat = wait_script(x)
self.assertEqual(y, y_hat)
def test_async_script_capture(self):
class Mod(torch.jit.ScriptModule):
__constants__ = ['const']
def __init__(self):
super(Mod, self).__init__()
self.const = 42
self.param = nn.Parameter(torch.randn(2, 2))
@torch.jit.script_method
def foo(self, x1, x2):
return torch.neg(x1), self.param, self.const, torch.neg(x2), self.param
@torch.jit.script_method
def forward(self, x1, x2):
fut = torch.jit.fork(self.foo, x1, x2)
y_hat = self.foo(x1, x2)
y = torch.jit.wait(fut)
return y, y_hat
x1 = torch.rand(3, 4)
x2 = torch.rand(5, 6)
m = Mod()
with torch.jit.optimized_execution(False):
y, y_hat = m.forward(x1, x2)
self.assertEqual(y, y_hat)
def test_async_script_nested(self):
@torch.jit.script
def foo(x):
return torch.neg(x), x
x = torch.rand(3, 4)
@torch.jit.script
def wait_script(x):
fut = torch.jit._fork(foo, x)
y_hat = foo(x)
y = torch.jit._wait(fut)
return y, y_hat
@torch.jit.script
def wait_script_nest(x):
fut = torch.jit._fork(wait_script, x)
return torch.jit._wait(fut)
y, y_hat = wait_script_nest(x)
self.assertEqual(y, y_hat)
def test_async_script_no_script_mod(self):
x = torch.rand(3, 4)
with self.assertRaisesRegexWithHighlight(RuntimeError, 'cannot call a value', 'torch.jit._fork(x'):
@torch.jit.script
def wait_script(x):
fut = torch.jit._fork(x)
return fut
def test_async_script_multi_waits(self):
@torch.jit.script
def foo(x):
return torch.neg(x).t() + x
@torch.jit.script
def wait_script(x):
fut = torch.jit._fork(foo, x)
# wait twice on the same future
y1 = torch.jit._wait(fut)
y2 = torch.jit._wait(fut)
return y1, y2
x = torch.rand(2, 2)
y1, y2 = wait_script(x)
self.assertEqual(y1, y2)
def test_async_script_multi_forks(self):
@torch.jit.script
def foo1(x):
return torch.neg(x).t() + x
@torch.jit.script
def foo2(x, y):
return torch.neg(x).t() + x + torch.neg(y).t()
@torch.jit.script
def foo3(x, y, z):
return torch.neg(z).t() + y.t() + x
x1 = torch.rand(10, 10)
x2 = torch.rand(10, 10)
x3 = torch.rand(10, 10)
@torch.jit.script
def wait_script(x1, x2, x3):
f1 = torch.jit._fork(foo1, x1)
f2 = torch.jit._fork(foo2, x1, x2)
f3 = torch.jit._fork(foo3, x1, x2, x3)
f4 = torch.jit._fork(foo1, x2)
f5 = torch.jit._fork(foo2, x2, x3)
# ignore some forks
y1 = torch.jit._wait(f1)
y2 = torch.jit._wait(f2)
y3 = torch.jit._wait(f3)
return y1, y2, y3
y1, y2, y3 = wait_script(x1, x2, x3)
self.assertEqual(y1, foo1(x1))
self.assertEqual(y2, foo2(x1, x2))
self.assertEqual(y3, foo3(x1, x2, x3))
def test_async_kwargs(self):
def foo(x1, x2):
return 2 * x1 + x2
x1 = torch.rand(3, 4)
x2 = torch.rand(3, 4)
y_hat = foo(x1, x2)
# Cover tracing and bare functions with permutations of args, kwargs
for func in [
lambda x1, x2: torch.jit._wait(torch.jit._fork(foo, x1, x2)),
lambda x1, x2: torch.jit._wait(torch.jit._fork(foo, x1, x2=x2)),
lambda x1, x2: torch.jit._wait(torch.jit._fork(foo, x1=x1, x2=x2)),
lambda x1, x2: torch.jit._wait(torch.jit._fork(foo, x2=x2, x1=x1))
]:
for wrapper in [
func,
torch.jit.trace(func, (x1, x2)),
]:
self.assertEqual(wrapper(x1, x2), y_hat)
self.assertEqual(wrapper(x1, x2=x2), y_hat)
self.assertEqual(wrapper(x1=x1, x2=x2), y_hat)
self.assertEqual(wrapper(x2=x2, x1=x1), y_hat)
# Cover scripting
@torch.jit.script
def foo_script_args(x1, x2):
return torch.jit._wait(torch.jit._fork(foo, x1, x2))
@torch.jit.script
def foo_script_kwargs(x1, x2):
return torch.jit._wait(torch.jit._fork(foo, x1=x1, x2=x2))
for wrapper in [
foo_script_args,
foo_script_kwargs,
]:
self.assertEqual(wrapper(x1, x2), y_hat)
self.assertEqual(wrapper(x1, x2=x2), y_hat)
self.assertEqual(wrapper(x1=x1, x2=x2), y_hat)
self.assertEqual(wrapper(x2=x2, x1=x1), y_hat)
@_inline_everything
def test_async_script_trace(self):
class Traced(nn.Module):
def __init__(self):
super(Traced, self).__init__()
def forward(self, x):
return (torch.neg(x), x)
class Mod(torch.jit.ScriptModule):
def __init__(self):
super(Mod, self).__init__()
x = torch.rand(3, 3)
self.traced = torch.jit.trace(Traced(), (x), _force_outplace=True)
@torch.jit.script_method
def forward(self, x: Tensor) -> Tuple[List[Tensor], Tuple[Tensor, Tensor], Tensor]:
future1 = torch.jit._fork(self.traced, x)
future2 = torch.jit._fork(torch.neg, x)
tensor_tuple = torch.jit._wait(future1)
tensor_single = torch.jit._wait(future2)
tensor_list = []
tensor_list.append(tensor_tuple[0])
tensor_list.append(tensor_single)
# return a nested structure of tensors
return (tensor_list, tensor_tuple, tensor_tuple[1])
class TupleCl(nn.Module):
def __init__(self):
super(TupleCl, self).__init__()
self.module = Mod()
def forward(self, x):
z = torch.neg(x)
y = self.module(x)
list = [z, y[0][0], y[0][1], y[1][0], y[1][1], y[2]]
return tuple(list)
x = torch.rand(3, 3)
module = torch.jit.trace(TupleCl(), (x), _force_outplace=True)
# Make sure we have forks
self.assertGraphContainsExactly(module.graph, kind='prim::fork', num_kind_nodes=2)
# Make sure 1 ::neg is in the root graph and 2 ::negs are in the subgraphs
self.assertGraphContainsExactly(module.graph, kind='aten::neg', num_kind_nodes=1)
self.assertGraphContainsExactly(module.graph, kind='aten::neg', num_kind_nodes=3, consider_subgraphs=True)
y = torch.neg(x)
self.assertEqual(module(x), (y, y, y, y, x, x))
def test_async_script_error(self):
x = torch.rand(3, 4)
@torch.jit.script
def foo(x):
# error here
return x.t() + x
@torch.jit.script
def wait_script(x):
fut = torch.jit._fork(foo, x)
return torch.jit._wait(fut)
@torch.jit.script
def wait_script_nest(x):
fut = torch.jit._fork(wait_script, x)
return torch.jit._wait(fut)
# no future
error_msg = 'The size.*must match the size of tensor'
with self.assertRaisesRegexWithHighlight(Exception, error_msg, 'x.t() + x'):
foo(x)
# one future
with self.assertRaisesRegexWithHighlight(Exception, error_msg, 'torch.jit._fork(foo, x'):
wait_script(x)
# two futures with a different error
x = torch.rand(3, 4, 5)
with self.assertRaisesRegexWithHighlight(Exception,
'expects a tensor with <= 2 dimensions',
'torch.jit._fork(wait_script, x'):
wait_script_nest(x)
def test_async_grad_guard_with_grad(self):
@torch.jit.script
def foo(x):
y = x * 2
return y.requires_grad
@torch.jit.script
def bar(x):
fut = torch.jit._fork(foo, x)
requires_grad_in_fork = torch.jit._wait(fut)
z = x * 2
return (requires_grad_in_fork, z.requires_grad)
x = torch.randn(3, requires_grad=True)
with torch.enable_grad():
(inside_fork, after_wait) = bar(x)
self.assertEqual(inside_fork, True)
self.assertEqual(after_wait, True)
def test_async_grad_guard_no_grad(self):
@torch.jit.script
def foo(x):
y = x * 2
return y.requires_grad
@torch.jit.script
def bar(x):
fut = torch.jit._fork(foo, x)
requires_grad_in_fork = torch.jit._wait(fut)
z = x * 2
return (requires_grad_in_fork, z.requires_grad)
x = torch.randn(3, requires_grad=True)
with torch.no_grad():
(inside_fork, after_wait) = bar(x)
self.assertEqual(inside_fork, False)
self.assertEqual(after_wait, False)
def test_trace_fork_wait(self):
def fork_body(x):
return x.neg(), x.neg() + 1
def fn(x):
fut = torch.jit._fork(fork_body, x)
vals = torch.jit._wait(fut)
return vals[0], vals[1], x - 1
traced = torch.jit.trace(fn, (torch.rand(3, 4),))
x = torch.rand(3, 4)
self.assertEqual(fn(x), traced(x))
self.assertGraphContainsExactly(traced.graph, kind='prim::fork', num_kind_nodes=1)
self.assertGraphContainsExactly(traced.graph, kind='aten::wait', num_kind_nodes=1)
self.assertGraphContainsExactly(traced.graph, kind='aten::neg', num_kind_nodes=2, consider_subgraphs=True)
def test_trace_fork_wait_leaking(self):
my_list = []
def fork_body(x):
my_list.append(x + 1)
return x + 1
def fn(x):
fut = torch.jit._fork(fork_body, x)
val = torch.jit._wait(fut)
return my_list[0]
with self.assertRaisesRegexWithHighlight(RuntimeError, 'did not have observable data dependence with trace inputs; '
'this probably indicates your program cannot be understood '
'by the tracer.', ''):
traced = torch.jit.trace(fn, (torch.rand(3, 4),), check_trace=False)
def test_trace_fork_wait_inline(self):
def fork_body(x):
return x + 1, x + 2
def fn(x):
fut = torch.jit._fork(fork_body, x)
val = torch.jit._wait(fut)
return val[1]
traced = torch.jit.trace(fn, (torch.rand(3, 4),))
torch._C._jit_pass_inline_fork_wait(traced.graph)
self.assertGraphContainsExactly(traced.graph, kind='prim::fork', num_kind_nodes=0)
self.assertGraphContainsExactly(traced.graph, kind='aten::wait', num_kind_nodes=0)
self.assertGraphContainsExactly(traced.graph, kind='aten::add', num_kind_nodes=2)
def test_trace_fork_wait_inline_onnx(self):
def fork_body(x):
return torch.neg(x), torch.neg(x)
class MyMod(torch.nn.Module):
def forward(self, x):
fut = torch.jit._fork(fork_body, x)
val = torch.jit._wait(fut)
return val[1]
# smoke test for ONNX export
f = io.BytesIO()
torch.onnx.export(MyMod(), (torch.rand(3, 4),), f)
def test_trace_fork_wait_list_modulecalls(self):
def add_one(input):
return input + torch.ones(input.size())
class TestListFutureModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
input_list = []
for i in range(3):
input_list.append(input)
fut_list: List[Future[torch.Tensor]] = []
for input_tensor in input_list:
fut_list.append(torch.jit._fork(add_one, input_tensor))
# return List[Future[Tensor]] here to ensure traced
# module calls return the correct types
return fut_list
class TestModuleWrapper(nn.Module):
def __init__(self):
super().__init__()
self.list_fut_mod = TestListFutureModule()
def forward(self, input):
fut_list = self.list_fut_mod(input)
res = input
for fut in fut_list:
res = res + fut.wait()
return res
self.checkTrace(TestModuleWrapper(), (torch.randn(5, 5),))
def test_trace_modulecalls_with_different_output_types(self):
def add_one(input):
return input + torch.ones(input.size())
class DifferentOutputModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, input):
fut_res = torch.jit._fork(add_one, (input))
# return different types from module call
return input, fut_res
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.gen_output = DifferentOutputModule()
def forward(self, input):
res, fut_res = self.gen_output(input)
res = res + fut_res.wait()
return res
self.checkTrace(TestModule(), (torch.randn(5, 5),))
def test_no_future_subtype_message(self):
with self.assertRaisesRegexWithHighlight(RuntimeError, 'Future without a contained type', ''):
@torch.jit.script
def forward(self, x):
futs = torch.jit.annotate(List[torch.jit.Future], [])
def test_future_subtyping(self):
"""
Test that futures subtype each other properly.
"""
# Successful subtyping.
def returns_int(x: int) -> int:
return x + x + 1
def returns_future_any(x: int) -> torch.jit.Future[Any]:
return torch.jit._fork(returns_int, (x))
@torch.jit.script
def fn_int(x: int) -> Any:
fut = returns_future_any(x)
return fut.wait()
# Unsuccessful subtyping.
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"was annotated as having type Future\[float\] but is actually of type Future\[int\]",
"fut = returns_future_float(x"
):
def returns_future_float(x: int) -> torch.jit.Future[float]:
return torch.jit._fork(returns_int, (x))
@torch.jit.script
def fn_float(x: int) -> Any:
fut = returns_future_float(x)
return fut.wait()
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
|
pytorch-master
|
test/jit/test_async.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import io
import torch
import warnings
from contextlib import redirect_stderr
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestWarn(JitTestCase):
def test_warn(self):
@torch.jit.script
def fn():
warnings.warn("I am warning you")
f = io.StringIO()
with redirect_stderr(f):
fn()
FileCheck() \
.check_count(
str="UserWarning: I am warning you",
count=1,
exactly=True) \
.run(f.getvalue())
def test_warn_only_once(self):
@torch.jit.script
def fn():
for _ in range(10):
warnings.warn("I am warning you")
f = io.StringIO()
with redirect_stderr(f):
fn()
FileCheck() \
.check_count(
str="UserWarning: I am warning you",
count=1,
exactly=True) \
.run(f.getvalue())
def test_warn_only_once_in_loop_func(self):
def w():
warnings.warn("I am warning you")
@torch.jit.script
def fn():
for _ in range(10):
w()
f = io.StringIO()
with redirect_stderr(f):
fn()
FileCheck() \
.check_count(
str="UserWarning: I am warning you",
count=1,
exactly=True) \
.run(f.getvalue())
def test_warn_once_per_func(self):
def w1():
warnings.warn("I am warning you")
def w2():
warnings.warn("I am warning you")
@torch.jit.script
def fn():
w1()
w2()
f = io.StringIO()
with redirect_stderr(f):
fn()
FileCheck() \
.check_count(
str="UserWarning: I am warning you",
count=2,
exactly=True) \
.run(f.getvalue())
def test_warn_once_per_func_in_loop(self):
def w1():
warnings.warn("I am warning you")
def w2():
warnings.warn("I am warning you")
@torch.jit.script
def fn():
for _ in range(10):
w1()
w2()
f = io.StringIO()
with redirect_stderr(f):
fn()
FileCheck() \
.check_count(
str="UserWarning: I am warning you",
count=2,
exactly=True) \
.run(f.getvalue())
def test_warn_multiple_calls_multiple_warnings(self):
@torch.jit.script
def fn():
warnings.warn("I am warning you")
f = io.StringIO()
with redirect_stderr(f):
fn()
fn()
FileCheck() \
.check_count(
str="UserWarning: I am warning you",
count=2,
exactly=True) \
.run(f.getvalue())
def test_warn_multiple_calls_same_func_diff_stack(self):
def warn(caller: str):
warnings.warn("I am warning you from " + caller)
@torch.jit.script
def foo():
warn("foo")
@torch.jit.script
def bar():
warn("bar")
f = io.StringIO()
with redirect_stderr(f):
foo()
bar()
FileCheck() \
.check_count(
str="UserWarning: I am warning you from foo",
count=1,
exactly=True) \
.check_count(
str="UserWarning: I am warning you from bar",
count=1,
exactly=True) \
.run(f.getvalue())
|
pytorch-master
|
test/jit/test_warn.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
from textwrap import dedent
import unittest
import torch
from torch.testing._internal import jit_utils
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
# Tests various JIT-related utility functions.
class TestJitUtils(JitTestCase):
# Tests that POSITIONAL_OR_KEYWORD arguments are captured.
def test_get_callable_argument_names_positional_or_keyword(self):
def fn_positional_or_keyword_args_only(x, y):
return x + y
self.assertEqual(
["x", "y"],
torch._jit_internal.get_callable_argument_names(fn_positional_or_keyword_args_only))
# Tests that POSITIONAL_ONLY arguments are ignored.
@unittest.skipIf(sys.version_info < (3, 8), 'POSITIONAL_ONLY arguments are not supported before 3.8')
def test_get_callable_argument_names_positional_only(self):
code = dedent('''
def fn_positional_only_arg(x, /, y):
return x + y
''')
fn_positional_only_arg = jit_utils._get_py3_code(code, 'fn_positional_only_arg')
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_positional_only_arg))
# Tests that VAR_POSITIONAL arguments are ignored.
def test_get_callable_argument_names_var_positional(self):
def fn_var_positional_arg(x, *arg):
return x + arg[0]
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_var_positional_arg))
# Tests that KEYWORD_ONLY arguments are ignored.
def test_get_callable_argument_names_keyword_only(self):
def fn_keyword_only_arg(x, *, y):
return x + y
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_keyword_only_arg))
# Tests that VAR_KEYWORD arguments are ignored.
def test_get_callable_argument_names_var_keyword(self):
def fn_var_keyword_arg(**args):
return args['x'] + args['y']
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_var_keyword_arg))
# Tests that a signature mixing several different argument kinds yields no
# captured names.
@unittest.skipIf(sys.version_info < (3, 8), 'POSITIONAL_ONLY arguments are not supported before 3.8')
def test_get_callable_argument_names_hybrid(self):
code = dedent('''
def fn_hybrid_args(x, /, y, *args, **kwargs):
return x + y + args[0] + kwargs['z']
''')
fn_hybrid_args = jit_utils._get_py3_code(code, 'fn_hybrid_args')
self.assertEqual(
[],
torch._jit_internal.get_callable_argument_names(fn_hybrid_args))
def test_checkscriptassertraisesregex(self):
def fn():
tup = (1, 2)
return tup[2]
self.checkScriptRaisesRegex(fn, (), Exception, "range", name="fn")
s = dedent("""
def fn():
tup = (1, 2)
return tup[2]
""")
self.checkScriptRaisesRegex(s, (), Exception, "range", name="fn")
def test_no_tracer_warn_context_manager(self):
torch._C._jit_set_tracer_state_warn(True)
with jit_utils.NoTracerWarnContextManager() as no_warn:
self.assertEqual(
False,
torch._C._jit_get_tracer_state_warn()
)
self.assertEqual(
True,
torch._C._jit_get_tracer_state_warn()
)
|
pytorch-master
|
test/jit/test_jit_utils.py
|
# Owner(s): ["oncall: jit"]
from itertools import product as product
import io
import os
import sys
import hypothesis.strategies as st
from hypothesis import example, settings, given
from typing import Union
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.jit.mobile import _load_for_lite_interpreter
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
class TestSaveLoadForOpVersion(JitTestCase):
# Helper that returns the module after saving and loading
def _save_load_module(self, m):
scripted_module = torch.jit.script(m())
buffer = io.BytesIO()
torch.jit.save(scripted_module, buffer)
buffer.seek(0)
return torch.jit.load(buffer)
def _save_load_mobile_module(self, m):
scripted_module = torch.jit.script(m())
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
return _load_for_lite_interpreter(buffer)
# Helper which returns the result of a function or the exception the
# function threw.
def _try_fn(self, fn, *args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
return e
def _verify_no(self, kind, m):
self._verify_count(kind, m, 0)
def _verify_count(self, kind, m, count):
node_count = sum(str(n).count(kind) for n in m.graph.nodes())
self.assertEqual(node_count, count)
"""
Tests that verify Torchscript remaps aten::div(_) from versions 0-3
to call either aten::true_divide(_), if an input is a float type,
or truncated aten::divide(_) otherwise.
NOTE: currently compares against current div behavior, too, since
div behavior has not yet been updated.
"""
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor(self, sample_input):
def historic_div(self, other):
if self.is_floating_point() or other.is_floating_point():
return self.true_divide(other)
return self.divide(other, rounding_mode='trunc')
# Tensor x Tensor
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, a, b):
result_0 = a / b
result_1 = torch.div(a, b)
result_2 = a.div(b)
return result_0, result_1, result_2
# Loads historic module
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_tensor_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
def _helper(m, fn):
m_results = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_results, Exception):
self.assertTrue(isinstance(fn_result, Exception))
else:
for result in m_results:
self.assertEqual(result, fn_result)
_helper(v3_mobile_module, historic_div)
_helper(current_mobile_module, torch.div)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor_inplace(self, sample_input):
def historic_div_(self, other):
if self.is_floating_point() or other.is_floating_point():
return self.true_divide_(other)
return self.divide_(other, rounding_mode='trunc')
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, a, b):
a /= b
return a
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_tensor_inplace_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
def _helper(m, fn):
fn_result = self._try_fn(fn, a.clone(), b)
m_result = self._try_fn(m, a, b)
if isinstance(m_result, Exception):
self.assertTrue(isinstance(fn_result, Exception))
else:
self.assertEqual(m_result, fn_result)
self.assertEqual(m_result, a)
_helper(v3_mobile_module, historic_div_)
# Recreate `a` since it was modified in place
a = torch.tensor((val_a,))
_helper(current_mobile_module, torch.Tensor.div_)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_tensor_out(self, sample_input):
def historic_div_out(self, other, out):
if self.is_floating_point() or other.is_floating_point() or out.is_floating_point():
return torch.true_divide(self, other, out=out)
return torch.divide(self, other, out=out, rounding_mode='trunc')
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, a, b, out):
return a.div(b, out=out)
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_tensor_out_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = torch.tensor((val_b,))
for out in (torch.empty((1,)), torch.empty((1,), dtype=torch.long)):
def _helper(m, fn):
fn_result = None
if fn is torch.div:
fn_result = self._try_fn(fn, a, b, out=out.clone())
else:
fn_result = self._try_fn(fn, a, b, out.clone())
m_result = self._try_fn(m, a, b, out)
if isinstance(m_result, Exception):
self.assertTrue(isinstance(fn_result, Exception))
else:
self.assertEqual(m_result, fn_result)
self.assertEqual(m_result, out)
_helper(v3_mobile_module, historic_div_out)
_helper(current_mobile_module, torch.div)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar(self, sample_input):
def historic_div_scalar_float(self, other: float):
return torch.true_divide(self, other)
def historic_div_scalar_int(self, other: int):
if self.is_floating_point():
return torch.true_divide(self, other)
return torch.divide(self, other, rounding_mode='trunc')
class MyModuleFloat(torch.nn.Module):
def __init__(self):
super(MyModuleFloat, self).__init__()
def forward(self, a, b: float):
return a / b
class MyModuleInt(torch.nn.Module):
def __init__(self):
super(MyModuleInt, self).__init__()
def forward(self, a, b: int):
return a / b
try:
v3_mobile_module_float = _load_for_lite_interpreter(
pytorch_test_dir + "/jit/fixtures/test_versioned_div_scalar_float_v2.ptl")
v3_mobile_module_int = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_int_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module_float = self._save_load_mobile_module(MyModuleFloat)
current_mobile_module_int = self._save_load_mobile_module(MyModuleInt)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = val_b
def _helper(m, fn):
m_result = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_result, Exception):
self.assertTrue(isinstance(fn_result, Exception))
else:
self.assertEqual(m_result, fn_result)
if isinstance(b, float):
_helper(v3_mobile_module_float, current_mobile_module_float)
_helper(current_mobile_module_float, torch.div)
else:
_helper(v3_mobile_module_int, historic_div_scalar_int)
_helper(current_mobile_module_int, torch.div)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar_reciprocal(self, sample_input):
def historic_div_scalar_float_reciprocal(self, other: float):
return other / self
def historic_div_scalar_int_reciprocal(self, other: int):
if self.is_floating_point():
return other / self
return torch.divide(other, self, rounding_mode='trunc')
class MyModuleFloat(torch.nn.Module):
def __init__(self):
super(MyModuleFloat, self).__init__()
def forward(self, a, b: float):
return b / a
class MyModuleInt(torch.nn.Module):
def __init__(self):
super(MyModuleInt, self).__init__()
def forward(self, a, b: int):
return b / a
try:
v3_mobile_module_float = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_float_v2.ptl")
v3_mobile_module_int = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_reciprocal_int_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module_float = self._save_load_mobile_module(MyModuleFloat)
current_mobile_module_int = self._save_load_mobile_module(MyModuleInt)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = val_b
def _helper(m, fn):
m_result = self._try_fn(m, a, b)
fn_result = None
# Reverses argument order for torch.div
if fn is torch.div:
fn_result = self._try_fn(torch.div, b, a)
else:
fn_result = self._try_fn(fn, a, b)
if isinstance(m_result, Exception):
self.assertTrue(isinstance(fn_result, Exception))
elif fn is torch.div or a.is_floating_point():
self.assertEqual(m_result, fn_result)
else:
# Skip when fn is not torch.div and a is integral because
# historic_div_scalar_int performs floored division
pass
if isinstance(b, float):
_helper(v3_mobile_module_float, current_mobile_module_float)
_helper(current_mobile_module_float, torch.div)
else:
_helper(v3_mobile_module_int, current_mobile_module_int)
_helper(current_mobile_module_int, torch.div)
@settings(max_examples=10, deadline=200000) # A total of 10 examples will be generated
@given(
sample_input=st.tuples(st.integers(min_value=5, max_value=199), st.floats(min_value=5.0, max_value=199.0))
) # Generate a pair (integer, float)
@example((2, 3, 2.0, 3.0)) # Ensure this example will be covered
def test_versioned_div_scalar_inplace(self, sample_input):
def historic_div_scalar_float_inplace(self, other: float):
return self.true_divide_(other)
def historic_div_scalar_int_inplace(self, other: int):
if self.is_floating_point():
return self.true_divide_(other)
return self.divide_(other, rounding_mode='trunc')
class MyModuleFloat(torch.nn.Module):
def __init__(self):
super(MyModuleFloat, self).__init__()
def forward(self, a, b: float):
a /= b
return a
class MyModuleInt(torch.nn.Module):
def __init__(self):
super(MyModuleInt, self).__init__()
def forward(self, a, b: int):
a /= b
return a
try:
v3_mobile_module_float = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_float_v2.ptl")
v3_mobile_module_int = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_inplace_int_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module_float = self._save_load_module(MyModuleFloat)
current_mobile_module_int = self._save_load_module(MyModuleInt)
for val_a, val_b in product(sample_input, sample_input):
a = torch.tensor((val_a,))
b = val_b
def _helper(m, fn):
m_result = self._try_fn(m, a, b)
fn_result = self._try_fn(fn, a, b)
if isinstance(m_result, Exception):
self.assertTrue(isinstance(fn_result, Exception))
else:
self.assertEqual(m_result, fn_result)
if isinstance(b, float):
_helper(current_mobile_module_float, torch.Tensor.div_)
else:
_helper(current_mobile_module_int, torch.Tensor.div_)
# NOTE: Scalar division was already true division in op version 3,
# so this test verifies the behavior is unchanged.
def test_versioned_div_scalar_scalar(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
def forward(self, a: float, b: int, c: float, d: int):
result_0 = a / b
result_1 = a / c
result_2 = b / c
result_3 = b / d
return (result_0, result_1, result_2, result_3)
try:
v3_mobile_module = _load_for_lite_interpreter(
pytorch_test_dir + "/cpp/jit/upgrader_models/test_versioned_div_scalar_scalar_v2.ptl")
except Exception as e:
self.skipTest("Failed to load fixture!")
current_mobile_module = self._save_load_mobile_module(MyModule)
def _helper(m, fn):
vals = (5., 3, 2., 7)
m_result = m(*vals)
fn_result = fn(*vals)
for mr, hr in zip(m_result, fn_result):
self.assertEqual(mr, hr)
_helper(v3_mobile_module, current_mobile_module)
def test_versioned_linspace(self):
class Module(torch.nn.Module):
def __init__(self):
super(Module, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex]):
c = torch.linspace(a, b, steps=5)
d = torch.linspace(a, b, steps=100)
return c, d
scripted_module = torch.jit.load(
pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_v7.ptl")
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v7_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
for (a, b) in sample_inputs:
(output_with_step, output_without_step) = v7_mobile_module(a, b)
(current_with_step, current_without_step) = current_mobile_module(a, b)
# when no step is given, should have used 100
self.assertTrue(output_without_step.size(dim=0) == 100)
self.assertTrue(output_with_step.size(dim=0) == 5)
# outputs should be equal to the newest version
self.assertEqual(output_with_step, current_with_step)
self.assertEqual(output_without_step, current_without_step)
def test_versioned_linspace_out(self):
class Module(torch.nn.Module):
def __init__(self):
super(Module, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex], out: torch.Tensor):
return torch.linspace(a, b, steps=100, out=out)
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_out_v7.ptl"
loaded_model = torch.jit.load(model_path)
buffer = io.BytesIO(loaded_model._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v7_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = (
(3, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
(-10, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
(4.0, 6.0, torch.empty((100,), dtype=torch.float64), torch.empty((100,), dtype=torch.float64)),
(3 + 4j, 4 + 5j, torch.empty((100,), dtype=torch.complex64), torch.empty((100,), dtype=torch.complex64)),
)
for (start, end, out_for_old, out_for_new) in sample_inputs:
output = v7_mobile_module(start, end, out_for_old)
output_current = current_mobile_module(start, end, out_for_new)
# when no step is given, should have used 100
self.assertTrue(output.size(dim=0) == 100)
# "Upgraded" model should match the new version output
self.assertEqual(output, output_current)
def test_versioned_logspace(self):
class Module(torch.nn.Module):
def __init__(self):
super(Module, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex]):
c = torch.logspace(a, b, steps=5)
d = torch.logspace(a, b, steps=100)
return c, d
scripted_module = torch.jit.load(
pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_v8.ptl")
buffer = io.BytesIO(scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v8_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
for (a, b) in sample_inputs:
(output_with_step, output_without_step) = v8_mobile_module(a, b)
(current_with_step, current_without_step) = current_mobile_module(a, b)
# when no step is given, should have used 100
self.assertTrue(output_without_step.size(dim=0) == 100)
self.assertTrue(output_with_step.size(dim=0) == 5)
# outputs should be equal to the newest version
self.assertEqual(output_with_step, current_with_step)
self.assertEqual(output_without_step, current_without_step)
def test_versioned_logspace_out(self):
class Module(torch.nn.Module):
def __init__(self):
super(Module, self).__init__()
def forward(self, a: Union[int, float, complex], b: Union[int, float, complex], out: torch.Tensor):
return torch.logspace(a, b, steps=100, out=out)
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_out_v8.ptl"
loaded_model = torch.jit.load(model_path)
buffer = io.BytesIO(loaded_model._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
v8_mobile_module = _load_for_lite_interpreter(buffer)
current_mobile_module = self._save_load_mobile_module(Module)
sample_inputs = (
(3, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
(-10, 10, torch.empty((100,), dtype=torch.int64), torch.empty((100,), dtype=torch.int64)),
(4.0, 6.0, torch.empty((100,), dtype=torch.float64), torch.empty((100,), dtype=torch.float64)),
(3 + 4j, 4 + 5j, torch.empty((100,), dtype=torch.complex64), torch.empty((100,), dtype=torch.complex64)),
)
for (start, end, out_for_old, out_for_new) in sample_inputs:
output = v8_mobile_module(start, end, out_for_old)
output_current = current_mobile_module(start, end, out_for_new)
# when no step is given, should have used 100
self.assertTrue(output.size(dim=0) == 100)
# "Upgraded" model should match the new version output
self.assertEqual(output, output_current)
|
pytorch-master
|
test/jit/test_save_load_for_op_version.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import unittest
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.jit.frontend import _IS_ASTUNPARSE_INSTALLED
if __name__ == "__main__":
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestIgnoreContextManager(JitTestCase):
@unittest.skipUnless(_IS_ASTUNPARSE_INSTALLED, "astunparse package is required")
def test_with_ignore_context_manager_with_inp_out(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
def forward(self):
a: int = 4
b: int = 5
c: int = 0
d: int = 6
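# Each keyword argument to _IgnoreContextManager maps a variable name to
# "inp:<type>" or "out:<type>": "inp" variables are read from the enclosing
# scripted scope, while "out" variables are produced by the ignored (eagerly
# executed) block and are visible, with the declared type, after the `with`.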
with torch.jit._IgnoreContextManager(a="inp:int", b="inp:int", c="out:int", d="out:int"):
l = [2 for i in range(a) if i > 2]
c = l[0] + a + b
d = 9
return c + d
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), model())
self.assertEqual(s(), 20)
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
def forward(self):
a: int = 4
b: int = 5
c: int = 0
with torch.jit._IgnoreContextManager(a="inp:int", b="inp:int", c="out:int"):
l = [2 for i in range(a) if i > 2]
c = l[0] + a + b
return c
model = B()
s = torch.jit.script(model)
self.assertEqual(s(), 11)
self.assertEqual(s(), model())
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
def forward(self):
a: int = 4
b: int = 5
with torch.jit._IgnoreContextManager(a="inp:int", b="out:int"):
l = [2 for i in range(a) if i > 2]
b = l[0] + a
return b
model = C()
s = torch.jit.script(model)
self.assertEqual(s(), 6)
self.assertEqual(s(), model())
@unittest.skipUnless(_IS_ASTUNPARSE_INSTALLED, "astunparse package is required")
def test_with_ignore_context_manager_with_just_inp(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
def forward(self):
a: int = 4
b: int = 5
with torch.jit._IgnoreContextManager(a="inp:int", b="inp:int"):
l = [2 + b for i in range(a) if i > 2]
return a
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), 4)
self.assertEqual(s(), model())
@unittest.skipUnless(_IS_ASTUNPARSE_INSTALLED, "astunparse package is required")
def test_with_ignore_context_manager_with_just_out(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
def forward(self):
with torch.jit._IgnoreContextManager(c="out:List[int]"):
c = [2 for i in range(7) if i > 2]
c[0] = 3
return c[0] + c[1]
model = A()
s = torch.jit.script(model)
self.assertEqual(s(), 5)
self.assertEqual(s(), model())
|
pytorch-master
|
test/jit/test_ignore_context_manager.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from typing import List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestStringFormatting(JitTestCase):
def test_modulo_operator(self):
def fn(dividend: int, divisor: int) -> int:
return dividend % divisor
self.checkScript(fn, (5, 2))
def test_string_interpolation_with_string_placeholder_and_string_variable(self):
def fn(arg1: str):
return "%s in template" % arg1
self.checkScript(fn, ("foo",))
def test_string_interpolation_with_string_placeholder_and_format_string_variable(self):
def fn(arg1: str):
return arg1 % "foo"
self.checkScript(fn, ("%s in template",))
def test_string_interpolation_with_double_percent_in_string(self):
def fn(arg1: str):
return "%s in template %%" % arg1
self.checkScript(fn, ("foo",))
def test_string_interpolation_with_percent_in_string(self):
@torch.jit.script
def fn(arg1: str) -> str:
return "%s in template %" % arg1 # noqa: F501
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Incomplete format specifier",
"\"%s in template %\" % arg1"):
fn("foo")
def test_string_interpolation_with_string_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%s in template" % arg1
self.checkScript(fn, (1,))
def test_string_interpolation_with_digit_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%d in template" % arg1
self.checkScript(fn, (1,))
def test_string_interpolation_with_alternate_digit_placeholder(self):
def fn(arg1: int) -> str:
return "%i in template" % arg1
self.checkScript(fn, (1,))
def test_string_interpolation_with_digit_placeholder_and_string_variable(self):
@torch.jit.script
def fn(arg1: str) -> str:
return "%d in template" % arg1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"%d requires a number for formatting, but got String",
"\"%d in template\" % arg1"):
fn("1")
def test_string_interpolation_with_exponent_placeholder_and_string_variable(self):
@torch.jit.script
def fn(arg1: str) -> str:
return "%e in template" % arg1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"%e requires a number for formatting, but got String",
"\"%e in template\" % arg1"):
fn("1")
def test_string_interpolation_with_lowercase_exponent_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%e in template" % arg1
self.checkScript(fn, (1,))
def test_string_interpolation_with_capital_exponent_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%E in template" % arg1
self.checkScript(fn, (1,))
def test_string_interpolation_with_float_placeholder_and_float_variable(self):
def fn(arg1: float) -> str:
return "%f in template" % arg1
self.checkScript(fn, (1.0,))
def test_string_interpolation_with_float_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%f in template" % arg1
self.checkScript(fn, (1,))
def test_string_interpolation_with_char_placeholder_and_char_variable(self):
def fn(arg1: str) -> str:
return "%c in template" % arg1
self.checkScript(fn, ("a",))
def test_string_interpolation_with_char_placeholder_and_digit_variable(self):
def fn(arg1: int) -> str:
return "%c in template" % arg1
self.checkScript(fn, (97,))
def test_string_interpolation_with_char_placeholder_and_true_string_variable(self):
@torch.jit.script
def fn(arg1: str) -> str:
return "%c in template" % arg1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"%c requires an int or char for formatting, but got String",
"\"%c in template\" % arg1"):
fn("foo")
def test_string_interpolation_with_multiple_placeholders(self):
def fn(arg1: str, arg2: int, arg3: float) -> str:
return "%s %d %f in template" % (arg1, arg2, arg3)
self.checkScript(fn, ("foo", 1, 1))
def test_string_interpolation_with_subscript(self):
def fn(arg1: List[str]) -> str:
return "%s in template" % arg1[0]
self.checkScript(fn, (["foo", "bar"],))
def test_string_interpolation_with_too_few_arguments(self):
@torch.jit.script
def fn(arg1: str) -> str:
return "%s %s in template" % arg1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Too few arguments for format string",
"\"%s %s in template\" % arg1"):
fn("foo")
def test_string_interpolation_with_too_many_arguments(self):
@torch.jit.script
def fn(arg1: str, arg2: str) -> str:
return "%s in template" % (arg1, arg2) # noqa: F507
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Too many arguments for format string",
"\"%s in template\" % (arg1, arg2"):
fn("foo", "bar")
def test_string_interpolation_with_unknown_format_specifier(self):
@torch.jit.script
def fn(arg1: str) -> str:
return "%a in template" % arg1 # noqa: F501
with self.assertRaisesRegexWithHighlight(RuntimeError,
"The specifier %a is not supported in TorchScript format strings",
"\"%a in template\" % arg1"):
fn("foo")
|
pytorch-master
|
test/jit/test_string_formatting.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import IS_WINDOWS
from collections import namedtuple
from typing import List, Tuple, Optional, Dict
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
class TestTyping(JitTestCase):
def test_dict_in_not_in(self):
def test_in_dict(x):
# type: (Dict[str, int]) -> bool
return 'hi' in x
self.checkScript(test_in_dict, ({'hi': 2, 'bye': 3},))
self.checkScript(test_in_dict, ({'bye': 3},))
# Check evaluation order
@torch.jit.script
def a():
print("a")
return 3
@torch.jit.script
def b():
print("b")
return {3: 2, 4: 1}
@torch.jit.script
def fn():
return a() in b()
with self.capture_stdout() as captured:
self.assertTrue(fn())
if not IS_WINDOWS:
# no stdout capturing on windows
self.assertEqual(captured[0], "a\nb\n")
def test_not_in_dict(a):
# type: (Dict[str, int]) -> bool
if "hello" not in a:
return False
else:
return True
self.checkScript(test_not_in_dict, ({"hello": 1, "world": 2}, ))
self.checkScript(test_not_in_dict, ({"world": 2}, ))
def test_dict_tensor_key(a, t):
# type: (Dict[Tensor, int], Tensor) -> bool
if t in a:
return True
else:
return False
inp1 = torch.tensor(3)
inp2 = torch.tensor(5)
dict_a = {inp1: 1, inp2: 3}
self.checkScript(test_dict_tensor_key, (dict_a, torch.tensor(4)))
self.checkScript(test_dict_tensor_key, (dict_a, torch.tensor(3)))
self.checkScript(test_dict_tensor_key, (dict_a, inp1))
self.checkScript(test_dict_tensor_key, (dict_a, inp2))
def test_list_type_refinement_annotation_element_mismatch(self):
def fn():
l: List[int] = [1, 2, "foo", 3]
return l
with self.assertRaisesRegex(RuntimeError, "List type annotation"
r" `List\[int\]` did not match the "
"types of the given list elements"):
torch.jit.script(fn)
def test_dict_type_refinement_annotation_key_mismatch(self):
def fn():
l1 = [1, 2, "foo", 3]
l2 = ["foo", "bar", "baz", "qux"]
d: Dict[int, str] = {k : v for k, v in zip(l1, l2)}
return d
with self.assertRaisesRegex(RuntimeError, "Dicts may only "
"contain homogeneous keys, but the "
"type of the first generated key "
r"was Union\[int, str\]"):
torch.jit.script(fn)
def test_dict_type_refinement_annotation_value_mismatch(self):
def fn():
l1 = ["foo", "bar", "baz", "qux"]
l2 = [1, 2, "foo", 3]
d: Dict[str, int] = {k : v for k, v in zip(l1, l2)}
return d
with self.assertRaisesRegex(RuntimeError, "Dict type annotation"
r" `Dict\[str, int\]` did not match"
" the type of an actual value type"
r" `Union\[int, str\]`"):
torch.jit.script(fn)
def test_dict_invalid_annotations(self):
# Check for invalid value type annotation
def wrong_value_type(dictionary: Dict[str, torch.jit.ScriptModule]):
return
with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
torch.jit.script(wrong_value_type)
# Check for invalid key type annotation
def wrong_key_type(dictionary: Dict[torch.jit.ScriptModule, str]):
return
with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
torch.jit.script(wrong_key_type)
# Check for invalid key and value type annotation
def wrong_key_value_type(dictionary: Dict[torch.jit.ScriptModule, torch.jit.ScriptModule]):
return
with self.assertRaisesRegex(ValueError, "Unknown type annotation"):
torch.jit.script(wrong_key_value_type)
def test_tuple_specialization(self):
@torch.jit.script
def f(t, s):
# type: (Tuple[Tensor, Tuple[int, Tensor]], str) -> Tensor
x, t2 = t
_, y = t2
return x + y
t = torch.randn(2, 2), (1, torch.randn(2, 2)),
f(t, "hi")
graph = f.graph_for(t, "hi")
input_types = list(next(graph.inputs()).type().elements())
w = input_types[0]
self.assertEqual(input_types[0].kind(), 'TensorType')
self.assertEqual(input_types[1].elements()[1].kind(), 'TensorType')
def test_tuple_io(self):
def stuff(x):
# type: (Tuple[Tensor, Tensor]) -> Tuple[Tensor, Tensor]
a, b = x
return b, a
a = (torch.rand(3), torch.rand(3))
self.checkScript(stuff, (a,))
def test_tuple_keyword(self):
def bar():
f = tuple((1, 2)) # noqa: C409
return f
self.checkScript(bar, ())
def foo():
return tuple(1, 2)
self.checkScriptRaisesRegex(foo, (), Exception,
"1 argument")
def cant_infer_size():
return tuple([1, 2, 3]) # noqa: C409
with self.assertRaisesRegex(Exception, "cannot statically infer the expected"):
torch.jit.script(cant_infer_size)
def test_tuple_create_return(self):
def stuff2(x):
# type: (int) -> Tuple[Tensor, Tensor]
a = (torch.ones(x), torch.zeros(x))
return a
self.checkScript(stuff2, (3,))
def test_list_io(self):
def stuff3(x):
# type: (List[int]) -> Tuple[Tensor, List[int]]
return torch.ones(x), x
self.checkScript(stuff3, ([3, 2],))
def test_bool_list_io(self):
@torch.jit.script
def stuff4(x):
# type: (List[bool]) -> Tuple[List[bool], List[bool], List[List[bool]]]
return x, [True, False], [[True]]
li_1, li_2, li_3 = stuff4([True])
li_3 = li_3[0]
for li in [li_1, li_2, li_3]:
self.assertTrue(type(li[0]) == type(True))
def test_nested_list(self):
def foo(z):
# type: (Tuple[int, List[List[int]]]) -> int
x, y = z
return y[0][1]
self.checkScript(foo, ((1, [[1, 2], [3, 4]]),))
def test_list_sum(self):
def fn(x: List[int]) -> int:
return sum(x)
def fn1(x: List[float]):
return sum(x)
def fn2(x: List[bool]):
return sum(x)
self.checkScript(fn, ([1, 2, 3], ))
self.checkScript(fn1, ([1.0, 2.0, 3.0], ))
self.checkScript(fn1, ([1, 2.8, 3], ))
self.checkScript(fn2, ([True, False, False], ))
self.checkScript(fn2, ([False, False, False], ))
self.checkScript(fn2, ([0, 1, 1, 0], ))
def test_list_unification(self):
def fn():
return [1, None, 2]
def fn2(x):
return [torch.ones(2, 2), None, x]
self.checkScript(fn, [])
self.checkScript(fn2, (torch.ones(2, 2),))
# to avoid defining sum_list in multiple tests
def get_sum_list_fn(self):
def sum_list(a):
# type: (List[int]) -> int
sum = 0
for i in a:
sum += i
return sum
return sum_list
def test_sum_list_diff_elms(self):
self.checkScript(self.get_sum_list_fn(), ([1, 2, 3, 4, 5],))
def test_sum_list_empty(self):
self.checkScript(self.get_sum_list_fn(), ([],))
def test_sum_list_one(self):
self.checkScript(self.get_sum_list_fn(), ([1],))
def test_sum_list_literal(self):
def sum_list():
# type: () -> int
sum = 0
for i in [1, 2, 3, 4, 5]:
sum += i
return sum
self.checkScript(sum_list, ())
def test_sum_list_wrong_type(self):
with self.assertRaisesRegex(RuntimeError, "'int' object is not iterable"):
@torch.jit.script
def sum_list(a):
# type: (int) -> int
sum = 0
for i in a: # noqa: T484
sum += i
return sum
sum_list(1)
def test_list_iterables(self):
with self.assertRaisesRegex(RuntimeError, 'List of iterables is not supported currently'):
cu = torch.jit.CompilationUnit('''
def list_iterables(x):
for i, j in [2, 3, 4], [5, 6, 7]:
x += i
x += j
return x
''')
def test_for_in_string(self):
def test_strings(x):
# type: (str) -> str
reverse = ""
for c in x:
reverse = c + reverse
return reverse
self.checkScript(test_strings, ("hello",))
self.checkScript(test_strings, ("",))
def test_list_strings(x):
# type: (List[str]) -> str
result = ""
for sub_str in x:
result += sub_str
return result
self.checkScript(test_list_strings, (["hello", "world"],))
self.checkScript(test_list_strings, (["hello", " ", "world", ""],))
def test_for_in_dict(self):
def test_dicts(x):
# type: (Dict[str, int]) -> int
sum = 0
for key in x:
sum += x[key]
return sum
self.checkScript(test_dicts, ({"a": 1, "b": 2, "c": 3},))
def test_dict_keys_values(x):
# type: (Dict[str, int]) -> Tuple[str, int]
key_str = ""
sum = 0
for key in x.keys():
key_str += key
for val in x.values():
sum += val
return key_str, sum
self.checkScript(test_dict_keys_values, ({"a": 1, "b": 2, "c": 3},))
def test_for_tuple_unpack(self):
def for_tuple_unpack(x, y):
for i, j in [[3, 4], [5, 6], [7, 8]]:
x += i
y += j
return x, y
self.checkScript(for_tuple_unpack, (torch.tensor(3), torch.tensor(5)))
def nested_tuple_unpack(x, y):
# type: (List[int], List[int]) -> int
sum = 0
for i, (j, k), v in zip(x, enumerate(x), y):
sum += i + j + k + v
return sum
self.checkScript(nested_tuple_unpack, ([1, 3, 5], [2, 4, 6]))
def test_dict_comprehension(self):
def fn():
return {i : chr(i + 65) for i in range(4)}
self.checkScript(fn, ())
def test_dict_comprehension_with_type_annotation(self):
def fn():
d: Dict[int, str] = {i : chr(i + 65) for i in range(4)}
return d
self.checkScript(fn, ())
with self.assertRaisesRegex(RuntimeError, ""):
with self.assertRaisesRegex(AssertionError, "Expected Dict "
"type annotation for dict "
"comprehension, found "
"Tuple[int, str]"):
@torch.jit.script
def fn():
d: Tuple[int, str] = {i : chr(i + 65) for i in range(4)}
return d
def test_dict_comprehension_scope(self):
def comprehension_can_access_outer_scope_variables():
lst = ["foo", "bar", "baz"]
return {l : len(l) for l in lst}
self.checkScript(comprehension_can_access_outer_scope_variables, ())
with self.assertRaisesRegex(RuntimeError, "undefined value i"):
@torch.jit.script
def outer_scope_cannot_access_comprehension_variables():
d = {i : chr(i + 65) for i in range(4)}
i = i + 1
def test_for_tuple_assign(self):
def test_simple_assign(x):
# type: (Tuple[int, float]) -> float
sum = 0.0
for a in x:
sum += float(a)
return sum
self.checkScript(test_simple_assign, ((1, 2.5),))
def test_tuple_assign(x):
# type: (Tuple[Tuple[int, int], Tuple[int, int]]) -> int
sum = 0
for a in x:
sum += a[0]
sum += a[1]
return sum
self.checkScript(test_tuple_assign, (((1, 2), (4, 7)), ))
def test_single_starred_lhs(self):
with self.assertRaisesRegex(RuntimeError, 'A Starred expression may only appear on the lhs within the presence'
' of another non-starred expression'):
cu = torch.jit.CompilationUnit('''
def single_starred_lhs(x):
a = (x, x, x)
*b, = a
return b
''')
def test_singleton_tuple_unpack(self):
def foo(a):
b, = (a,)
return b + 1
self.checkScript(foo, (torch.rand(3),))
def test_tuple_assignments(self):
def var_tuple_assign(x, y):
# type: (Tuple[Tensor, Tensor], Tensor) -> Tensor
(a, b), c = x, y
return a + b + c
tuple_inputs = (torch.randn(1, 4), torch.randn(3, 4))
self.checkScript(var_tuple_assign, (tuple_inputs, torch.randn(3, 4)))
def nested_tuple_assign(x, y, z):
# type: (int, Tuple[int, Tuple[int, int]], Tuple[int, int]) -> int
a, (b, (c, d)), (e, f) = x, y, z
return a + b + c + d + e + f
self.checkScript(nested_tuple_assign, ((1, (2, (3, 4)), (5, 6))))
def subscript_tuple_assign(a, x, i):
# type: (List[int], Tensor, int) -> Tuple[int, Tensor, int]
a[i], (x[i], b) = 1, (2, 3)
return a[i] + 1, x + 5, b
self.checkScript(subscript_tuple_assign, ([12, 7, 9, 11], torch.tensor((3, 13, 17)), 0))
def star_tuple_assign():
# type: () -> Tuple[int, int, Tuple[int, int], Tuple[int, int]]
a, (b, *c), *d = 1, (2, 3, 4), 5, 6
return a, b, c, d
self.checkScript(star_tuple_assign, ())
def subscript_tuple_augmented_assign(a):
# type: (Tuple[int, int]) -> Tuple[int, int]
a[0] += 1
return a
with self.assertRaisesRegex(RuntimeError, 'does not support augmented assign'):
scripted_aug_assign = torch.jit.script(subscript_tuple_augmented_assign)
def test_multiple_assign(self):
def test():
a = b, c = d, f = (1, 1)
# side effect
ten = torch.tensor(1)
ten1 = ten2 = ten.add_(1)
# ordering
x = 1
y = 3
x, y = y, x + y
return a, b, c, d, f, ten, ten1, ten2, x, y
self.checkScript(test, ())
def test_opt_opt_refinement(self):
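# Scripting is the whole test here: it succeeds only if `opt` unifies to
# Optional[int] across the None and int branches below.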
@torch.jit.script
def test_unify(weight, bias):
# type: (Optional[int], Optional[int]) -> Optional[int]
if weight is not None:
opt = None
else:
if bias is not None:
opt = 1
else:
opt = None
return opt
def test_optional_refinement(self):
@torch.jit.script
def test_if_none_assignment(x):
# type: (Optional[int]) -> int
if x is None:
x = 1
return x + 1
self.assertEqual(test_if_none_assignment(1), 2)
def test_optional_conversion(self):
@torch.jit.script
def other_fn(x=None):
# type: (Optional[int]) -> int
return torch.jit._unwrap_optional(x)
@torch.jit.script
def fn(x):
# type: (int) -> int
return other_fn(x)
self.assertEqual(fn(2), 2)
@torch.jit.script
def unify_to_optional(x):
# type: (bool) -> Optional[int]
if x:
a = None
else:
a = 2
return a
self.assertEqual(unify_to_optional(True), None)
self.assertEqual(unify_to_optional(False), 2)
@torch.jit.script
def opt_list(x):
# type: (Optional[List[float]]) -> int
return 2
@torch.jit.script
def broadcast_opt_list(x):
# type: (Optional[BroadcastingList2[float]]) -> int
return 2
@torch.jit.script
def opt_list_tuple_caller(x):
# type: (Tuple[float, float]) -> int
return opt_list(x) + broadcast_opt_list(x)
self.assertEqual(opt_list_tuple_caller((2., 3.)), 4)
def test_optional_tuple(self):
def fn(x=None):
# type: (Optional[Tuple[int, int]]) -> Tuple[int, int]
if x is None:
new_x = (1, 2)
else:
new_x = x
return new_x
self.checkScript(fn, ((3, 4),))
self.checkScript(fn, ())
def test_namedtuple_redefine(self):
global _1, _2
_1 = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
_2 = namedtuple('GoogLeNetOutputs', ['different'])
with self.assertRaisesRegex(RuntimeError, r'redefine'):
@torch.jit.script
def foo(x, y):
# type: (_1, _2) -> _1
return x
def test_namedtuple_py2(self):
global _GoogLeNetOutputs # see [local resolution in python]
_GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
@torch.jit.script
def foo(x):
# type: (_GoogLeNetOutputs) -> _GoogLeNetOutputs
return x
vals = torch.rand(3), torch.rand(4), torch.rand(5)
out = foo(_GoogLeNetOutputs(logits=vals[0], aux_logits2=vals[1], aux_logits1=vals[2]))
self.assertEqual(out.logits, vals[0])
self.assertEqual(out.aux_logits2, vals[1])
self.assertEqual(out.aux_logits1, vals[2])
def test_namedtuple_good_error(self):
global _GoogLeNetOutputs # see [local resolution in python]
_GoogLeNetOutputs = namedtuple('GoogLeNetOutputs', ['logits', 'aux_logits2', 'aux_logits1'])
@torch.jit.script
def foo(x):
# type: (_GoogLeNetOutputs) -> _GoogLeNetOutputs
return x
with self.assertRaisesRegex(RuntimeError,
r'aka NamedTuple\(logits, aux_logits2, aux_logits1\)'):
out = foo(_GoogLeNetOutputs(logits="3", aux_logits2="4", aux_logits1="5"))
|
pytorch-master
|
test/jit/test_typing.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing._internal.jit_utils import JitTestCase, make_global
from torch.jit._monkeytype_config import _IS_MONKEYTYPE_INSTALLED
from typing import List, Dict, Tuple, Any, Optional, NamedTuple # noqa: F401
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if not _IS_MONKEYTYPE_INSTALLED:
print("monkeytype is not installed. Skipping tests for Profile-Directed Typing", file=sys.stderr)
JitTestCase = object # type: ignore[misc, assignment] # noqa: F811
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
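# A minimal sketch of the API exercised by the tests below (illustrative helper,
# never called; assumes monkeytype is available, as checked above):
# torch.jit.script can infer parameter types from example inputs instead of
# explicit type annotations.
def _pdt_usage_sketch():
    def add(a, b):
        return a + b
    make_global(add)
    scripted = torch.jit.script(add, example_inputs=[(3, 4)])
    return scripted(1, 2)  # `a` and `b` are profiled as int, so this returns 3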
class TestPDT(JitTestCase):
"""
A suite of tests for profile directed typing in TorchScript.
"""
def test_nn_module(self):
class TestPDTModel(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x) -> Any:
if isinstance(x, int):
return x + 1
elif isinstance(x, float):
return x - 1
else:
return x
make_global(TestPDTModel)
pdt_model = TestPDTModel()
inp: List[Tuple[Any, ...]] = [(20, ), (2.7, ), (False, ), ]
scripted_pdt_model = torch.jit.script(pdt_model, example_inputs={pdt_model: inp})
self.assertEqual(scripted_pdt_model(50), pdt_model(50))
self.assertEqual(scripted_pdt_model(1.8), pdt_model(1.8))
self.assertTrue(scripted_pdt_model(True), pdt_model(True))
def test_nested_nn_module_class(self):
class NestedPDTInner(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
if isinstance(x, int):
return x * 10
return x
class NestedModulePDTWrapper(torch.nn.Module):
def __init__(self, inner):
super().__init__()
self.inner = inner
def forward(self, x):
return self.inner(x)
make_global(NestedPDTInner, NestedModulePDTWrapper)
inner_pdt_model = NestedPDTInner()
wrapped_pdt_model = NestedModulePDTWrapper(inner_pdt_model)
inp: List[Tuple[Any, ...]] = [(20, ), (False, )]
scripted_pdt_model = torch.jit.script(wrapped_pdt_model, example_inputs={wrapped_pdt_model: inp})
self.assertEqual(scripted_pdt_model(30), wrapped_pdt_model(30))
self.assertEqual(scripted_pdt_model(1.9), wrapped_pdt_model(1.9))
self.assertTrue(scripted_pdt_model(True), wrapped_pdt_model(True))
def test_nested_nn_module_class_with_args(self):
class NestedModulePDTInner(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x, y):
if isinstance(x, int):
return x * 10 + y
return x
class NestedModulePDTOuter(torch.nn.Module):
def __init__(self, inner):
super().__init__()
self.inner = inner
def forward(self, x):
return self.inner(x, 20)
make_global(NestedModulePDTInner, NestedModulePDTOuter)
inner_pdt_model = NestedModulePDTInner()
outer_pdt_model = NestedModulePDTOuter(inner_pdt_model)
inner_input: List[Tuple[Any, ...]] = [(10, 10), (1.9, 20), ]
outer_input: List[Tuple[Any, ...]] = [(20, ), (False, )]
scripted_pdt_model = torch.jit.script(outer_pdt_model, example_inputs={inner_pdt_model: inner_input,
outer_pdt_model: outer_input, })
self.assertEqual(scripted_pdt_model(30), outer_pdt_model(30))
self.assertEqual(scripted_pdt_model(1.9), outer_pdt_model(1.9))
self.assertTrue(scripted_pdt_model(True), outer_pdt_model(True))
def test_nested_function_in_forward(self):
class NestedFunctionInForward(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return self.fun(x) + 10
def fun(self, x):
if isinstance(x, bool):
return 0
elif isinstance(x, int):
return x + 1
return 0
make_global(NestedFunctionInForward)
pdt_model = NestedFunctionInForward()
inp: List[Tuple[Any, ...]] = [(-1, ), (False, )]
scripted_pdt_model = torch.jit.script(pdt_model, example_inputs={pdt_model: inp})
self.assertEqual(scripted_pdt_model(30), pdt_model(30))
self.assertEqual(scripted_pdt_model(True), pdt_model(True))
def test_nn_module_with_export_function(self):
class TestModelWithExport(torch.nn.Module):
def __init__(self):
super().__init__()
@torch.jit.export
def fn(self, x, y) -> Any:
assert not (isinstance(x, bool) and isinstance(y, bool))
if isinstance(x, int) and isinstance(y, int):
return x + y
elif isinstance(x, float) and isinstance(y, float):
return x - y
else:
return -1
make_global(TestModelWithExport)
pdt_model = TestModelWithExport()
inp: List[Tuple[Any, ...]] = [(20, 10, ), (2.7, 8.9, ), ]
scripted_pdt_model = torch.jit.script(pdt_model, example_inputs={pdt_model.fn: inp})
self.assertEqual(scripted_pdt_model.fn(10, 90), pdt_model.fn(10, 90))
self.assertEqual(scripted_pdt_model.fn(1.8, 2.2), pdt_model.fn(1.8, 2.2))
self.assertTrue(scripted_pdt_model.fn(torch.ones(1), 2), pdt_model.fn(torch.ones(1), 2))
def test_class_methods(self):
class PDTModel:
def test_sum(self, a):
return sum(a)
make_global(PDTModel)
pdt_model = PDTModel()
inp: List[Tuple[Any, ...]] = [([10, 20, ], ), ]
scripted_pdt_model = torch.jit.script(PDTModel, example_inputs={pdt_model.test_sum: inp})
script_model = scripted_pdt_model()
self.assertEqual(script_model.test_sum([10, 20, 30, ], ), pdt_model.test_sum([10, 20, 30, ], ))
def test_class_with_multiple_methods(self):
class PDTModelWithManyMethods:
def test_list_to_dict(self, a):
new_dictionary: Dict[float, bool] = {}
for element in a:
new_dictionary[element] = True
return new_dictionary
def test_substring(self, a, b):
return b in a
make_global(PDTModelWithManyMethods)
pdt_model = PDTModelWithManyMethods()
list_inp: List[Tuple[Any, ...]] = [([1.2, 2.3, ], ), ]
str_inp: List[Tuple[Any, ...]] = [("abc", "b", ), ]
scripted_pdt_model = torch.jit.script(PDTModelWithManyMethods, example_inputs={pdt_model.test_list_to_dict: list_inp,
pdt_model.test_substring: str_inp})
script_model = scripted_pdt_model()
self.assertEqual(script_model.test_list_to_dict([1.1, 2.2, 3.3, ], ), pdt_model.test_list_to_dict([1.1, 2.2, 3.3, ], ))
self.assertEqual(script_model.test_substring("helloworld", "world", ), pdt_model.test_substring("helloworld", "world", ))
self.assertEqual(script_model.test_substring("helloworld", "def", ), pdt_model.test_substring("helloworld", "def", ))
def test_multiple_class_with_same_method(self):
class PDTModelOne:
def test_find(self, a, b):
return b in a.keys()
class PDTModelTwo:
def test_find(self, a, b):
return b in a
make_global(PDTModelOne, PDTModelTwo)
pdt_model_one = PDTModelOne()
pdt_model_two = PDTModelTwo()
dict_inp: List[Tuple[Any, ...]] = [({1.2: True, 2.3: False, }, 1.2), ]
list_inp: List[Tuple[Any, ...]] = [(["abc", "b", ], "c"), ]
scripted_pdt_model_one = torch.jit.script(PDTModelOne, example_inputs={pdt_model_one.test_find: dict_inp})
scripted_pdt_model_two = torch.jit.script(PDTModelTwo, example_inputs={pdt_model_two.test_find: list_inp})
script_model_one, script_model_two = scripted_pdt_model_one(), scripted_pdt_model_two()
self.assertEqual(script_model_one.test_find({1.1: True, 2.2: True, 3.3: False, }, 4.4),
pdt_model_one.test_find({1.1: True, 2.2: True, 3.3: False, }, 4.4))
self.assertEqual(script_model_two.test_find(["hello", "world", ], "world"),
pdt_model_two.test_find(["hello", "world", ], "world"))
def test_pdt(self):
def test_sum(a, b):
return a + b
make_global(test_sum)
scripted_fn_add = torch.jit.script(test_sum, example_inputs=[(3, 4)])
self.assertEqual(scripted_fn_add(10, 2), test_sum(10, 2))
def test_sub(a, b):
return a - b
make_global(test_sub)
scripted_fn_sub = torch.jit.script(test_sub, example_inputs=[(3.9, 4.10)])
self.assertEqual(scripted_fn_sub(6.5, 2.9), test_sub(6.5, 2.9))
def test_mul(a, b):
return a * b
make_global(test_mul)
scripted_fn_mul = torch.jit.script(test_mul, example_inputs=[(-10, 9)])
self.assertEqual(scripted_fn_mul(-1, 3), test_mul(-1, 3))
def test_args_complex(real, img):
return torch.complex(real, img)
make_global(test_args_complex)
scripted_fn_complex = torch.jit.script(test_args_complex, example_inputs=[(torch.rand(3, 4), torch.rand(3, 4))])
arg1, arg2 = torch.rand(3, 4), torch.rand(3, 4)
self.assertEqual(scripted_fn_complex(arg1, arg2), test_args_complex(arg1, arg2))
def test_bool(a):
if a:
return -1
else:
return 0
make_global(test_bool)
scripted_fn_bool = torch.jit.script(test_bool, example_inputs=[(True,)])
self.assertEqual(scripted_fn_bool(True), test_bool(True))
def test_str(a):
if a == "":
return False
else:
return True
make_global(test_str)
scripted_fn_str = torch.jit.script(test_str, example_inputs=[("",)])
self.assertEqual(scripted_fn_str("abc"), test_str("abc"))
def test_pdt_list_and_tuple(self):
def test_list_and_tuple(a):
return sum(a)
make_global(test_list_and_tuple)
scripted_fn_float_list_input = torch.jit.script(test_list_and_tuple, example_inputs=[([4.9, 8.9],)])
self.assertEqual(scripted_fn_float_list_input([11.9, 7.6]), test_list_and_tuple([11.9, 7.6]))
scripted_fn_bool_list_input = torch.jit.script(test_list_and_tuple, example_inputs=[([True, False, True],)])
self.assertEqual(scripted_fn_bool_list_input([True, True, True]), test_list_and_tuple([True, True, True]))
scripted_fn_int_list_input = torch.jit.script(test_list_and_tuple, example_inputs=[([3, 4, 5], )])
self.assertEqual(scripted_fn_int_list_input([1, 2, 3]), test_list_and_tuple([1, 2, 3]))
scripted_fn_float_tuple_input = torch.jit.script(test_list_and_tuple, example_inputs=[((4.9, 8.9),)])
self.assertEqual(scripted_fn_float_tuple_input((11.9, 7.6)), test_list_and_tuple((11.9, 7.6)))
scripted_fn_bool_tuple_input = torch.jit.script(test_list_and_tuple,
example_inputs=[((True, False, True),)])
self.assertEqual(scripted_fn_bool_tuple_input((True, True, True)),
test_list_and_tuple((True, True, True)))
scripted_fn_int_tuple_input = torch.jit.script(test_list_and_tuple, example_inputs=[((3, 4, 5), )])
self.assertEqual(scripted_fn_int_tuple_input((1, 2, 3)), test_list_and_tuple((1, 2, 3)))
def test_nested_list_and_tuple(self):
def test_nested_list(inp):
return [sum(v) for v in inp]
def test_nested_tuple(inp):
ans = 0.0
for tup in inp:
for val in tup:
if val > 0:
ans *= val
return ans
make_global(test_nested_list, test_nested_tuple)
list_inp = [[1, 2, 3, ], [5, 6, 7, ]]
scripted_fn = torch.jit.script(test_nested_list, example_inputs=[(list_inp, ), ])
inp = [[0, 4, 7, ], [8, 11, ], [6, -1, -20, ]]
self.assertEqual(scripted_fn(inp, ), test_nested_list(inp, ))
list_inp = ([1, 2, 3, ], [5, 6, 7, ])
scripted_fn = torch.jit.script(test_nested_list, example_inputs=[(list_inp, ), ])
inp = ([0, 4, 7, ], [8, 11, ], [6, -1, -20, ])
self.assertEqual(scripted_fn(inp, ), test_nested_list(inp, ))
tup_inp = [(1.0, 2.6, 3.7, ), (5.7, 6.1, 1.7, )]
scripted_fn = torch.jit.script(test_nested_tuple, example_inputs=[(tup_inp, ), ])
inp = [(1.0, 4.1, 7.4, ), (4.8, 1.1, -1.2, ), (6.3, -1.3, -2.0, )]
self.assertEqual(scripted_fn(inp, ), test_nested_tuple(inp, ))
tup_inp = ((True, False, True, ), (False, False, False, ))
scripted_fn = torch.jit.script(test_nested_tuple, example_inputs=[(tup_inp, ), ])
inp = ((True, True, True, ), (False, False, True, ))
self.assertEqual(scripted_fn(inp, ), test_nested_tuple(inp, ))
def test_pdt_dict(self):
def test_dict(a):
return a['foo']
def test_dict_int_list(a):
return a[1]
make_global(test_dict, test_dict_int_list)
str_bool_inp = {'foo' : True, 'bar': False}
scripted_fn = torch.jit.script(test_dict, example_inputs=[(str_bool_inp,)])
self.assertEqual(scripted_fn({'foo' : False, 'bar': True}, ), test_dict({'foo' : False, 'bar': True}, ))
str_list_inp = {0 : [True, False], 1: [False, True]}
scripted_fn = torch.jit.script(test_dict_int_list, example_inputs=[(str_list_inp,)])
self.assertEqual(scripted_fn({0 : [False, False], 1: [True, True]}, ),
test_dict_int_list({0 : [False, False], 1: [True, True]}, ))
def test_any(self):
def test_multiple_types(a):
assert not isinstance(a, bool)
return a
def test_multiple_type_refinement(a):
if isinstance(a, bool):
return 1
elif isinstance(a, int):
return 1 + a
elif isinstance(a, float):
return 1 + int(a)
else:
return -1
make_global(test_multiple_types, test_multiple_type_refinement)
scripted_fn = torch.jit.script(test_multiple_types, example_inputs=[(1,), ("abc", ), (8.9,), ([3, 4, 5], )])
self.assertEqual(scripted_fn(10), test_multiple_types(10))
self.assertEqual(scripted_fn("def"), test_multiple_types("def"))
self.assertEqual(scripted_fn(7.89999), test_multiple_types(7.89999))
self.assertEqual(scripted_fn([10, 11, 14]), test_multiple_types([10, 11, 14]))
scripted_fn = torch.jit.script(test_multiple_type_refinement, example_inputs=[(1,), ("abc", ), (8.9,),
([3, 4, 5],), (True, ), ({"a": True}, ), ])
self.assertEqual(scripted_fn(10), test_multiple_type_refinement(10))
self.assertEqual(scripted_fn("def"), test_multiple_type_refinement("def"))
self.assertEqual(scripted_fn(7.89999), test_multiple_type_refinement(7.89999))
self.assertEqual(scripted_fn([10, 11, 14]), test_multiple_type_refinement([10, 11, 14]))
self.assertEqual(scripted_fn(False), test_multiple_type_refinement(False))
self.assertEqual(scripted_fn({"abc" : True, "def": False}), test_multiple_type_refinement({"abc" : True, "def": False}))
def test_class_as_profiled_types(self):
class UserDefinedClass:
def fn(self, b) -> Any:
assert b is not None
if isinstance(b, int):
return b if b > 0 else -1
elif isinstance(b, float):
return b if b > 0.0 else -1.0
return 0
def test_model(a, m):
assert not isinstance(a, bool)
return m.fn(a)
make_global(UserDefinedClass, test_model)
user_class = UserDefinedClass()
scripted_fn = torch.jit.script(test_model, example_inputs=[(10, user_class, ), (10.9, user_class, ), ])
self.assertEqual(scripted_fn(100, user_class, ), test_model(100, user_class))
self.assertEqual(scripted_fn(1.9, user_class, ), test_model(1.9, user_class))
def test_class_with_args_as_profiled_types(self):
class ClassWithArgs:
def __init__(self, a: bool):
self.a = a
def fn(self, b):
if self.a:
return b
else:
return -1
def test_model_with_args(a, m):
assert not isinstance(a, bool)
return m.fn(a)
make_global(ClassWithArgs, test_model_with_args)
user_class = ClassWithArgs(False)
scripted_fn = torch.jit.script(test_model_with_args, example_inputs=[(10, user_class, ), (10.9, user_class, ), ])
self.assertEqual(scripted_fn(100, ClassWithArgs(True), ), test_model_with_args(100, ClassWithArgs(True)))
def test_nn_parameter_as_arg(self):
class TestNNParameter(torch.nn.Module):
def __init__(self):
super().__init__()
self.inp = torch.nn.Parameter(torch.ones(2, 3))
def add_nn_parameter_with_int(self, x, y):
return torch.add(x, y)
def forward(self, y):
return self.add_nn_parameter_with_int(self.inp, y)
make_global(TestNNParameter)
pdt_model = TestNNParameter()
scripted_fn = torch.jit.script(pdt_model, example_inputs={pdt_model: [(10, ), ], })
self.assertEqual(scripted_fn(20), pdt_model(20))
def test_fx_tracing_with_typing(self):
class FXModelOutput(NamedTuple):
result: List[int]
class FXModel(torch.nn.Module):
def forward(self, a) -> FXModelOutput:
result = FXModelOutput(result=a)
return result
make_global(FXModel, FXModelOutput)
pdt_model = FXModel()
scripted_fn = torch.jit.script(pdt_model, example_inputs={pdt_model: [([10, 20, ], ), ], })
self.assertEqual(scripted_fn([20]), pdt_model([20]))
def test_nonetype_as_optional_of_type(self):
def test_none(a) -> Any:
if a is None:
return 0
else:
return a + torch.ones(1)
make_global(test_none)
scripted_fn = torch.jit.script(test_none, example_inputs=[(None, ), (10.6, )])
self.assertEqual(scripted_fn(30.9, ), test_none(30.9, ))
scripted_fn = torch.jit.script(test_none, example_inputs=[(None, ), (10, )])
self.assertEqual(scripted_fn(2, ), test_none(2, ))
scripted_fn = torch.jit.script(test_none, example_inputs=[(None, ), (torch.Tensor(1), )])
self.assertEqual(scripted_fn(torch.ones(1), ), test_none(torch.ones(1), ))
|
pytorch-master
|
test/jit/test_pdt.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestTensorCreationOps(JitTestCase):
"""
A suite of tests for ops that create tensors.
"""
def test_randperm_default_dtype(self):
def randperm(x: int):
perm = torch.randperm(x)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.int64
self.checkScript(randperm, (3, ))
def test_randperm_specified_dtype(self):
def randperm(x: int):
perm = torch.randperm(x, dtype=torch.float)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert perm.dtype == torch.float
self.checkScript(randperm, (3, ))
def test_triu_indices_default_dtype(self):
def triu_indices(rows: int, cols: int):
indices = torch.triu_indices(rows, cols)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert indices.dtype == torch.int64
self.checkScript(triu_indices, (3, 3))
def test_triu_indices_specified_dtype(self):
def triu_indices(rows: int, cols: int):
indices = torch.triu_indices(rows, cols, dtype=torch.float)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert indices.dtype == torch.float
self.checkScript(triu_indices, (3, 3))
def test_tril_indices_default_dtype(self):
def tril_indices(rows: int, cols: int):
indices = torch.tril_indices(rows, cols)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert indices.dtype == torch.int64
self.checkScript(tril_indices, (3, 3))
def test_tril_indices_specified_dtype(self):
def tril_indices(rows: int, cols: int):
indices = torch.tril_indices(rows, cols, dtype=torch.float)
# Have to perform assertion here because TorchScript returns dtypes
# as integers, which are not comparable against eager torch.dtype.
assert indices.dtype == torch.float
self.checkScript(tril_indices, (3, 3))
|
pytorch-master
|
test/jit/test_tensor_creation_ops.py
|
pytorch-master
|
test/jit/__init__.py
|
|
# Owner(s): ["oncall: jit"]
import os
import sys
import unittest
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
def canonical(graph):
return torch._C._jit_pass_canonicalize(graph).str(False)
class TestCustomOperators(JitTestCase):
def test_dynamic_op_registry(self):
from torch._ops import _OpNamespace
self.assertTrue(hasattr(torch, 'ops'))
if '_test' in torch.ops.__dict__:
torch.ops.__dict__.pop('_test')
# Don't use `hasattr()` here: it calls `__getattr__`, which would create and
# cache the namespace and defeat the assertNotIn check below.
self.assertNotIn('_test', torch.ops.__dict__)
torch.ops._test
self.assertIn('_test', torch.ops.__dict__)
self.assertEqual(type(torch.ops._test), _OpNamespace)
self.assertNotIn('leaky_relu', torch.ops._test.__dict__)
op = torch.ops._test.leaky_relu
self.assertTrue(callable(op))
self.assertIn('leaky_relu', torch.ops._test.__dict__)
op2 = torch.ops._test.leaky_relu
self.assertEqual(op, op2)
def test_simply_calling_an_operator(self):
input = torch.randn(100)
output = torch.ops.aten.relu(input)
self.assertEqual(output, input.relu())
def test_default_arguments_are_used(self):
output = torch.ops._test.leaky_relu(torch.tensor([-1.0, 1.0]))
self.assertEqual(output, torch.tensor([-0.01, 1]))
def test_passing_too_many_args(self):
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"aten::relu\(\) expected at most 1 argument\(s\) but received 2 argument\(s\)",
""
):
torch.ops.aten.relu(1, 2)
def test_passing_too_few_args(self):
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"aten::relu\(\) is missing value for argument 'self'.",
""
):
torch.ops.aten.relu()
def test_passing_one_positional_but_not_the_second(self):
with self.assertRaisesRegexWithHighlight(
RuntimeError,
r"aten::type_as\(\) is missing value for argument 'other'.",
""
):
torch.ops.aten.type_as(torch.ones(5, 5))
def test_passing_unknown_kwargs(self):
with self.assertRaisesRegexWithHighlight(
RuntimeError,
"Unknown keyword argument 'foo' for operator '_test::leaky_relu'",
""
):
torch.ops._test.leaky_relu(torch.ones(5), foo=torch.ones(5))
def test_passing_and_returning_lists(self):
# Replace with actual test once we support lists.
a, b = torch.rand(5), torch.rand(5)
output = torch.ops._test.cat([a, b])
output_ref = torch.cat([a, b])
self.assertEqual(output, output_ref)
def test_calling_scripted_custom_op(self):
@torch.jit.script
def func(x):
return torch.ops.aten.relu(x)
input = torch.ones(5, 5)
self.assertEqual(func(input), input.relu())
def test_calling_traced_custom_op(self):
input = torch.ones(5, 5)
func = torch.jit.trace(torch.ops.aten.relu, [input])
self.assertEqual(func(input), input.relu())
@unittest.skip("Need to figure out default dtype differences between fbcode and oss")
def test_script_graph_for_custom_ops_matches_traced_graph(self):
input = torch.ones(5, 5)
trace = torch.jit.trace(torch.ops.aten.relu, [input])
self.assertExpectedInline(canonical(trace.graph), '''\
graph(%0 : Float(5, 5)):
%1 : Float(5, 5) = aten::relu(%0)
return (%1)
''')
def test_script_graph_contains_custom_op(self):
@torch.jit.script
def func(x):
return torch.ops.aten.relu(x)
self.assertExpectedInline(canonical(func.graph), '''\
graph(%x.1 : Tensor):
%1 : Tensor = aten::relu(%x.1)
return (%1)
''')
def test_generic_list(self):
self.assertEqual(torch.ops._test.get_first([['hello']]), 'hello')
# https://github.com/pytorch/pytorch/issues/80508
def test_where_no_scalar(self):
x = torch.rand(1, 3, 224, 224)
torch.ops.aten.where(x > 0.5, -1.5, 1.5) # does not raise
|
pytorch-master
|
test/jit/test_custom_operators.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import unittest
from torch.testing._internal.common_utils import GRAPH_EXECUTOR, ProfilingMode, \
num_profiled_runs, enable_profiling_mode_for_profiling_tests
from torch.testing._internal.common_jit import check_against_reference
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, disable_autodiff_subgraph_inlining
from torch.testing import FileCheck
from typing import List, Tuple, Optional
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.SIMPLE, "Simple Executor doesn't support gradients")
class TestAutodiffSubgraphSlicing(JitTestCase):
# TODO: It would be better if we could test directly on graphs instead of
# in the current end-to-end fashion.
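# Scripts `fn`, runs it once on freshly created inputs that require grad (with
# autodiff subgraph inlining disabled and profiling enabled), and returns the
# optimized graph so tests can count the resulting prim::DifferentiableGraph nodes.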
def _perform_ad_subgraph_slicing(self, fn, *input_sizes):
with disable_autodiff_subgraph_inlining():
with enable_profiling_mode_for_profiling_tests():
ge = torch.jit.script(fn)
inputs = [torch.randn(size, requires_grad=True) for size in input_sizes]
ge(*inputs, profile_and_replay=True)
return ge.graph_for(*inputs)
def assertGraphSize(self, graph, size):
nodes = list(filter(lambda n: (n.kind() != "prim::BailOut" and
n.kind() != "prim::BailoutTemplate" and
n.kind() != "prim::TypeCheck" and
n.kind() != "prim::RequiresGradCheck"),
graph.nodes()))
self.assertEqual(len(list(nodes)), size)
def test_chunk_constant_script_ad(self):
@torch.jit.script
def func(x):
x1, x2 = torch.chunk(x, 2)
return (x1, x2)
input = torch.rand(6, 10).requires_grad_()
with disable_autodiff_subgraph_inlining():
with enable_profiling_mode_for_profiling_tests():
output = func(input, profile_and_replay=True)
FileCheck().check_not("prim::DifferentiableGraph").run(func.graph_for(input))
@unittest.skipIf(GRAPH_EXECUTOR != ProfilingMode.PROFILING, "This threshold is only valid for Profiling Executor")
def test_diff_graph_inline_threshold(self):
with enable_profiling_mode_for_profiling_tests():
NUM_RUNS = 1
with num_profiled_runs(NUM_RUNS):
@torch.jit.script
def foo(x):
# two nodes should be fused
# see https://github.com/pytorch/pytorch/blob/master/torch/csrc/jit/runtime/graph_executor_impl.h#L49
return torch.sigmoid(torch.sigmoid(x))
@torch.jit.script
def bar(x):
# a single node should NOT be fused (it is below the inlining threshold)
return torch.sigmoid(x)
input = torch.rand([4, 4], requires_grad=True)
foo(input)
foo(input)
bar(input)
bar(input)
self.assertGraphContainsExactly(foo.graph_for(input), 'prim::DifferentiableGraph', 1)
self.assertGraphContainsExactly(bar.graph_for(input), 'prim::DifferentiableGraph', 0)
def test_bias_as_module_attr(self):
with enable_profiling_mode_for_profiling_tests():
class M(torch.nn.Module):
def __init__(self, has_bias):
super(M, self).__init__()
self.ll = torch.nn.Linear(10, 10, has_bias)
def forward(self, x, y):
return self.ll(x + y) * x + y
x = torch.rand(10, 10, requires_grad=True)
no_bias = M(False)
scripted_no_bias = torch.jit.script(no_bias)
scripted_no_bias(x, x)
scripted_no_bias(x, x)
scripted_no_bias(x, x)
has_bias = M(True)
check_against_reference(self, scripted_no_bias, no_bias, lambda x: x, (x, x,), check_types=False)
scripted_has_bias = torch.jit.script(has_bias)
scripted_has_bias(x, x)
scripted_has_bias(x, x)
scripted_has_bias(x, x)
check_against_reference(self, scripted_has_bias, has_bias, lambda x: x, (x, x,), check_types=False)
def test_constructed_bias(self):
with enable_profiling_mode_for_profiling_tests():
def method1(x, weight, b1, b2):
bias = b1 * b2
return torch.nn.functional.linear(x, weight, bias)
N = 10
x = torch.rand(N, N, requires_grad=True)
weight = torch.rand(N, N, requires_grad=True)
b1 = torch.rand(N, N, requires_grad=True)
b2 = torch.rand(N, N, requires_grad=True)
scripted = self.checkScript(method1, (x, weight, b1, b2))
# check_types requires last_graph on scripted to be set, so we just skip it
check_against_reference(self, scripted, method1, lambda x: x, (x, weight, b1, b2), check_types=False)
def test_bias_as_arg(self):
with enable_profiling_mode_for_profiling_tests():
def method1(x, weight, bias: Optional[torch.Tensor]):
return torch.nn.functional.linear(x, weight, bias).relu() + 2
N = 10
x = torch.rand(N, N, requires_grad=True)
weight = torch.rand(N, N, requires_grad=True)
bias = None
scripted = self.checkScript(method1, (x, weight, bias))
# check_types requires last_graph on scripted to be set, so we just skip it
check_against_reference(self, scripted, method1, lambda x: x, (x, weight, bias), check_types=False)
bias = torch.rand(N, N, requires_grad=True)
scripted = self.checkScript(method1, (x, weight, bias))
# check_types requires last_graph on scripted to be set, so we just skip it
check_against_reference(self, scripted, method1, lambda x: x, (x, weight, bias), check_types=False)
def test_requires_grad_for_tensor_list(self):
with enable_profiling_mode_for_profiling_tests():
# output & var_list[0] should have requires_grad set to True
def func(input0: torch.Tensor, input1: torch.Tensor) -> Tuple[torch.Tensor, List[torch.Tensor]]:
var_list = [input0, input1]
var = torch.cat(var_list)
output = var + 1.0
return output, var_list
jit_f = torch.jit.script(func)
input0 = torch.randn((2,), requires_grad=True)
input1 = torch.randn((2,))
output_ref = func(input0, input1)
for i in range(2):
output = jit_f(input0, input1)
assert(output_ref[0].requires_grad == output[0].requires_grad)
assert(output_ref[1][0].requires_grad == output[1][0].requires_grad)
assert(output_ref[1][1].requires_grad == output[1][1].requires_grad)
@unittest.skip("disable until we property handle tensor lists with undefined gradients")
def test_differentiable_graph_ops_requires_grad(self):
x = torch.randn(8, 2, dtype=torch.float).requires_grad_()
y = torch.randn(8, 2, dtype=torch.float)
def t(x : torch.Tensor, y : torch.Tensor, flag : bool):
o = x + 1.0
o1 = torch.relu(o)
o = y + 1.5
o2 = torch.relu(o)
o3 = o1 + o2
if flag:
o = o1 + 1.0
oo1 = torch.relu(o)
o = o2 + 2.5
oo2 = torch.relu(o)
oo3 = oo1 + oo2
else:
o = o1 * 1.0
oo1 = torch.relu(o)
o = o2 * 2.0
oo2 = torch.relu(o)
oo3 = oo1 + oo2
return o1, o2, o3, oo1, oo2, oo3
with enable_profiling_mode_for_profiling_tests():
t_jit = torch.jit.script(t)
jit_o = t_jit(x, y, False)
jit_o = t_jit(x, y, False)
o = t(x, y, False)
FileCheck().check("prim::DifferentiableGraph").run(t_jit.graph_for(x, y, False))
# validate that the DifferentiableGraph ops mark requires_grad properly
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.requires_grad, jit_oo.requires_grad)
self.assertEqual(oo, jit_oo)
# one more run to trigger fusion
jit_o = t_jit(x, y, False)
for oo, jit_oo in zip(o, jit_o):
self.assertEqual(oo.dtype, jit_oo.dtype)
self.assertEqual(oo.requires_grad, jit_oo.requires_grad)
self.assertEqual(oo, jit_oo)
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.PROFILING, "Simple Executor doesn't support gradients")
def test_prune_grad(self):
@torch.jit.script
def t(input, bias):
return torch.nn.functional.relu(input + bias)
input = torch.randn(2, 8, requires_grad=True)
bias = torch.randn(8, requires_grad=False) # bias does NOT require grad
NUM_PROFILED_RUNS = 1
with num_profiled_runs(NUM_PROFILED_RUNS):
WARMUP = 3 # 2 runs to reach backward + 1 to optimize it
for x in range(WARMUP):
o = t(input, bias)
o.sum().backward()
fwd_plan = list(t.get_debug_state().execution_plans.values())[0]
bwd_graph = list(fwd_plan.code.grad_executor_states()[0].execution_plans.values())[0].graph
tup = next(bwd_graph.outputs())
self.assertEqual(len(list(tup.node().inputs())), 1)
def test_simple_merge(self):
# o --> o
def fn(x, y, z):
a = x * y
b = a * z
return b
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
self.assertGraphSize(graph, 1)
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_simple_no_merge(self):
# o: autodiff supported. x: not autodiff supported.
# o --> x
def fn(x, y, z):
a = x * y
b = torch.zeros([abs(int(y))])
return a, b
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
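# Only the text of the main graph (everything before its `return`) is checked;
# the bodies of any prim::DifferentiableGraph subgraphs are printed after it,
# so this asserts which ops stayed outside the differentiable subgraph.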
g_str = str(graph)
FileCheck().check("aten::Int").check("aten::zeros").check_not("aten::mul").run(g_str[0:g_str.find("return")])
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_does_not_merge_unrelated(self):
# o o
def fn(w, x, y, z):
a = x * y
b = w * z
return a, b
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
self.assertGraphSize(graph, 3)
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
def test_merges_without_cycles(self):
# o --> o --> o
# | ^
# \_________/
def fn(w, x, y):
a = w * x
b = a * y
c = a * b
return c
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
self.assertGraphSize(graph, 1)
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_merges_dense(self):
# o o
# |\ /|
# | \ / |
# | /\ |
# vv vv
# o o
def fn(x, y):
a, b = x.chunk(2)
c, d = y.chunk(2)
return a + c, b + d
graph = self._perform_ad_subgraph_slicing(fn, 2, 2)
self.assertGraphSize(graph, 2)
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_does_not_create_cycles(self):
# o --> x --> o
# | ^
# \_________/
def fn(w, x, y):
a = w * x
b = torch.zeros(abs(int(a)))
c = a * b
return c
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1)
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
def test_merges_up(self):
# o --> x o
# | ^
# \_________/
def fn(w, x, y, z):
a = w * x
b = torch.zeros(abs(int(y)))
c = a * z
return b, c
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
g_str = str(graph)
FileCheck().check_not("aten::add").run(g_str[0:g_str.find("return")])
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_merges_down(self):
# o x --> o
# | ^
# \_________/
def fn(v, w, x, y):
a = v * w
b = torch.ones(int(y))
c = b * a
return a, c
graph = self._perform_ad_subgraph_slicing(fn, 1, 1, 1, 1)
num_nodes = 4 if GRAPH_EXECUTOR == ProfilingMode.PROFILING else 3
# add moved down
g_str = str(graph)
FileCheck().check_not("aten::add").run(g_str[0:g_str.find("return")])
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 1)
def test_respects_lexical_scoping(self):
def fn(x, k):
y = x * 1.1
if bool(k):
k = k + y
z = y * k
return z, k
graph = self._perform_ad_subgraph_slicing(fn, 1, 1)
# We should not have combined the two multiplications into
# the same group; they should each be a separate DiffGraph
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 3)
def test_merge_respects_aliasing(self):
def fn(x, k, cond):
y = x * 1.1
y = y * k
y = y * 2.2
if bool(cond):
z1 = y[0]
z2 = y[1]
z1.add_(3)
out = z2 + k + 3.3
out = out * out
return out
graph = self._perform_ad_subgraph_slicing(fn, [2, 2], [2, 2], 1)
# z2 did not get merged into the subgraph
FileCheck().check("prim::If").check("aten::select").check_next("aten::select")\
.check_next("aten::add_").check("Differentiable").run(graph)
self.assertGraphContainsExactly(graph, 'prim::DifferentiableGraph', 2)
def test_aliased_outputs(self):
with enable_profiling_mode_for_profiling_tests():
# Case 1: the aliasing between relu and t
# is contained within a DifferentiableGraph. It should be valid
# to merge both relu and t into one graph
input_str = """
graph(%a : Tensor):
%b : Tensor = aten::relu(%a)
%2 : Tensor = aten::t(%b)
return (%2)
"""
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
FileCheck().check("with prim::DifferentiableGraph") \
.check("aten::relu").check("aten::t") \
.run(graph)
# Case 2: relu and split_with_sizes alias each other
# and both are outputs of a Diff graph. It should be invalid
# to merge split_with_sizes and relu into one graph,
# i.e. relu and split_with_sizes should be in different
# differentiable graphs
input_str = """
graph(%a : Tensor):
%b : Tensor = aten::relu(%a)
%0 : int[] = prim::Constant[value=[2, 2, 1]]()
%1 : int = prim::Constant[value=0]()
%2 : Tensor[] = aten::split_with_sizes(%b, %0, %1)
%3 : (Tensor[], Tensor[]) = prim::TupleConstruct(%b, %2)
return (%3)
"""
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
FileCheck().check("Tensor = prim::DifferentiableGraph") \
.check("with prim::DifferentiableGraph") \
.check("Tensor = aten::relu") \
.check_not("aten::split_with_sizes") \
.run(graph)
# Case 3: two aliased nodes in a graph.
# Both `split_with_sizes` should be unfused
input_str = """
graph(%a : Tensor):
%b : Tensor = aten::relu(%a)
%s1 : int[] = prim::Constant[value=[2, 2, 1]]()
%s2 : int[] = prim::Constant[value=[3, 1]]()
%1 : int = prim::Constant[value=0]()
%2 : Tensor[] = aten::split_with_sizes(%b, %s1, %1)
%3 : Tensor[] = aten::split_with_sizes(%b, %s2, %1)
%4 : (Tensor, Tensor[]) = prim::TupleConstruct(%b, %2, %3)
return (%4)
"""
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
FileCheck().check("Tensor = prim::DifferentiableGraph") \
.check("with prim::DifferentiableGraph") \
.check("Tensor = aten::relu") \
.check_not("aten::split_with_sizes") \
.run(graph)
# Case 4: the aliased output has a descendant
# Both should be unfused. Note, %3 comes before %2
# to test that we unfuse in the reverse topo order
input_str = """
graph(%a : Tensor):
%b : Tensor = aten::relu(%a)
%0 : int[] = prim::Constant[value=[2, 2, 1]]()
%1 : int = prim::Constant[value=0]()
%2 : Tensor = aten::t(%b)
%3 : Tensor = aten::relu(%2)
%4 : (Tensor, Tensor, Tensor[]) = prim::TupleConstruct(%b, %3, %2)
return (%4)
"""
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
FileCheck().check("Tensor = prim::DifferentiableGraph") \
.check("with prim::DifferentiableGraph") \
.check("Tensor = aten::relu") \
.check_not("aten::t") \
.run(graph)
# Case 5: multiple aliased groups
# Both should be unfused. Note, %3 comes before %2
# to test that we unfuse in the reverse topo order
input_str = """
graph(%a : Tensor):
%b : Tensor = aten::relu(%a)
%c : Tensor = aten::abs(%a)
%0 : int[] = prim::Constant[value=[2, 2, 1]]()
%1 : int = prim::Constant[value=0]()
%d : Tensor = aten::t(%c)
%2 : Tensor = aten::t(%b)
%3 : Tensor = aten::relu(%2)
%4 : (Tensor, Tensor, Tensor[]) = prim::TupleConstruct(%3, %2, %d, %b, %c, %b)
return (%4)
"""
graph = torch._C.parse_ir(input_str)
torch._C._jit_pass_create_autodiff_subgraphs(graph, 1)
FileCheck().check("Tensor = prim::DifferentiableGraph") \
.check("with prim::DifferentiableGraph") \
.check("Tensor = aten::relu") \
.check_not("aten::t") \
.run(graph)
def test_has_profiled_info_aliasing_outputs(self):
# The expectation is that CallFunction will prevent the final profile node from
# getting merged into the DifferentiableGraph, and that create_autodiff_subgraphs
# will instead add this to the type for %4.
ir = """
graph(%a : Tensor):
%1 : Tensor = prim::profile[profiled_type=Float(requires_grad=0)](%a)
%2 : Tensor = aten::relu(%1)
%3 : Tensor = prim::profile[profiled_type=Float(requires_grad=0)](%2)
%4 : Tensor = aten::relu(%3)
%5 : Tensor = prim::CallFunction(%4)
%6 : Tensor = prim::profile[profiled_type=Float(requires_grad=0)](%4)
return (%6)
"""
graph = torch._C.parse_ir(ir)
torch._C._jit_pass_create_autodiff_subgraphs(graph)
for n in graph.nodes():
if n.kind() == "prim::DifferentiableGraph":
diff_graph = n.g("Subgraph")
outputs = list(diff_graph.outputs())
self.assertEqual(1, len(outputs))
output = outputs[0]
self.assertEqual(False, output.requiresGrad())
FileCheck().check("= prim::DifferentiableGraph") \
.check("with prim::DifferentiableGraph") \
.check(" = aten::relu") \
.check("requires_grad=0") \
.check("aten::relu") \
.run(graph)
|
pytorch-master
|
test/jit/test_autodiff_subgraph_slicing.py
|
# Owner(s): ["oncall: jit"]
import unittest
import io
import os
import sys
import copy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable, Function
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.common_utils import suppress_warnings, \
skipIfCompiledWithoutNumpy, enable_profiling_mode_for_profiling_tests, \
IS_SANDCASTLE, TemporaryFileName, skipIfCrossRef, skipIfTorchDynamo
from torch.testing._internal.jit_utils import JitTestCase, enable_cpu_fuser, \
_tmp_donotuse_dont_inline_everything, _trace, RUN_CUDA, \
RUN_CUDA_MULTI_GPU, make_global
from torch.testing._internal.common_cuda import with_tf32_off
from torch import Tensor
# Standard library
from collections import namedtuple
from itertools import chain
from typing import Dict, List, Optional, Tuple
import warnings
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestTracer(JitTestCase):
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_large_nbr_kernel_args(self):
class Recurrence(nn.Module):
def __init__(self, seq_len):
super(Recurrence, self).__init__()
self.seq_len = seq_len
def forward(self, input):
input = input.transpose(0, 1)
# Main loop
output = []
for i in range(self.seq_len):
b = input[i] * 2
output.append(b)
output = torch.cat(output, 0).view(input.size(0), *output[0].size())
output = output.transpose(0, 1)
return output
input_size = 8
batch_size = 2
seq_len = 130
rec = Recurrence(seq_len)
input = torch.rand(batch_size, seq_len, input_size)
torch.cuda.set_device(0)
rec = rec.cuda()
input = input.cuda()
traced_rec = torch.jit.trace(rec, (input,))
def test_trace_legacy_ctor(self):
class MyModule(nn.Module):
def forward(self, x):
return (x + 1, torch.FloatTensor([0]))
traced_rec = torch.jit.trace(MyModule(), torch.randn(2, 2))
def test_simple(self):
x = torch.tensor([0.4], requires_grad=True)
y = torch.tensor([0.7], requires_grad=True)
def f(x, y):
return torch.sigmoid(torch.tanh(x * (x + y)))
self.checkTrace(f, (x, y))
def test_trace_checking_with_global_name(self):
class MyClass(torch.nn.Module):
def __init__(self):
super(MyClass, self).__init__()
def forward(self, xs: List[Tensor]):
y = torch.cat(xs, dim=0)
return y
model = MyClass()
# Simulate these inputs being in the globals, like they would be if,
# e.g. they were defined outermost scope of a script
global input1, input2
input1 = torch.ones(2, 2)
input2 = torch.ones(2, 2)
m2 = torch.jit.trace(model, ((input1, input2),))
def test_trace_aliased_parameter(self):
class M(nn.Module):
def __init__(self, x):
super(M, self).__init__()
self.x = nn.Parameter(x)
def forward(self, y):
return self.x + y
m = M(torch.rand(3, 4))
r = torch.jit.trace(m, m.x)
t2 = torch.rand(3, 4)
self.assertEqual(r(t2), m.x + t2)
def test_trace_nested_fn(self):
class TracedInlineDecision(torch.nn.Module):
def forward(self, x, flag):
@torch.jit.script
def make_decision(flag, x):
if flag:
return x
else:
return torch.zeros_like(x)
x = torch.neg(x)
return make_decision(flag, x)
decision = TracedInlineDecision()
torch.jit.trace(decision, (torch.rand(3, 4), torch.tensor([True], dtype=torch.bool)), check_trace=True)
def test_trace_single_tuple(self):
x = torch.tensor(2.)
def f2(x):
return (x,)
jit_f2 = torch.jit.trace(f2, x)
assert f2(x) == jit_f2(x) # fails
def test_trace_namedtuple(self):
Point = namedtuple('point', ['x', 'y'])
def f(p):
if type(p) is tuple:
p = Point(*p)
return p.x + p.y
p = Point(torch.randn(1), torch.randn(1))
traced = torch.jit.trace(f, (p,))
self.assertEqual(f(p), traced(p))
def test_trace_topk(self):
class M(torch.nn.Module):
def forward(self, x, y):
return x.topk(y, dim=1)[1]
mod = M()
inputs = (torch.randint(0, 10, (20, 20)), torch.tensor(17))
traced_func = torch.jit.trace(mod, inputs)
test_inputs = (torch.randint(0, 9, (9, 9)), torch.tensor(8))
eager_out = mod(*test_inputs)
traced_out = traced_func(*test_inputs)
self.assertNotWarn(lambda: traced_func(*test_inputs), "Shouldn't throw slicing related warn here")
self.assertEqual(eager_out, traced_out)
test_inputs = (torch.randint(0, 50, (50, 50)), torch.tensor(12))
eager_out = mod(*test_inputs)
traced_out = traced_func(*test_inputs)
self.assertNotWarn(lambda: traced_func(*test_inputs), "Shouldn't throw slicing related warn here")
self.assertEqual(eager_out, traced_out)
def test_typeas_trace_check(self):
a = torch.tensor([0.4], requires_grad=True)
b = torch.tensor([0.7], requires_grad=True)
def f(x, y):
return x.type_as(y)
trace = torch.jit.trace(f, (a, b))
def test_trace_index(self):
x = torch.tensor([0.4], requires_grad=True)
y = torch.tensor([0], dtype=torch.int64)
def fn(x, y):
return x[y]
fn_traced = torch.jit.trace(fn, (x, y,))
self.assertEqual(fn(x, y), fn_traced(x, y))
# Backwards tracing was broken for indexing by a constant,
# because it's internally implemented using as_strided,
# and we attempted to trace its derivative (which is not
# currently supported.) It currently works because
# slice() is now not marked as traceable.
def test_trace_index_constant(self):
x = torch.tensor([0.4], requires_grad=True)
def fn(x):
return x[0]
def run(f):
y = f(x)
grad = torch.autograd.grad(y, x)[0].clone()
return y, grad
traced_fn = torch.jit.trace(fn, torch.ones(1))
self.assertEqual(run(fn), run(traced_fn))
def test_index_put(self):
ten = torch.zeros(3, 3)
mask = torch.tensor([[True, True, True],
[True, False, False],
[True, True, False]])
def test_fn(ten, mask):
ten[mask] = torch.ones(6)
return ten
traced_test_fn = torch.jit.trace(test_fn, (ten, mask))
ten = torch.rand(3, 3)
self.assertEqual(test_fn(ten, mask), traced_test_fn(ten, mask))
def test_canonicalize_tensor_iterator(self):
x = torch.randn(4, 4)
def f(x):
x = x + 2
x = x - 4
x = x * 6
x = x / 8
return x
traced = torch.jit.trace(f, (x,))
f(x)
graph = traced.graph_for(x)
# There should be 4 int constants for the right sides of operators, plus one
# for the alpha argument for add and sub
self.assertTrue(str(traced.graph_for(x)).count(': int = prim::Constant') == 5)
@suppress_warnings
def test_constant(self):
x = torch.randn(2, 2, requires_grad=True)
def f(x):
return x.matmul(torch.diag(torch.tensor([2., 2.])))
self.checkTrace(f, (x,), (torch.ones(2, 2, requires_grad=True),))
def test_wrapped_number(self):
# Scalars get converted to 'wrapped' tensors of the default tensor type.
# Wrapped tensors behave differently in certain promotion operations:
# float_tensor * double -> float but wrapped_float * double -> double.
# This can cause issues in check-trace if not handled correctly in
# `aten::isclose()`.
def foobar():
x = -10000.0
result = x * torch.ones(1, dtype=torch.float)
return result
scripted = torch.jit.trace(foobar, (), check_trace=True)
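# Illustrative sketch of the promotion rule described above (assumed semantics,
# not exercised by this test): a plain Python scalar does not upcast the tensor
# it multiplies, whereas a genuine double tensor does.
#   torch.ones(1, dtype=torch.float32) * (-10000.0)          # dtype stays float32
#   torch.ones(1, dtype=torch.float32) * torch.tensor(-10000.0, dtype=torch.float64)  # promotes to float64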
def test_inplace_transplant(self):
x = torch.tensor([0.], requires_grad=True)
def fn(x):
y = x.clone()
y.add_(2)
y.add_(3)
return y
g, _ = torch.jit._get_trace_graph(fn, (x,))
self.run_pass('dce', g)
FileCheck().check_count("aten::clone", 1, exactly=True) \
.check_count("aten::add_", 2, exactly=True) \
.check_next("return").run(str(g))
self.assertExportImport(g, (x,))
def test_inplace_flags(self):
class InplaceFn(Function):
@staticmethod
def forward(ctx, x):
ctx.mark_dirty(x)
return x.add_(1)
@staticmethod
def backward(ctx, go):
return go
class RegularFn(Function):
@staticmethod
def forward(ctx, x):
return x.add(1)
@staticmethod
def backward(ctx, go):
return go
x = torch.tensor([0.], requires_grad=True)
def fn(x):
y = RegularFn.apply(x)
y = InplaceFn.apply(y)
y = InplaceFn.apply(y)
y = RegularFn.apply(y)
return y
trace_graph, _ = torch.jit._get_trace_graph(fn, (x,), _force_outplace=True)
self.run_pass('dce', trace_graph)
ops = list(trace_graph.nodes())
for op in ops:
self.assertTrue(op.hasAttribute('inplace'))
inplace_flags = [False, True, True, False]
for op, is_inplace in zip(ops, inplace_flags):
self.assertEqual(op.i('inplace'), is_inplace)
def test_inplace_check(self):
class MyInplaceFn(Function):
@staticmethod
def forward(self, x):
x.add_(1)
self.mark_dirty(x)
return x
@staticmethod
def backward(self, grad):
return grad
def fn(x):
return MyInplaceFn.apply(x)
x = torch.randn(5, 5)
ge = torch.jit.trace(fn, (x,), _force_outplace=True, check_trace=False)
with self.assertRaisesRegex(RuntimeError, 'inplace MyInplaceFn'):
ge(x)
def test_force_outplace_check_fill(self):
def f(x):
return torch.empty(x.shape).fill_(7)
x = torch.randn(10, 15)
ft = torch.jit.trace(f, x, _force_outplace=True)
self.assertEqual(f(x), ft(x))
def test_force_outplace_check_zero(self):
def f(x):
return torch.empty(x.shape).zero_()
x = torch.randn(10, 15)
ft = torch.jit.trace(f, x, _force_outplace=True)
self.assertEqual(f(x), ft(x))
def do_trace_size(self, requires_grad):
def fn(x):
return x.view(x.shape[1] * 2, x.size(0), 2)
x = torch.randn(5, 2, 4, requires_grad=requires_grad)
y = torch.randn(4, 8, 4, requires_grad=requires_grad)
# Check that it behaves as expected
traced_fn = torch.jit.trace(fn, x)
self.assertEqual(traced_fn(y), fn(y))
self.assertEqual(traced_fn(x), fn(x))
def test_trace_size(self):
self.do_trace_size(False)
# test the different graph_executor path that happens when
# gradients are required and sizes are involved
def test_trace_size_with_grad(self):
self.do_trace_size(True)
def test_trace_numel(self):
def fn(x):
return x.numel()
x = torch.randn(2, 3, 4)
y = torch.randn(4, 5, 6)
traced_fn = torch.jit.trace(fn, x)
self.assertEqual(traced_fn(y), fn(y))
self.assertEqual(traced_fn(x), fn(x))
def do_trace_arange(self, requires_grad):
def arange(x):
return torch.arange(x.shape[0])
def arange_scalar(x):
return torch.arange(12)
def arange_start_end(x):
return torch.arange(start=x.shape[0], end=x.shape[0] + 5)
x = torch.randn(5, 3, 2, requires_grad=requires_grad)
y = torch.randn(8, 2, 4, requires_grad=requires_grad)
# Check that it behaves as expected
traced_arange = torch.jit.trace(arange, x)
self.assertEqual(traced_arange(y), arange(y))
self.assertEqual(traced_arange(x), arange(x))
traced_arange_scalar = torch.jit.trace(arange_scalar, x)
self.assertEqual(traced_arange_scalar(y), arange_scalar(y))
self.assertEqual(traced_arange_scalar(x), arange_scalar(x))
traced_arange_start_end = torch.jit.trace(arange_start_end, x)
self.assertEqual(traced_arange_start_end(y), arange_start_end(y))
self.assertEqual(traced_arange_start_end(x), arange_start_end(x))
def test_trace_arange(self):
self.do_trace_arange(False)
# test the different graph_executor path that happens when
# gradients are required and sizes are involved
def test_trace_arange_with_grad(self):
self.do_trace_arange(True)
# Test that a trace of torch.full(x.shape) doesn't store the shape as a constant
def test_trace_full_dynamic_shape(self):
def full_with_shape_like(x):
return torch.full(x.shape, 2.)
x = torch.randn(3, 4)
ge = torch.jit.trace(full_with_shape_like, example_inputs=x)
y = torch.randn(2, 7)
self.assertEqual(ge(y).shape, y.shape)
self.assertEqual(ge(x).shape, x.shape)
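# Illustrative aside (assumed graph contents, not asserted by this test): since
# the shape comes from `x` at runtime, the trace records aten::size calls
# instead of baking [3, 4] in as a constant, which is what makes the (2, 7)
# input above work. A rough check could look like:
#   FileCheck().check("aten::size").run(str(ge.graph))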
# Test that the trace of setitem doesn't store shapes as constants
# Fix https://github.com/pytorch/pytorch/issues/43548
def test_trace_slice_setitem_dynamic_shape(self):
def slice_setitem(x, y):
x[:, 2] = y + 1
return x
x = torch.randn(3, 4)
traced = torch.jit.trace(slice_setitem, (x, x[:, 0]))
x = torch.randn(10, 5)
self.assertEqual(traced(x.clone(), x[:, 0]), slice_setitem(x.clone(), x[:, 0]))
# Suppression: we are intentionally slicing a tensor, we don't care that it
# will be constantified
@suppress_warnings
def do_trace_slice(self, requires_grad):
def slice(x):
results = []
for i in range(4):
results.append(x[:x.size(0) - i, i:x.size(2), i:3])
return tuple(results)
def slice_select(x):
results = []
for i in range(4):
results.append(x[:, i:, x.size(2) - 5])
return tuple(results)
x = torch.randn(5, 6, 7, requires_grad=requires_grad)
y = torch.randn(7, 8, 9, requires_grad=requires_grad)
# Check that it behaves as expected
traced_slice = torch.jit.trace(slice, x)
self.assertEqual(traced_slice(y), slice(y))
self.assertEqual(traced_slice(x), slice(x))
traced_slice_select = torch.jit.trace(slice_select, x)
self.assertEqual(traced_slice_select(y), slice_select(y))
self.assertEqual(traced_slice_select(x), slice_select(x))
def test_trace_slice(self):
self.do_trace_slice(False)
# test the different graph_executor path that happens when
# gradients are required and sizes are involved
def test_trace_slice_with_grad(self):
self.do_trace_slice(True)
def test_trace_casts(self):
casts = [
lambda x: x.byte(),
lambda x: x.float(),
lambda x: x.cpu(),
lambda x: x.to(device='cpu'),
lambda x: x.to(dtype=torch.int64),
lambda x: x.to(device='cpu', dtype=torch.float),
lambda x: x.to(x)
]
def assertContainsCast(trace):
self.assertEqual(sum(n.kind() == 'aten::to' for n in trace.graph.nodes()), 1)
for cast in casts:
trace = torch.jit.trace(cast, torch.randn(2, 2))
assertContainsCast(trace)
x = torch.randn(2, 2)
self.assertEqual(trace(x), cast(x))
def to_tensor(x, y):
return x.to(y)
to_tensor_trace = torch.jit.trace(to_tensor, (torch.randn(2, 2), torch.randn(1, 8)))
assertContainsCast(to_tensor_trace)
x, y = torch.randn(2, 2), torch.randn(1, 10)
self.assertEqual(to_tensor_trace(x, y), to_tensor(x, y))
@skipIfCompiledWithoutNumpy
@skipIfCrossRef
def test_trace_warn(self):
def fn(x):
int(x) # Warning 1.
y = x * 1
if y: # Warning 2.
pass
q = [x, x * 4]
z = q[y]
float(z) # Warning 3.
z.tolist() # Warning 4.
z.numpy() # Warning 5.
for _ in torch.ones(4, 4): # Warning 6.
pass
return z + 4
with warnings.catch_warnings(record=True) as warns:
traced_fn = torch.jit.trace(fn, torch.tensor([1]))
for warn in warns:
self.assertIs(warn.category, torch.jit.TracerWarning)
warns = [str(w.message) for w in warns]
self.assertIn('a Python integer', warns[0])
self.assertIn('a Python boolean', warns[1])
self.assertIn('a Python float', warns[2])
self.assertIn('a Python list', warns[3])
self.assertIn('a NumPy array', warns[4])
self.assertIn('Iterating over', warns[5])
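# A hedged aside: when these tracer warnings are expected, they can also be
# silenced with the standard warnings machinery instead of @suppress_warnings,
# e.g. (illustrative only):
#   warnings.filterwarnings("ignore", category=torch.jit.TracerWarning)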
def test_trace_tuple(self):
def fn(x, y):
return x, (x * y[1], x * y[0])
x, y = torch.randn(2, 2), (torch.ones(2, 2), torch.randn(2, 2))
traced_fn = torch.jit.trace(fn, (x, y))
self.assertEqual(traced_fn(x, y), fn(x, y))
# should be a tuple nested within another tuple
FileCheck().check_count("prim::TupleConstruct", 2, exactly=True).check_next("return") \
.run(str(traced_fn.graph))
self.assertExportImport(traced_fn.graph, (x, y))
def test_trace_random(self):
def f(mean, std):
return torch.normal(mean, std)
traced = torch.jit.trace(f, (torch.zeros(2, 3), torch.ones(2, 3)), check_trace=False)
mean, std = torch.zeros(5, 5), torch.ones(5, 5)
with torch.random.fork_rng(devices=[]):
output = f(mean, std)
traced_output = traced(mean, std)
self.assertEqual(output, traced_output)
def test_trace_tensor_factory(self):
def run(**kwargs):
inputs_require_grads = kwargs.pop('inputs_require_grads', True)
def fn(x):
return x + torch.ones(2, 3, **kwargs)
input_kwargs = kwargs.copy()
if 'out' in input_kwargs:
del input_kwargs['out']
input = torch.ones(2, 3, **input_kwargs)
self.checkTrace(fn, (input,), inputs_require_grads=inputs_require_grads)
# check we recorded 'ones' and did not just record a constant
tfn = torch.jit.trace(fn, input)
self.assertTrue("ones" in str(tfn.graph))
run()
run(dtype=torch.int, inputs_require_grads=False)
run(out=torch.tensor([]))
if RUN_CUDA:
run(device="cuda:0")
if RUN_CUDA_MULTI_GPU:
run(device="cuda:1")
def test_trace_indexed_assignment(self):
def stuff(x, y):
x = x.clone()
x[0] = y
return x
example = torch.rand(3, 4)
self.checkTrace(stuff, (example, example[0] + 1))
# TODO: implement
@unittest.expectedFailure
def test_output_unflatten(self):
"""Check that outputs of traced functions retain the original structure and nesting"""
def fn(x):
return (x * 2, (x ** 2, x + 4, (x + 2,), ), x * 4)
self.checkTrace(fn, (torch.randn(2, 2),))
def test_input_flatten(self):
"""Check that inputs to traced functions are flattened"""
def fn(x, t):
y, z = t
return x * y * z
inputs = (torch.randn(1), (torch.randn(1), torch.randn(1)))
self.checkTrace(fn, inputs)
def test_input_dict_empty(self):
def test(d):
pass
with self.assertRaises(RuntimeError):
self.checkTrace(test, {})
def test_input_dict_remembers_keys(self):
"""Check that the trace remembers which keys were in a dict input"""
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, dict_input):
return dict_input['x']
input_1 = {'x': torch.tensor(1)}
m = TestModule()
m_traced = torch.jit.trace(m, (input_1, ))
self.assertEqual(m_traced(input_1), torch.tensor(1))
# should work to change the values and not the keys
input_same_key_different_value = {'x': torch.tensor(2)}
self.assertEqual(m_traced(input_same_key_different_value), torch.tensor(2))
# error to use something that doesn't have `x`
input_different_key = {'y': torch.tensor(3)}
with self.assertRaises(RuntimeError):
m_traced(input_different_key)
# it's okay to have additional elements in the dictionary, so long as 'x' is there
input_additional_key = {'x': torch.tensor(4), 'y': torch.tensor(3)}
self.assertEqual(m_traced(input_additional_key), torch.tensor(4))
def test_input_dict_insertion_order(self):
"""Check that dictionary access doesn't care about insertion order"""
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, dict_input):
return dict_input['x'], dict_input['y']
input_x_then_y = {}
input_x_then_y['x'] = torch.tensor(1)
input_x_then_y['y'] = torch.tensor(2)
m = TestModule()
m_traced = torch.jit.trace(m, (input_x_then_y, ))
self.assertEqual(m_traced(input_x_then_y), (torch.tensor(1), torch.tensor(2)))
input_y_then_x = {}
input_y_then_x['y'] = torch.tensor(4)
input_y_then_x['x'] = torch.tensor(3)
self.assertEqual(m_traced(input_y_then_x), (torch.tensor(3), torch.tensor(4)))
def test_input_dict_recursive(self):
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, dict_input):
return dict_input['x'][1]
input_1 = {'x': {1: torch.tensor(1)}}
m = TestModule()
m_traced = torch.jit.trace(m, (input_1, ))
input_2 = {'x': {1: torch.tensor(2)}}
self.assertEqual(m_traced(input_2), torch.tensor(2))
def test_input_dict_checkTrace_mut(self):
def test(d):
d['x'].tanh_()
return d['x']
inputs = {'x': torch.rand(3, 4), 'y': torch.rand(3, 4)}
self.checkTrace(test, (inputs,), inputs_require_grads=False)
def test_input_dict_unify(self):
def test(d):
return d['int'], d['float']
inputs = {'int': torch.ones((2, 2), dtype=torch.int32),
'float': torch.ones((2, 2), dtype=torch.float32)}
self.checkTrace(test, (inputs,), inputs_require_grads=False)
def test_input_tuple_of_dicts(self):
def test(t):
d = t[0]
return d['x']['y']
inputs = {'x': {'y': torch.rand(2, 3)}}
self.checkTrace(test, ((inputs, inputs),), allow_unused=True)
def test_input_dict_of_dicts(self):
def test(d):
return d['x']['y']
nested_input = {'y': torch.rand(2, 3)}
unified_nested = {'y': torch.rand(3, 2)}
inputs = {'x': nested_input, 'force_unify': unified_nested}
self.checkTrace(test, (inputs,), allow_unused=True)
def test_input_dict_of_lists(self):
def test(d):
return d['x'][0]
inputs = {'x': [torch.rand(3, 2)]}
self.checkTrace(test, (inputs,))
def test_input_list_toplevel_flatten(self):
def test(t1, t2):
return torch.add(t1, t2)
inputs = [torch.ones(2, 2), torch.rand(2, 2)]
self.checkTrace(test, inputs)
def test_input_list_toplevel_flatten_direct(self):
class Test(torch.nn.Module):
def forward(self, t1, t2):
return torch.add(t1, t2)
inputs = [torch.ones(2, 2), torch.rand(2, 2)]
torch.jit.trace(Test(), inputs)
def test_input_list_of_tuples(self):
def test(l):
return l[0][0]
inputs = [(torch.ones(2, 2),)]
self.checkTrace(test, (inputs,))
def test_input_dict_empty_list(self):
def test(d):
pass
inputs = {1: []}
with self.assertRaisesRegex(RuntimeError, 'List trace'):
self.checkTrace(test, (inputs,))
def test_input_list_mixed_type(self):
def test(d):
pass
inputs = [torch.rand(2, 3), (torch.ones(2), torch.ones(2))]
with self.assertRaisesRegex(RuntimeError, 'consistent'):
self.checkTrace(test, (inputs,))
def test_conv(self):
x = torch.ones(20, 16, 50, 40)
g, outputs, inputs = torch.jit._get_trace_graph(nn.Conv2d(16, 13, 3, bias=False), x, return_inputs=True)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
def test_max_pool(self):
x = torch.rand(20, 16, 10, 10)
def max_pool2d(x):
return F.max_pool2d(x, 2) + 2
trace = torch.jit.trace(max_pool2d, (x,))
graph = trace.graph_for(x)
FileCheck().check("aten::max_pool2d(").run(graph)
self.assertEqual(max_pool2d(x), trace(x))
def test_nested_inplace(self):
x = torch.randn(2, 2)
g, outputs, inputs = torch.jit._get_trace_graph(
lambda x: F.threshold(x, 0, 0, inplace=True), (x, ), return_inputs=True)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
FileCheck().check("threshold_").run(str(g))
self.assertExportImport(g, (x,))
def test_repeated_input(self):
def fn(a, b):
return a + b
ge = self.checkTrace(fn, [torch.randn(2, 2)] * 2)
inputs = set(ge.graph.inputs())
# 3 instead of 2 because the export/import in checkTrace adds a
# `self` module argument
self.assertTrue(len(inputs) == 3)
def test_repeated_output(self):
def fn(a, b):
z = a + b
return z, z
ge = self.checkTrace(fn, [torch.randn(2, 2) for _ in range(2)])
tuple_output = list(ge.graph.outputs())[0]
tuple_inputs = list(tuple_output.node().inputs())
self.assertTrue(tuple_inputs[0] == tuple_inputs[1])
def test_inplace_copy(self):
x = torch.randn(4, 4, requires_grad=True)
def f(x):
out = torch.zeros(x.size())
out.copy_(x)
return out
g, outputs, inputs = torch.jit._get_trace_graph(f, (x, ), return_inputs=True)
self.run_pass('dce', g)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
self.assertExportImport(g, (x,))
def test_inplace_copy_force_outplace(self):
x = torch.randn(4, 4, requires_grad=True)
def f(x):
out = torch.zeros(x.size())
out.copy_(x)
return out
g, outputs, inputs = torch.jit._get_trace_graph(
f, (x, ), return_inputs=True, _force_outplace=True)
self.run_pass('dce', g)
m = self.createFunctionFromGraph(g)
self.assertEqual(outputs, m(*inputs))
self.assertExportImport(g, (x,))
FileCheck().check("expand_as").run(str(g))
def test_shared_param(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.b = self.a = nn.Parameter(torch.randn(2, 2))
def forward(self, x):
return x * self.a + self.b
m = MyModule()
g, _ = torch.jit._get_trace_graph(m, (torch.randn(2, 2),))
self.run_pass('dce', g)
self.assertEqual(len(list(g.inputs())), 2)
FileCheck().check("mul").check("add").run(str(g))
def test_trace_c10_ops(self):
try:
_ = torch.ops._caffe2.GenerateProposals
except AttributeError:
self.skipTest("Skip the test since c2 ops are not registered.")
class MyModel(torch.nn.Module):
def __init__(self):
super(MyModel, self).__init__()
def forward(self, scores, bbox_deltas, im_info, anchors):
a, b = torch.ops._caffe2.GenerateProposals(
(scores), (bbox_deltas), (im_info), (anchors),
2.0, 6000, 300, 0.7, 16, True, -90, 90, 1.0, True,
)
return a, b
model = MyModel()
A = 4
H = 10
W = 8
img_count = 3
scores = torch.ones(img_count, A, H, W, dtype=torch.float32)
bbox_deltas = torch.linspace(0, 10, steps=img_count * 4 * A * H * W,
dtype=torch.float32)
bbox_deltas = bbox_deltas.view(img_count, 4 * A, H, W)
im_info = torch.ones(img_count, 3, dtype=torch.float32)
anchors = torch.ones(A, 4, dtype=torch.float32)
inputs = (scores, bbox_deltas, im_info, anchors)
traced_model = torch.jit.trace(model, inputs)
self.assertEqual(traced_model(*inputs), model(*inputs))
self.assertExportImportModule(traced_model, (scores, bbox_deltas, im_info, anchors))
def run_ge_tests(self, optimize, use_cuda):
with enable_profiling_mode_for_profiling_tests():
with torch.jit.optimized_execution(optimize):
def rand(*args):
t = torch.rand(*args).float()
if use_cuda:
t = t.cuda()
return t
self.checkTrace(lambda a, b: a * b + b,
[rand(1), rand(1)], [rand(2, 3), rand(2, 3)])
# trivial identity
self.checkTrace(lambda a, b: (b, a), [rand(1), rand(1)])
def foo(a):
t = a * a
return t * t, 4 * t
self.checkTrace(foo, [rand(1)])
# unused input
self.checkTrace(
lambda a, b: a * a, [rand(1), rand(1)], allow_unused=True)
# test outputs that do not get used in grad
self.checkTrace(foo, [rand(1)], drop=1)
# test autograd fallback
self.checkTrace(lambda a, b: a * b /
(a - 2 * b) + b, [rand(1), rand(1)])
def test_ge_unoptimized(self):
self.run_ge_tests(False, False)
@unittest.skipIf(IS_SANDCASTLE, "NYI: fuser support for Sandcastle")
@enable_cpu_fuser
def test_ge_optimized(self):
with enable_profiling_mode_for_profiling_tests():
self.run_ge_tests(True, False)
@unittest.skipIf(not RUN_CUDA, "requires CUDA")
def test_ge_cuda(self):
self.run_ge_tests(True, True)
# more manual test of graph executor that can be used as a scratchpad
def test_ge(self):
def foo(a, b):
return a * b / (a - b) + b
V = Variable
a, b = V(torch.rand(1)), V(torch.rand(1))
ge = torch.jit.trace(foo, (a, b))
a, b = V(torch.rand(1), requires_grad=True), V(
torch.rand(1), requires_grad=True)
r, = ge(a, b)
da, db = torch.autograd.grad(r + 3, [a, b], create_graph=True)
l2 = (da * db + db * db)
g2result = torch.autograd.grad(l2, [da, db])
r = foo(a, b)
da2, db2 = torch.autograd.grad(r + 3, [a, b], create_graph=True)
self.assertEqual(da, da2)
self.assertEqual(db, db2)
l3 = (da2 * db2 + db2 * db2)
g2result2 = torch.autograd.grad(l3, [da2, db2])
self.assertEqual(g2result, g2result2)
def test_trace_annotation(self):
@_trace(torch.rand(1))
def foo(a):
return a + a + a
x = torch.randn(5, 5)
self.assertEqual(foo(x), x + x + x)
@unittest.skipIf(not RUN_CUDA, "calls .cuda()")
# By default, on Ampere or later GPUs, nn.Linear computes float tensors at TF32 precision.
# We want float tensors to be computed at full float32 precision here so the
# traced and eager results match at the default comparison tolerances.
@with_tf32_off
def test_traced_module_cuda(self):
class Model(nn.Module):
def __init__(self, num_features, num_layers):
super(Model, self).__init__()
self.num_layers = num_layers
layers = [[nn.Linear(num_features, num_features), nn.Sigmoid()]
for _ in range(num_layers)]
self.submodule = nn.Sequential(*chain(*layers))
def forward(self, x):
for i in range(self.num_layers):
x = self.submodule[i](x) + x
return x
model = Model(5, 3)
x = torch.randn(2, 5)
traced_model = torch.jit.trace(model, x)
# We're missing some attributes these modules had initially. Make sure we can
# still get the __repr__()
model.__repr__()
# XXX: indexing sequentials is broken
linear_submodule = next(iter(traced_model.submodule._modules.values()))
# All attributes that aren't parameters should raise
with self.assertRaises(AttributeError):
linear_submodule.in_features
linear_submodule.weight
linear_submodule.weight = nn.Parameter(torch.randn(linear_submodule.weight.shape))
with self.assertRaises(RuntimeError):
del linear_submodule.weight
# Submodules can't be called
with self.assertRaises(RuntimeError):
linear_submodule(x)
# Type casts
linear_submodule.cuda()
traced_model.float().cuda()
cuda_out = traced_model(x.float().cuda())
traced_model.cpu()
cpu_out = traced_model(x.float())
self.assertEqual(cpu_out, cuda_out)
traced_model.to('cuda')
cuda_out = traced_model(x.float().cuda())
traced_model.to('cpu')
cpu_out = traced_model(x.float())
self.assertEqual(cpu_out, cuda_out)
traced_model.double()
# state_dict + load_state_dict
state = {k: v.clone() for k, v in traced_model.state_dict().items()}
new_state = {k: v.clone().fill_(1) for k, v in state.items()}
out = traced_model(x)
traced_model.load_state_dict(new_state)
out_ones = traced_model(x)
traced_model.load_state_dict(state)
out_state = traced_model(x)
self.assertEqual(out, out_state)
self.assertNotEqual(out, out_ones)
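# Note (illustrative, not part of the test): besides the @with_tf32_off
# decorator used above, TF32 matmuls can be disabled globally through the
# backend flag, e.g.:
#   torch.backends.cuda.matmul.allow_tf32 = False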
def test_export_no_reorder(self):
def func(a, b):
return a * b / (a - 2 * b) + b
recording_inputs = [torch.tensor([0.55619788169860839844], dtype=torch.float32, requires_grad=True),
torch.tensor([0.25947844982147216797], dtype=torch.float32, requires_grad=True)]
ge1 = torch.jit.trace(func, recording_inputs)
ge2 = self.getExportImportCopy(ge1)
outputs_ge1 = ge1(*recording_inputs)
outputs_ge2 = ge2(*recording_inputs)
grad_ge1 = torch.autograd.grad(outputs_ge1, recording_inputs)
grad_ge2 = torch.autograd.grad(outputs_ge2, recording_inputs)
self.assertTrue(outputs_ge1 == outputs_ge2)
self.assertTrue(grad_ge1 == grad_ge2)
def test_python_function(self):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
return x + 1
@staticmethod
def backward(ctx, grad_output):
return grad_output
@_trace(torch.zeros(2))
def fn(x):
return MyFn.apply(x + 2) + 3
x = torch.tensor([1., 2., 3.])
y = torch.randn(2, 2, requires_grad=True)
fn(x)
fn(y)
def test_python_function_tup(self):
class MyFn(Function):
@staticmethod
def forward(ctx, x):
return x + 1, x - 1
@staticmethod
def backward(ctx, grad_output):
return grad_output, grad_output
@_trace(torch.zeros(2))
def fn(x):
a, b = MyFn.apply(x + 2)
return a + b + 3
x = torch.tensor([1., 2., 3.])
y = torch.randn(2, 2, requires_grad=True)
fn(x)
fn(y)
def test_trace_detach(self):
def foo(x, w):
return torch.matmul(x, w).detach()
traced = torch.jit.trace(foo, (torch.rand(3, 4), torch.rand(4, 5)))
FileCheck().check("matmul").check("detach").run(str(traced.graph))
x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
traced_result = traced(x, w)
self.assertEqual(foo(x, w), traced_result)
self.assertFalse(traced_result.requires_grad)
self.assertIsNone(traced_result.grad_fn)
def test_trace_detach_redispatch(self):
def foo(x, w):
y = torch.matmul(x, w)
assert y.requires_grad
y = y.detach()
# Make sure trace kernel redispatches to the right lower kernel.
assert not y.requires_grad
return y
x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
# With `check_trace=True` it will run with `@torch.no_grad()` and break assert.
torch.jit.trace(foo, (x, w), check_trace=False)
def test_trace_detach_inplace(self):
def foo(x, w):
y = torch.matmul(x, w)
y.detach_()
return y
traced = torch.jit.trace(foo, (torch.rand(3, 4), torch.rand(4, 5)))
FileCheck().check("matmul").check("detach(").run(str(traced.graph))
x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
traced_result = traced(x, w)
self.assertEqual(foo(x, w), traced_result)
self.assertFalse(traced_result.requires_grad)
self.assertIsNone(traced_result.grad_fn)
def test_trace_detach_inplace_redispatch(self):
def foo(x, w):
y = torch.matmul(x, w)
assert y.requires_grad
y.detach_()
# Make sure trace kernel redispatches to the right lower kernel.
assert not y.requires_grad
return y
x, w = torch.rand(3, 4), torch.rand(4, 5, requires_grad=True)
# With `check_trace=True` it will run with `@torch.no_grad()` and break assert.
torch.jit.trace(foo, (x, w), check_trace=False)
def test_trace_detach_onnx_erase(self):
class Mod(torch.nn.Module):
def forward(self, x, w):
return torch.matmul(x, w).detach()
torch.onnx.export_to_pretty_string(
Mod(), (torch.rand(3, 4), torch.rand(4, 5)))
def test_trace_slice_full_dim(self):
def foo(x):
return x[0:5, 0] + 1.0
traced = torch.jit.trace(foo, (torch.rand(5, 4),))
test_x = torch.rand(6, 3)
self.assertEqual(foo(test_x), traced(test_x))
def test_trace_dict_input(self):
class Bar(torch.nn.Module):
def __init__(self):
super(Bar, self).__init__()
self.foo = Foo()
def forward(self, a, b):
return self.foo({'a': a, 'b': b})['a']
class Foo(torch.nn.Module):
def forward(self, x):
return {'a': x['a'] * x['b']}
x = (torch.rand(3), torch.rand(3))
model = Bar()
self.checkTrace(model, x)
def test_trace_dict_output(self):
class TraceDictStrTensor(torch.nn.Module):
def forward(self, a, b):
return {'a': a, 'b': b}
class TraceDictTensorTensor(torch.nn.Module):
def forward(self, a, b):
return {a: b, b: a}
x = (torch.rand(3), torch.rand(3))
with self.assertRaisesRegex(RuntimeError, r"Encountering a dict at the output"):
torch.jit.trace(TraceDictStrTensor(), x)
traced_dict_str_mod = torch.jit.trace(TraceDictStrTensor(), x, strict=False)
self.assertEqual(traced_dict_str_mod(*x), {'a': x[0], 'b': x[1]})
traced_dict_tensor_mod = torch.jit.trace(TraceDictTensorTensor(), x, strict=False)
self.assertEqual(traced_dict_tensor_mod(*x), {x[0]: x[1], x[1]: x[0]})
def test_trace_with_tensor_list_output(self):
def f():
return [torch.zeros(1), torch.zeros(5)]
with self.assertWarnsRegex(torch.jit.TracerWarning, "cause the trace to be incorrect"):
torch.jit.trace(f, [])
traced_non_strict_f = torch.jit.trace(f, [], strict=False)
self.assertEqual(traced_non_strict_f(), f())
def test_trace_with_number_list_output(self):
def f():
return [1, 5]
with self.assertRaisesRegex(RuntimeError, r"Only tensors.+can be output from traced functions"):
traced_f = torch.jit.trace(f, [])
def test_trace_with_nested_tensor_list_output(self):
def f():
return [[torch.zeros(1)], [torch.zeros(5)]]
with self.assertRaisesRegex(RuntimeError, r"Only tensors.+can be output from traced functions"):
traced_f = torch.jit.trace(f, [])
def test_trace_variable_instantiation(self):
def random_foo(x):
return Variable(Variable(x) + 1.0)
random_foo_traced = torch.jit.trace(random_foo, (torch.rand(3, 4),))
x = torch.rand(5, 6)
self.assertEqual(random_foo(x), random_foo_traced(x))
def test_trace_slice_expr_complete_type(self):
def random_foo(x):
return x + 1.0
random_foo_traced = torch.jit.trace(random_foo, (torch.rand(3, 4),))
@torch.jit.script
def random_bar(x):
return random_foo_traced(x)[0:1]
x = torch.rand(3, 4)
self.assertEqual(random_bar(x), (x + 1)[0:1])
def test_trace_inline_shape(self):
# testing that the peephole optimization turns size() into a constant
# in a script fn
@torch.jit.script
def tensor_size(x: torch.Tensor) -> torch.Tensor:
return torch.tensor([x.size()[0]])
self.assertEqual(
tensor_size(torch.rand(15,)),
torch.tensor([15])
)
traced_tensor_size = torch.jit.trace(tensor_size, torch.rand(7,))
self.assertEqual(
traced_tensor_size(torch.rand(15,)),
torch.tensor([15])
)
@torch.jit.script
def use_device(x):
return torch.zeros_like(x, device=x.device)
def foo(x):
return use_device(x)
traced_tensor_size = torch.jit.trace(foo, torch.rand(7,))
self.run_pass('inline', traced_tensor_size.graph)
FileCheck().check("prim::device").run(traced_tensor_size.graph)
def test_trace_save(self):
def fn(x):
return x + 2
def check(func):
with TemporaryFileName() as fname:
func.save(fname)
loaded = torch.jit.load(fname)
input = torch.randn(2, 2)
self.assertEqual(func(input), loaded(input))
out = torch.jit.trace(fn, (torch.ones(2, 2),))
check(out)
def test_trace_optional_dtype(self):
class Test(torch.nn.Module):
def forward(self):
return torch.arange(5)
traced = torch.jit.trace(Test(), ())
torch.allclose(traced(), Test()())
def test_trace_save_load_copy(self):
class Test(torch.nn.Module):
def __init__(self):
super(Test, self).__init__()
self.conv = torch.nn.Conv2d(3, 3, 3)
def forward(self, x):
return self.conv(x)
traced = torch.jit.trace(Test(), torch.rand(1, 3, 224, 224))
buffer = io.BytesIO()
torch.jit.save(traced, buffer)
buffer.seek(0)
loaded = torch.jit.load(buffer)
# should work
copy.copy(loaded)
copy.deepcopy(loaded)
def test_trace_export_fns(self):
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.a = 3
@torch.jit.export
def __getstate__(self):
return (3, self.training)
@torch.jit.export
def __setstate__(self, state):
self.a = state[0]
self.training = state[1]
def forward(self, x):
return x + self.a
f = Foo()
traced = torch.jit.trace(f, (torch.rand(3, 4),))
expected_names = ['__getstate__', '__setstate__']
def check(mod):
self.assertTrue(all(name in mod._c._method_names() for name in expected_names))
check(traced)
imported = self.getExportImportCopy(traced)
check(imported)
def test_trace_export_fns_recursive(self):
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.a = 3
@torch.jit.export
def __getstate__(self):
return (3, self.training)
@torch.jit.export
def __setstate__(self, state):
self.a = state[0]
self.training = state[1]
def forward(self, x):
return x + self.a
class Wrapper(torch.nn.Module):
def __init__(self):
super(Wrapper, self).__init__()
self.foo = Foo()
def forward(self, x):
return self.foo(x)
f = Wrapper()
traced = torch.jit.trace(f, (torch.rand(3, 4),))
expected_names = ['__getstate__', '__setstate__']
def check(mod):
self.assertTrue(all(name in mod._c._method_names() for name in expected_names))
check(traced.foo)
imported = self.getExportImportCopy(traced)
check(imported.foo)
# Note that Bar's forward can only be traced, but not scripted
class Bar(nn.Module):
def __init__(self):
super().__init__()
@torch.jit.export
def addTwo(self, x):
return x + 2
def forward(self, input):
return (lambda a: a + 1)(input)
# When tracing Bar as a submodule, we only want to script the
# exported methods, and we want to keep the forwards still
# being traced.
class WrapperExports(torch.nn.Module):
def __init__(self):
super(WrapperExports, self).__init__()
self.bar = Bar()
@torch.jit.export
def addOne(self, x):
return x + 1
def forward(self, x):
return self.bar(x)
f = WrapperExports()
traced = torch.jit.trace(f, (torch.rand(3, 4),))
expected_names = ['addOne']
check(traced)
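# Hedged aside: per the note above, Bar's lambda-using forward is trace-only,
# so scripting the wrapper directly would be expected to fail, while the traced
# version keeps forward traced and only scripts the @torch.jit.export methods.
# Illustrative only:
#   torch.jit.script(WrapperExports())   # would raise, lambdas are not scriptable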
def test_trace_autograd_function(self):
class TestFunc(torch.autograd.Function):
@staticmethod
def forward(ctx, input):
return torch.neg(input)
@staticmethod
def backward(ctx, grad_output):
return torch.neg(grad_output)
class TracedModule(torch.nn.Module):
def forward(self, x):
return torch.relu(TestFunc.apply(x))
class Wrapper(torch.nn.Module):
def __init__(self):
super(Wrapper, self).__init__()
self.tm = TracedModule()
def forward(self, x):
return self.tm(x)
traced = torch.jit.trace(Wrapper(), (torch.rand(3, 4),))
def test_trace_multi_output_function(self):
# An autograd.Function with two outputs.
# It swaps inputs so we can check if shape
# handling is correct in TorchScript.
class Foo(torch.autograd.Function):
@staticmethod
def forward(ctx, x, y):
return y, x
@staticmethod
def backward(ctx, du, dv):
return dv, du
class Bar(torch.nn.Module):
def forward(self, x, y):
x = x.relu()
y = y.relu()
z = Foo.apply(x, y)
return z
x = torch.rand(3, 2, dtype=torch.double)
y = torch.rand(1, 2, dtype=torch.double)
# Generate JIT IR.
traced = torch.jit.trace(Bar(), (x, y))
print(traced.graph)
# Expected output schema of the custom autograd.Function.
schema = '(Double(1, 2, strides=[2, 1], requires_grad=0, device=cpu), '\
'Double(3, 2, strides=[2, 1], requires_grad=0, device=cpu)) '\
'= ^Foo'
# See if expected schema exists.
FileCheck().check(schema).run(traced.graph)
# Also examine if the graph is runnable and produces
# the right result.
u, v = traced(x, y)
self.assertEqual(u, y)
self.assertEqual(v, x)
def test_interpolate_trace(self):
class test(nn.Module):
def __init__(self):
super(test, self).__init__()
self.conv = nn.Conv2d(1, 32, kernel_size=3, padding=1)
def forward(self, x):
y = self.conv(x)
w = nn.functional.interpolate(y, mode='bilinear', align_corners=False, scale_factor=3)
return w
f = test()
# no failure
g = torch.jit.trace(f, (torch.zeros(1, 1, 28, 28),))
x = torch.zeros(1, 1, 14, 14)
# constants not baked in
self.assertEqual(g(x), f(x))
@_tmp_donotuse_dont_inline_everything
def test_trace_optional(self):
@torch.jit.script
def test(x: Optional[Tensor]):
if x is None:
return torch.zeros(1)
else:
return x
def test_none():
return test(None)
def test_tensor():
return test(torch.zeros(2))
f_none = torch.jit.trace(test_none, ())
self.assertEqual(f_none(), torch.zeros(1))
f_tensor = torch.jit.trace(test_tensor, ())
self.assertEqual(f_tensor(), torch.zeros(2))
graph = f_tensor.graph
FileCheck().check('name="test"').check_next("prim::CallFunction").run(graph)
def test_trace_nested_datatypes(self):
@torch.jit.script
def foo(x):
return [[x + 1, x - 1], [x + 2, x - 2]]
def bar(x):
list_stuff = foo(x)
return list_stuff[0][0], list_stuff[1][1]
traced = torch.jit.trace(bar, torch.rand(3, 4))
x = torch.rand(5, 6)
self.assertEqual(bar(x), traced(x))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_fn_from_traced_module(self):
@_trace(torch.rand(3, 4))
def traced_fn(x):
return torch.neg(x)
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
def forward(self, x):
return traced_fn(torch.mm(x, self.param))
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
# Note: inlining is disabled here, so the call to the traced function is kept
# as a prim::CallFunction to traced_fn instead of its neg op being inlined
FileCheck().check("aten::mm") \
.check('name="traced_fn"') \
.check_next("prim::CallFunction") \
.run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_module_from_traced_module(self):
class TracedModule1(torch.nn.Module):
def __init__(self):
super(TracedModule1, self).__init__()
self.param = torch.nn.Parameter(torch.rand(5, 7))
def forward(self, x):
return torch.mm(x, self.param)
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
self.mod = torch.jit.trace(TracedModule1(), torch.rand(3, 5))
def forward(self, x):
return self.mod(torch.mm(x, self.param)) + 1.0
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
FileCheck().check("aten::mm").check("prim::CallMethod").check_same("forward").check("aten::add").run(str(tm.graph))
def test_index_put_trace_with_view(self):
@_trace(torch.rand(100), torch.tensor([1, 2, 3, 4]), torch.rand(1, 1, 1, 4))
def test_index_put(target, indices, rhs):
target[indices] = rhs
return target
FileCheck().check("aten::view").check("index_put_").run(str(test_index_put.graph))
def test_index_put_trace_without_view(self):
@_trace(torch.rand(100), torch.tensor([1, 2, 3, 4]), torch.rand(4))
def test_index_put(target, indices, rhs):
target[indices] = rhs
return target
FileCheck().check_not("aten::view").check("index_put_").run(str(test_index_put.graph))
@suppress_warnings
def test_trace_checker_dot_data(self):
with self.assertRaisesRegex(torch.jit.TracingCheckError, r'Tensor-valued Constant nodes differed in value '
r'across invocations'):
@_trace(torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)])
def foo(x):
y = x.data
return x + y
@suppress_warnings
def test_trace_checker_control_flow(self):
def foo(x):
for _ in range(x.size(0)):
x = torch.neg(x)
return x
with self.assertRaisesRegex(torch.jit.TracingCheckError, r'Graphs differed across invocations!'):
torch.jit.trace(foo, torch.randn(3, 4), check_inputs=[torch.randn(4, 4)])
@suppress_warnings
def test_trace_checker_memoization(self):
with self.assertRaisesRegex(torch.jit.TracingCheckError, r'Graphs differed across invocations!'):
def foo(x):
if not hasattr(foo, 'cache'):
foo.cache = torch.neg(x)
return x + foo.cache
traced = torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[(torch.rand(3, 4),)])
def test_trace_checker_slice_lhs(self):
def foo(x):
for i in range(3):
x[i, :] = torch.zeros(4)
return x
self.checkTrace(foo, (torch.rand(3, 4),), inputs_require_grads=False)
def test_trace_checker_inplace_on_view(self):
def foo(x):
x.view(-1).add_(-x.view(-1))
return x
with self.assertWarnsRegex(torch.jit.TracerWarning,
'Output nr 1. of the traced function does not match the '
'corresponding output of the Python function'):
torch.jit.trace(foo,
torch.rand(3, 4),
check_inputs=[torch.rand(5, 6)],
_force_outplace=True)
def test_lhs_index_fails(self):
def foo(x):
x[0, 1] = 4
return x
with self.assertWarnsRegex(torch.jit.TracerWarning, "cause the trace to be incorrect"):
torch.jit.trace(foo, torch.rand(3, 4), _force_outplace=True)
def test_lhs_index_trivial(self):
def foo(y, x):
y[...] = x
return y
self.checkTrace(foo, (torch.rand(3, 4), torch.rand(4)), inputs_require_grads=False)
def test_inplace_warn(self):
def foo(x):
x.view(-1).add_(-x.view(-1))
return x
with self.assertWarnsRegex(torch.jit.TracerWarning, "cause the trace to be incorrect"):
torch.jit.trace(foo, torch.rand(3, 4), _force_outplace=True)
@suppress_warnings
def test_trace_checker_dropout_train(self):
def foo(x):
return torch.dropout(x, p=0.5, train=True)
with self.assertWarnsRegex(torch.jit.TracerWarning,
'Output nr 1. of the traced function does not match the '
'corresponding output of the Python function'):
torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[torch.rand(5, 6)])
with self.assertWarnsRegex(torch.jit.TracerWarning,
'Trace had nondeterministic nodes'):
torch.jit.trace(foo, torch.rand(3, 4), check_inputs=[torch.rand(5, 6)])
def test_trace_checker_dropout_notrain(self):
input = torch.rand(3, 4)
@_trace(input)
def foo(x):
return torch.dropout(x, p=0.5, train=False)
self.assertEqual(foo(input), input)
def test_trace_contiguous(self):
def foo(x):
return x[:, :, ::2].contiguous().view(12)
x = torch.rand(2, 3, 4)
traced = torch.jit.trace(foo, (x,))
y = traced(x)
self.assertNotEqual(x.storage().data_ptr(), y.storage().data_ptr())
# This tests the logic in THPVariable_contiguous. There is short-circuiting
# code that prevents us from even getting to VariableType::contiguous, since
# it is an optimization that prevents us from acquiring the GIL for touching
# the device. We needed to add the tracing logic directly into the
# THPVariable_contiguous function only for the path where we are skipping
# dispatch into contiguous. We should see an aten::contiguous in this trace!
def test_trace_contiguous_short_circuit(self):
def foo(x):
return x.contiguous()
x = torch.rand(2, 3, 4)
traced = torch.jit.trace(foo, (x,))
FileCheck().check("aten::contiguous").run(str(traced.graph))
def test_trace_inverse(self):
def foo(x):
return ~x
foo_traced = torch.jit.trace(foo, torch.zeros(3, 4, dtype=torch.uint8))
eg = torch.zeros(3, dtype=torch.uint8)
self.assertEqual(foo_traced(eg), foo(eg))
def test_trace_modulelist(self):
class MySubmod(torch.nn.Module):
def __init__(self):
super(MySubmod, self).__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x)
class MyMod(torch.nn.Module):
def __init__(self):
super(MyMod, self).__init__()
self.ml = torch.nn.ModuleList([
MySubmod(),
MySubmod()
])
def forward(self, x):
for mod in self.ml:
x = mod(x)
return x
traced = torch.jit.trace(MyMod(), (torch.rand(3, 4),))
def test_trace_fork_join_and_module(self):
class MySubmod(torch.nn.Module):
def __init__(self):
super(MySubmod, self).__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x), torch.neg(x)
class Mod(torch.nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.ml = torch.nn.ModuleList([
MySubmod() for i in range(2)
])
def forward(self, x):
futs = []
for i in range(2):
futs.append(torch.jit._fork(self.ml[i], x))
results = []
for i in range(2):
results.append(torch.jit._wait(futs[i])[0])
return torch.stack(results)
m = Mod()
traced = torch.jit.trace(m, torch.rand(3, 4))
def test_trace_invert_module_hierarchy(self):
class MySubmod(torch.nn.Module):
def __init__(self):
super(MySubmod, self).__init__()
self.relu = torch.nn.ReLU()
def forward(self, x):
return self.relu(x), torch.neg(x)
class MyFunctionalMod(torch.nn.Module):
def forward(self, x, submod):
return submod(x)
class Mod(torch.nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.sm = MySubmod()
self.fm = MyFunctionalMod()
def forward(self, x):
return self.fm(x, self.sm)
torch.jit.trace(Mod(), (torch.rand(3, 4),))
@skipIfCrossRef
def test_trace_records_names(self):
def foo(bar, baz):
baz = bar + 3
quick_brown_fox = torch.neg(baz)
for _ in range(20):
yeet = quick_brown_fox - 3.14
return yeet
traced = torch.jit.trace(foo, (torch.rand(3, 3), torch.rand(3, 3)))
graph_str = str(traced.graph)
assert 'bar' in graph_str
assert 'baz' in graph_str
assert 'quick_brown_fox' in graph_str
@skipIfTorchDynamo("Not a suitable test for TorchDynamo")
def test_tracing_hooks(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
def forward(self, x):
return x + x
def test_hook(is_post_hook, hook, fc):
n = Net()
if is_post_hook:
n.register_forward_hook(hook)
else:
n.register_forward_pre_hook(hook)
module = torch.jit.trace(n, (torch.tensor(1.0),))
eager_input = torch.tensor(1.0)
eager_out = n(eager_input)
fc.run(module.forward.graph)
input = torch.tensor(1.0)
output = module(input)
self.assertEqual(input, eager_input)
self.assertEqual(output, eager_out)
def hook_no_return(mod, input, output):
input[0].add_(1)
output.sub_(1)
fc = FileCheck().check("add(").check("add_(").check("sub_(")
test_hook(True, hook_no_return, fc)
def hook_return(mod, input, output):
input[0].add_(1)
return output - 3
fc = FileCheck().check("add(").check("add_(").check("sub(")
test_hook(True, hook_return, fc)
b = torch.tensor(3.0)
def captured_hook(mod, input, output):
return output - b
fc = FileCheck().check("add(").check("sub(")
test_hook(True, captured_hook, fc)
def pre_hook_no_ret(mod, input):
input[0].add_(3)
fc = FileCheck().check("add_(").check("add(")
test_hook(False, pre_hook_no_ret, fc)
def pre_hook_ret(mod, input):
return input[0] - 4
fc = FileCheck().check("sub(").check("add(")
test_hook(False, pre_hook_ret, fc)
def test_tracing_backward_hook_error(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
def forward(self, x):
return x + x
n = Net()
def backward_hook(module, grad_input, grad_output):
pass
n.register_backward_hook(backward_hook)
with self.assertRaisesRegex(Exception, "backward hooks assigned"):
torch.jit.trace(n, (torch.tensor(1.0),))
def test_tracing_multiple_methods(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, x):
return self.conv(x)
def weighted_kernel_sum(self, weight):
return weight * self.conv.weight
example_weight = torch.rand(1, 1, 3, 3)
example_forward_input = torch.rand(1, 1, 3, 3)
inputs = {'forward' : example_forward_input, 'weighted_kernel_sum' : example_weight}
n = Net()
module = torch.jit.trace_module(n, inputs)
check_inputs = []
for i in range(2):
check_weight = torch.rand(1, 1, 3, 3)
check_forward_input = torch.rand(1, 1, 3, 3)
check_inputs.append({'forward' : check_forward_input, 'weighted_kernel_sum' : check_weight})
module = torch.jit.trace_module(n, inputs, check_trace=True, check_inputs=check_inputs)
self.assertTrue(module._c._has_method("forward"))
self.assertTrue(module._c._has_method("weighted_kernel_sum"))
module = torch.jit.trace(n.forward, example_forward_input)
module = torch.jit.trace(n.forward, example_forward_input, check_trace=True, check_inputs=[example_forward_input])
with self.assertRaisesRegex(AttributeError, "trace doesn't support compiling individual module's functions"):
module = torch.jit.trace(n.weighted_kernel_sum, inputs)
def test_tensor_with_grad_as_constant(self):
param = torch.randn(3).requires_grad_()
x = torch.randn(3)
def f(x):
return x + param
with self.assertRaisesRegex(RuntimeError, "Cannot insert a Tensor that requires grad as a constant"):
torch.jit.trace(f, x)
def test_non_tensor_tracing(self):
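# Note: `param` below is intentionally left undefined; torch.jit.trace rejects
# the non-tensor example input before `f` is ever invoked, so the NameError is
# never reached and only the expected RuntimeError fires.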
def f(x):
return x + param
with self.assertRaisesRegex(RuntimeError, r"Type 'Tuple\[int\]' cannot be traced"):
torch.jit.trace(f, (1,))
def test_trace_skip_none_submodule(self):
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.submod = torch.nn.Linear(3, 4)
self.submod = None
def forward(self, inputs):
return inputs
m = TestModule()
tm = torch.jit.trace(m, torch.tensor(1.))
self.assertFalse(hasattr(tm, "submod"))
def test_trace_with_conditional_property(self):
class Net(nn.Module):
def __init__(self, attr=None):
super(Net, self).__init__()
if attr is not None:
self._attr = attr
self.attr_name = '_attr'
@property
def attr(self):
return getattr(self, self.attr_name)
def forward(self, x):
return x
x = torch.ones(1)
torch.jit.trace(Net(), x)
def test_trace_func_argument_names_captured(self):
def fn(first_arg: torch.Tensor, second_arg: torch.Tensor) -> torch.Tensor:
return first_arg + second_arg
traced_fn = torch.jit.trace(fn, (torch.ones(1), torch.ones(1)))
FileCheck().check("first_arg").check_next("second_arg") \
.run(str(traced_fn.graph))
def test_trace_partial_func_argument_names_captured(self):
def fn(first_arg: torch.Tensor, second_arg=1) -> torch.Tensor:
return first_arg + second_arg
traced_fn = torch.jit.trace(fn, (torch.ones(1),))
FileCheck().check("first_arg").check_not("second_arg") \
.run(str(traced_fn.graph))
def test_trace_module_argument_names_captured(self):
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, first_arg: torch.Tensor, second_arg: torch.Tensor):
return self.conv(first_arg) + second_arg
m = TestModule()
example_input = (torch.ones(1, 1, 3, 3), torch.ones(1, 1, 3, 3))
# Explicitly tracing the module's forward method
traced_module_forward = torch.jit.trace(m.forward, example_input)
FileCheck().check("first_arg").check_next("second_arg") \
.run(str(traced_module_forward.graph))
# Tracing the module directly
traced_module = torch.jit.trace(m, example_input)
FileCheck().check("first_arg").check_next("second_arg") \
.run(str(traced_module.graph))
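# A brief hedged sketch of the FileCheck pattern used throughout these tests
# (illustrative only): check() scans forward for a substring, check_next()
# expects the next match on the following line, and check_not() asserts absence.
def _example_filecheck_usage():
    graph_text = "graph(%first_arg : Tensor,\n      %second_arg : Tensor):\n  return (%first_arg)"
    FileCheck().check("first_arg").check("second_arg").check_not("third_arg").run(graph_text)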
class TestMixTracingScripting(JitTestCase):
def test_trace_script(self):
@torch.jit.script
def func1(x: Tuple[Tensor, Tensor]) -> Tensor:
return x[0] + x[1]
@torch.jit.script
def func2(x: List[Tensor]) -> Tensor:
return x[0] + x[1]
a = torch.randn(5)
b = torch.randn(5)
self.checkTrace(func1, ((a, b),))
self.checkTrace(func2, ((a, b),))
@torch.jit.script
def func3(x: Tensor, method: str = 'bilinear', align_corners: bool = True) -> Tensor:
hw = x.shape[2:4]
return F.interpolate(x, hw, mode=method, align_corners=align_corners)
inp = torch.rand(1, 3, 6, 6)
self.checkTrace(func3, (inp,))
@torch.jit.script
def func4(x: Tensor, a: List[Optional[str]]) -> Tensor:
if len(a) == 2:
return x + 2
else:
return x
def test_trace_mixed_by_script_with_dict_output(self):
@torch.jit.script
def return_dict(input: torch.Tensor) -> Dict[str, torch.Tensor]:
return {"foo" : input + 1}
class TraceModule(torch.nn.Module):
def forward(self, input):
dict = return_dict(input)
return dict["foo"] + dict["foo"]
x = torch.ones(1)
tm = torch.jit.trace(TraceModule(), x)
self.assertEqual(tm(x), x + 1 + x + 1)
def test_trace_of_script(self):
@torch.jit.script
def foo(a, c):
b = 0.0
if bool(a == 0.0):
b = 1.0
return b + c
a = torch.ones(1, dtype=torch.float)
@_trace(torch.zeros(1, dtype=torch.float))
def use(b):
return foo(b - 1.0, a) + 1.0
# test that we propagated shapes through the function
self.assertTrue("Dynamic" not in str(use.graph))
self.assertEqual(3, use(torch.ones(1, dtype=torch.float)))
self.assertEqual(2, use(torch.zeros(1, dtype=torch.float)))
def test_trace_with_size(self):
@_trace(torch.zeros(1, 1))
def foo(x):
return x + 1
@torch.jit.script
def bar(x):
y = int(foo(x))
if 1 == 1:
y = 7
return y + 1
self.assertEqual(8, bar(torch.ones(1, 1)))
def test_tracing_slicing(self):
@_trace(torch.zeros(10))
def foo_trace(x):
return x[-5:-3]
@torch.jit.script
def foo_script(x):
return x[-5:-3]
def foo(x):
return x[-5:-3]
a = torch.arange(0, 8)
b = torch.arange(0, 20)
self.assertEqual(foo_trace(a), foo_script(a))
self.assertEqual(foo_trace(a), foo(a))
self.assertNotEqual(foo_trace(a), foo_trace(b))
def test_tracing_indexing(self):
@_trace(torch.zeros(10))
def foo_trace(x):
return x[-2]
@torch.jit.script
def foo_script(x):
return x[-2]
def foo(x):
return x[-2]
a = torch.arange(0, 8)
b = torch.arange(0, 20)
self.assertEqual(foo_script(a), foo_trace(a))
self.assertEqual(foo_trace(a), foo(a))
self.assertNotEqual(foo_trace(a), foo_trace(b))
def test_trace_hierarchy(self):
# Test that we preserve the module hierarchy for a ScriptModule
# submodule during tracing
class AnotherScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(AnotherScriptMod, self).__init__()
self.param = torch.nn.Parameter(torch.rand(1, 2, 3))
@torch.jit.script_method
def bar(self):
return torch.zeros(4, 5)
class SomeScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(SomeScriptMod, self).__init__()
self.asm = AnotherScriptMod()
@torch.jit.script_method
def foo(self):
return torch.zeros(3, 4)
@torch.jit.script_method
def bar(self):
return torch.zeros(4, 3)
class TraceMe(torch.nn.Module):
def __init__(self):
super(TraceMe, self).__init__()
self.ssm = SomeScriptMod()
def forward(self, x):
return self.ssm.bar() + x
orig = TraceMe()
traced = torch.jit.trace(orig, (torch.rand(4, 3),))
# For each of these checks, verify that *BOTH* the underlying
# _C.ScriptModule object and the Python object that wraps it
# have the expected method/param.
self.assertTrue(traced.ssm._c._has_method('foo'))
self.assertTrue(hasattr(traced.ssm, 'foo'))
imported = self.getExportImportCopy(traced)
self.assertTrue(imported.ssm._c._has_method('foo'))
self.assertTrue(hasattr(imported.ssm, 'foo'))
self.assertTrue(imported.ssm.asm._c._has_method('bar'))
self.assertTrue(hasattr(imported.ssm.asm, 'bar'))
self.assertTrue(hasattr(imported.ssm.asm, 'param'))
def test_trace_parameter(self):
class Param(nn.Module):
def __init__(self):
super(Param, self).__init__()
self.register_parameter("bias", nn.Parameter(torch.empty(4, 4)))
def forward(self, x):
return x
class M3(torch.jit.ScriptModule):
def __init__(self, model):
super(M3, self).__init__()
self.traced = torch.jit.trace(model, (torch.rand(3, 3)))
@torch.jit.script_method
def forward(self, x):
return self.traced(x)
class M2(nn.Module):
def __init__(self, model):
super(M2, self).__init__()
self.module = M3(model)
def forward(self, x):
return self.module(x)
class M1(torch.jit.ScriptModule):
def __init__(self, model):
super(M1, self).__init__()
self.traced = torch.jit.trace(M2(model), (torch.rand(3, 3)))
@torch.jit.script_method
def forward(self, x):
return self.traced(x)
with torch.jit.optimized_execution(False):
module = M1(Param())
f = io.BytesIO()
torch.jit.save(module, f)
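# Hedged sketch (illustrative only): the BytesIO buffer written above can be
# rewound and loaded back with torch.jit.load to round-trip the module.
def _example_save_load_roundtrip(module):
    buf = io.BytesIO()
    torch.jit.save(module, buf)
    buf.seek(0)
    return torch.jit.load(buf)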
@_tmp_donotuse_dont_inline_everything
def test_call_script_fn_from_traced_module(self):
@torch.jit.script
def scripted_fn(x):
return torch.neg(x)
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
def forward(self, x):
return scripted_fn(torch.mm(x, self.param))
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
FileCheck().check("aten::mm").check("name=\"scripted_fn\"").check("prim::CallFunction").run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_script_module_from_traced_module(self):
class ScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(ScriptMod, self).__init__()
self.param_foo = torch.nn.Parameter(torch.rand(5, 7))
@torch.jit.script_method
def forward(self, x):
return torch.mm(x, self.param_foo)
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 5))
self.mod = ScriptMod()
def forward(self, x):
return self.mod(torch.mm(x, self.param)) + 1.0
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
FileCheck().check("aten::mm").check("prim::CallMethod").check_same("forward").check("aten::add").run(str(tm.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_traced_fn_from_script_fn(self):
@_trace(torch.rand(3, 4))
def traced_fn(x):
return torch.neg(x)
@torch.jit.script
def script_fn(x):
return traced_fn(x) + 1
FileCheck().check("prim::CallFunction").check("aten::add").run(str(script_fn.graph))
def test_call_traced_mod_from_script_fn(self):
with self.assertRaisesRegex(RuntimeError, "Cannot call a ScriptModule that is not a submodule of the caller"):
class TracedModule(torch.nn.Module):
def __init__(self):
super(TracedModule, self).__init__()
def forward(self, x):
return torch.mm(x, torch.zeros(4, 3))
tm = torch.jit.trace(TracedModule(), torch.rand(3, 4))
@torch.jit.script
def script_fn(x):
return tm(x) + 1
@_tmp_donotuse_dont_inline_everything
def test_call_tracing_fn_from_script_module(self):
@_trace(torch.rand(3, 3))
def traced_fn(x):
return torch.neg(x)
class ScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(ScriptMod, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 3))
@torch.jit.script_method
def forward(self, x):
return traced_fn(torch.mm(x, self.param))
sm = ScriptMod()
FileCheck().check("aten::mm").check("prim::CallFunction").run(str(sm.forward.graph))
@_tmp_donotuse_dont_inline_everything
def test_call_tracing_mod_from_script_module(self):
class TracedMod(torch.nn.Module):
def __init__(self):
super(TracedMod, self).__init__()
self.param = torch.nn.Parameter(torch.rand(3, 5))
def forward(self, x):
return torch.mm(x, self.param)
class ScriptMod(torch.jit.ScriptModule):
def __init__(self):
super(ScriptMod, self).__init__()
self.param = torch.nn.Parameter(torch.rand(4, 3))
self.tm = torch.jit.trace(TracedMod(), torch.rand(3, 3))
@torch.jit.script_method
def forward(self, x):
return self.tm(torch.mm(x, self.param))
sm = ScriptMod()
FileCheck().check("aten::mm").check("prim::CallMethod").run(str(sm.graph))
def test_script_inline_trace_multiple_args(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
def forward(self, input, input2):
return input + input2
class M2(torch.jit.ScriptModule):
def __init__(self):
super(M2, self).__init__()
self.m = torch.jit.trace(M(), (torch.zeros(4, 3), torch.zeros(4, 3)))
@torch.jit.script_method
def forward(self, inp):
return self.m(inp, inp)
with torch.jit.optimized_execution(False):
m2 = M2()
m2(torch.zeros(4, 3))
def test_trace_dict_mix_script(self):
class testB(torch.nn.Module):
def __init__(self):
super(testB, self).__init__()
self.linear = torch.nn.Linear(2, 2)
def forward(self, feature_map: Dict[str, List[Tensor]]) -> Tensor:
output = []
for i, j in feature_map.items():
output.append(self.linear(j[0]))
return torch.stack(output)
class testA(torch.nn.Module):
def __init__(self):
super(testA, self).__init__()
self.b = torch.jit.script(testB())
def forward(self, input_map: Dict[str, List[Tensor]]) -> Tensor:
feature_map = {}
for i, j in input_map.items():
feature_map[i] = [j[0]]
return self.b(feature_map)
input_map = {"1" : [torch.rand(2, 2), torch.rand(2, 2)], "3" : [torch.rand(2, 2), torch.rand(2, 2)]}
model = testA()
traced_model = torch.jit.trace(model, input_map)
new_input_map = {"1" : [torch.rand(2, 2), torch.randn(2, 2)], "3" : [torch.rand(2, 2), torch.rand(2, 2)]}
self.assertEqual(model(new_input_map), traced_model(new_input_map))
def test_trace_script_returning_complex_dict(self):
"""Tracing over a script function returning a dictionary should work.
The dictionary should be able to contain other containers (like tuples) recursively.
"""
class ReturnsDict(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(
self, id_score_list: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]:
# do some random operations and then return a dict of the same structure
v = id_score_list["1000"]
idx_keys = v[1] - 1500000
weights = v[2]
result = {
"1000": (v[0], idx_keys, weights)
}
return result
class ChecksDict(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, input: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]):
v = input["1000"]
return v[1] + 1
class TestModule(torch.nn.Module):
def __init__(self, checks_dict, returns_dict):
super().__init__()
self.checks_dict = checks_dict
self.returns_dict = returns_dict
def forward(self, input: Dict[str, Tuple[torch.Tensor, torch.Tensor, torch.Tensor]]):
foo = self.returns_dict(input)
return self.checks_dict(foo)
input1 = {
"1000": (
torch.tensor([0]),
torch.tensor([], dtype=torch.int64),
torch.tensor([])
)
}
input2 = {
"1000": (
torch.tensor([0]),
torch.tensor([1500000, 1500004], dtype=torch.int64),
torch.tensor([2.0, 3.0])
)
}
checks_dict = torch.jit.script(ChecksDict())
returns_dict = torch.jit.script(ReturnsDict())
eager_module = TestModule(checks_dict, returns_dict)
traced_module = torch.jit.trace(eager_module, input1)
self.assertEqual(traced_module(input1), eager_module(input1))
self.assertEqual(traced_module(input2), eager_module(input2))
def test_trace_returning_dict_with_tensor_tuples(self):
"""Tracing over a module returning a dictionary whose values are tuples of tensors
should work.
"""
class ReturnsDict(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(
self, k: torch.Tensor, v: torch.Tensor
) -> Dict[str, Tuple[torch.Tensor, torch.Tensor]]:
x = 2 * k
y = 3 * v
result = {
"imakey": (x, y)
}
return result
class ReturnsBadDict(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(
self, k: torch.Tensor, v: torch.Tensor
) -> Dict[str, Tuple[torch.Tensor, float]]:
x = 2 * k
result = {
"imakey": (x, 1)
}
return result
mod = ReturnsDict()
traced_module = torch.jit.trace(mod, [torch.ones(1), torch.ones(1)], strict=False)
out = traced_module(torch.ones(1), torch.ones(1))
expected = {
"imakey": (torch.tensor([2.]), torch.tensor([3.]))
}
self.assertEqual(out, expected)
with self.assertRaisesRegex(RuntimeError, "cannot be understood by the tracer, only outputs matching"):
mod = ReturnsBadDict()
traced_module = torch.jit.trace(mod, [torch.ones(1), torch.ones(1)], strict=False)
def test_trace_linear(self):
m = torch.nn.Linear(20, 20)
inp = torch.rand([20, 20])
self.checkTrace(m, (inp,))
g = torch.jit.trace(m, (inp,)).graph
FileCheck().check("aten::linear").run(g)
def test_traced_module_implements_interface(self):
@torch.jit.interface
class TestModuleInterface(nn.Module):
def forward(self, first_arg: torch.Tensor, second_arg: torch.Tensor) -> torch.Tensor:
pass
make_global(TestModuleInterface)
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.conv = nn.Conv2d(1, 1, 3)
def forward(self, first_arg: torch.Tensor, second_arg: torch.Tensor) -> torch.Tensor:
return self.conv(first_arg) + second_arg
def fn_takes_interface(x: TestModuleInterface):
ones = torch.ones(1, 1, 3, 3)
return x.forward(ones, ones)
scripted_test_module = torch.jit.script(TestModule())
self.checkScript(fn_takes_interface, (scripted_test_module,))
def test_traced_module_contains_scripted_interface_types(self):
class LeafModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.weight = torch.nn.Parameter(torch.rand(19))
def forward(self, input: torch.Tensor):
return input + self.weight
class LowerModuleImpl(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.leaf = LeafModule()
def forward(self, input: torch.Tensor) -> torch.Tensor:
return self.leaf(input)
@torch.jit.interface
class LowerModuleInterface(torch.nn.Module):
def forward(self, input: torch.Tensor) -> torch.Tensor:
pass
class MiddleModule(torch.nn.Module):
lower: LowerModuleInterface
def __init__(self, feature_processor_modules=None):
super().__init__()
self.lower = LowerModuleImpl()
def forward(self, input):
return self.lower(input)
class WrapperModule(torch.nn.Module):
def __init__(self, m):
super().__init__()
self.middle = m
def forward(self, input):
return self.middle(input)
class TopModule(torch.nn.Module):
def __init__(self):
super().__init__()
m = MiddleModule()
m = torch.jit.script(m)
self.sub1 = m
self.sub2 = WrapperModule(m)
def forward(self, input: torch.Tensor):
return self.sub1(input) + self.sub2(input)
top = TopModule()
top_example_input = torch.ones(1)
torch.jit.trace(top, top_example_input)
|
pytorch-master
|
test/jit/test_tracer.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import unittest
import torch
import torch.nn as nn
import torch.nn.parallel as dp
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA_MULTI_GPU
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestDataParallel(JitTestCase):
class Mpy(torch.nn.Module):
def __init__(self):
super(TestDataParallel.Mpy, self).__init__()
self.m = nn.Sequential(nn.Linear(2, 2), nn.BatchNorm1d(2),
nn.ReLU(), nn.Linear(2, 2))
@torch.jit.ignore
def forward(self, input):
return self.m(input)
class Mpy1(torch.nn.Module):
def __init__(self, block):
super(TestDataParallel.Mpy1, self).__init__()
self.m = block
@torch.jit.ignore
def forward(self, input):
return self.m.forward(input)
class Mpy2(torch.nn.Module):
def __init__(self, block1, block2):
super(TestDataParallel.Mpy2, self).__init__()
self.m1 = block1
self.m2 = block2
@torch.jit.ignore
def forward(self, input):
x = self.m1.forward(input)
return self.m2(x)
class Msm(torch.jit.ScriptModule):
__constants__ = ['m']
def __init__(self):
super(TestDataParallel.Msm, self).__init__()
self.m = nn.Sequential(nn.Linear(2, 2), nn.BatchNorm1d(2),
nn.ReLU(), nn.Linear(2, 2))
@torch.jit.script_method
def forward(self, input):
return self.m(input)
class Msm1(torch.jit.ScriptModule):
def __init__(self, block):
super(TestDataParallel.Msm1, self).__init__()
self.block = block
@torch.jit.script_method
def forward(self, input):
x = self.block(input)
return x
def check_replicas(self, module, replicas, input_shape=(2, 2)):
input = torch.randn(input_shape).cuda()
expected_output = module(input).data
for i, replica in enumerate(replicas):
for p in replica.parameters():
self.assertEqual(p.get_device(), i)
for b in replica.buffers():
self.assertEqual(b.get_device(), i)
replica_input = input.cuda(i)
self.assertEqual(replica(replica_input).data, expected_output)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_python_submodule_script(self):
module = self.Mpy1(self.Msm()).cuda()
replicas = dp.replicate(module, {0, 1})
self.check_replicas(module, replicas)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_shared_module(self):
s = self.Msm()
p1 = self.Mpy1(s)
module = self.Mpy2(p1, s).cuda()
replicas = dp.replicate(module, {0, 1})
self.check_replicas(module, replicas)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_traced_module(self):
module = torch.jit.trace(self.Mpy1(self.Mpy()), torch.ones(2, 2)).cuda()
replicas = dp.replicate(module, {0, 1})
self.check_replicas(module, replicas)
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_tensor_sharing(self):
module = self.Msm1(self.Msm()).cuda()
replica = dp.replicate(module, {0, 1})
def assert_share_data(t1, t2):
# Only checks that they point to the same memory on the same device.
if t1.device != t2.device:
return False
if t1.storage().data_ptr() != t2.storage().data_ptr():
return False
return True
for p1, p2 in zip(module.parameters(), replica[0].parameters()):
self.assertTrue(assert_share_data(p1, p2))
for p1, p2 in zip(module.buffers(), replica[0].buffers()):
self.assertTrue(assert_share_data(p1, p2))
for p1, p2 in zip(module.parameters(), replica[1].parameters()):
self.assertFalse(assert_share_data(p1, p2))
for p1, p2 in zip(module.buffers(), replica[1].buffers()):
self.assertFalse(assert_share_data(p1, p2))
@unittest.skipIf(not RUN_CUDA_MULTI_GPU, "multi-GPU not supported")
def test_tensor_sharing_with_forward(self):
module = self.Msm1(self.Msm()).cuda()
replica = dp.replicate(module, {0, 1})
x = torch.ones(2, 2, requires_grad=True).cuda()
first_forward = module(x)
first_forward.sum().backward()
with torch.no_grad():
for p in module.parameters():
# Use .data here to avoid version counter bump.
# The graph created by the following forward will be wrong, but
# we never backprop through it, so that's fine.
p.data -= 1. * p.grad
second_forward = module(x)
# replica which is on the same GPU has a shallow copy of the original
# params and buffers
r0_forward = replica[0](x)
self.assertEqual(second_forward, r0_forward)
# replica which is on a different GPU has a deep copy of the original
# params and buffers
x1 = torch.ones(2, 2, requires_grad=True).cuda(device=1)
r1_forward = replica[1](x1)
self.assertEqual(first_forward, r1_forward)
|
pytorch-master
|
test/jit/test_data_parallel.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import types
import typing
import typing_extensions
from typing import List, Dict, Optional, Tuple
import torch
import torch.nn as nn
from torch import Tensor
from torch.testing import FileCheck
from collections import OrderedDict
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, _tmp_donotuse_dont_inline_everything
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestRecursiveScript(JitTestCase):
def test_inferred_nonetype(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.x = None
def forward(self):
assert self.x is None
m = torch.jit.script(M())
self.checkModule(M(), ())
def test_script_function_attribute(self):
@torch.jit.script
def fn1(x):
return x + x
@torch.jit.script
def fn2(x):
return x - x
class M(torch.nn.Module):
def __init__(self, fn):
super(M, self).__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
fn1_mod = M(fn1)
fn2_mod = M(fn2)
self.checkModule(fn1_mod, (torch.randn(2, 2),))
self.checkModule(fn2_mod, (torch.randn(2, 2),))
def test_python_function_attribute(self):
class M(torch.nn.Module):
def __init__(self, fn):
super(M, self).__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
mod = M(torch.sigmoid)
self.checkModule(mod, (torch.randn(2, 2),))
def test_failed_function_compilation(self):
def fn(x):
return i_dont_exist
class M(torch.nn.Module):
def __init__(self, fn):
super(M, self).__init__()
self.fn = fn
def forward(self, x):
return self.fn(x)
m = M(fn)
with self.assertRaisesRegexWithHighlight(RuntimeError, "failed to compile", "i_dont_exist"):
torch.jit.script(m)
def test_init_error(self):
class M(nn.Module):
def __init__(self):
self.x = 2
def forward(self):
pass
with self.assertRaisesRegex(RuntimeError, "has not been initialized"):
torch.jit.script(M())
def test_script_after_eval(self):
class M(nn.Module):
def forward(self):
if self.training:
return 2
else:
return 0
m = M()
sm1 = torch.jit.script(m)
m.eval()
sm2 = torch.jit.script(m)
# m is in eval mode, training should be False
self.assertFalse(m.training)
# sm1 was created while m had training = True
self.assertTrue(sm1.training)
self.assertEqual(sm1.training, sm1._c.getattr('training'))
self.assertEqual(sm1(), 2)
# sm2 was created after m was eval'ed
self.assertFalse(sm2.training)
self.assertEqual(sm2.training, sm2._c.getattr('training'))
self.assertEqual(sm2(), 0)
def test_module_name(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.x = 2
def forward(self, t):
return t + self.x
m = torch.jit.script(MyModule())
FileCheck().check("MyModule").run(m.graph)
def test_repeated_error_stack(self):
def d(x):
return "a" - 2
def c(x):
return d(x)
def b(x):
return c(x)
def a(x):
return b(x)
try:
torch.jit.script(a)
except Exception as e:
FileCheck().check_count("is being compiled", 2).run(str(e))
try:
torch.jit.script(a)
except Exception as e:
# Make sure that no entries are left over from the previous failure
FileCheck().check_count("is being compiled", 2).run(str(e))
def test_constants_with_final(self):
class M1(torch.nn.Module):
x : torch.jit.Final[int]
def __init__(self):
super().__init__()
self.x = 2
def forward(self, t):
return t + self.x
self.checkModule(M1(), (torch.randn(2, 2),))
class M2(torch.nn.Module):
x : typing_extensions.Final[int]
def __init__(self):
super().__init__()
self.x = 2
def forward(self, t):
return t + self.x
self.checkModule(M2(), (torch.randn(2, 2),))
if sys.version_info[:2] >= (3, 8):
class M3(torch.nn.Module):
x : typing.Final[int]
def __init__(self):
super().__init__()
self.x = 2
def forward(self, t):
return t + self.x
self.checkModule(M3(), (torch.randn(2, 2),))
def test_ignore_class(self):
@torch.jit.ignore
class MyScriptClass(object):
def unscriptable(self):
return "a" + 200
class TestModule(torch.nn.Module):
def __init__(self):
super(TestModule, self).__init__()
def forward(self, x):
return MyScriptClass()
with self.assertRaisesRegexWithHighlight(torch.jit.frontend.FrontendError, "Cannot instantiate class", "MyScriptClass"):
t = torch.jit.script(TestModule())
def test_method_call(self):
class M(nn.Module):
def test(self, x):
return x
def forward(self, z):
y = self.test(z)
return z + 20 + y
self.checkModule(M(), (torch.randn(2, 2),))
def test_module_repr(self):
class Submodule(nn.Module):
def forward(self, x):
return x
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.conv = nn.Conv2d(10, 10, 3)
self.lin = nn.Linear(10, 10)
self.sub = Submodule()
def forward(self, x):
return self.lin(x) + self.sub(x) + self.conv(x)
m = torch.jit.script(MyModule())
with self.capture_stdout() as out:
print(m)
f = FileCheck()
f.check('MyModule')
f.check('Conv2d')
f.check('Linear')
f.check('Submodule')
f.run(out[0])
self.assertEqual(m.original_name, 'MyModule')
def test_dir(self):
def test_module_dir(mod):
dir_set = dir(mod)
scripted_mod = torch.jit.script(mod)
dir_scripted = set(dir(scripted_mod))
# attributes that are not currently copied over to the scripted module
ignore_set = ["training", "__delitem__", "__setitem__", "clear", "items",
"keys", "pop", "update", "values"]
for attr in dir_set:
if attr in ignore_set:
continue
self.assertTrue(attr in dir_scripted, attr)
class MyModule(nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.conv = nn.Conv2d(10, 10, 3)
self.lin = nn.Linear(10, 10)
def forward(self, x):
return self.lin(x) + self.conv(x)
test_module_dir(MyModule())
# test custom __dir__ for containers
conv = nn.Conv2d(10, 10, 3)
linear = nn.Linear(10, 10)
test_module_dir(nn.Sequential(conv, linear))
test_module_dir(nn.ModuleDict(OrderedDict([("conv", conv), ("linear", linear)])))
def test_class_compile(self):
def other_fn(a: int, b: Tensor) -> Tensor:
return a * b
class B(object):
def __init__(self, x):
self.x = 2
def helper(self, a):
return self.x + a + other_fn(self.x, a)
class N(torch.nn.Module):
def __init__(self):
super(N, self).__init__()
def forward(self, x):
b = B(x)
return b.helper(x)
self.checkModule(N(), (torch.randn(2, 2),))
def test_error_stack(self):
def d(x: int) -> int:
return x + 10
def c(x):
return d("hello") + d(x)
def b(x):
return c(x)
def a(x):
return b(x)
try:
scripted = torch.jit.script(a)
except RuntimeError as e:
checker = FileCheck()
checker.check("Expected a value of type 'int'")
checker.check("def c(x)")
checker.check("def b(x)")
checker.check("def a(x)")
checker.run(str(e))
def test_error_stack_module(self):
def d(x: int) -> int:
return x + 10
def c(x):
return d("hello") + d(x)
def b(x):
return c(x)
class Submodule(torch.nn.Module):
def __init__(self):
super(Submodule, self).__init__()
def forward(self, x):
return b(x)
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.submodule = Submodule()
def some_method(self, y):
return y + self.submodule(y)
def forward(self, x):
return self.some_method(x)
try:
scripted = torch.jit.script(M())
except RuntimeError as e:
checker = FileCheck()
checker.check("Expected a value of type 'int'")
checker.check("'c' is being compiled since it was called from 'b'")
checker.check("'b' is being compiled since it was called from")
checker.run(str(e))
@_tmp_donotuse_dont_inline_everything
def test_script_basic(self):
def a_python_fn(a, b, c):
return a + b + c
@torch.jit.script
def a_script_fn(d, e, f):
return a_python_fn(d, e, f)
graph = str(a_script_fn.graph)
FileCheck().check("prim::CallFunction").run(graph)
FileCheck().check_not("^a_python_fn").run(graph)
t = torch.ones(2, 2)
self.assertEqual(a_script_fn(t, t, t), t + t + t)
def test_error_stack_class(self):
class X(object):
def bad_fn(self):
import pdb # noqa: F401
def fn(x) -> X:
return X(10)
try:
torch.jit.script(fn)
except Exception as e:
checker = FileCheck()
checker.check("import statements")
checker.check("is being compiled since it was called from")
checker.run(str(e))
def test_error_stack_annotation(self):
class X(object):
def bad_fn(self):
import pdb # noqa: F401
def fn(x) -> X:
return X(10)
try:
torch.jit.script(fn)
except Exception as e:
checker = FileCheck()
checker.check("import statements")
checker.check("is being compiled since it was called from")
checker.check("-> X")
checker.run(str(e))
def test_module_basic(self):
class Other(torch.nn.Module):
__constants__ = ['x']
def __init__(self, x):
super(Other, self).__init__()
self.x = x
self.param = torch.nn.Parameter(torch.ones(2, 2))
def some_unscriptable_method(self):
a = 2
a = [2]
return a
def forward(self, t):
return t + self.x + self.param
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.other = Other(200)
def forward(self, t):
return self.other(t) * 2
self.checkModule(M(), (torch.ones(2, 2),))
def test_module_function_export(self):
class Other(torch.nn.Module):
__constants__ = ['x']
def __init__(self, x):
super(Other, self).__init__()
self.x = x
self.param = torch.nn.Parameter(torch.ones(2, 2))
@torch.jit.export
def some_entry_point(self, y):
return y + 20
def forward(self, t):
return t + self.x + self.param
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.other = Other(200)
def forward(self, t):
return self.other(t) * 2
self.checkModule(M(), (torch.ones(2, 2),))
def test_iterable_modules(self):
class Inner(torch.nn.Module):
def forward(self, x):
return x + 10
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.sequential = nn.Sequential(
Inner(),
Inner(),
nn.Sequential(Inner(), Inner())
)
self.module_list = nn.ModuleList([Inner(), Inner()])
def forward(self, x):
for mod in self.module_list:
x += mod(x)
x += self.sequential(x)
return x
self.checkModule(M(), (torch.randn(5, 5),))
def test_prepare_scriptable_basic(self):
class SeluButReluWhenScripted(torch.nn.SELU):
def __prepare_scriptable__(self):
return nn.ReLU()
t = torch.randn(5, 5)
m = SeluButReluWhenScripted()
sm = torch.jit.script(m)
eager_out = m(t)
script_out = sm(t)
self.assertNotEqual(eager_out, script_out)
def test_prepare_scriptable_iterable_modules(self):
class SeluButReluWhenScripted(torch.nn.SELU):
def __prepare_scriptable__(self):
return nn.ReLU()
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
shared = SeluButReluWhenScripted()
self.sequential = nn.Sequential(
SeluButReluWhenScripted(),
SeluButReluWhenScripted(),
nn.Sequential(SeluButReluWhenScripted(), shared, SeluButReluWhenScripted()),
shared,
)
self.module_list = nn.ModuleList([SeluButReluWhenScripted(),
shared,
SeluButReluWhenScripted()])
def forward(self, x):
for mod in self.module_list:
x += mod(x)
x += self.sequential(x)
return x
t = torch.randn(5, 5)
m = M()
eager_out = m(t.clone())
sm = torch.jit.script(m)
script_out = sm(t.clone())
self.assertNotEqual(eager_out, script_out)
def test_prepare_scriptable_cycle(self):
t = torch.randn(5, 5)
c = torch.nn.Module()
p = torch.nn.Module()
c.__dict__["_p"] = p
p.__dict__["_c"] = c
sm = torch.jit.script(p)
def test_attributes(self):
@torch.jit.script
class Inner2(object):
def __init__(self):
self.b = "a string"
@torch.jit.script
class Foo(object):
def __init__(self):
self.a = 4
self.inner = Inner2()
@torch.jit.script
class SFoo(object):
def __init__(self):
self.a = 4
self.inner = Inner2()
def __setstate__(self, obj: Tuple[int, Inner2]) -> None:
a, inner = obj
self.a = a
self.inner = inner
def __getstate__(self):
return (self.a, self.inner)
untyped_values = (
('my_dict', {"I": "am", "a test": "test"}),
('my_float', 2.3),
('my_int', 99),
('my_bool', False),
('my_tuple', (1, 2, 3, 4)),
('my_list', [(1, 2), (3, 4)]),
# ('my_tensor', torch.randn(2, 2)),
('my_int_list', [1, 2, 3, 4]),
# ('my_tensor_list', [torch.ones(2, 2) + i for i in range(4)]),
('my_bool_list', [True, True, False, True]),
('my_float_list', [1., 2., 3., 4.]),
('my_str_list', ['hello', 'bye']),
)
typed_values = (
('my_empty_list', []),
('my_empty_dict', {}),
('my_none', None),
('my_object', Foo()),
('my_object2', SFoo()),
)
class M(torch.nn.Module):
# TODO: re-enable this once this test is in a Python 3-only syntax
# file
# my_empty_list : List[int]
# my_empty_dict : Dict[str, int]
# my_none : Optional[int]
def __init__(self):
super(M, self).__init__()
def forward(self, x):
return (
self.my_dict,
self.my_float,
self.my_int,
self.my_bool,
# self.my_tensor,
self.my_int_list,
# self.my_tensor_list,
self.my_bool_list,
self.my_float_list,
self.my_str_list,
self.my_empty_list,
self.my_empty_dict,
self.my_none,
self.my_object.a,
self.my_object.inner.b,
self.my_object.a,
self.my_object2.inner.b,
)
# TODO: as a followup, fix this test
# We can't define class attributes like we should be doing:
# class M(torch.nn.Module):
# my_empty_list : List[int]
# my_empty_dict : Dict[str, int]
# my_none : Optional[int]
# my_out_of_line_attribute: List[int] = [1, 2, 3]
# since there's no string frontend for Python classes (so the `define`)
# trick doesn't work.
M.__annotations__ = {
'my_empty_list': List[int],
'my_empty_dict': Dict[str, int],
'my_none': Optional[int],
'my_object': Foo,
'my_object2': SFoo,
}
m = M()
for name, value in untyped_values + typed_values:
setattr(m, name, value)
self.checkModule(m, (torch.randn(5, 5),))
def test_function_attribute_in_submodule(self):
class N(nn.Module):
def __init__(self, norm):
super(N, self).__init__()
self.activation = torch.nn.functional.relu
self.norm = norm
def forward(self, src):
output = src
output = self.norm(output)
return output
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
encoder_norm = nn.ReLU()
self.encoder = N(encoder_norm)
def forward(self, x):
return self.encoder(x)
m = M()
self.checkModule(m, (torch.randn(5, 5), ))
def test_inner_traced_module(self):
class Dummy(nn.Module):
def forward(self, x):
return x
class Model(nn.Module):
def __init__(self, dummies):
super(Model, self).__init__()
self._dummies = dummies
def forward(self, x):
out = []
for dummy in self._dummies:
out.append(dummy(x))
return out
dummy = torch.jit.trace(Dummy(), torch.randn(1, 2))
dummies = nn.ModuleList([dummy])
model = Model(dummies)
self.checkModule(model, (torch.rand(5, 5), ))
def test_script_loaded_module(self):
"""
Test that we can hold a loaded ScriptModule as a submodule.
"""
class Dummy(nn.Module):
def forward(self, x):
return x
dummy = torch.jit.script(Dummy())
dummy = self.getExportImportCopy(dummy)
class ContainsLoaded(torch.nn.Module):
def __init__(self):
super(ContainsLoaded, self).__init__()
self.encoder = dummy
def forward(self, input):
return self.encoder(input)
self.checkModule(ContainsLoaded(), (torch.rand(2, 3), ))
def test_optional_module(self):
class Dummy(nn.Module):
def __init__(self):
super(Dummy, self).__init__()
self.foo = nn.Linear(2, 2)
def forward(self, x):
if self.foo is not None:
return self.foo(x)
return x
mod = Dummy()
self.checkModule(mod, (torch.rand(2, 2),))
mod.foo = None
self.checkModule(mod, (torch.rand(2, 2),))
def test_override_instance_method_ignore(self):
class M(torch.nn.Module):
@torch.jit.ignore
def i_am_ignored(self):
return "old"
m = M()
# Override the ignored method by binding a new method to this instance.
@torch.jit.ignore
def i_am_ignored(self):
return "new"
m.i_am_ignored = types.MethodType(i_am_ignored, m)
self.assertEqual(m.i_am_ignored(), "new")
# ScriptModule should correctly reflect the override.
s = torch.jit.script(m)
self.assertEqual(s.i_am_ignored(), "new")
|
pytorch-master
|
test/jit/test_recursive_script.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch import nn
import torch.nn.utils.parametrize as parametrize
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestParametrization(JitTestCase):
# Define some parametrization
class Symmetric(nn.Module):
def forward(self, X):
return X.triu() + X.triu(1).mT
def test_traceable(self):
r"""Test the jit scripting and tracing of a parametrized model."""
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", self.Symmetric())
x = torch.randn(3, 5)
y = model(x)
# Check that tracing works. Because traced and eager modules cannot be
# compared directly, we compare their outputs (activations) instead.
traced_model = torch.jit.trace_module(model, {'forward': x})
y_hat = traced_model(x)
self.assertEqual(y, y_hat)
# Check traced model works with caching
with parametrize.cached():
y_hat = traced_model(x)
self.assertEqual(y, y_hat)
# Check the tracing throws an error when caching
with self.assertRaisesRegex(RuntimeError,
'Cannot trace a model while caching'):
with parametrize.cached():
traced_model = torch.jit.trace_module(model, {'forward': x})
def test_scriptable(self):
# TODO: Need to fix the scripting in parametrizations
# Currently, all the tests below will throw torch.jit.Error
model = nn.Linear(5, 5)
parametrize.register_parametrization(model, "weight", self.Symmetric())
x = torch.randn(3, 5)
y = model(x)
with self.assertRaises(torch.jit.Error):
# Check scripting works
scripted_model = torch.jit.script(model)
y_hat = scripted_model(x)
self.assertEqual(y, y_hat)
with parametrize.cached():
# Check scripted model works when caching
y_hat = scripted_model(x)
self.assertEqual(y, y_hat)
# Check the scripting process throws an error when caching
with self.assertRaisesRegex(RuntimeError, 'Caching is not implemented'):
scripted_model = torch.jit.trace_module(model)
|
pytorch-master
|
test/jit/test_parametrization.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import warnings
import torch
from typing import List, Dict, Optional
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestScriptModuleInstanceAttributeTypeAnnotation(JitTestCase):
# NB: There are no tests for `Tuple` or `NamedTuple` here. In fact,
# reassigning a non-empty Tuple to an attribute previously typed
# as containing an empty Tuple SHOULD fail. See note in `_check.py`
def test_annotated_falsy_base_type(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: int = 0
def forward(self, x: int):
self.x = x
return 1
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), (1,))
assert(len(w) == 0)
def test_annotated_nonempty_container(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: List[int] = [1, 2, 3]
def forward(self, x: List[int]):
self.x = x
return 1
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_empty_tensor(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.x: torch.Tensor = torch.empty(0)
def forward(self, x: torch.Tensor):
self.x = x
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), (torch.rand(2, 3),))
assert(len(w) == 0)
def test_annotated_with_jit_attribute(self):
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.x = torch.jit.Attribute([], List[int])
def forward(self, x: List[int]):
self.x = x
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_class_level_annotation_only(self):
class M(torch.nn.Module):
x: List[int]
def __init__(self):
super().__init__()
self.x = []
def forward(self, y: List[int]):
self.x = y
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_class_level_annotation_and_init_annotation(self):
class M(torch.nn.Module):
x: List[int]
def __init__(self):
super().__init__()
self.x: List[int] = []
def forward(self, y: List[int]):
self.x = y
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_class_level_jit_annotation(self):
class M(torch.nn.Module):
x: List[int]
def __init__(self):
super().__init__()
self.x: List[int] = torch.jit.annotate(List[int], [])
def forward(self, y: List[int]):
self.x = y
return self.x
with warnings.catch_warnings(record=True) as w:
self.checkModule(M(), ([1, 2, 3],))
assert(len(w) == 0)
def test_annotated_empty_list(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: List[int] = []
def forward(self, x: List[int]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Tried to set nonexistent attribute",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_empty_dict(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: Dict[str, int] = {}
def forward(self, x: Dict[str, int]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Tried to set nonexistent attribute",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_empty_optional(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x: Optional[str] = None
def forward(self, x: Optional[str]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Wrong type for attribute assignment",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_with_jit_empty_list(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = torch.jit.annotate(List[int], [])
def forward(self, x: List[int]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Tried to set nonexistent attribute",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_with_jit_empty_dict(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = torch.jit.annotate(Dict[str, int], {})
def forward(self, x: Dict[str, int]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Tried to set nonexistent attribute",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_with_jit_empty_optional(self):
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = torch.jit.annotate(Optional[str], None)
def forward(self, x: Optional[str]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Wrong type for attribute assignment",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
def test_annotated_with_torch_jit_import(self):
from torch import jit
class M(torch.nn.Module):
def __init__(self):
super().__init__()
self.x = jit.annotate(Optional[str], None)
def forward(self, x: Optional[str]):
self.x = x
return 1
with self.assertRaisesRegexWithHighlight(RuntimeError,
"Wrong type for attribute assignment",
"self.x = x"):
with self.assertWarnsRegex(UserWarning, "doesn't support "
"instance-level annotations on "
"empty non-base types"):
torch.jit.script(M())
|
pytorch-master
|
test/jit/test_scriptmod_ann.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing import FileCheck
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
class TestTensorMethods(JitTestCase):
def test_getitem(self):
def tensor_getitem(inp: torch.Tensor):
indices = torch.tensor([0, 2], dtype=torch.long)
return inp.__getitem__(indices)
inp = torch.rand(3, 4)
self.checkScript(tensor_getitem, (inp, ))
scripted = torch.jit.script(tensor_getitem)
FileCheck().check("aten::index").run(scripted.graph)
def test_getitem_invalid(self):
def tensor_getitem_invalid(inp: torch.Tensor):
return inp.__getitem__()
with self.assertRaisesRegexWithHighlight(
RuntimeError, "expected exactly 1 argument", "inp.__getitem__"):
torch.jit.script(tensor_getitem_invalid)
|
pytorch-master
|
test/jit/test_tensor_methods.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA, _inline_everything
from torch import nn
from torch.testing import FileCheck
from typing import Callable, List
import unittest
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestPeephole(JitTestCase):
def test_peephole_with_writes(self):
def test_write(x):
s = 0
s += x
s += x
return s
self.checkScript(test_write, (torch.ones(4, 4),))
def test_peephole_with_non_output_writes(self):
@torch.jit.ignore
def nomnom(x):
pass
def test_write(x):
t = torch.ones_like(x)
z = x.clone()
y = z + 0
z.add_(t)
# this makes sure z isn't blasted out of existence
# because it isn't returned or used in a side-effectful
# way
nomnom(z)
return y + y
a = torch.ones(4, 4)
j = self.checkScript(test_write, (a,))
def test_peephole_no_output_aliasing(self):
def test_peephole(x):
y = x + 0
return x, y
a = torch.ones(4, 4)
j = self.checkScript(test_peephole, (a,))
r1, r2 = j(a)
self.assertNotEqual(r1.data_ptr(), r2.data_ptr())
def test_peephole(self):
a = torch.tensor([0.4])
b = torch.tensor([0.7])
c = torch.tensor([0], dtype=torch.int32)
def f(x, y):
return x.type_as(y)
tf = torch.jit.trace(f, (a, b))
FileCheck().check("type_as").run(str(tf.graph))
self.run_pass('peephole', tf.graph)
FileCheck().check_not("type_as").run(str(tf.graph))
tf2 = torch.jit.trace(f, (a, c))
s = str(tf2.graph)
self.run_pass('peephole', tf2.graph)
self.assertEqual(s, str(tf2.graph))
def test_peephole_dynamic(self):
def f(x, y):
return x.type_as(y)
fn = torch.jit.script(f)
s = str(fn.graph)
torch._C._jit_pass_peephole(fn.graph)
self.assertEqual(s, str(fn.graph))
def test_peephole_list_ops(self):
@torch.jit.script
def foo(x, y, z):
return len([x, y, z])
self.run_pass('peephole', foo.graph)
FileCheck().check("value=3").check_next("return").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
for i in range(len(x)):
li.append(x)
return len([x, y, z])
self.run_pass('peephole', foo.graph)
FileCheck().check_not("aten::len").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
return li[1], li[-2]
FileCheck().check("aten::__getitem__").run(foo.graph)
self.run_pass('peephole', foo.graph)
FileCheck().check_not("aten::__getitem__").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
return li[-7]
self.run_pass('peephole', foo.graph)
FileCheck().check("aten::__getitem__").run(foo.graph)
@torch.jit.script
def foo(x, y, z):
li = [x, y, z]
for i in range(len(x)):
li.append(x)
return li[-2]
self.run_pass('peephole', foo.graph)
FileCheck().check("aten::__getitem__").run(foo.graph)
@unittest.skipIf(not RUN_CUDA, "cpp tests require CUDA")
def test_peephole_cuda(self):
a = torch.tensor([0.4], device='cpu')
b = torch.tensor([0.7], device='cuda')
c = torch.tensor([0.7], device='cuda')
def f(x, y):
return x.type_as(y)
trace = torch.jit.trace(f, (a, c))
s = str(trace.graph)
self.run_pass('peephole', trace.graph)
self.assertEqual(s, str(trace.graph))
trace = torch.jit.trace(f, (b, c))
self.run_pass('peephole', trace.graph)
self.run_pass('dce', trace.graph)
FileCheck().check_not("type_as").run(str(trace.graph))
@_inline_everything
def test_peephole_type_refinements(self):
def refine(x):
# type: (Optional[Tensor]) -> Tensor
return x if x is not None else torch.tensor(3)
@torch.jit.script
def test():
return refine(torch.tensor(4))
FileCheck().check("prim::unchecked_cast").run(test.graph)
self.run_pass('peephole', test.graph)
FileCheck().check_not("prim::unchecked_cast").run(test.graph)
# refinement not optimized out
def is_int_tensor(x):
scalar = x.item()
if isinstance(scalar, int):
return scalar + 3
else:
return 8
self.checkScript(is_int_tensor, (torch.tensor(2),))
self.checkScript(is_int_tensor, (torch.tensor(2.5),))
graph = torch.jit.script(is_int_tensor).graph
self.run_pass('peephole', graph)
FileCheck().check("prim::unchecked_cast").run(graph)
def test_short_circuit_optimization(self):
@torch.jit.script
def const_expressions(x):
# type: (int) -> Tuple[bool, bool]
return x == 1 and False, x == 1 or True
self.run_pass('constant_propagation', const_expressions.graph)
FileCheck().check_not("prim::If").check_not("aten::eq").run(const_expressions.graph)
self.assertEqual(const_expressions(1), (False, True))
@torch.jit.script
def redundant_expressions(x):
# type: (int) -> Tuple[bool, bool]
return x == 1 and True, x == 1 or False
self.run_pass('peephole', redundant_expressions.graph)
self.assertEqual(redundant_expressions(1), (True, True))
self.assertEqual(redundant_expressions(0), (False, False))
# `and True` / `or False` are removed from the graph
FileCheck().check("aten::eq").check_not("prim::If").run(redundant_expressions.graph)
def test_conv_dim_folding(self):
modules = [nn.Conv1d, nn.Conv2d, nn.Conv3d]
for mod in modules:
class ConvDim(torch.nn.Module):
def __init__(self):
super(ConvDim, self).__init__()
self.conv = mod(3, 32, kernel_size=3, stride=2, bias=False)
def forward(self, x):
x = self.conv(x)
return x.dim()
conv_dim = torch.jit.script(ConvDim())
self.run_pass("inline", conv_dim.graph)
self.run_pass("peephole", conv_dim.graph)
FileCheck().check_not("conv").check_not("dim").run(conv_dim.graph)
class ConvDimMutate(torch.nn.Module):
def __init__(self):
super(ConvDimMutate, self).__init__()
self.conv = mod(3, 32, kernel_size=3, stride=2, bias=False)
def forward(self, x):
x = self.conv(x)
x.resize_([4, 4])
return x.dim()
conv_dim = torch.jit.script(ConvDimMutate())
self.run_pass("inline", conv_dim.graph)
self.run_pass("peephole", conv_dim.graph)
FileCheck().check("conv").check("dim").run(conv_dim.graph)
def test_normalized_rsub(self):
a = torch.tensor([1, 2, 3])
b = torch.tensor([4, 5, 6])
def convertible_rsub(x, y):
return (x - y), torch.rsub(y, x)
self.checkScript(convertible_rsub, (a, b))
op_graph = torch.jit.script(convertible_rsub).graph
FileCheck().check_count("aten::sub", 2, exactly=True).run(op_graph)
FileCheck().check_count("aten::rsub", 0, exactly=True).run(op_graph)
def test_normalized_is_op(self):
def convertible_is_op(x: bool, y: bool):
return x is True, False is x, x is y
self.checkScript(convertible_is_op, (True, False))
op_graph = torch.jit.script(convertible_is_op).graph
FileCheck().check_count("aten::eq", 3, exactly=True).run(op_graph)
FileCheck().check_count("aten::__is__", 0, exactly=True).run(op_graph)
def test_normalized_isnot_op(self):
def convertible_isnot_op(x: bool, y: bool):
return x is not True, False is not x, x is not y
self.checkScript(convertible_isnot_op, (True, False))
op_graph = torch.jit.script(convertible_isnot_op).graph
FileCheck().check_count("aten::ne", 3, exactly=True).run(op_graph)
FileCheck().check_count("aten::__isnot__", 0, exactly=True).run(op_graph)
def test_peephole_list_len(self):
def run_peephole_and_check_const_value(graph, const_string):
torch._C._jit_pass_peephole_list_idioms(graph, refine_list_len=True)
self.run_pass("constant_propagation", graph)
FileCheck().check(const_string).check_next("return").run(graph)
def gen_li(inp_len: int):
return [0 for i in range(inp_len)]
@torch.jit.script
def foo(x: List[int], y: List[int]):
if len(x) != 4 or len(y) != 5:
raise Exception("")
return len(x) + len(y)
run_peephole_and_check_const_value(foo.graph, "value=9")
self.assertEqual(foo(gen_li(4), gen_li(5)), 9)
with self.assertRaises(Exception):
foo(2, 4)
@torch.jit.script
def foo(x: List[int], y: List[int]):
if len(x) == 4 and len(y) == 5:
pass
else:
raise Exception("hi")
return len(x) + len(y)
run_peephole_and_check_const_value(foo.graph, "value=9")
self.assertEqual(foo(gen_li(4), gen_li(5)), 9)
with self.assertRaises(Exception):
foo(2, 4)
@torch.jit.script
def foo(x: List[int], y: List[int], z: List[int]):
if len(x) != 4:
raise Exception("..")
else:
if len(y) != 8:
raise Exception("...")
else:
if len(z) == 3:
pass
else:
raise Exception("...")
return len(x) + len(y) * len(z)
run_peephole_and_check_const_value(foo.graph, "value=28")
self.assertEqual(foo(gen_li(4), gen_li(8), gen_li(3)), 28)
with self.assertRaises(Exception):
foo(1, 2, 3)
# refinement should persist in second len(x) call
@torch.jit.script
def foo(x: List[int], cond: bool):
if len(x) == 4:
if cond:
return len(x)
return 4
return 4
run_peephole_and_check_const_value(foo.graph, "value=4")
def test_const_tuple_output(graph, const_inputs):
tup = graph.findNode("prim::TupleConstruct")
for i, elem in enumerate(tup.inputs()):
if i in const_inputs:
self.assertIsNotNone(elem.toIValue())
else:
self.assertIsNone(elem.toIValue())
# testing combinations of x1 : {True, False} x
# {then/else branch} x assert {True/False}
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) == 5:
x1 = True
else:
x1 = len(b) != 4
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# we can only infer len(b) == 4 here
test_const_tuple_output(foo.graph, [1])
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) == 5:
x1 = False
else:
x1 = len(b) != 4
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# can't infer anything here
test_const_tuple_output(foo.graph, [])
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) == 5:
x1 = True
else:
x1 = len(b) == 4
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# we can't infer anything useful; at most we learn len(b) != 4
test_const_tuple_output(foo.graph, [])
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) == 5:
x1 = True
else:
x1 = len(b) != 4
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# can infer len(b) == 4
test_const_tuple_output(foo.graph, [1])
# swap branches
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) != 5:
x1 = len(b) != 4
else:
x1 = True
assert x1 == False # noqa: E712 TODO: canonicalize x is False to aten::eq
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# can infer len(b) == 4
test_const_tuple_output(foo.graph, [1])
# use __not__
@torch.jit.script
def foo(x: List[int], b: List[int]):
if len(x) != 5:
x1 = len(b) != 4
else:
x1 = True
assert not x1
return len(x), len(b)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
torch._C._jit_pass_constant_propagation(foo.graph)
# can infer len(b) == 4
test_const_tuple_output(foo.graph, [1])
# Test unsuccessful optimizations
@torch.jit.script
def foo(x: List[int]):
assert len(x) == 4
x.append(3)
return len(x)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
self.run_pass("constant_propagation", foo.graph)
FileCheck().check_count("aten::len", 2).run(foo.graph)
@torch.jit.script
def foo(x: List[int], y: List[int]):
assert len(x) == 4 or len(y) == 5
return len(x) + len(y)
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
self.run_pass("constant_propagation", foo.graph)
FileCheck().check_count("aten::len", 4).run(foo.graph)
def test_integer_refinement(self):
def run_peephole_and_check_const_value(graph, const_string):
self.run_pass("refine_integer_values", graph)
self.run_pass("constant_propagation", graph)
self.run_pass("dce", graph)
FileCheck().check(const_string).check_next("return").run(graph)
@torch.jit.script
def foo(x: int, y: int):
if x != 4 or y != 5:
raise Exception("")
return x + y
graph = foo.graph
self.run_pass("refine_integer_values", graph)
self.run_pass("constant_propagation", graph)
self.run_pass("dce", graph)
run_peephole_and_check_const_value(foo.graph, "value=9")
self.assertEqual(foo(4, 5), 9)
with self.assertRaises(Exception):
foo(2, 4)
@torch.jit.script
def foo(x: int, y: int):
if x == 4 and y == 5:
pass
else:
raise Exception("hi")
return x + y
run_peephole_and_check_const_value(foo.graph, "value=9")
self.assertEqual(foo(4, 5), 9)
with self.assertRaises(Exception):
foo(2, 4)
@torch.jit.script
def foo(x: int, y: int, z: int):
if x != 4:
raise Exception("..")
else:
if y != 8:
raise Exception("...")
else:
if z == 3:
pass
else:
raise Exception("...")
return x + y * z
run_peephole_and_check_const_value(foo.graph, "value=28")
self.assertEqual(foo(4, 8, 3), 28)
with self.assertRaises(Exception):
foo(1, 2, 3)
        # refinement of x should persist into the nested block
@torch.jit.script
def foo(x: int, cond: bool):
if x == 4:
if cond:
return x
return 4
return 4
run_peephole_and_check_const_value(foo.graph, "value=4")
@torch.jit.script
def foo(x: int, y: int):
assert x == 4 or y == 5
return x + y
torch._C._jit_pass_peephole_list_idioms(foo.graph, refine_list_len=True)
self.run_pass("constant_propagation", foo.graph)
FileCheck().check("aten::add").run(foo.graph)
def test_optimize_out_comparison_same_value(self):
def foo(x: int):
return x == x, x != x
def foo2(x: List[int]):
return x == x, x != x
for func, inp in zip([foo, foo2], [1, [2, 3]]):
func_s = torch.jit.script(func)
self.run_pass("peephole", func_s.graph)
FileCheck().check_not("aten::eq").check_not("aten::neq").run(func_s.graph)
self.assertEqual(func(inp), func_s(inp))
def test_peephole_add_zero(self):
@torch.jit.script
def foo(x: int):
return x + 0, 0 + x
self.run_pass("peephole", foo.graph)
        FileCheck().check_not("aten::add").run(foo.graph)
self.assertEqual(foo(3), (3, 3))
def test_noop_peephole(self):
# test unsuccessful
def foo1(x):
return x + 0
def foo2():
x = torch.zeros([2, 2])
x.sub_(3)
return x + 0
def foo3():
x = torch.zeros([2, 2])
return x, x + 0
def foo4():
x = torch.zeros([2, 2])
return x + 0.
funcs = foo1, foo2, foo3, foo4
inps = (torch.ones([2]),), (), (), ()
for func, inp in zip(funcs, inps):
foo_s = torch.jit.script(func)
self.run_pass("peephole", foo_s.graph)
FileCheck().check_count("aten::add", 1, exactly=True).run(foo_s.graph)
self.assertEqual(func(*inp), foo_s(*inp))
# successful
def func(x):
return (x + 0) * 1 - 5
func_s = torch.jit.script(func)
self.run_pass("peephole", func_s.graph)
# bail on modified value first
FileCheck().check_not("aten::add").check("aten::mul").run(func_s.graph)
# second run it should succeed
self.run_pass("peephole", func_s.graph)
FileCheck().check_not("aten::add").check_not("aten::mul").run(func_s.graph)
self.assertEqual(func(torch.ones([2, 2])), func_s(torch.ones([2, 2])))
def func(x):
return (x + 0.) - 5
func_s = torch.jit.script(func)
inp = next(func_s.graph.inputs())
inp.setType(torch._C.TensorType.create_from_tensor(torch.rand([2, 2])))
torch._C._jit_pass_peephole(func_s.graph, disable_shape_peepholes=True)
FileCheck().check("aten::add").run(func_s.graph)
torch._C._jit_pass_peephole(func_s.graph, disable_shape_peepholes=False)
FileCheck().check_not("aten::add").run(func_s.graph)
def test_refine_integer_values(self):
@torch.jit.script
def foo(x: int):
y = 1
if x == 1:
return y
else:
return x
self.run_pass("refine_integer_values", foo.graph)
self.run_pass("constant_propagation", foo.graph)
self.run_pass("dce", foo.graph)
FileCheck().check("graph").check_next("return").run(foo.graph)
self.assertEqual(foo(2), 2)
self.assertEqual(foo(1), 1)
def test_peephole_len_list(self):
@torch.jit.script
def foo(x):
return len(x.size())
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::len").run(foo.graph)
inputs = list(foo.graph.inputs())
inputs[0].setType(inputs[0].type().with_sizes([None, None]))
self.run_pass("peephole", foo.graph)
FileCheck().check_not("aten::len").run(foo.graph)
self.assertEqual(2, foo(torch.rand([3, 1])))
@torch.jit.script
def foo(x):
li = x.size()
li.append(4)
return len(li)
inputs = list(foo.graph.inputs())
inputs[0].setType(inputs[0].type().with_sizes([None, None]))
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::len").run(foo.graph)
self.assertEqual(3, foo(torch.rand([3, 1])))
def test_peephole_optional_refine(self):
@torch.jit.script
def foo(z: int, z2: int, cond: bool):
if cond:
return z
else:
return z2
out = next(foo.graph.findNode("prim::If").outputs())
out.setType(torch._C.OptionalType(torch._C.IntType.get()))
self.run_pass("peephole", foo.graph)
FileCheck().check_not("int?").run(foo.graph)
def test_peephole_int(self):
@torch.jit.script
def foo(x):
            # type: (number) -> number
return int(x)
FileCheck().check("aten::Int").run(foo.graph)
next(foo.graph.inputs()).setType(torch._C.IntType.get())
self.run_pass("peephole", foo.graph)
FileCheck().check_not("aten::Int").run(foo.graph)
def test_peephole_arith(self):
@torch.jit.script
def foo(input0: int, input1: int, input2: int, input3: int):
_1 = torch.add(input1, 2)
_3 = torch.add(input3, 2)
_5 = torch.add(1, torch.sub(_1, 3) // 1)
_6 = torch.add(1 * torch.sub(_3, 3) // 1, 1) / 1
return [_5, int(_6)]
FileCheck().check("aten::add").check("aten::sub") \
.check("aten::mul").check("aten::floordiv") \
.check("aten::div").run(foo.graph)
self.run_pass("peephole", foo.graph)
FileCheck().check("graph").check("):") \
.check_next("ListConstruct").check_next("return").run(foo.graph)
self.assertEqual(foo(0, 1, 2, 3), [1, 3])
def test_peephole_dict_getitem_simple(self):
@torch.jit.script
def foo(a: int, b: int):
d = {0: a, 1: b}
x = d[1]
y = d[0]
return x, y
self.run_pass("peephole", foo.graph)
FileCheck().check_not("DictConstruct").check_not("__getitem__").run(foo.graph)
self.assertEqual(foo(0, 1), (1, 0))
@torch.jit.script
def foo(a: int, b: int):
d = {'0': a, '1': b}
x = d['1']
y = d['0']
return x, y
self.run_pass("peephole", foo.graph)
FileCheck().check_not("DictConstruct").check_not("__getitem__").run(foo.graph)
self.assertEqual(foo(0, 1), (1, 0))
@torch.jit.script
def foo(a: int, b: int):
d = {0.0: a, 1.0: b}
x = d[1.0]
y = d[0.0]
return x, y
self.run_pass("peephole", foo.graph)
FileCheck().check_not("DictConstruct").check_not("__getitem__").run(foo.graph)
self.assertEqual(foo(0, 1), (1, 0))
def test_peephole_dict_getitem_no_optimization_missing_key(self):
@torch.jit.script
def foo():
d = {0: 1}
return d[2]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
def test_peephole_dict_getitem_no_optimization_get_input_arg(self):
# Here we don't know if the input arg is in the dict, so we can't
# make the optimization.
@torch.jit.script
def foo(a: int):
d = {0: 1}
return d[a]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
self.assertEqual(foo(0), 1)
def test_peephole_dict_getitem_no_optimization_dict_modified(self):
@torch.jit.script
def foo():
d = {0: 1}
d[0] = 2
return d[0]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
self.assertEqual(foo(), 2)
def test_peephole_dict_getitem_no_optimization_overlapping_keys(self):
@torch.jit.script
def foo():
d = {0: 1, 0: 2} # noqa: F601
return d[0]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
def test_peephole_dict_getitem_no_optimization_keys_might_overlap(self):
@torch.jit.script
def foo(x: int):
d = {0: 1, x: 2}
return d[x]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
def test_peephole_dict_getitem_no_optimization_unsupported_type(self):
@torch.jit.script
def foo():
a = torch.rand((2, 2))
d = {a: 1}
return d[a]
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("__getitem__").run(foo.graph)
self.assertEqual(foo(), 1)
def test_peephole_dict_len(self):
@torch.jit.script
def foo():
d = {0: 1, 1: 2}
return len(d)
self.run_pass("peephole", foo.graph)
FileCheck().check_not("DictConstruct").check_not("len").run(foo.graph)
self.assertEqual(foo(), 2)
def test_peephole_dict_len_no_optimization_overlapping_keys(self):
@torch.jit.script
def foo():
d = {0: 1, 0: 2} # noqa: F601
return len(d)
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("len").run(foo.graph)
self.assertEqual(foo(), 1)
def test_peephole_dict_len_no_optimization_keys_might_overlap(self):
@torch.jit.script
def foo(x: int):
d = {0: 1, x: 2}
return len(d)
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("len").run(foo.graph)
def test_peephole_dict_len_no_optimization_unsupported_type(self):
@torch.jit.script
def foo():
a = torch.rand((2, 2))
d = {a: 1}
return len(d)
self.run_pass("peephole", foo.graph)
FileCheck().check("DictConstruct").check("len").run(foo.graph)
self.assertEqual(foo(), 1)
def test_peephole_slice_all_three_args(self):
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][-5:6:2]
graph = torch.jit.script(foo).graph
self.run_pass("peephole", graph)
FileCheck().check_not("aten::slice").run(graph)
self.checkScript(foo, (3, ))
def test_peephole_slice_one_empty_arg(self):
def check_helper(fn: Callable[[int], None]) -> None:
graph = torch.jit.script(fn).graph
self.run_pass("peephole", graph)
FileCheck().check_not("aten::slice").run(graph)
self.checkScript(fn, (3, ))
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][1::2]
check_helper(foo)
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][:5:3]
check_helper(foo)
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][0:4]
check_helper(foo)
def test_peephole_slice_two_empty_args(self):
def check_helper(fn: Callable[[int], None]) -> None:
graph = torch.jit.script(fn).graph
self.run_pass("peephole", graph)
FileCheck().check_not("aten::slice").run(graph)
self.checkScript(fn, (3, ))
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][::2]
check_helper(foo)
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][:5]
check_helper(foo)
def foo(x: int):
return [1, 2, x, 4, 5, 6, 7][1:]
check_helper(foo)
def test_peephole_slice_optimization_not_applied_list_modified(self):
@torch.jit.script
def foo():
li = [1, 2, 3, 4, 5, 6, 7]
li[0] = 0
return li[2:5]
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::slice").run(foo.graph)
def test_peephole_slice_optimization_not_applied_non_const_args(self):
@torch.jit.script
def foo(x: int, y: int):
li = [1, 2, 3, 4, 5, 6, 7]
return li[x:y]
self.run_pass("peephole", foo.graph)
FileCheck().check("aten::slice").run(foo.graph)
|
pytorch-master
|
test/jit/test_peephole.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
import unittest
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
# NOTE: FIXING FAILING TESTS
# If you are seeing a test failure from this file, congrats, you improved
# parity between JIT and Python API. Before you fix the test, you must also update
# the corresponding section in documentation that states the unsupported behavior.
# see: `jit_unsupported.rst`
class TestUnsupportedOps(JitTestCase):
def test_factory_ops_requires_grad_fail(self):
        # "Keyword argument {name} unknown" is a JIT-only error message,
        # so these functions succeed in eager mode and fail in JIT.
        # The complete issue and set of ops is tracked at https://github.com/pytorch/pytorch/issues/30761;
        # only some are tested here because they should all be fixed at once.
def ones():
return torch.ones([2], requires_grad=True)
with self.assertRaisesRegexWithHighlight(Exception,
"Keyword argument requires_grad unknown",
"torch.ones"):
torch.jit.script(ones)
def randn():
return torch.randn([2], requires_grad=True)
with self.assertRaisesRegexWithHighlight(Exception,
"Keyword argument requires_grad unknown",
"torch.randn"):
torch.jit.script(randn)
def zeros():
return torch.zeros([2], requires_grad=True)
with self.assertRaisesRegexWithHighlight(Exception,
"Keyword argument requires_grad unknown",
"torch.zeros"):
torch.jit.script(zeros)
@unittest.skipIf(not torch._C.has_lapack, "PyTorch compiled without Lapack")
def test_init_ops(self):
def calculate_gain():
return torch.nn.init.calculate_gain('leaky_relu', 0.2)
def eye_():
return torch.nn.init.eye_(torch.zeros([2, 2]))
def dirac_():
return torch.nn.init.dirac_(torch.empty(3, 16, 5, 5))
def kaiming_uniform_():
return torch.nn.init.kaiming_normal_(torch.empty(3, 5))
def orthogonal_():
return torch.nn.init.orthogonal_(torch.empty(3, 5))
def sparse():
return torch.nn.init.sparse_(torch.empty(3, 5), sparsity=.1)
for func in [calculate_gain, eye_, dirac_, kaiming_uniform_, orthogonal_, sparse]:
# doesn't error in eager
func()
with self.assertRaisesRegex(Exception, ""):
torch.jit.script(func)
|
pytorch-master
|
test/jit/test_unsupported_ops.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch import nn
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class Sequence(nn.Module):
def __init__(self):
super(Sequence, self).__init__()
self.lstm1 = nn.LSTMCell(1, 51)
self.lstm2 = nn.LSTMCell(51, 51)
self.linear = nn.Linear(51, 1)
def forward(self, input):
outputs = []
h_t = torch.zeros(input.size(0), 51)
c_t = torch.zeros(input.size(0), 51)
h_t2 = torch.zeros(input.size(0), 51)
c_t2 = torch.zeros(input.size(0), 51)
for input_t in input.split(1, dim=1):
h_t, c_t = self.lstm1(input_t, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
output = self.linear(h_t2)
outputs += [output]
outputs = torch.cat(outputs, dim=1)
return outputs
class TestScriptProfile(JitTestCase):
def test_basic(self):
seq = torch.jit.script(Sequence())
p = torch.jit._ScriptProfile()
p.enable()
seq(torch.rand((10, 100)))
p.disable()
self.assertNotEqual(p.dump_string(), "")
def test_script(self):
seq = Sequence()
@torch.jit.script
def fn():
p = torch.jit._ScriptProfile()
p.enable()
_ = seq(torch.rand((10, 100)))
p.disable()
return p
self.assertNotEqual(fn().dump_string(), "")
def test_multi(self):
seq = torch.jit.script(Sequence())
profiles = [torch.jit._ScriptProfile() for _ in range(5)]
for p in profiles:
p.enable()
last = None
while len(profiles) > 0:
seq(torch.rand((10, 10)))
p = profiles.pop()
p.disable()
stats = p.dump_string()
self.assertNotEqual(stats, "")
if last:
self.assertNotEqual(stats, last)
last = stats
def test_section(self):
seq = Sequence()
@torch.jit.script
def fn():
p = torch.jit._ScriptProfile()
p.enable()
_ = seq(torch.rand((10, 100)))
p.disable()
stats0 = p.dump_string()
_ = seq(torch.rand((10, 10)))
stats1 = p.dump_string()
p.enable()
_ = seq(torch.rand((10, 10)))
p.disable()
stats2 = p.dump_string()
p.enable()
return stats0, stats1, stats2
s0, s1, s2 = fn()
self.assertEqual(s0, s1)
self.assertNotEqual(s1, s2)
def test_empty(self):
p = torch.jit._ScriptProfile()
p.enable()
p.disable()
self.assertEqual(p.dump_string(), "")
|
pytorch-master
|
test/jit/test_script_profile.py
|
# Owner(s): ["oncall: jit"]
import io
import os
import sys
import torch
from torch.testing import FileCheck
from enum import Enum
from textwrap import dedent
from typing import Dict, List, Optional, Tuple, Union
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, make_global
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestUnion(JitTestCase):
"""
This class tests the functionality of `Union`.
Note: It's important to be able to refine the type of a `Union` to
    one of its internal types. Currently, there are differences between the
    way Python handles `isinstance` checks and the way TorchScript
    handles them. This means that we can't use
    `checkScript` in our test cases because either the eager mode or the
script mode wouldn't run! So, some test cases have separate but
equivalent functions to emulate `checkScript`.
"""
def test_check_union_annotation(self):
def test_func(a: Union[int, float], b: Optional[int]):
return 0
scripted_func = torch.jit.script(test_func)
graph_rep = str(scripted_func.graph)
code_rep = str(scripted_func.code)
# TS graph IR for Union should be annotated as Union()
FileCheck().check("Union(").check("int?").run(graph_rep)
# Serialized code for Union should be annotated as Union[]
FileCheck().check("Union[").check("Optional[int]").run(code_rep)
self.checkScript(test_func, (5, 6))
# this shouldn't error out
torch._C.parse_ir(str(scripted_func.graph))
def test_union_with_scalar_values(self):
def fn(x: Union[int, float]) -> str:
return "foo"
self.checkScript(fn, (1,))
self.checkScript(fn, (1.0,))
scripted = torch.jit.script(fn)
with self.assertRaisesRegex(RuntimeError, "Expected a member of"
r" Union\[float, int\] but "
"instead found type str"):
scripted("1")
def test_union_with_collections(self):
def fn(x: Union[Dict[str, int], List[int]]) -> str:
return "foo"
self.checkScript(fn, ({"foo": 1, "bar": 2, "baz": 3},))
self.checkScript(fn, ([1, 2, 3],))
scripted = torch.jit.script(fn)
with self.assertRaisesRegex(RuntimeError, "Expected a member of"
r" Union\[List\[int\], Dict\[str, "
r"int\]\] but instead found type "
r"Dict\[str, str\]"):
scripted({"foo": "bar", "baz": "qux"})
with self.assertRaisesRegex(RuntimeError, "Expected a member of"
r" Union\[List\[int\], Dict\[str, "
r"int\]\] but instead found type "
r"List\[str\]"):
scripted(["foo", "bar", "baz"])
with self.assertRaisesRegex(RuntimeError, "Expected a member of"
r" Union\[List\[int\], Dict\[str, "
r"int\]\] but instead found type "
"str"):
scripted("1")
def test_union_with_enum(self):
class Color(Enum):
RED = 1
GREEN = 2
make_global(Color)
def fn(x: Union[str, Color]) -> str:
return "foo"
self.checkScript(fn, (Color.RED,))
self.checkScript(fn, ("red",))
scripted = torch.jit.script(fn)
with self.assertRaisesRegex(RuntimeError, "Expected a member of"
r" Union\[__torch__.jit.test_union."
r"Color, str\] but instead found "
"type int"):
scripted(1)
def test_union_in_class_constructor(self):
@torch.jit.script # noqa: B903
class A(object): # noqa: B903
def __init__(self, x: Union[int, str]) -> None:
self.x = x
def fn(x: Union[str, int]) -> A:
return A(x)
self.assertEqual(fn("foo").x, "foo")
self.assertEqual(fn(1).x, 1)
scripted = torch.jit.script(fn)
with self.assertRaisesRegex(RuntimeError, "Expected a member of"
r" Union\[int, str\] but instead "
r"found type List\[str\]"):
scripted(["foo", "bar", "baz"])
def test_union_return_type(self):
def fn(x: int) -> Union[int, str]:
return "foo"
self.checkScript(fn, (1,))
def test_union_as_annotation(self):
def fn() -> Union[int, str]:
x: Union[int, str] = "foo"
return x
self.checkScript(fn, ())
def test_union_as_annotation_in_typed_container(self):
def fn() -> None:
l: List[Union[int, str]] = []
u1: Union[int, str] = "foo"
u2: Union[int, str] = 1
l.append(u1)
l.append(u2)
self.checkScript(fn, ())
def test_union_as_annotation_py2(self):
def fn():
# type: () -> Union[int, str]
x: Union[int, str] = "foo"
return x
self.checkScript(fn, ())
def test_union_as_internal_tuple_type(self):
def fn():
t: Tuple[Union[int, str], Union[int, str]] = (1, "foo")
return t
self.checkScript(fn, ())
def test_union_variable_can_be_reassigned(self):
@torch.jit.script
def aux1(i: int):
return int(i ** 2)
@torch.jit.script
def aux2(s: str):
return s + s
def fn() -> Union[int, str]:
x: Union[int, str] = "foo"
i: int = 1
x = i
y: int = aux1(x)
z: str = aux2(str(y))
x = z
return x
self.checkScript(fn, ())
def test_union_does_not_replace_existing_annotated_type(self):
def fn():
x: List[int] = [1, 2, 3]
x.append("foo")
return x
with self.assertRaisesRegex(RuntimeError, "Could not match type str"):
scripted = torch.jit.script(fn)
scripted()
def test_union_does_not_replace_existing_annotated_type_union(self):
def fn():
x: List[Union[int, str]] = [1, "foo", 3]
x.append(2.0)
return x
with self.assertRaisesRegex(RuntimeError, "Could not match type float"):
scripted = torch.jit.script(fn)
scripted()
def test_union_does_not_replace_existing_annotated_type_empty_container(self):
def fn():
x: List[int] = []
x.append("foo")
return x
with self.assertRaisesRegex(RuntimeError, "Could not match type str"):
scripted = torch.jit.script(fn)
scripted()
def test_unions_of_unions_are_flattened(self):
@torch.jit.script
def fn(x: Union[Union[int, str], float]) -> str:
return "foo"
s = fn.graph
FileCheck().check("x : Union(float, int, str)") \
.run(s)
def test_unions_of_a_single_argument_vanish(self):
@torch.jit.script
def fn(x: Union[int]) -> str:
return "foo"
s = fn.graph
FileCheck().check("x : int") \
.run(s)
def test_union_redundant_arguments_are_skipped(self):
@torch.jit.script
def fn(x: Union[int, str, int]) -> str:
return "foo"
s = fn.graph
FileCheck().check("x : Union(int, str)") \
.run(s)
def test_union_redundant_arguments_are_skipped_optional(self):
@torch.jit.script
def fn(x: Union[int, Optional[float], Optional[int]]) -> str:
return "foo"
s = fn.graph
FileCheck().check("x : Union(float, int, NoneType)") \
.run(s)
def test_union_redundant_arguments_are_skipped_subtyping(self):
@torch.jit.script
def fn(x: Union[str, Tuple[Optional[int], int], Tuple[int, int]]) -> str:
return "foo"
s = fn.graph
FileCheck().check("x : Union((int?, int), str)") \
.run(s)
def test_union_redundant_arguments_are_skipped_container(self):
@torch.jit.script
def fn(x: Union[List[str], List[float], List[str]]) -> str:
return "foo"
s = fn.graph
FileCheck().check("x : Union(float[], str[])") \
.run(s)
def test_union_argument_order_is_ignored(self):
@torch.jit.script
def fn1(x: Union[int, str]) -> str:
return "foo"
@torch.jit.script
def fn2(x: Union[str, int]) -> str:
return "foo"
for s in (fn1.graph, fn2.graph):
FileCheck().check("x : Union(int, str)") \
.run(s)
def test_union_argument_order_is_ignored_container(self):
@torch.jit.script
def fn1(x: Union[List[str], List[int]]) -> str:
return "foo"
@torch.jit.script
def fn2(x: Union[List[int], List[str]]) -> str:
return "foo"
for s in (fn1.graph, fn2.graph):
FileCheck().check("x : Union(int[], str[])") \
.run(s)
def test_union_T_None_is_equivalent_to_optional_T(self):
@torch.jit.script
def inner(x: Union[int, None]) -> int:
if x is not None:
return x
else:
return 5
@torch.jit.script
def fn1() -> int:
a: Optional[int] = 5
b: Optional[int] = None
a_ = inner(a)
b_ = inner(b)
return a_ + b_
self.assertEqual(fn1(), 10)
@torch.jit.script
def inner2(x: Optional[int]) -> int:
if x is not None:
return x
else:
return 5
@torch.jit.script
def fn2() -> int:
a: Union[int, None] = 5
b: Union[int, None] = None
a_ = inner(a)
b_ = inner(b)
return a_ + b_
self.assertEqual(fn2(), 10)
def test_union_optional_of_union_is_flattened(self):
@torch.jit.script
def fn(flag: int) -> Union[str, int, None]:
y: Union[int, str, None] = "foo"
if flag == 0:
x: Optional[Union[int, str]] = y
elif flag == 1:
x: Optional[Union[int, str]] = 1
else:
x: Optional[Union[int, str]] = None
return x
# Can't use `checkScript` because it will flag the fact that
# the original code has `Optional[Union[int, str]]` but the
# saved/loaded code has `Union[int, NoneType, str]` (even
# though this is exactly what we want)
self.assertEqual(fn(0), "foo")
self.assertEqual(fn(1), 1)
self.assertEqual(fn(2), None)
buffer = io.BytesIO()
torch.jit.save(fn, buffer)
buffer = io.BytesIO(buffer.getvalue())
l = torch.jit.load(buffer)
s = l.code
FileCheck().check("Union[int, NoneType, str]") \
.check("Union[int, NoneType, str]") \
.run(s)
def test_union_subclasses_larger_union(self):
def fn() -> Union[int, str, torch.Tensor]:
x: Union[int, str] = "foo"
return x
self.checkScript(fn, ())
# TODO: We would like to eventually support this. The issue is being
# tracked at https://github.com/pytorch/pytorch/issues/58167
def test_union_as_dict_key(self):
def fn():
x: Dict[Union[int, str], str] = {}
x["foo"] = "bar"
x[1] = 2
return x[1]
with self.assertRaisesRegex(RuntimeError, "only int, float, "
"complex, Tensor, device and string keys "
"are supported"):
torch.jit.script(fn)
def test_union_as_dict_value(self):
def fn():
x: Dict[str, Union[int, str]] = {}
x["foo"] = "bar"
x["baz"] = 2
return x["baz"]
self.checkScript(fn, ())
def test_union_module_with_union_instance_variable(self):
class M(torch.nn.Module):
x: Union[int, str]
def __init__(self, x: Union[int, str]):
super().__init__()
self.x: Union[int, str] = x
def forward(self, y: Union[int, str]):
self.x = y
return self.x
self.checkModule(M(2,), (1,))
self.checkModule(M("bar"), ("foo",))
def test_union_module_with_union_class_variable(self):
class M(torch.nn.Module):
x: Union[int, str] = "foo"
def __init__(self, y: int):
super().__init__()
x = y
def forward(self, z: str):
x = z
return x
self.checkModule(M(1), ("foo",))
def test_union_type_refinement(self):
def fn(x: Union[int, str]) -> str:
if isinstance(x, str):
z = x + "bar"
return x
else:
return "baz"
self.checkScript(fn, ("foo",))
self.checkScript(fn, (1,))
def test_union_type_refinement_union_rhs(self):
def fn(x: int) -> str:
if torch.jit.isinstance(x, Union[int, str]):
return "bar"
else:
return "baz"
self.checkScript(fn, (1,))
def test_union_type_refinement_tuple_rhs(self):
def fn(x: Union[int, float, List[str]]) -> str:
if isinstance(x, (int, float)):
if isinstance(x, int):
return str(x)
else:
return "foo"
else:
if len(x):
return x[0]
else:
return "bar"
self.checkScript(fn, (1,))
self.checkScript(fn, (1.0,))
self.checkScript(fn, (["a", "b", "c"],))
def test_union_type_refinement_tuple_rhs_noncontained_type(self):
def fn(x: Union[int, List[str]]) -> str:
if isinstance(x, (int, float)):
y = x + x
return str(y)
else:
if len(x):
return x[0]
else:
return "bar"
self.checkScript(fn, (1,))
self.checkScript(fn, (["a", "b", "c"],))
def test_union_type_refinement_tuple_rhs_union(self):
@torch.jit.script
def fn(x: int) -> str:
if torch.jit.isinstance(x, (Union[int, str], float)):
y = x + x
return str(y)
else:
return "foo"
# TODO: There's currently an unrelated bug in
# `torch.jit.isinstance` that makes it fail for tuple literals.
# Posted here: https://github.com/pytorch/pytorch/issues/60095
# Change `assertEqual` to `checkScript` when the bug is fixed
self.assertEqual(fn(1), "2")
def test_union_type_refinement_statically_false(self):
@torch.jit.script
def fn(x: int) -> str:
if torch.jit.isinstance(x, (Union[str, float], List[str], str)):
z = x + "foo"
return z
else:
return "bar"
s = fn.graph
# Check that we don't have any branching statements
FileCheck().check_not("block0()") \
.check_not("block1()") \
.run(s)
def test_union_type_refinement_statically_true(self):
@torch.jit.script
def fn(x: Union[List[int], int]) -> Union[List[int], int]:
if not torch.jit.isinstance(x, (int, List[int])):
return x
else:
l = [1, 2, 3]
y: Union[List[int], int] = l
return y
s = fn.graph
# Check that we don't have any branching statements
FileCheck().check_not("block0()") \
.check_not("block1()") \
.run(s)
def test_union_type_refinement_partial_static_refinement_tuple_rhs(self):
def fn(x: Union[List[int], int]) -> int:
if torch.jit.isinstance(x, (int, float, str)):
# We should know that `x` is an `int` here
z = x + 1
return z
else:
return 100
self.checkScript(fn, ([1, 2, 3],))
self.checkScript(fn, (1,))
def test_union_type_refinement_partial_static_refinement_union_rhs(self):
def fn(x: Union[List[int], int]) -> int:
if torch.jit.isinstance(x, Union[int, float, str]):
# We should know that `x` is an `int` here
z = x + 1
return z
else:
return 100
self.checkScript(fn, ([1, 2, 3],))
self.checkScript(fn, (1,))
def test_union_type_refinement_internal_declaration(self):
def fn(flag: bool) -> str:
x: Union[int, str, None] = None
            if flag:
y = "foo"
else:
y = 1
if isinstance(x, str):
return x
else:
return "bar"
self.checkScript(fn, (True,))
self.checkScript(fn, (False,))
def test_union_branching_with_union_return_and_homogenous_types(self):
def fn(x: int) -> Union[int, str]:
if x % 2:
return "foo"
else:
return "bar"
self.checkScript(fn, (1,))
self.checkScript(fn, (8,))
def test_union_branching_does_not_autoinfer_undeclared_union(self):
def fn(x: int) -> str:
if x % 2:
y = "foo"
else:
y = x
if isinstance(y, str):
return y
else:
return "bar"
with self.assertRaisesRegex(RuntimeError, "y is set to type str"
" in the true branch and type int "
"in the false branch"):
torch.jit.script(fn)
def test_union_branching_does_not_widen_existing_inferred_type(self):
def fn(x: int) -> str:
y = "foo"
if x % 2:
y = "bar"
else:
y = x
if isinstance(y, str):
return y
else:
return "baz"
with self.assertRaisesRegex(RuntimeError, "previously had type "
"str but is now being assigned to a"
" value of type int"):
torch.jit.script(fn)
def test_union_schema_matching_on_internal_type(self):
def fn(x: Union[List[int], Dict[str, int]]) -> int:
if torch.jit.isinstance(x, List[int]):
return x[0]
else:
return list(x.values())[0]
self.checkScript(fn, ([1, 2, 3],))
self.checkScript(fn, ({"foo": 1, "bar": 2, "baz": 3},))
def test_union_subtractive_refinement(self):
def fn(x: Union[List[int], int]) -> int:
if not isinstance(x, int):
x.append(1)
return x[0]
else:
return x
self.checkScript(fn, (1,))
self.checkScript(fn, ([1, 2, 3],))
def test_union_subtractive_refinement_with_container(self):
def fn(x: Union[List[int], int]) -> int:
if not torch.jit.isinstance(x, List[int]):
return x
else:
x.append(1)
return x[0]
self.checkScript(fn, (1,))
self.checkScript(fn, ([1, 2, 3],))
def test_union_memory_aliasing(self):
def fn():
x : List[torch.Tensor] = []
z : List[Optional[List[torch.Tensor]]] = []
z.append(x)
x_alias = z[0]
if torch.jit.isinstance(x_alias, List[torch.Tensor]):
x_alias.append(torch.tensor(3))
return x
self.checkScript(fn, ())
def test_union_serialization_preserves_type_annotations(self):
# This function will fail after being torch.jit.save'd and
# torch.jit.load'd if the type annotations aren't preserved
# for Union during serialization. We need the `Union[str, int]`
# annotation to make sure that `y` is typed as a Union instead
# of as a str in one branch and an int in the other
def fn(x: int) -> str:
if x % 2:
y: Union[str, int] = "bar"
else:
y: Union[str, int] = x
if isinstance(y, str):
return y
else:
return "baz"
self.checkScript(fn, (1,))
self.checkScript(fn, (8,))
def _assert_passes(self, template: str, ann: str, lhs: str):
code = template.format(ann=ann, lhs=lhs)
self.checkScript(code, (), name="fn")
def _assert_raises(self, template: str, ann: str, lhs: str, msg: str):
code = template.format(ann=ann, lhs=lhs)
with self.assertRaisesRegex(RuntimeError, msg):
cu = torch.jit.CompilationUnit(code, _frames_up=1)
string_frontend = getattr(cu, "fn") # noqa: B009
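    # Illustrative sketch (not part of the original test suite) of how the
    # template helpers above work: formatting a template with an annotation
    # and a right-hand side yields complete function source that the string
    # frontend can compile. The template and values below are hypothetical.
    def _example_template_expansion(self):
        template = dedent('''
            def fn():
                x: {ann} = {lhs}
                return x
        ''')
        code = template.format(ann="int", lhs="3")
        cu = torch.jit.CompilationUnit(code)
        self.assertEqual(cu.fn(), 3)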
def test_union_with_list_assignment(self):
template = dedent('''
def fn():
x: {ann} = {lhs}
if torch.jit.isinstance(x, List[torch.Tensor]):
x.append(torch.tensor(3))
return x
''')
lhs = {"list_literal_empty" : "[]",
"list_literal_of_tensor" : "[torch.arange(3), torch.arange(5)]",
"list_literal_of_str" : "[\"foo\", \"bar\", \"baz\"]",
"list_literal_of_mixed" : "[torch.arange(5), 1]",
"list_comprehension_of_tensor" :
"[torch.add(x, 1) for x in [torch.arange(3), torch.arange(5)]]",
"list_comprehension_of_str" :
"[x + \"!\" for x in [\"foo\", \"bar\", \"baz\"]]",
"list_comprehension_of_mixed" :
"[torch.add(1, x) for x in [torch.arange(5), 1]]"}
"""
Union[List[str], List[torch.Tensor]]
"""
self._assert_raises(template,
"Union[List[str], List[torch.Tensor]]",
lhs["list_literal_empty"],
"there are multiple possible List type "
"candidates in the Union annotation")
self._assert_passes(template,
"Union[List[str], List[torch.Tensor]]",
lhs["list_literal_of_tensor"])
self._assert_passes(template,
"Union[List[str], List[torch.Tensor]]",
lhs["list_literal_of_str"])
self._assert_raises(template,
"Union[List[str], List[torch.Tensor]]",
lhs["list_literal_of_mixed"],
"none of those types match the types of the"
" given list elements")
self._assert_passes(template,
"Union[List[str], List[torch.Tensor]]",
lhs["list_comprehension_of_tensor"])
self._assert_passes(template,
"Union[List[str], List[torch.Tensor]]",
lhs["list_comprehension_of_str"])
# TODO: Support mixed list comprehensions
self._assert_raises(template,
"Union[List[str], List[torch.Tensor]]",
lhs["list_comprehension_of_mixed"],
"Arguments for call are not valid")
"""
Union[int, torch.Tensor]
"""
self._assert_raises(template,
"Union[int, torch.Tensor]",
lhs["list_literal_empty"],
"Expected an Union type annotation with an "
"inner List type")
self._assert_raises(template, "Union[int, torch.Tensor]",
lhs["list_literal_of_tensor"],
"Expected an Union type annotation with an "
"inner List type")
self._assert_raises(template, "Union[int, torch.Tensor]",
lhs["list_comprehension_of_tensor"],
"Expected an Union type annotation with an "
"inner List type")
"""
Union[List[torch.Tensor], int]
"""
self._assert_passes(template,
"Union[List[torch.Tensor], int]",
lhs["list_literal_empty"])
self._assert_passes(template,
"Union[List[torch.Tensor], int]",
lhs["list_literal_of_tensor"])
self._assert_raises(template, "Union[List[torch.Tensor], int]",
lhs["list_literal_of_str"],
r"List type annotation `List\[Tensor\]` did "
"not match the types of the given list "
"elements")
self._assert_raises(template, "Union[List[torch.Tensor], int]",
lhs["list_literal_of_mixed"],
r"List type annotation `List\[Tensor\]` did "
"not match the types of the given list "
"elements")
self._assert_passes(template,
"Union[List[torch.Tensor], int]",
lhs["list_comprehension_of_tensor"])
self._assert_raises(template,
"Union[List[torch.Tensor], int]",
lhs["list_comprehension_of_str"],
r"List type annotation `List\[Tensor\]` did "
"not match the types of the given list "
"elements")
# TODO(@ansley): Support mixed list comprehensions
self._assert_raises(template,
"Union[List[torch.Tensor], int]",
lhs["list_comprehension_of_mixed"],
"Arguments for call are not valid")
def test_union_with_dict_assignment(self):
template = dedent('''
def fn():
x: {ann} = {lhs}
if torch.jit.isinstance(x, Dict[str, torch.Tensor]):
x["foo"] = torch.tensor(3)
return x
''')
lhs = {"dict_literal_empty" : "{}",
"dict_literal_of_str_tensor" :
"{\"foo\" : torch.arange(3), \"bar\" : torch.arange(5)}",
"dict_literal_of_str_int" :
"{\"foo\" : 1, \"bar\" : 2}",
"dict_literal_of_mixed" :
"{\"foo\" : torch.arange(3), \"bar\" : 2}",
"dict_comprehension_of_str_tensor" :
"{x : torch.add(y, 1) for x, y in \
zip([\"foo\", \"bar\"], [torch.arange(3), torch.arange(5)])}",
"dict_comprehension_of_str_int" :
"{x : torch.add(y, 1) for x, y in \
zip([\"foo\", \"bar\"], [1, 2]}",
"dict_comprehension_of_mixed" :
"{x : torch.add(y, 1) for x, y in \
zip([\"foo\", \"bar\"], [torch.arange(3), 2])}",
"dict_keyword" :
"dict(foo=torch.arange(3), baz=torch.arange(5))",
"dict_keyword_with_iterable" :
"dict([(\"foo\", torch.arange(3)), (\"bar\", torch.arange(5))])",
"dict_keyword_with_empty_iterable" :
"dict([])",
"dict_keyword_with_internal_aggregate_function" :
"dict(zip([\"foo\", \"bar\"], [torch.arange(3), torch.arange(5)])",
"dict_keyword_with_mapping" :
"dict({\"foo\" : torch.arange(3), \"bar\" : torch.arange(5)})",
"dict_keyword_with_mapping_and_kwargs" :
"dict({\"foo\" : torch.arange(3), \"bar\" : torch.arange(5)}, baz=torch.arange(7))",
}
"""
Union[Dict[str, torch.Tensor], Dict[str, int]]
"""
self._assert_raises(template,
"Union[List[str], List[torch.Tensor]]",
lhs["dict_literal_empty"],
"Expected an Union type annotation with an "
"inner Dict type")
self._assert_passes(template,
"Union[Dict[str, torch.Tensor], Dict[str, int]]",
lhs["dict_literal_of_str_tensor"])
self._assert_passes(template,
"Union[Dict[str, torch.Tensor], Dict[str, int]]",
lhs["dict_literal_of_str_int"])
self._assert_raises(template, "Union[Dict[str, torch.Tensor], Dict[str, int]]",
lhs["dict_literal_of_mixed"],
"none of those dict types can hold the "
"types of the given keys and values")
# TODO: String frontend does not support tuple unpacking
# https://github.com/pytorch/pytorch/issues/64096
# self._assert_passes(template, "Union[Dict[str, torch.Tensor], Dict[str, int]]",
# lhs["dict_comprehension_of_str_tensor"])
# self._assert_passes(template, "Union[Dict[str, torch.Tensor], Dict[str, int]]",
# lhs["dict_comprehension_of_str_int"])
# self._assert_raises(template, "Union[Dict[str, torch.Tensor], Dict[str, int]]",
# lhs["dict_comprehension_of_mixed"],
# "foobar")
# self._assert_passes(template,
# "Union[Dict[str, torch.Tensor], Dict[str, int]]",
# lhs["dict_keyword_with_internal_aggregate_function"])
# TODO(@ansley): Follow-up project needed for full type
# inference with dict keyword (supported for dict comprehension
# and dict literal already; should not be a blocker for anyone)
self._assert_raises(template,
"Union[Dict[str, torch.Tensor], Dict[str, int]]",
lhs["dict_keyword"],
"full type inference is not yet supported")
self._assert_raises(template,
"Union[Dict[str, torch.Tensor], Dict[str, int]]",
lhs["dict_keyword_with_iterable"],
"full type inference is not yet supported")
self._assert_raises(template,
"Union[Dict[str, torch.Tensor], Dict[str, int]]",
lhs["dict_keyword_with_empty_iterable"],
"full type inference is not yet supported")
self._assert_raises(template,
"Union[Dict[str, torch.Tensor], Dict[str, int]]",
lhs["dict_keyword_with_mapping"],
"full type inference is not yet supported")
self._assert_raises(template,
"Union[Dict[str, torch.Tensor], Dict[str, int]]",
lhs["dict_keyword_with_mapping_and_kwargs"],
"full type inference is not yet supported")
"""
Union[int, torch.Tensor]
"""
self._assert_raises(template,
"Union[int, torch.Tensor]",
lhs["dict_literal_empty"],
"Expected an Union type annotation with "
"an inner Dict type")
self._assert_raises(template,
"Union[int, torch.Tensor]",
lhs["dict_literal_of_str_tensor"],
"Expected an Union type annotation with "
"an inner Dict type")
# See above--string frontend does not support tuple unpacking
# self._assert_raises(template, "Union[int, torch.Tensor]",
# lhs["dict_comprehension_of_tensor"],
# "foobar")
"""
Union[Dict[str, torch.Tensor], int]
"""
self._assert_passes(template,
"Union[Dict[str, torch.Tensor], int]",
lhs["dict_literal_empty"])
self._assert_passes(template,
"Union[Dict[str, torch.Tensor], int]",
lhs["dict_literal_of_str_tensor"])
self._assert_raises(template,
"Union[Dict[str, torch.Tensor], int]",
lhs["dict_literal_of_str_int"],
"Type annotation was inferred to be "
r"`Dict\[str, Tensor\]`, but the type of "
"values given by the dict literal is")
self._assert_raises(template,
"Union[Dict[str, torch.Tensor], int]",
lhs["dict_literal_of_mixed"],
"Type annotation was inferred to be "
r"`Dict\[str, Tensor\]`, but the type of "
"values given by the dict literal is")
self._assert_passes(template,
"Union[Dict[str, torch.Tensor], int]",
lhs["dict_keyword"])
self._assert_passes(template,
"Union[Dict[str, torch.Tensor], int]",
lhs["dict_keyword_with_iterable"])
self._assert_passes(template,
"Union[Dict[str, torch.Tensor], int]",
lhs["dict_keyword_with_empty_iterable"])
self._assert_passes(template,
"Union[Dict[str, torch.Tensor], int]",
lhs["dict_keyword_with_mapping"])
self._assert_passes(template,
"Union[Dict[str, torch.Tensor], int]",
lhs["dict_keyword_with_mapping_and_kwargs"])
# See above--string frontend does not support tuple unpacking
# self._assert_passes(template,
# "Union[Dict[str, torch.Tensor], int]",
# lhs["dict_keyword_with_internal_aggregate_function"])
#
# self._assert_passes(template,
# "Union[Dict[str, torch.Tensor], int]",
# lhs["dict_comprehension_of_str_tensor"])
# self._assert_raises(template,
# "Union[Dict[str, torch.Tensor], int]",
# lhs["dict_comprehension_of_str_int"],
# "foobar")
# self._assert_raises(template,
# "Union[Dict[str, torch.Tensor], int]",
# lhs["dict_comprehension_of_mixed"],
# "foobar")
|
pytorch-master
|
test/jit/test_union.py
|
# Owner(s): ["oncall: jit"]
from typing import List, Any
import torch
import torch.nn as nn
import os
import sys
from torch import Tensor
from torch.testing._internal.jit_utils import JitTestCase, make_global
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class OrigModule(nn.Module):
def __init__(self):
super(OrigModule, self).__init__()
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 + inp2 + 1
def two(self, input: Tensor) -> Tensor:
return input + 2
def forward(self, input: Tensor) -> Tensor:
return input + self.one(input, input) + 1
class NewModule(nn.Module):
def __init__(self):
super(NewModule, self).__init__()
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
def forward(self, input: Tensor) -> Tensor:
return self.one(input, input + 1)
class TestModuleInterface(JitTestCase):
def test_not_submodule_interface_call(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
class TestNotModuleInterfaceCall(nn.Module):
proxy_mod : ModuleInterface
def __init__(self):
super(TestNotModuleInterfaceCall, self).__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.two(input)
with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", "self.proxy_mod.two"):
torch.jit.script(TestNotModuleInterfaceCall())
def test_module_interface(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: Tensor, y: Tensor) -> Tensor:
pass
def two(self, x: Tensor) -> Tensor:
pass
def forward(self, x: Tensor) -> Tensor:
pass
@torch.jit.interface
class OneTwoClass(object):
def one(self, x: Tensor, y: Tensor) -> Tensor:
pass
def two(self, x: Tensor) -> Tensor:
pass
class FooMod(nn.Module):
def one(self, x: Tensor, y: Tensor) -> Tensor:
return x + y
def two(self, x: Tensor) -> Tensor:
return 2 * x
def forward(self, x: Tensor) -> Tensor:
return self.one(self.two(x), x)
class BarMod(nn.Module):
def one(self, x: Tensor, y: Tensor) -> Tensor:
return x * y
def two(self, x: Tensor) -> Tensor:
return 2 / x
def forward(self, x: Tensor) -> Tensor:
return self.two(self.one(x, x))
@torch.jit.export
def forward2(self, x: Tensor) -> Tensor:
return self.two(self.one(x, x)) + 1
make_global(OneTwoModule, OneTwoClass)
def use_module_interface(mod_list: List[OneTwoModule], x: torch.Tensor):
return mod_list[0].forward(x) + mod_list[1].forward(x)
def use_class_interface(mod_list: List[OneTwoClass], x: Tensor) -> Tensor:
return mod_list[0].two(x) + mod_list[1].one(x, x)
scripted_foo_mod = torch.jit.script(FooMod())
scripted_bar_mod = torch.jit.script(BarMod())
self.checkScript(use_module_interface,
([scripted_foo_mod, scripted_bar_mod], torch.rand(3, 4),))
self.checkScript(use_class_interface,
([scripted_foo_mod, scripted_bar_mod], torch.rand(3, 4),))
def call_module_interface_on_other_method(mod_interface: OneTwoModule, x: Tensor) -> Tensor:
return mod_interface.forward2(x)
        # ensure we error out when calling a method on the module other than the ones the interface specifies.
with self.assertRaisesRegexWithHighlight(RuntimeError, "object has no attribute or method", "mod_interface.forward2"):
self.checkScript(call_module_interface_on_other_method, (scripted_bar_mod, torch.rand(3, 4),))
def test_module_doc_string(self):
@torch.jit.interface
class TestInterface(nn.Module):
def one(self, inp1, inp2):
# type: (Tensor, Tensor) -> Tensor
pass
def forward(self, input):
# type: (Tensor) -> Tensor
r"""stuff 1"""
r"""stuff 2"""
pass
r"""stuff 3"""
class TestModule(nn.Module):
proxy_mod : TestInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigModule()
def forward(self, input):
# type: (Tensor) -> Tensor
return self.proxy_mod.forward(input)
input = torch.randn(3, 4)
self.checkModule(TestModule(), (input,))
def test_module_interface_subtype(self):
@torch.jit.interface
class OneTwoModule(nn.Module):
def one(self, x: Tensor, y: Tensor) -> Tensor:
pass
def two(self, x: Tensor) -> Tensor:
pass
def forward(self, x: Tensor) -> Tensor:
pass
make_global(OneTwoModule)
@torch.jit.script
def as_module_interface(x: OneTwoModule) -> OneTwoModule:
return x
@torch.jit.script
class Foo(object):
def one(self, x: Tensor, y: Tensor) -> Tensor:
return x + y
def two(self, x: Tensor) -> Tensor:
return 2 * x
def forward(self, x: Tensor) -> Tensor:
return self.one(self.two(x), x)
# check class object is not a subtype of module interface
with self.assertRaisesRegex(RuntimeError, "ScriptModule class can be subtype of module interface"):
as_module_interface(Foo())
class WrongMod(nn.Module):
def two(self, x: int) -> int:
return 2 * x
def forward(self, x: Tensor) -> Tensor:
return x + torch.randn(3, self.two(3))
scripted_wrong_mod = torch.jit.script(WrongMod())
# wrong module that is not compatible with module interface
with self.assertRaisesRegex(RuntimeError, "is not compatible with interface"):
as_module_interface(scripted_wrong_mod)
# Check that interface implementations can be contravariant in argument types and covariant in return type.
@torch.jit.interface
class TensorToAny(nn.Module):
def forward(self, input: torch.Tensor) -> Any:
pass
make_global(TensorToAny)
@torch.jit.script
def as_tensor_to_any(x: TensorToAny) -> TensorToAny:
return x
@torch.jit.interface
class AnyToAny(nn.Module):
def forward(self, input: Any) -> Any:
pass
make_global(AnyToAny)
@torch.jit.script
def as_any_to_any(x: AnyToAny) -> AnyToAny:
return x
class TensorToAnyImplA(nn.Module):
def forward(self, input: Any) -> Any:
return input
class TensorToAnyImplB(nn.Module):
def forward(self, input: Any) -> torch.Tensor:
return torch.tensor([1])
class AnyToAnyImpl(nn.Module):
def forward(self, input: Any) -> torch.Tensor:
return torch.tensor([1])
as_tensor_to_any(torch.jit.script(TensorToAnyImplA()))
as_tensor_to_any(torch.jit.script(TensorToAnyImplB()))
as_any_to_any(torch.jit.script(AnyToAnyImpl()))
def test_module_interface_inheritance(self):
with self.assertRaisesRegex(RuntimeError, "does not support inheritance yet. Please directly"):
@torch.jit.interface
class InheritMod(nn.ReLU):
def three(self, x: Tensor) -> Tensor:
return 3 * x
def test_module_swap(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
class TestModule(nn.Module):
proxy_mod : ModuleInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.forward(input)
scripted_mod = torch.jit.script(TestModule())
input = torch.randn(3, 4)
self.assertEqual(scripted_mod(input), 3 * input + 2)
        # module swap with a module that has the same interface
scripted_mod.proxy_mod = torch.jit.script(NewModule())
self.assertEqual(scripted_mod(input), input * (input + 1) + 1)
# module swap with non-scripted module should throw error
with self.assertRaisesRegex(RuntimeError, "a ScriptModule with non-scripted module"):
scripted_mod.proxy_mod = NewModule()
def test_module_swap_wrong_module(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
class NewModuleWrong(nn.Module):
def __init__(self):
super(NewModuleWrong, self).__init__()
def forward(self, input: int) -> int:
return input + 1
class TestModule(nn.Module):
proxy_mod : ModuleInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.forward(input)
scripted_mod = torch.jit.script(TestModule())
        # module swap with an incompatible interface
with self.assertRaisesRegex(RuntimeError, "is not compatible with interface"):
scripted_mod.proxy_mod = torch.jit.script(NewModuleWrong())
def test_module_swap_no_lazy_compile(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
class TestModule(nn.Module):
proxy_mod : ModuleInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.forward(input)
class NewModuleMethodNotLazyCompile(nn.Module):
def __init__(self):
super(NewModuleMethodNotLazyCompile, self).__init__()
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
def forward(self, input: Tensor) -> Tensor:
return input + 1
scripted_mod = torch.jit.script(TestModule())
        # module swap with a module that has the same interface, but whose method does not get
        # lazily compiled from forward; the user needs to export it explicitly for the swap to work
with self.assertRaisesRegex(RuntimeError, "is not compatible with interface"):
scripted_mod.proxy_mod = torch.jit.script(NewModuleMethodNotLazyCompile())
class NewModuleMethodManualExport(nn.Module):
def __init__(self):
super(NewModuleMethodManualExport, self).__init__()
@torch.jit.export
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
def forward(self, input: Tensor) -> Tensor:
return input + 1
scripted_mod.proxy_mod = torch.jit.script(NewModuleMethodManualExport())
input = torch.randn(3, 4)
self.assertEqual(scripted_mod(input), input + 1)
def test_module_swap_no_module_interface(self):
# test module swapping with no module interface
class TestNoModuleInterface(nn.Module):
def __init__(self):
super(TestNoModuleInterface, self).__init__()
self.proxy_mod = OrigModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod(input)
scripted_no_module_interface = torch.jit.script(TestNoModuleInterface())
        # proxy_mod is swapped with a new ScriptModule that shares the same JIT type; this should succeed.
scripted_no_module_interface.proxy_mod = torch.jit.script(OrigModule())
        # proxy_mod is neither a module interface nor of the same JIT type; this should fail
with self.assertRaisesRegex(RuntimeError,
r"Expected a value of type '__torch__.jit.test_module_interface.OrigModule \(.*\)' " +
r"for field 'proxy_mod', but found '__torch__.jit.test_module_interface.NewModule \(.*\)'"):
scripted_no_module_interface.proxy_mod = torch.jit.script(NewModule())
def test_script_module_as_interface_swap(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
def forward(self, input: Tensor) -> Tensor:
pass
class OrigScriptModule(torch.jit.ScriptModule):
def __init__(self):
super(OrigScriptModule, self).__init__()
@torch.jit.script_method
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 + inp2 + 1
@torch.jit.script_method
def forward(self, input: Tensor) -> Tensor:
return input + self.one(input, input) + 1
class NewScriptModule(torch.jit.ScriptModule):
def __init__(self):
super(NewScriptModule, self).__init__()
@torch.jit.script_method
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
return inp1 * inp2 + 1
@torch.jit.script_method
def forward(self, input: Tensor) -> Tensor:
return self.one(input, input + 1)
class TestNNModuleWithScriptModule(nn.Module):
proxy_mod : ModuleInterface
def __init__(self):
super(TestNNModuleWithScriptModule, self).__init__()
self.proxy_mod = OrigScriptModule()
def forward(self, input: Tensor) -> Tensor:
return self.proxy_mod.forward(input)
input = torch.randn(3, 4)
scripted_mod = torch.jit.script(TestNNModuleWithScriptModule())
self.assertEqual(scripted_mod(input), 3 * input + 2)
scripted_mod.proxy_mod = NewScriptModule()
self.assertEqual(scripted_mod(input), input * (input + 1) + 1)
    # The call to proxy_mod's forward cannot be inlined. Make sure
    # freezing throws an error for now.
def test_freeze_module_with_interface(self):
class SubModule(torch.nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.b = 20
def forward(self, x):
return self.b
class OrigMod(torch.nn.Module):
def __init__(self):
super(OrigMod, self).__init__()
self.a = 0
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> int:
pass
class TestModule(torch.nn.Module):
proxy_mod : ModInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule() # folded
def forward(self, x):
return self.proxy_mod(x) + self.sub(x)
m = torch.jit.script(TestModule())
m.eval()
mf = torch._C._freeze_module(m._c)
# Assume interface has no aliasing
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
input = torch.tensor([1])
out_s = m.forward(input)
out_f = mf.forward(input)
self.assertEqual(out_s, out_f)
def test_freeze_module_with_setattr_in_interface(self):
class SubModule(torch.nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.b = 20
def forward(self, x):
self.b += 2
return self.b
@torch.jit.export
def getb(self, x):
return self.b
class OrigMod(torch.nn.Module):
def __init__(self):
super(OrigMod, self).__init__()
self.a = 0
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> int:
pass
class TestModule(torch.nn.Module):
proxy_mod : ModInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x):
return self.proxy_mod(x) + self.sub.getb(x)
m = torch.jit.script(TestModule())
m.proxy_mod = m.sub
m.eval()
with self.assertRaisesRegex(RuntimeError, "failed to freeze interface attribute 'proxy_mod'"):
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
def test_freeze_module_with_inplace_mutation_in_interface(self):
class SubModule(torch.nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.b = torch.tensor([1.5])
def forward(self, x):
self.b[0] += 2
return self.b
@torch.jit.export
def getb(self, x):
return self.b
class OrigMod(torch.nn.Module):
def __init__(self):
super(OrigMod, self).__init__()
self.a = torch.tensor([0.5])
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> Tensor:
pass
class TestModule(torch.nn.Module):
proxy_mod : ModInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x):
y = self.proxy_mod(x)
z = self.sub.getb(x)
return y[0] + z[0]
m = torch.jit.script(TestModule())
m.proxy_mod = m.sub
m.sub.b = m.proxy_mod.b
m.eval()
with self.assertRaisesRegex(RuntimeError, "failed to freeze interface attribute 'proxy_mod'"):
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
def test_freeze_module_with_mutated_interface(self):
class SubModule(torch.nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.b = torch.tensor([1.5])
def forward(self, x):
return self.b
@torch.jit.export
def getb(self, x):
return self.b
class OrigMod(torch.nn.Module):
def __init__(self):
super(OrigMod, self).__init__()
self.a = torch.tensor([0.5])
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> Tensor:
pass
class TestModule(torch.nn.Module):
proxy_mod : ModInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x):
self.proxy_mod = self.sub
y = self.proxy_mod(x)
z = self.sub.getb(x)
return y[0] + z[0]
m = torch.jit.script(TestModule())
m.eval()
with self.assertRaisesRegex(RuntimeError, "failed to freeze interface attribute 'proxy_mod'"):
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
def test_freeze_module_with_interface_and_fork(self):
class SubModule(torch.nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.b = torch.tensor([1.5])
def forward(self, x):
self.b[0] += 3.2
return self.b
class OrigMod(torch.nn.Module):
def __init__(self):
super(OrigMod, self).__init__()
self.a = torch.tensor([0.5])
def forward(self, x):
return self.a
@torch.jit.interface
class ModInterface(torch.nn.Module):
def forward(self, x: Tensor) -> Tensor:
pass
class TestModule(torch.nn.Module):
proxy_mod : ModInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigMod()
self.sub = SubModule()
def forward(self, x):
y = self.proxy_mod(x)
z = self.sub(x)
return y + z
class MainModule(torch.nn.Module):
def __init__(self):
super(MainModule, self).__init__()
self.test = TestModule()
def forward(self, x):
fut = torch.jit._fork(self.test.forward, x)
y = self.test(x)
z = torch.jit._wait(fut)
return y + z
m = torch.jit.script(MainModule())
m.eval()
mf = torch._C._freeze_module(m._c, freezeInterfaces=True)
def test_module_apis_interface(self):
@torch.jit.interface
class ModuleInterface(nn.Module):
def one(self, inp1: Tensor, inp2: Tensor) -> Tensor:
pass
class TestModule(nn.Module):
proxy_mod : ModuleInterface
def __init__(self):
super(TestModule, self).__init__()
self.proxy_mod = OrigModule()
def forward(self, input):
return input * 2
@torch.jit.export
def method(self, input):
for module in self.modules():
input = module(input)
return input
with self.assertRaisesRegex(Exception, "Could not compile"):
scripted_mod = torch.jit.script(TestModule())
|
pytorch-master
|
test/jit/test_module_interface.py
|
# Owner(s): ["oncall: jit"]
import io
import os
import sys
import torch
import zipfile
from torch.testing import FileCheck
from typing import Union
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestUpgraders(JitTestCase):
def _load_model_version(self, loaded_model):
buffer = io.BytesIO()
torch.jit.save(loaded_model, buffer)
buffer.seek(0)
zipped_model = zipfile.ZipFile(buffer)
# there was a change in how we store the version number
# in a package between versions 3 and 7,
# so we have to check for both locations.
try:
version = int(zipped_model.read('archive/version').decode("utf-8"))
return version
except KeyError:
version = int(zipped_model.read('archive/.data/version').decode("utf-8"))
return version
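# A minimal usage sketch (hypothetical, not part of the original tests) of the
# helper above: any loaded ScriptModule/ScriptFunction can be re-serialized and
# its archive version inspected, e.g.
#   loaded = torch.jit.load(buffer)
#   assert self._load_model_version(loaded) >= 3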
# TODO (tugsuu) We should ideally be generating these test cases.
def test_populated_upgrader_graph(self):
@torch.jit.script
def f():
return 0
buffer = io.BytesIO()
torch.jit.save(f, buffer)
buffer.seek(0)
torch.jit.load(buffer)
upgraders_size = torch._C._get_upgraders_map_size()
upgraders_dump = torch._C._dump_upgraders_map()
# make sure we populate the upgrader map only once,
# so we load the model again and check that the upgrader map
# has the same content
buffer.seek(0)
torch.jit.load(buffer)
upgraders_size_second_time = torch._C._get_upgraders_map_size()
upgraders_dump_second_time = torch._C._dump_upgraders_map()
self.assertTrue(upgraders_size == upgraders_size_second_time)
self.assertTrue(upgraders_dump == upgraders_dump_second_time)
def test_add_value_to_version_map(self):
map_before_test = torch._C._get_operator_version_map()
upgrader_bumped_version = 3
upgrader_name = "_test_serialization_subcmul_0_2"
upgrader_schema = "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=2) -> Tensor"
dummy_entry = torch._C._UpgraderEntry(upgrader_bumped_version, upgrader_name, upgrader_schema)
torch._C._test_only_add_entry_to_op_version_map("aten::_test_serialization_subcmul", dummy_entry)
map_after_test = torch._C._get_operator_version_map()
self.assertTrue("aten::_test_serialization_subcmul" in map_after_test)
self.assertTrue(len(map_after_test) - len(map_before_test) == 1)
torch._C._test_only_remove_entry_to_op_version_map("aten::_test_serialization_subcmul")
map_after_remove_test = torch._C._get_operator_version_map()
self.assertTrue("aten::_test_serialization_subcmul" not in map_after_remove_test)
self.assertEqual(len(map_after_remove_test), len(map_before_test))
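# Note: the _UpgraderEntry above bundles the version at which the operator was
# bumped, the name of the upgrader graph, and the old operator schema string;
# the paired _test_only_add/_test_only_remove calls register and then unregister
# it so the version map is left unchanged after the assertions.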
def test_populated_test_upgrader_graph(self):
@torch.jit.script
def f():
return 0
buffer = io.BytesIO()
torch.jit.save(f, buffer)
buffer.seek(0)
torch.jit.load(buffer)
# upgrader map should have populated now
upgraders_size = torch._C._get_upgraders_map_size()
test_map = {"a": str(torch._C.Graph()), "c": str(torch._C.Graph())}
torch._C._test_only_populate_upgraders(test_map)
upgraders_size_after_test = torch._C._get_upgraders_map_size()
self.assertEqual(upgraders_size_after_test - upgraders_size, 2)
upgraders_dump = torch._C._dump_upgraders_map()
self.assertTrue("a" in upgraders_dump)
self.assertTrue("c" in upgraders_dump)
torch._C._test_only_remove_upgraders(test_map)
upgraders_size_after_remove_test = torch._C._get_upgraders_map_size()
self.assertTrue(upgraders_size_after_remove_test == upgraders_size)
upgraders_dump_after_remove_test = torch._C._dump_upgraders_map()
self.assertTrue("a" not in upgraders_dump_after_remove_test)
self.assertTrue("c" not in upgraders_dump_after_remove_test)
def test_aten_div_tensor_at_3(self):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_div_tensor_v3.pt"
loaded_model = torch.jit.load(model_path)
# there are 3 aten::div calls in this model,
# and the upgrader for aten::div uses two divs
# because of its if/else branch
FileCheck().check("prim::If").run(loaded_model.graph)
FileCheck().check_count("aten::div", 6).run(loaded_model.graph)
buffer = io.BytesIO()
torch.jit.save(loaded_model, buffer)
buffer.seek(0)
version = self._load_model_version(loaded_model)
self.assertTrue(version == 4)
loaded_model_twice = torch.jit.load(buffer)
# we compare by code because graph variable names
# can be different every time
self.assertEqual(loaded_model.code, loaded_model_twice.code)
def test_aten_full_other_variants(self):
def test_func():
a = torch.full([4, 5, 6], 4, names=["a", "b", "c"], dtype=torch.int64)
return a
scripted_func = torch.jit.script(test_func)
buffer = io.BytesIO()
torch.jit.save(scripted_func, buffer)
current_flag_value = torch._C._get_version_calculator_flag()
# calculate based on old version
torch._C._calculate_package_version_based_on_upgraders(False)
buffer.seek(0)
loaded_func = torch.jit.load(buffer)
version = self._load_model_version(loaded_func)
self.assertTrue(version == 5)
# calculate based on new version
torch._C._calculate_package_version_based_on_upgraders(True)
buffer.seek(0)
loaded_func = torch.jit.load(buffer)
version = self._load_model_version(loaded_func)
self.assertTrue(version == 5)
# make sure we preserve the old behaviour
torch._C._calculate_package_version_based_on_upgraders(current_flag_value)
def test_aten_linspace(self):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_v7.ptl"
loaded_model = torch.jit.load(model_path)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
for (a, b) in sample_inputs:
output_with_step, output_without_step = loaded_model(a, b)
# when no steps argument is given, the default of 100 should have been used
self.assertTrue(output_without_step.size(dim=0) == 100)
self.assertTrue(output_with_step.size(dim=0) == 5)
version = self._load_model_version(loaded_model)
self.assertTrue(version == 8)
def test_aten_linspace_out(self):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_linspace_out_v7.ptl"
loaded_model = torch.jit.load(model_path)
sample_inputs = (
(3, 10, torch.empty((100,), dtype=torch.int64)),
(-10, 10, torch.empty((100,), dtype=torch.int64)),
(4.0, 6.0, torch.empty((100,), dtype=torch.float64)),
(3 + 4j, 4 + 5j, torch.empty((100,), dtype=torch.complex64)),
)
for (a, b, c) in sample_inputs:
output = loaded_model(a, b, c)
# when no steps argument is given, the default of 100 should have been used
self.assertTrue(output.size(dim=0) == 100)
version = self._load_model_version(loaded_model)
self.assertTrue(version == 8)
def test_aten_logspace(self):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_v8.ptl"
loaded_model = torch.jit.load(model_path)
sample_inputs = ((3, 10), (-10, 10), (4.0, 6.0), (3 + 4j, 4 + 5j))
for (a, b) in sample_inputs:
output_with_step, output_without_step = loaded_model(a, b)
# when no steps argument is given, the default of 100 should have been used
self.assertTrue(output_without_step.size(dim=0) == 100)
self.assertTrue(output_with_step.size(dim=0) == 5)
version = self._load_model_version(loaded_model)
self.assertTrue(version == 9)
def test_aten_logspace_out(self):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_logspace_out_v8.ptl"
loaded_model = torch.jit.load(model_path)
sample_inputs = (
(3, 10, torch.empty((100,), dtype=torch.int64)),
(-10, 10, torch.empty((100,), dtype=torch.int64)),
(4.0, 6.0, torch.empty((100,), dtype=torch.float64)),
(3 + 4j, 4 + 5j, torch.empty((100,), dtype=torch.complex64)),
)
for (a, b, c) in sample_inputs:
output = loaded_model(a, b, c)
# when no steps argument is given, the default of 100 should have been used
self.assertTrue(output.size(dim=0) == 100)
version = self._load_model_version(loaded_model)
self.assertTrue(version == 9)
def test_aten_test_serialization(self):
model_path = pytorch_test_dir + "/jit/fixtures/_test_serialization_subcmul_v2.pt"
# add test version entry to the version map
upgrader_bumped_version = 3
upgrader_name = "_test_serialization_subcmul_0_2"
upgrader_schema = "aten::_test_serialization_subcmul(Tensor self, Tensor other, Scalar alpha=2) -> Tensor"
dummy_entry = torch._C._UpgraderEntry(upgrader_bumped_version, upgrader_name, upgrader_schema)
torch._C._test_only_add_entry_to_op_version_map("aten::_test_serialization_subcmul", dummy_entry)
# add test upgrader in the upgraders map
@torch.jit.script
def _test_serialization_subcmul_0_2(self: torch.Tensor, other: torch.Tensor, alpha: Union[int, float] = 2) -> torch.Tensor:
return other - (self * alpha)
torch._C._test_only_populate_upgraders({"_test_serialization_subcmul_0_2": str(_test_serialization_subcmul_0_2.graph)})
# test whether the loader is able to find the test upgraders and apply them to the IR
loaded_model = torch.jit.load(model_path)
FileCheck().check_count("aten::mul", 2).run(loaded_model.graph)
FileCheck().check_count("aten::sub", 2).run(loaded_model.graph)
buffer = io.BytesIO()
torch.jit.save(loaded_model, buffer)
buffer.seek(0)
version = self._load_model_version(loaded_model)
self.assertTrue(version == 3)
loaded_model_twice = torch.jit.load(buffer)
# we compare by code because graph variable names
# can be different every time
self.assertEqual(loaded_model.code, loaded_model_twice.code)
torch._C._test_only_remove_entry_to_op_version_map("aten::_test_serialization_subcmul")
torch._C._test_only_remove_upgraders({"_test_serialization_subcmul_0_2": str(_test_serialization_subcmul_0_2.graph)})
def test_aten_div_scalar_at_3(self):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_div_scalar_float_v3.pt"
loaded_model = torch.jit.load(model_path)
FileCheck().check("prim::If").run(loaded_model.graph)
FileCheck().check_count("aten::div", 2).run(loaded_model.graph)
buffer = io.BytesIO()
torch.jit.save(loaded_model, buffer)
buffer.seek(0)
version = self._load_model_version(loaded_model)
self.assertEqual(version, 4)
loaded_model_twice = torch.jit.load(buffer)
self.assertEqual(loaded_model(torch.Tensor([5.0, 3.0]), 2.0),
loaded_model_twice(torch.Tensor([5.0, 3.0]), 2.0))
def test_aten_div_tensor_out_at_3(self):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_div_tensor_out_v3.pt"
loaded_model = torch.jit.load(model_path)
FileCheck().check("prim::If").run(loaded_model.graph)
FileCheck().check_count("aten::div", 2).run(loaded_model.graph)
buffer = io.BytesIO()
torch.jit.save(loaded_model, buffer)
buffer.seek(0)
version = self._load_model_version(loaded_model)
self.assertTrue(version == 4)
loaded_model_twice = torch.jit.load(buffer)
# we compare by code because graph variable names
# can be different every time
self.assertEqual(loaded_model.code, loaded_model_twice.code)
def test_aten_full_at_4(self):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_full_integer_value_v4.pt"
loaded_model = torch.jit.load(model_path)
FileCheck().check_count("aten::Float", 1).run(loaded_model.graph)
FileCheck().check_count("aten::full", 2).run(loaded_model.graph)
buffer = io.BytesIO()
torch.jit.save(loaded_model, buffer)
buffer.seek(0)
version = self._load_model_version(loaded_model)
self.assertTrue(version == 5)
loaded_model_twice = torch.jit.load(buffer)
# we compare by code because graph variable names
# can be different every time
self.assertEqual(loaded_model.code, loaded_model_twice.code)
def test_aten_full_out_at_4(self):
model_path = pytorch_test_dir + "/jit/fixtures/test_versioned_full_preserved_v4.pt"
loaded_model = torch.jit.load(model_path)
FileCheck().check_count("aten::full", 5).run(loaded_model.graph)
version = self._load_model_version(loaded_model)
self.assertTrue(version == 5)
|
pytorch-master
|
test/jit/test_upgraders.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
# Tests for TorchScript IValues
class TestIValue(JitTestCase):
def test_qscheme_ivalue(self):
def qscheme(x: torch.Tensor):
return x.qscheme()
x = torch.rand(2, 2)
self.checkScript(qscheme, (x,))
|
pytorch-master
|
test/jit/test_ivalue.py
|
# Owner(s): ["oncall: jit"]
# flake8: noqa
from dataclasses import dataclass, field, InitVar
from hypothesis import given, settings, strategies as st
from torch.testing._internal.jit_utils import JitTestCase
from typing import List, Optional
import sys
import torch
import unittest
from enum import Enum
# Example jittable dataclass
@dataclass(order=True)
class Point:
x: float
y: float
norm: Optional[torch.Tensor] = None
def __post_init__(self):
self.norm = (torch.tensor(self.x) ** 2 + torch.tensor(self.y) ** 2) ** 0.5
class MixupScheme(Enum):
INPUT = ["input"]
MANIFOLD = [
"input",
"before_fusion_projection",
"after_fusion_projection",
"after_classifier_projection",
]
@dataclass
class MixupParams:
def __init__(self, alpha: float = 0.125, scheme: MixupScheme = MixupScheme.INPUT):
self.alpha = alpha
self.scheme = scheme
class MixupScheme2(Enum):
A = 1
B = 2
@dataclass
class MixupParams2:
def __init__(self, alpha: float = 0.125, scheme: MixupScheme2 = MixupScheme2.A):
self.alpha = alpha
self.scheme = scheme
@dataclass
class MixupParams3:
def __init__(self, alpha: float = 0.125, scheme: MixupScheme2 = MixupScheme2.A):
self.alpha = alpha
self.scheme = scheme
# Make sure the Meta internal tooling doesn't raise an overflow error
NonHugeFloats = st.floats(min_value=-1e4, max_value=1e4, allow_nan=False)
class TestDataclasses(JitTestCase):
@classmethod
def tearDownClass(cls):
torch._C._jit_clear_class_registry()
# We only support InitVar in JIT dataclasses for Python 3.8+ because it would be very hard
# to support without the `type` attribute on InitVar (see comment in _dataclass_impls.py).
@unittest.skipIf(sys.version_info < (3, 8), "InitVar not supported in Python < 3.8")
def test_init_vars(self):
@torch.jit.script
@dataclass(order=True)
class Point2:
x: float
y: float
norm_p: InitVar[int] = 2
norm: Optional[torch.Tensor] = None
def __post_init__(self, norm_p: int):
self.norm = (torch.tensor(self.x) ** norm_p + torch.tensor(self.y) ** norm_p) ** (1 / norm_p)
def fn(x: float, y: float, p: int):
pt = Point2(x, y, p)
return pt.norm
self.checkScript(fn, (1.0, 2.0, 3))
# Sort of tests both __post_init__ and optional fields
@settings(deadline=None)
@given(NonHugeFloats, NonHugeFloats)
def test__post_init__(self, x, y):
P = torch.jit.script(Point)
def fn(x: float, y: float):
pt = P(x, y)
return pt.norm
self.checkScript(fn, [x, y])
@settings(deadline=None)
@given(st.tuples(NonHugeFloats, NonHugeFloats), st.tuples(NonHugeFloats, NonHugeFloats))
def test_comparators(self, pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
P = torch.jit.script(Point)
def compare(x1: float, y1: float, x2: float, y2: float):
pt1 = P(x1, y1)
pt2 = P(x2, y2)
return (
pt1 == pt2,
# pt1 != pt2, # TODO: Modify interpreter to auto-resolve (a != b) to not (a == b) when there's no __ne__
pt1 < pt2,
pt1 <= pt2,
pt1 > pt2,
pt1 >= pt2,
)
self.checkScript(compare, [x1, y1, x2, y2])
def test_default_factories(self):
@dataclass
class Foo(object):
x: List[int] = field(default_factory=list)
with self.assertRaises(NotImplementedError):
torch.jit.script(Foo)
def fn():
foo = Foo()
return foo.x
torch.jit.script(fn)()
# The user should be able to write their own __eq__ implementation
# without us overriding it.
def test_custom__eq__(self):
@torch.jit.script
@dataclass
class CustomEq:
a: int
b: int
def __eq__(self, other: 'CustomEq') -> bool:
return self.a == other.a # ignore the b field
def fn(a: int, b1: int, b2: int):
pt1 = CustomEq(a, b1)
pt2 = CustomEq(a, b2)
return pt1 == pt2
self.checkScript(fn, [1, 2, 3])
def test_no_source(self):
with self.assertRaises(RuntimeError):
# using a list as an Enum value is not supported
torch.jit.script(MixupParams)
torch.jit.script(MixupParams2) # don't throw
def test_use_unregistered_dataclass_raises(self):
def f(a: MixupParams3):
return 0
with self.assertRaises(OSError):
torch.jit.script(f)
|
pytorch-master
|
test/jit/test_dataclasses.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import unittest
import torch
# as with the test_jit tests, this requires the global default dtype to be set
torch.set_default_dtype(torch.double)
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, enable_profiling_mode
from torch.testing._internal.jit_metaprogramming_utils import try_get_nn_module_compiled_mod_and_inputs, \
get_nn_mod_test_name, get_all_nn_module_tests, nn_functional_tests, get_nn_functional_compiled_fn_and_inputs
from torch.testing._internal.common_utils import run_tests, suppress_warnings, IS_FBCODE
def num_ifs_loops(graph):
graph_str = str(graph)
# only look at body of graph
graph_body = graph_str[0:graph_str.find("return")]
return graph_body.count("prim::Loop") + graph_body.count("prim::If")
def num_non_tensor_nodes(block):
num_non_tensor = 0
for node in block.nodes():
kind = node.kind()
# GetAttr nodes don't provide a useful signal here, since they are non-optimizable except with freezing.
# Constants are not executed and bailouts should be separate tests, so neither provides a useful signal here.
if kind == "prim::Constant" or "prim::Bailout" in kind or "GetAttr" in kind:
continue
for b in node.blocks():
num_non_tensor += num_non_tensor_nodes(b)
tensor_out = False
for out in node.outputs():
if "Tensor" in str(out.type()):
tensor_out = True
break
num_non_tensor += int(not tensor_out)
return num_non_tensor
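# Both helpers above are applied to torch.jit.last_executed_optimized_graph()
# in the tests below, giving a rough per-test complexity statistic
# (if/loop count and count of non-tensor-producing ops).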
class TestComplexity(JitTestCase):
def setUp(self):
super(TestComplexity, self).setUp()
self.grad_enabled = torch.is_grad_enabled()
torch.set_grad_enabled(False)
def tearDown(self):
super(TestComplexity, self).tearDown()
torch.set_grad_enabled(self.grad_enabled)
@suppress_warnings
def test_generated_functional_tests(self):
with enable_profiling_mode():
stats = [("Name", "Ifs/Loops", "non-tensor ops")]
for test in nn_functional_tests:
test_name = test[0]
fn, inputs = get_nn_functional_compiled_fn_and_inputs(*test)
for _ in range(6):
fn(*inputs)
g = torch.jit.last_executed_optimized_graph()
stats.append((test_name, num_ifs_loops(g), num_non_tensor_nodes(g)))
for line in stats:
print(line)
@suppress_warnings
@unittest.skipIf(IS_FBCODE, "Causes a RecursionError in fbcode")
def test_nn_module_tests(self):
with enable_profiling_mode():
stats = [("Name", "Ifs/Loops", "non-tensor ops")]
for test in get_all_nn_module_tests():
out = try_get_nn_module_compiled_mod_and_inputs(**test)
if not out:
continue
mod, inputs = out
test_name = get_nn_mod_test_name(**test)
for _ in range(6):
mod(*inputs)
g = torch.jit.last_executed_optimized_graph()
stats.append((test_name, num_ifs_loops(g), num_non_tensor_nodes(g)))
for line in stats:
print(line)
if __name__ == '__main__':
run_tests()
|
pytorch-master
|
test/jit/test_complexity.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.testing._internal.common_utils import TestCase
class TestAtenPow(TestCase):
def test_aten_pow_zero_negative_exponent(self):
'''
1. Testing a = int, b = int
'''
@torch.jit.script
def fn_int_int(a: int, b: int):
return a ** b
# Existing correct behaviors of aten::pow
self.assertEqual(fn_int_int(2, 1), 2 ** 1)
self.assertEqual(fn_int_int(2, 0), 2 ** 0)
self.assertEqual(fn_int_int(2, -2), 2 ** (-2))
self.assertEqual(fn_int_int(-2, 2), (-2) ** 2)
self.assertEqual(fn_int_int(-2, 0), (-2) ** 0)
self.assertEqual(fn_int_int(-2, -2), (-2) ** (-2))
self.assertEqual(fn_int_int(-2, -1), (-2) ** (-1))
self.assertEqual(fn_int_int(0, 2), 0 ** 2)
self.assertEqual(fn_int_int(0, 0), 0 ** 0)
# zero base and negative exponent case that should trigger a RuntimeError
self.assertRaises(RuntimeError, fn_int_int, 0, -2)
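# (a zero base with a negative integral exponent would require dividing by
# zero, which is why aten::pow raises here)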
'''
2. Testing a = int, b = float
'''
@torch.jit.script
def fn_int_float(a: int, b: float):
return a ** b
# Existing correct behaviors of aten::pow
self.assertEqual(fn_int_float(2, 2.5), 2 ** 2.5)
self.assertEqual(fn_int_float(2, -2.5), 2 ** (-2.5))
self.assertEqual(fn_int_float(2, -0.0), 2 ** (-0.0))
self.assertEqual(fn_int_float(2, 0.0), 2 ** (0.0))
self.assertEqual(fn_int_float(-2, 2.0), (-2) ** 2.0)
self.assertEqual(fn_int_float(-2, -2.0), (-2) ** (-2.0))
self.assertEqual(fn_int_float(-2, -3.0), (-2) ** (-3.0))
self.assertEqual(fn_int_float(-2, -0.0), (-2) ** (-0.0))
self.assertEqual(fn_int_float(-2, 0.0), (-2) ** (0.0))
self.assertEqual(fn_int_float(0, 2.0), 0 ** 2.0)
self.assertEqual(fn_int_float(0, 0.5), 0 ** 0.5)
self.assertEqual(fn_int_float(0, 0.0), 0 ** 0.0)
self.assertEqual(fn_int_float(0, -0.0), 0 ** (-0.0))
# zero base and negative exponent case that should trigger a RuntimeError
self.assertRaises(RuntimeError, fn_int_float, 0, -2.5)
'''
3. Testing a = float, b = int
'''
@torch.jit.script
def fn_float_int(a: float, b: int):
return a ** b
# Existing correct behaviors of aten::pow
self.assertEqual(fn_float_int(2.5, 2), 2.5 ** 2)
self.assertEqual(fn_float_int(2.5, -2), 2.5 ** (-2))
self.assertEqual(fn_float_int(2.5, -0), 2.5 ** (-0))
self.assertEqual(fn_float_int(2.5, 0), 2.5 ** 0)
self.assertEqual(fn_float_int(-2.5, 2), (-2.5) ** 2)
self.assertEqual(fn_float_int(-2.5, -2), (-2.5) ** (-2))
self.assertEqual(fn_float_int(-2.5, -3), (-2.5) ** (-3))
self.assertEqual(fn_float_int(-2.5, -0), (-2.5) ** (-0))
self.assertEqual(fn_float_int(-2.5, 0), (-2.5) ** 0)
self.assertEqual(fn_float_int(0.0, 2), 0 ** 2)
self.assertEqual(fn_float_int(0.0, 0), 0 ** 0)
self.assertEqual(fn_float_int(0.0, -0), 0 ** (-0))
# zero base and negative exponent case that should trigger a RuntimeError
self.assertRaises(RuntimeError, fn_float_int, 0.0, -2)
'''
4. Testing a = float, b = float
'''
@torch.jit.script
def fn_float_float(a: float, b: float):
return a ** b
# Existing correct behaviors of aten::pow
self.assertEqual(fn_float_float(2.5, 2.0), 2.5 ** 2.0)
self.assertEqual(fn_float_float(2.5, -2.0), 2.5 ** (-2.0))
self.assertEqual(fn_float_float(2.5, -0.0), 2.5 ** (-0.0))
self.assertEqual(fn_float_float(2.5, 0.0), 2.5 ** 0.0)
self.assertEqual(fn_float_float(-2.5, 2.0), (-2.5) ** 2.0)
self.assertEqual(fn_float_float(-2.5, -2.0), (-2.5) ** (-2.0))
self.assertEqual(fn_float_float(-2.5, -3.0), (-2.5) ** (-3.0))
self.assertEqual(fn_float_float(-2.5, -0.0), (-2.5) ** (-0.0))
self.assertEqual(fn_float_float(-2.5, 0.0), (-2.5) ** 0.0)
self.assertEqual(fn_float_float(0.0, 2.0), 0.0 ** 2.0)
self.assertEqual(fn_float_float(0.0, 0.0), 0.0 ** 0.0)
self.assertEqual(fn_float_float(0.0, -0.0), 0.0 ** (-0.0))
# zero base and negative exponent case that should trigger a RuntimeError
self.assertRaises(RuntimeError, fn_float_float, 0.0, -2.0)
|
pytorch-master
|
test/jit/test_aten_pow.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import unittest
from typing import Tuple
import torch
from jit.test_hooks_modules import (
ModuleDirectforwardSubmodCall, ModuleForwardSingleInput,
ModuleForwardTupleInput, create_forward_tuple_input,
create_module_forward_multiple_inputs, create_module_forward_single_input,
create_module_hook_return_nothing,
create_module_multiple_hooks_multiple_inputs,
create_module_multiple_hooks_single_input, create_module_no_forward_input,
create_module_same_hook_repeated, create_submodule_forward_multiple_inputs,
create_submodule_forward_single_input,
create_submodule_forward_single_input_return_not_tupled,
create_submodule_hook_return_nothing,
create_submodule_multiple_hooks_multiple_inputs,
create_submodule_multiple_hooks_single_input,
create_submodule_no_forward_input, create_submodule_same_hook_repeated,
create_submodule_to_call_directly_with_hooks)
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
# Tests for JIT forward hooks and pre-hooks
class TestHooks(JitTestCase):
def test_module_no_forward_input(self):
self.checkModule(create_module_no_forward_input(), ())
def test_submodule_no_forward_input(self):
self.checkModule(create_submodule_no_forward_input(), ())
def test_module_forward_multiple_inputs(self):
self.checkModule(
create_module_forward_multiple_inputs(), (["a"], "no_pre_hook")
)
def test_module_multiple_hooks_multiple_inputs(self):
self.checkModule(
create_module_multiple_hooks_multiple_inputs(), (["a"], "no_pre_hook")
)
def test_module_forward_single_input(self):
self.checkModule(create_module_forward_single_input(), ("a",))
def test_module_same_hook_repeated(self):
self.checkModule(create_module_same_hook_repeated(), ("a",))
def test_module_hook_return_nothing(self):
self.checkModule(create_module_hook_return_nothing(), ("a",))
def test_module_multiple_hooks_single_input(self):
self.checkModule(create_module_multiple_hooks_single_input(), ("a",))
def test_submodule_forward_multiple_inputs(self):
self.checkModule(
create_submodule_forward_multiple_inputs(), (["a"], "no_pre_hook")
)
def test_submodule_multiple_hooks_multiple_inputs(self):
self.checkModule(
create_submodule_multiple_hooks_multiple_inputs(), (["a"], "no_pre_hook"),
)
def test_submodule_forward_single_input(self):
self.checkModule(create_submodule_forward_single_input(), ("a",))
def test_submodule_called_directly_with_hooks(self):
module = create_submodule_to_call_directly_with_hooks()
module_scripted = torch.jit.script(module)
submodule = module.submodule
scripted_submodule = module_scripted.submodule
self.assertEqual(submodule("a"), scripted_submodule("a"))
def test_submodule_same_hook_repeated(self):
self.checkModule(create_submodule_same_hook_repeated(), ("a",))
def test_submodule_hook_return_nothing(self):
self.checkModule(create_submodule_hook_return_nothing(), ("a",))
def test_submodule_multiple_hooks_single_input(self):
self.checkModule(create_submodule_multiple_hooks_single_input(), (["a"]))
def test_forward_tuple_input(self):
self.checkModule(create_forward_tuple_input(), ((3,),))
def test_submodule_forward_single_input_return_not_tupled(self):
self.checkModule(
create_submodule_forward_single_input_return_not_tupled(), ("a",)
)
def test_hook_method_name_collision(self):
# Hooks can't have the same name as methods.
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def foo(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "inner_mod_name"
assert input[0] == "a_outermod"
return ("pre_hook_override_name",)
m.submodule.register_forward_pre_hook(foo)
with self.assertRaisesRegex(
RuntimeError,
"Can't define hook: foo on class: .+ "
"because a method or hook with that name already exists.",
):
torch.jit.script(m)
def test_hook_hook_name_collision(self):
# Test the edge case of two hooks sharing a name but not a Python definition
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def prehook(self, input: Tuple[str]) -> Tuple[str]:
return "This is the first hook"
m.submodule.register_forward_pre_hook(prehook)
def prehook(self, input: Tuple[str]) -> Tuple[str]:
return "This is the second hook"
m.submodule.register_forward_pre_hook(prehook)
with self.assertRaisesRegex(
RuntimeError,
"Pre-hook '.+' on .+ has at least two different python "
"definitions. Please use unique names for all hooks.",
):
torch.jit.script(m)
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def hook(self, input: Tuple[str], output: str):
return "This is the first hook"
m.submodule.register_forward_hook(hook)
def hook(self, input: Tuple[str]):
return "This is the second hook"
m.submodule.register_forward_hook(hook)
with self.assertRaisesRegex(
RuntimeError,
"Hook '.+' on .+ has at least two different python "
"definitions. Please use unique names for all hooks.",
):
torch.jit.script(m)
def test_module_direct_forward_invocation(self):
# Test that hooks are only invoked when the module is
# called directly and not when forward is called.
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> Tuple[str]:
return ("pre_hook_override_name",)
def forward_hook(self, input: Tuple[str], output: str):
assert self.name == "outer_mod_name"
assert input == ("pre_hook_override_name",)
output = output + "_fh"
return output
m.register_forward_pre_hook(pre_hook)
m.register_forward_hook(forward_hook)
m_scripted = torch.jit.script(m)
self.assertEqual(m.forward("a"), m_scripted.forward("a"))
self.assertNotEqual(m_scripted("a"), m_scripted.forward("a"))
def test_submodule_direct_forward_invocation(self):
m_submod_forward_call = ModuleDirectforwardSubmodCall(
"outer_mod_name", "inner_mod_name"
)
m_submod_call = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> Tuple[str]:
return ("pre_hook_override_name",)
def forward_hook(self, input: Tuple[str], output: str):
assert input == ("pre_hook_override_name",)
return output + "_fh"
m_submod_forward_call.submodule.register_forward_pre_hook(pre_hook)
m_submod_forward_call.submodule.register_forward_hook(forward_hook)
m_submod_call.submodule.register_forward_pre_hook(pre_hook)
m_submod_call.submodule.register_forward_hook(forward_hook)
m_submod_forward_call_scripted = torch.jit.script(m_submod_forward_call)
m_submod_call_scripted = torch.jit.script(m_submod_call)
self.assertEqual(
m_submod_forward_call_scripted("a"), m_submod_forward_call("a")
)
self.assertNotEqual(
m_submod_forward_call_scripted("a"), m_submod_call_scripted("a")
)
# TODO: add this test back once we've figured out how to print the error msg
@unittest.skip
def test_hook_compilation_hint(self):
# Tests whether the hook error message is printed out when erroring after the schema check.
# Useful for when the user is scripting hooks without being aware of it.
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
def pre_hook(self, input: Tuple[str]) -> Tuple[str]:
assert self.name == "outer_mod_name"
assert input[4] == "a" # out of bounds tuple range
return ("pre_hook_override_name",)
m.register_forward_pre_hook(pre_hook)
with self.assertRaisesRegex(
RuntimeError,
"This error occured while scripting the forward pre-hook 'pre_hook'",
):
torch.jit.script(m)
def test_wrong_pre_hook_signatures(self):
# correct signature: pre_hook_c(self, input: Tuple[str])
def pre_hook_wrong_input1(self, input: Tuple[None]) -> Tuple[str]:
return ("hello",)
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_pre_hook(pre_hook_wrong_input1)
with self.assertRaisesRegex(
RuntimeError, "has the wrong inner types for the input tuple argument",
):
torch.jit.script(m)
def pre_hook_wrong_input2(self, input: Tuple[str], input2: str) -> Tuple[str]:
return ("hello",)
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_pre_hook(pre_hook_wrong_input2)
with self.assertRaisesRegex(
RuntimeError,
"was expected to only have exactly 2 inputs but it had 3 inputs",
):
torch.jit.script(m)
def pre_hook_wrong_input3(self, input: int) -> Tuple[str]:
return ("hello",)
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_pre_hook(pre_hook_wrong_input3)
with self.assertRaisesRegex(
RuntimeError,
"expected the input argument to be typed as a Tuple but"
" found type: 'int' instead",
):
torch.jit.script(m)
def pre_hook_wrong_output(self, input: Tuple[str]) -> int:
return 1 # expecting Tuple[str], str, or None
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_pre_hook(pre_hook_wrong_output)
with self.assertRaisesRegex(
RuntimeError, "returned the wrong type of: 'int'",
):
torch.jit.script(m)
def pre_hook_no_output_annotation(self, input: Tuple[str]):
return 1 # expecting Tuple[str], str, or None
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_pre_hook(pre_hook_no_output_annotation)
with self.assertRaisesRegex(
RuntimeError,
"is missing a return annotation. Return annotations"
" are required, please add one.",
):
torch.jit.script(m)
def pre_hook_wrong_tuple_return(self, input: Tuple[Tuple[int]]) -> Tuple[int]:
return (11,) # doesn't work with eager, inner tuple lost
m = ModuleForwardTupleInput("outer_mod_name", "inner_mod_name")
m.register_forward_pre_hook(pre_hook_wrong_tuple_return)
with self.assertRaisesRegex(
RuntimeError,
"When forward has a single tuple input argument, "
"the return needs to be 'None' or a nested tuple containing "
r"forward's input tuple argument as in: 'Tuple\[Tuple\[int\]\]'",
):
torch.jit.script(m)
def test_wrong_hook_signatures(self):
# correct signature:
# def forward_hook(self, input: Tuple[str], output: str)
def forward_hook_wrong_input1(self, input: Tuple[str, str], output: str):
return output
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_hook(forward_hook_wrong_input1)
with self.assertRaisesRegex(
RuntimeError,
"has the wrong number of contained types for the "
r"input argument's Tuple. Received type: 'Tuple\[str, str\]'",
):
torch.jit.script(m)
def forward_hook_wrong_input2(self, input: str, output: str):
return output
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_hook(forward_hook_wrong_input2)
with self.assertRaisesRegex(
RuntimeError,
"expected the input argument to be typed as a Tuple "
"but found type: 'str' instead.",
):
torch.jit.script(m)
def forward_hook_wrong_input3(self, input: Tuple[None], output: str):
return output
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_hook(forward_hook_wrong_input3)
with self.assertRaisesRegex(
RuntimeError,
"has the wrong inner types for the input tuple"
r" argument. Received type: 'Tuple\[NoneType\]'",
):
torch.jit.script(m)
def forward_hook_wrong_output(self, input: Tuple[str], output: Tuple[str]):
return output
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_hook(forward_hook_wrong_output)
with self.assertRaisesRegex(
RuntimeError,
"has the wrong type for the output argument. Received"
r" type: 'Tuple\[str\]'. Expected type: 'str'",
):
torch.jit.script(m)
def forward_hook_correct(self, input: Tuple[str], output: str):
return (output,)
def forward_hook_wrong_output_from_prev_hook(
self, input: Tuple[str], output: str
):
return output
m = ModuleForwardSingleInput("outer_mod_name", "inner_mod_name")
m.register_forward_hook(forward_hook_correct)
m.register_forward_hook(forward_hook_wrong_output_from_prev_hook)
with self.assertRaisesRegex(
RuntimeError,
"has the wrong type for the output argument. "
r"Received type: 'str'. Expected type: 'Tuple\[str\]'",
):
torch.jit.script(m)
|
pytorch-master
|
test/jit/test_hooks.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, warmup_backward, FileCheck
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestProfiler(JitTestCase):
def setUp(self):
self.prev_exec = torch._C._jit_set_profiling_executor(True)
self.prev_profiling = torch._C._get_graph_executor_optimize(True)
self.inline_autodiff = torch._C._debug_set_autodiff_subgraph_inlining(False)
self.texpr_fuser_state = torch._C._jit_texpr_fuser_enabled()
self.can_fuse_on_cpu = torch._C._jit_can_fuse_on_cpu()
torch._C._jit_set_texpr_fuser_enabled(True)
torch._C._jit_override_can_fuse_on_cpu(True)
self.default_dtype = torch.get_default_dtype()
self.old_reduction_enabled = torch._C._jit_set_texpr_reductions_enabled(True)
torch.set_default_dtype(torch.double)
self.old_fusion_inlining = torch._C._debug_get_fusion_group_inlining()
torch._C._debug_set_fusion_group_inlining(False)
self.old_te_must_use_llvm_cpu = torch._C._jit_get_te_must_use_llvm_cpu()
torch._C._jit_set_te_must_use_llvm_cpu(False)
def tearDown(self):
torch._C._jit_set_profiling_executor(self.prev_exec)
torch._C._get_graph_executor_optimize(self.prev_profiling)
torch._C._debug_set_autodiff_subgraph_inlining(self.inline_autodiff)
torch._C._jit_set_texpr_fuser_enabled(self.texpr_fuser_state)
torch._C._jit_override_can_fuse_on_cpu(self.can_fuse_on_cpu)
torch.set_default_dtype(self.default_dtype)
torch._C._jit_set_texpr_reductions_enabled(self.old_reduction_enabled)
torch._C._debug_set_fusion_group_inlining(self.old_fusion_inlining)
torch._C._jit_set_te_must_use_llvm_cpu(self.old_te_must_use_llvm_cpu)
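# Note: setUp/tearDown above save and restore the global JIT executor and
# fuser flags so the profiling-executor tests below don't leak configuration
# into other test files.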
def test_tensor_type_not_determined_by_inputs(self):
@torch.jit.script
def scalar_type_input(x, y, z):
return x + y + 4 + z.item()
x = torch.tensor([2, 2])
scalar_type_input(x, x, torch.tensor(1))
scalar_type_input(x, x, torch.tensor(1))
scalar_type_input(x, x, torch.tensor(1.0))
g = torch.jit.last_executed_optimized_graph()
# item & add should not get pulled into the fusion group;
# we expect to see Fusion Group (item / add) Fusion Group in the IR dump
FileCheck().check("TensorExpr").check("Scalar = aten::item").check_next("Tensor = aten::add").check("TensorExpr").run(g)
@torch.jit.script
def non_const_dtype(x, y, cond: bool):
dtype = torch.int16 if cond else torch.int32
return (x + y + 3).sum(dtype=dtype)
non_const_dtype(x, x, True)
non_const_dtype(x, x, True)
g = torch.jit.last_executed_optimized_graph()
# because dtype is non-const, sum should not get pulled into the Fusion Group
FileCheck().check("TensorExpr").check("TensorExpr").check_not("aten::sum").run(g)
def test_specialize_backward(self):
def test_fuse(a, b):
c = a * b
d = c * b
return d
test_fuse.__disable_jit_function_caching__ = True
scripted_f = torch.jit.script(test_fuse)
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
scripted_f(x, y)
b = scripted_f(x, y)
warmup_backward(b)
g = torch.jit.last_executed_optimized_graph()
# Backward has an if node guarding specializations;
# within the if node's true block there is only one if node
# that guards a tensorexpr group
optimized_block = next(g.findNode("prim::If").blocks())
if_nodes = list(optimized_block.findAllNodes("prim::If"))
self.assertEqual(len(if_nodes), 1)
FileCheck().check("Group[Subgraph").run(str(if_nodes[0]))
# no broadcasts occurred, sum_to_size has been specialized out
self.assertIsNone(optimized_block.findNode("aten::_grad_sum_to_size"))
broadcast_f = torch.jit.script(test_fuse)
x = torch.ones([2, 2], requires_grad=True)
y = torch.ones([1], requires_grad=True)
broadcast_f(x, y)
b = broadcast_f(x, y)
b.backward(torch.ones([2, 2], dtype=torch.float), retain_graph=True)
b.backward(torch.ones([2, 2], dtype=torch.float))
# warmup_backward(b, torch.ones([2, 2], dtype=torch.float))
g = torch.jit.last_executed_optimized_graph()
optimized_block = next(g.findNode("prim::If").blocks())
# broadcasts occurred, so we currently expect to see aten::_grad_sum_to_size
self.assertIsNotNone(optimized_block.findNode("aten::_grad_sum_to_size"))
def test_specialized_types(self):
@torch.jit.script
def test_fuse(a, b):
c = a * b
d = c * b
return d
x = torch.tensor([.5])
for _ in range(3):
test_fuse(x, x)
g = torch.jit.last_executed_optimized_graph()
# Types should remain specialized for typecheck outputs & fusion outputs
FileCheck().check("Double(").check_same("prim::TypeCheck").check_same("\n").check("Double").check_same("TensorExpr").run(g)
# other outputs should not be specialized
FileCheck().check("Tensor = prim::If").run(g)
def test_aliasing_merge(self):
@torch.jit.script
def foo(a, b):
c = a * b
d = c * b
d.add_(b)
e = d * b
return d + e
x = torch.ones(1)
y = torch.ones(1)
foo(x, y)
b = foo(x, y)
g = torch.jit.last_executed_optimized_graph()
self.assertEqual(len(list(g.findAllNodes("prim::TypeCheck"))), 2)
FileCheck().check("TensorExpr").check("aten::add_").check("TensorExpr").run(g)
def test_use_not_profiled(self):
def foo(t1, t2, t3, t4, t: float):
h = t1 + t2 + t3 + t4
if t > 0.5:
# Putting a use of t1 in a never-executed conditional prevents it from being profiled
return t1 + 1
return h
t = torch.rand(8, dtype=torch.float)
foo_script = torch.jit.script(foo)
for _ in range(torch._C._jit_get_num_profiled_runs() + 1):
foo_script(t, t, t, t, 0.1)
self.assertEqual(foo(t, t, t, t, 0.1), foo_script(t, t, t, t, 0.1))
g = torch.jit.last_executed_optimized_graph()
# all adds fused
FileCheck().check("graph").check_not("aten::add").check("prim::If").run(g)
def test_not_fusing_scalar_ops(self):
@torch.jit.script
def foo(x: int, y: int):
return x + y + 2 + 4 + 5 + 6
foo(1, 2)
foo(2, 3)
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_not("TensorExpr").run(g)
def test_not_optimizing_property(self):
@torch.jit.script
def foo(x, y):
return x + y + 1 + 2 + 3, x.size()
x = torch.ones(1)
foo(x, x)
foo(x, x)
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("aten::size").run(g)
x = torch.ones([2, 3, 5])
self.assertEqual(foo(x, x), (x + x + 1 + 2 + 3, x.size()))
def test_fallback_graph_not_specialized(self):
@torch.jit.script
def foo(a, b):
c = a * b
d = c * b
e = d * b
return d + e
x = torch.ones(1)
y = torch.ones(1)
foo(x, y)
foo(x, y)
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("CallFunction").check_next("Tensor = prim::TupleUnpack").run(g)
def test_autograd_fallback_graph(self):
@torch.jit.script
def foo(a, b):
c = a * b
d = c * b
e = d * b
return d + e
x = torch.ones(1, requires_grad=True)
y = torch.ones(1, requires_grad=True)
foo(x, y)
b = foo(x, y)
b.backward(torch.ones([1], dtype=torch.float), retain_graph=True)
b.backward(torch.ones([1], dtype=torch.float))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check("fallback_function").check_next("CallFunction").run(g)
def test_tensor_constant(self):
def foo(a, b):
return a + b + torch.tensor([2])
x = torch.ones(1, requires_grad=False)
foo_script = torch.jit.script(foo)
foo_script(x, x)
foo_script(x, x)
self.assertEqual(foo_script(x, x), foo(x, x))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_count("aten::add", 2, exactly=True).run(g)
def test_local_fusion_strategy(self):
@torch.jit.script
def foo(x):
return x + x + x
torch.jit.set_fusion_strategy([("STATIC", 1)])
for _ in range(3):
foo(torch.rand([10]))
torch.jit.set_fusion_strategy([("STATIC", 10)])
for i in range(10):
foo(torch.rand([i]))
foo(torch.rand([i]))
g = torch.jit.last_executed_optimized_graph()
FileCheck().check_count(":TensorExprGroup", 2, exactly=True).run(g)
def test_iterative_fusion(self):
@torch.jit.script
def foo(a, b, c, d):
a = a + b
b.add_(3)
c = c + b + d
a = a + 1
return a, c
x = torch.ones(1, requires_grad=False)
foo(x, x, x, x)
foo(x, x, x, x)
# when we iterate through the block, we start
# by fusing a = a + b with a = a + 1.
# if we were to continue iteration from that fusion point,
# we would miss the fusion opportunity of c = c + b + d
g = torch.jit.last_executed_optimized_graph()
self.assertEqual(len(list(g.findAllNodes("prim::TensorExprGroup"))), 2)
|
pytorch-master
|
test/jit/test_profiler.py
|
# Owner(s): ["oncall: jit"]
import io
import os
import pathlib
import sys
import unittest
from typing import NamedTuple, Optional
import torch
from torch import Tensor
from torch.testing._internal.common_utils import TemporaryFileName
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, clear_class_registry
ENABLE_FLATBUFFER = os.environ.get("ENABLE_FLATBUFFER", "0") == "1"
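# Hypothetical invocation (assuming the standard runner described below) to
# exercise the flatbuffer tests:
#   ENABLE_FLATBUFFER=1 python test/test_jit.py TestSaveLoadFlatbuffer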
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
class TestSaveLoad(JitTestCase):
def test_different_modules(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
return x
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save(first_script_module, first_saved_module)
first_saved_module.seek(0)
clear_class_registry()
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.foo = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
return x
second_script_module = torch.jit.script(Foo())
second_saved_module = io.BytesIO()
torch.jit.save(torch.jit.script(Foo()), second_saved_module)
second_saved_module.seek(0)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = io.BytesIO()
torch.jit.save(sm, contains_both)
contains_both.seek(0)
sm = torch.jit.load(contains_both)
def test_different_functions(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
def lol(x):
return x
class Foo(torch.nn.Module):
def forward(self, x):
return lol(x)
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save(first_script_module, first_saved_module)
first_saved_module.seek(0)
clear_class_registry()
def lol(x): # noqa: F811
return "hello"
class Foo(torch.nn.Module):
def forward(self, x):
return lol(x)
second_script_module = torch.jit.script(Foo())
second_saved_module = io.BytesIO()
torch.jit.save(torch.jit.script(Foo()), second_saved_module)
second_saved_module.seek(0)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = io.BytesIO()
torch.jit.save(sm, contains_both)
contains_both.seek(0)
sm = torch.jit.load(contains_both)
def test_different_interfaces(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
@torch.jit.interface
class MyInterface(object):
def bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script
class ImplementInterface(object):
def __init__(self):
pass
def bar(self, x):
return x
class Foo(torch.nn.Module):
__annotations__ = {"interface": MyInterface}
def __init__(self):
super().__init__()
self.interface = ImplementInterface()
def forward(self, x):
return self.interface.bar(x)
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save(first_script_module, first_saved_module)
first_saved_module.seek(0)
clear_class_registry()
@torch.jit.interface
class MyInterface(object):
def not_bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script # noqa: F811
class ImplementInterface(object): # noqa: F811
def __init__(self):
pass
def not_bar(self, x):
return x
class Foo(torch.nn.Module):
__annotations__ = {"interface": MyInterface}
def __init__(self):
super().__init__()
self.interface = ImplementInterface()
def forward(self, x):
return self.interface.not_bar(x)
second_script_module = torch.jit.script(Foo())
second_saved_module = io.BytesIO()
torch.jit.save(torch.jit.script(Foo()), second_saved_module)
second_saved_module.seek(0)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = io.BytesIO()
torch.jit.save(sm, contains_both)
contains_both.seek(0)
sm = torch.jit.load(contains_both)
def test_many_collisions(self):
class MyCoolNamedTuple(NamedTuple):
a: int
@torch.jit.interface
class MyInterface(object):
def bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script
class ImplementInterface(object):
def __init__(self):
pass
def bar(self, x):
return x
def lol(x):
return x
class Foo(torch.nn.Module):
interface: MyInterface
def __init__(self):
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
self.interface = ImplementInterface()
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
x = lol(x)
x = self.interface.bar(x)
return x, MyCoolNamedTuple(a=5)
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save(first_script_module, first_saved_module)
first_saved_module.seek(0)
clear_class_registry()
@torch.jit.interface
class MyInterface(object):
def not_bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script # noqa: F811
class ImplementInterface(object): # noqa: F811
def __init__(self):
pass
def not_bar(self, x):
return x
def lol(x): # noqa: F811
return "asdofij"
class MyCoolNamedTuple(NamedTuple): # noqa: F811
a: str
class Foo(torch.nn.Module):
interface: MyInterface
def __init__(self):
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.interface = ImplementInterface()
def forward(self, x):
x = self.foo(x)
self.interface.not_bar(x)
x = lol(x)
return x, MyCoolNamedTuple(a="hello")
second_script_module = torch.jit.script(Foo())
second_saved_module = io.BytesIO()
torch.jit.save(second_script_module, second_saved_module)
second_saved_module.seek(0)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module("second", torch.jit.load(second_saved_module))
self.add_module("first", torch.jit.load(first_saved_module))
def forward(self, x):
x, named_tuple_1 = self.first(x)
x, named_tuple_2 = self.second(x)
return len(x + named_tuple_2.a) + named_tuple_1.a
sm = torch.jit.script(ContainsBoth())
contains_both = io.BytesIO()
torch.jit.save(sm, contains_both)
contains_both.seek(0)
sm = torch.jit.load(contains_both)
def test_save_load_with_extra_files(self):
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return a
# specifically test binary data
value = b"bar\x00\xffbaz"
expected_extra_files = {}
expected_extra_files["foo"] = value
# verify that str to bytes conversion also works
expected_extra_files["foo2"] = "bar"
m = MyMod()
# Save to file.
with TemporaryFileName() as fname:
m.save(fname, _extra_files=expected_extra_files)
# values don't matter
extra_files = {"foo": "", "foo2": None}
torch.jit.load(fname, _extra_files=extra_files)
self.assertEqual(value, extra_files["foo"])
# results always come back as bytes
self.assertEqual(b"bar", extra_files["foo2"])
# Use torch.jit API
torch.jit.save(m, fname, _extra_files=expected_extra_files)
extra_files["foo"] = ""
torch.jit.load(fname, _extra_files=extra_files)
self.assertEqual(value, extra_files["foo"])
# Save to buffer.
buffer = io.BytesIO(m.save_to_buffer(_extra_files=expected_extra_files))
extra_files = {"foo": ""}
torch.jit.load(buffer, _extra_files=extra_files)
self.assertEqual(value, extra_files["foo"])
# Use torch.jit API
buffer = io.BytesIO()
torch.jit.save(m, buffer, _extra_files=expected_extra_files)
buffer.seek(0)
extra_files = {"foo": ""}
torch.jit.load(buffer, _extra_files=extra_files)
self.assertEqual(value, extra_files["foo"])
# Non-existent file 'bar'
with self.assertRaises(RuntimeError):
extra_files["bar"] = ""
torch.jit.load(buffer, _extra_files=extra_files)
def test_save_load_using_pathlib(self):
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return 2 * a
m = MyMod()
# Save then load.
with TemporaryFileName() as fname:
path = pathlib.Path(fname)
m.save(path)
m2 = torch.jit.load(path)
x = torch.tensor([1.0, 2.0, 3.0, 4.0])
self.assertTrue(torch.equal(m(x), m2(x)))
def test_save_nonexit_file(self):
class Foo(torch.nn.Module):
def forward(self, x):
return 2 * x
script_module = torch.jit.script(Foo())
with self.assertRaises(RuntimeError):
script_module.save("NonExist/path/test.pt")
def test_save_namedtuple_input_only(self):
"""
Even if a NamedTuple is only used as an input argument, saving and
loading should work correctly.
"""
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
a: int
class MyModule(torch.nn.Module):
def forward(self, x: FooTuple) -> torch.Tensor:
return torch.tensor(3)
m_loaded = self.getExportImportCopy(torch.jit.script(MyModule()))
output = m_loaded(FooTuple(a=5))
self.assertEqual(output, torch.tensor(3))
def test_save_namedtuple_output_only(self):
"""
Even if a NamedTuple is only used as an output argument, saving and
loading should work correctly.
"""
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
a: int
class MyModule(torch.nn.Module):
def forward(self) -> Optional[FooTuple]:
return None
m_loaded = self.getExportImportCopy(torch.jit.script(MyModule()))
output = m_loaded()
self.assertEqual(output, None)
def test_save_load_params_buffers_submodules(self):
"""
Check that parameters, buffers, and submodules are the same after loading.
"""
class Submodule(torch.nn.Module):
def __init__(self):
super().__init__()
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module("submodule_a", Submodule())
self.register_parameter(
"parameter_a", torch.nn.Parameter(torch.randn(4))
)
self.register_buffer("buffer", torch.randn(4))
self.t = torch.rand(4) # not buffer
self.parameter_b = torch.nn.Parameter(torch.randn(4))
self.submodule_b = Submodule()
m = TestModule()
m_loaded = self.getExportImportCopy(torch.jit.script(m))
# Check submodules.
self.assertEqual(
len(list(m.named_modules())), len(list(m_loaded.named_modules()))
)
for m_s, loaded_s in zip(m.named_modules(), m_loaded.named_modules()):
m_name, _ = m_s
loaded_name, _ = loaded_s
self.assertEqual(m_name, loaded_name)
# Check parameters.
self.assertEqual(len(list(m.parameters())), len(list(m_loaded.parameters())))
for m_p, loaded_p in zip(m.parameters(), m_loaded.parameters()):
self.assertEqual(m_p, loaded_p)
# Check buffers.
self.assertEqual(
len(list(m.named_buffers())), len(list(m_loaded.named_buffers()))
)
for m_b, loaded_b in zip(m.named_buffers(), m_loaded.named_buffers()):
m_name, m_buffer = m_b
loaded_name, loaded_buffer = loaded_b
self.assertEqual(m_name, loaded_name)
self.assertEqual(m_buffer, loaded_buffer)
def test_save_load_meta_tensors(self):
"""
Check that parameters, buffers, and submodules are the same after loading
for a module with parameters and buffers that are meta tensors
"""
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.foo = torch.nn.Linear(2, 3, device="meta")
self.bar = torch.nn.Linear(3, 4)
self.register_buffer("buffer", torch.randn(4, device="meta"))
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
return x
m = Foo()
m_loaded = self.getExportImportCopy(torch.jit.script(m))
# Check submodules.
self.assertEqual(
len(list(m.named_modules())), len(list(m_loaded.named_modules()))
)
self.assertEqual(
set(name for name, _ in m.named_modules()),
set(name for name, _ in m_loaded.named_modules()),
)
# Check parameters.
m_params = dict(m.named_parameters())
m_loaded_params = dict(m_loaded.named_parameters())
self.assertEqual(len(m_params), len(m_loaded_params))
self.assertEqual(m_params, m_loaded_params)
# Check buffers.
m_buffers = dict(m.named_buffers())
m_loaded_buffers = dict(m_loaded.named_buffers())
self.assertEqual(len(m_buffers), len(m_loaded_buffers))
self.assertEqual(m_buffers, m_loaded_buffers)
# Check params and buffers that are/are not meta tensors
self.assertTrue(m_params["foo.weight"].is_meta)
self.assertTrue(m_loaded_params["foo.weight"].is_meta)
self.assertTrue(m_params["foo.bias"].is_meta)
self.assertTrue(m_loaded_params["foo.bias"].is_meta)
self.assertFalse(m_params["bar.weight"].is_meta)
self.assertFalse(m_loaded_params["bar.weight"].is_meta)
self.assertFalse(m_params["bar.bias"].is_meta)
self.assertFalse(m_loaded_params["bar.bias"].is_meta)
self.assertTrue(m_buffers["buffer"].is_meta)
self.assertTrue(m_loaded_buffers["buffer"].is_meta)
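# Helper shared by the flatbuffer tests below: serializes a ScriptModule to an
# in-memory flatbuffer (lite interpreter format) and rewinds the buffer so it
# can be handed directly to torch.jit.load.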
def script_module_to_buffer(script_module):
module_buffer = io.BytesIO(
script_module._save_to_buffer_for_lite_interpreter(_use_flatbuffer=True)
)
module_buffer.seek(0)
return module_buffer
@unittest.skipIf(
not ENABLE_FLATBUFFER, "Need to enable flatbuffer to run the below tests"
)
class TestSaveLoadFlatbuffer(JitTestCase):
def test_different_modules(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
return x
first_script_module = torch.jit.script(Foo())
first_saved_module = script_module_to_buffer(first_script_module)
clear_class_registry()
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.foo = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
return x
second_script_module = torch.jit.script(Foo())
second_saved_module = script_module_to_buffer(second_script_module)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
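# Loading both saved modules into a single parent module forces the two
# same-named types to coexist in one compilation; scripting and running the
# combined module should work without the definitions clobbering each other.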
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module(
"second", torch.jit.load(second_saved_module)
)
self.add_module(
"first", torch.jit.load(first_saved_module)
)
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = script_module_to_buffer(sm)
sm = torch.jit.load(contains_both)
def test_different_functions(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
def lol(x):
return x
class Foo(torch.nn.Module):
def forward(self, x):
return lol(x)
first_script_module = torch.jit.script(Foo())
first_saved_module = script_module_to_buffer(first_script_module)
clear_class_registry()
def lol(x): # noqa: F811
return "hello"
class Foo(torch.nn.Module):
def forward(self, x):
return lol(x)
second_script_module = torch.jit.script(Foo())
second_saved_module = script_module_to_buffer(second_script_module)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module(
"second", torch.jit.load(second_saved_module)
)
self.add_module(
"first", torch.jit.load(first_saved_module)
)
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = script_module_to_buffer(sm)
sm = torch.jit.load(contains_both)
def test_different_interfaces(self):
"""
Exercise the situation where we have the same qualified name
in two different CompilationUnits on save/load.
"""
@torch.jit.interface
class MyInterface(object):
def bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script
class ImplementInterface(object):
def __init__(self):
pass
def bar(self, x):
return x
class Foo(torch.nn.Module):
__annotations__ = {"interface": MyInterface}
def __init__(self):
super().__init__()
self.interface = ImplementInterface()
def forward(self, x):
return self.interface.bar(x)
first_script_module = torch.jit.script(Foo())
first_saved_module = script_module_to_buffer(first_script_module)
clear_class_registry()
@torch.jit.interface
class MyInterface(object):
def not_bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script # noqa: F811
class ImplementInterface(object): # noqa: F811
def __init__(self):
pass
def not_bar(self, x):
return x
class Foo(torch.nn.Module):
__annotations__ = {"interface": MyInterface}
def __init__(self):
super().__init__()
self.interface = ImplementInterface()
def forward(self, x):
return self.interface.not_bar(x)
second_script_module = torch.jit.script(Foo())
second_saved_module = script_module_to_buffer(second_script_module)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module(
"second", torch.jit.load(second_saved_module)
)
self.add_module(
"first", torch.jit.load(first_saved_module)
)
def forward(self, x):
x = self.first(x)
x = self.second(x)
return x
sm = torch.jit.script(ContainsBoth())
contains_both = script_module_to_buffer(sm)
sm = torch.jit.load(contains_both)
def test_many_collisions(self):
class MyCoolNamedTuple(NamedTuple):
a: int
@torch.jit.interface
class MyInterface(object):
def bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script
class ImplementInterface(object):
def __init__(self):
pass
def bar(self, x):
return x
def lol(x):
return x
class Foo(torch.nn.Module):
interface: MyInterface
def __init__(self):
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
self.interface = ImplementInterface()
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
x = lol(x)
x = self.interface.bar(x)
return x, MyCoolNamedTuple(a=5)
first_script_module = torch.jit.script(Foo())
first_saved_module = script_module_to_buffer(first_script_module)
clear_class_registry()
@torch.jit.interface
class MyInterface(object):
def not_bar(self, x: Tensor) -> Tensor:
pass
@torch.jit.script # noqa: F811
class ImplementInterface(object): # noqa: F811
def __init__(self):
pass
def not_bar(self, x):
return x
def lol(x): # noqa: F811
return "asdofij"
class MyCoolNamedTuple(NamedTuple): # noqa: F811
a: str
class Foo(torch.nn.Module):
interface: MyInterface
def __init__(self):
super().__init__()
self.foo = torch.nn.Linear(2, 2)
self.interface = ImplementInterface()
def forward(self, x):
x = self.foo(x)
self.interface.not_bar(x)
x = lol(x)
return x, MyCoolNamedTuple(a="hello")
second_script_module = torch.jit.script(Foo())
second_saved_module = script_module_to_buffer(second_script_module)
clear_class_registry()
self.assertEqual(
first_script_module._c.qualified_name,
second_script_module._c.qualified_name,
)
class ContainsBoth(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module(
"second", torch.jit.load(second_saved_module)
)
self.add_module(
"first", torch.jit.load(first_saved_module)
)
def forward(self, x):
x, named_tuple_1 = self.first(x)
x, named_tuple_2 = self.second(x)
return len(x + named_tuple_2.a) + named_tuple_1.a
sm = torch.jit.script(ContainsBoth())
contains_both = script_module_to_buffer(sm)
sm = torch.jit.load(contains_both)
def test_save_load_using_pathlib(self):
class MyMod(torch.jit.ScriptModule):
@torch.jit.script_method
def forward(self, a):
return 2 * a
m = MyMod()
# Save then load.
with TemporaryFileName() as fname:
path = pathlib.Path(fname)
torch.jit.save_jit_module_to_flatbuffer(m, path)
m2 = torch.jit.load(path)
x = torch.tensor([1.0, 2.0, 3.0, 4.0])
self.assertTrue(torch.equal(m(x), m2(x)))
def test_save_namedtuple_input_only(self):
"""
Even if a NamedTuple is only used as an input argument, saving and
loading should work correctly.
"""
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
a: int
class MyModule(torch.nn.Module):
def forward(self, x: FooTuple) -> torch.Tensor:
return torch.tensor(3)
m_loaded = self.getExportImportCopy(torch.jit.script(MyModule()))
output = m_loaded(FooTuple(a=5))
self.assertEqual(output, torch.tensor(3))
def test_save_namedtuple_output_only(self):
"""
Even if a NamedTuple is only used as an output argument, saving and
loading should work correctly.
"""
global FooTuple # see [local resolution in python]
class FooTuple(NamedTuple):
a: int
class MyModule(torch.nn.Module):
def forward(self) -> Optional[FooTuple]:
return None
m_loaded = self.getExportImportCopy(torch.jit.script(MyModule()))
output = m_loaded()
self.assertEqual(output, None)
def test_module_info_flatbuffer(self):
class Foo(torch.nn.Module):
def __init__(self):
super(Foo, self).__init__()
self.foo = torch.nn.Linear(2, 2)
self.bar = torch.nn.Linear(2, 2)
def forward(self, x):
x = self.foo(x)
x = self.bar(x)
return x
first_script_module = torch.jit.script(Foo())
first_saved_module = io.BytesIO()
torch.jit.save_jit_module_to_flatbuffer(
first_script_module, first_saved_module)
first_saved_module.seek(0)
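# get_flatbuffer_module_info inspects the saved buffer and reports the
# bytecode/operator versions, the function and type names it contains, and
# the number of arguments for each operator it calls.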
expected = {
'bytecode_version': 4,
'operator_version': 4,
'function_names': {'__torch__.___torch_mangle_0.Foo.forward'},
'type_names': set(),
'opname_to_num_args': {'aten::linear': 3}}
self.assertEqual(
torch.jit._serialization.get_flatbuffer_module_info(first_saved_module),
expected)
def test_save_load_params_buffers_submodules(self):
"""
Check that parameters, buffers, and submodules are the same after loading.
"""
class Submodule(torch.nn.Module):
def __init__(self):
super().__init__()
class TestModule(torch.nn.Module):
def __init__(self):
super().__init__()
self.add_module("submodule_a", Submodule())
self.register_parameter(
"parameter_a", torch.nn.Parameter(torch.randn(4))
)
self.register_buffer("buffer", torch.randn(4))
self.t = torch.rand(4) # not buffer
self.parameter_b = torch.nn.Parameter(torch.randn(4))
self.submodule_b = Submodule()
m = TestModule()
m_loaded = self.getExportImportCopy(torch.jit.script(m))
# Check submodules.
self.assertEqual(
len(list(m.named_modules())), len(list(m_loaded.named_modules()))
)
for m_s, loaded_s in zip(m.named_modules(), m_loaded.named_modules()):
m_name, _ = m_s
loaded_name, _ = loaded_s
self.assertEqual(m_name, loaded_name)
# Check parameters.
self.assertEqual(len(list(m.parameters())), len(list(m_loaded.parameters())))
for m_p, loaded_p in zip(m.parameters(), m_loaded.parameters()):
self.assertEqual(m_p, loaded_p)
# Check buffers.
self.assertEqual(
len(list(m.named_buffers())), len(list(m_loaded.named_buffers()))
)
for m_b, loaded_b in zip(m.named_buffers(), m_loaded.named_buffers()):
m_name, m_buffer = m_b
loaded_name, loaded_buffer = loaded_b
self.assertEqual(m_name, loaded_name)
self.assertEqual(m_buffer, loaded_buffer)
def test_save_load_with_extra_files(self):
"""
Check that extra files saved alongside the module are returned intact on load.
"""
class Module(torch.nn.Module):
def __init__(self):
super().__init__()
def forward(self, x: Tensor):
return x
module = Module()
script_module = torch.jit.script(module)
script_module_io = io.BytesIO()
extra_files = {"abc.json": "[1,2,3]"}
script_module._save_for_lite_interpreter(script_module_io, _extra_files=extra_files, _use_flatbuffer=True)
script_module_io.seek(0)
re_extra_files = {}
torch._C._get_model_extra_files_from_buffer(script_module_io, _extra_files=re_extra_files)
self.assertEqual(extra_files, re_extra_files)
|
pytorch-master
|
test/jit/test_save_load.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch.testing import FileCheck
from typing import List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, freeze_rng_state
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestRemoveMutation(JitTestCase):
def test_aten_inplace(self):
def test_not_new_alias(x):
y = x[0]
y.add_(2)
return y
fn = torch.jit.script(test_not_new_alias)
graph = fn.graph
self.run_pass('remove_mutation', graph)
FileCheck().check("aten::add_").run(graph)
self.assertEqual(fn(torch.ones([2, 2])), test_not_new_alias(torch.ones([2, 2])))
def test_no_lowering():
x = torch.tensor([2, 2])
x[0] = 3
return x
# there is no functional equivalent of x[0] = ...
fn = torch.jit.script(test_no_lowering)
graph = fn.graph
self.run_pass('remove_mutation', graph)
FileCheck().check("aten::copy_").run(graph)
self.assertEqual(fn(), test_no_lowering())
def test_move_before_not_valid():
y = torch.tensor([2, 2])
z = y + 2
y.add_(2)
return y, z
fn = torch.jit.script(test_move_before_not_valid)
graph = fn.graph
self.run_pass('remove_mutation', graph)
FileCheck().check("aten::add_").run(graph)
self.assertEqual(fn(), test_move_before_not_valid())
def test_successful():
x = torch.tensor([2, 2])
x.add_(1)
x.add_(3)
y = x + 4
return x, y
fn = torch.jit.script(test_successful)
graph = fn.graph
self.run_pass('remove_mutation', graph)
FileCheck().check_not("aten::add_").run(graph)
self.assertEqual(test_successful(), fn())
def test_intermediary_use():
x = torch.tensor([2, 2])
x.add_(1)
y = x + 4
x.add_(3)
return x, y
fn = torch.jit.script(test_intermediary_use)
graph = fn.graph
FileCheck().check_count("aten::add_", 2).run(graph)
self.run_pass('remove_mutation', graph)
# Unable to remove the second add_ because of the y = x + 4 use
# In the future we could duplicate the value of x as a temporary and replace
# its intermediary use (so long as aliasing is safe)
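# A hypothetical rewrite (not implemented) would look roughly like:
#   x0 = torch.tensor([2, 2]); x1 = x0 + 1; y = x1 + 4; x2 = x1 + 3
# so that the read of x between the two mutations still sees the right value.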
FileCheck().check_count("aten::add_", 1).run(graph)
self.assertEqual(test_intermediary_use(), fn())
def test_if_output(self):
def foo(x, cond: bool):
if cond:
y = x + 5
else:
y = x + 2
y.add_(4)
return y
out_eager = foo(torch.tensor(5), True)
foo_script = torch.jit.script(foo)
FileCheck().check("aten::add_").run(foo_script.graph)
self.run_pass('remove_mutation', foo_script.graph)
FileCheck().check_not("aten::add_").run(foo_script.graph)
self.assertEqual(out_eager, foo_script(torch.tensor(5), True))
def test_if_output_fail(self):
@torch.jit.script
def foo(cond: bool):
li = []
if cond:
x = torch.tensor(1)
li.append(x)
else:
x = torch.tensor(2)
y = x.add_(2)
return y, li
self.run_pass('inline', foo.graph)
self.run_pass('remove_mutation', foo.graph)
FileCheck().check("aten::add_").run(foo.graph)
@torch.jit.script
def foo(cond: bool, y):
if cond:
x = y
else:
x = torch.tensor(2)
z = x.add_(2)
return z
self.run_pass('inline', foo.graph)
self.run_pass('remove_mutation', foo.graph)
FileCheck().check("aten::add_").run(foo.graph)
def test_special_mapped_op(self):
def test_successful():
x = torch.tensor([2, 2])
y = torch.tensor([2, 4])
x.zero_()
y.fill_(3)
return x, y
fn = torch.jit.script(test_successful)
graph = fn.graph
self.run_pass('remove_mutation', graph)
FileCheck().check_not("aten::zero_").check_not("aten::fill_").run(graph)
self.assertEqual(test_successful(), fn())
# full_like is not implemented for a tensor fill value
def test_successful():
x = torch.tensor([2, 2])
y = torch.tensor([2, 4])
x.fill_(y)
return x + x
fn = torch.jit.script(test_successful)
graph = fn.graph
self.run_pass('remove_mutation', graph)
FileCheck().check('aten::fill_').run(graph)
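# In-place random ops are handled too: the pass is expected to rewrite
# normal_ into its functional form, and freeze_rng_state is used below to
# check that the rewritten graph still produces the same random values.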
def normal():
return torch.rand(2, 1, 3, 4).normal_()
fn = torch.jit.script(normal)
graph = fn.graph
self.run_pass('remove_mutation', graph)
FileCheck().check_not("normal_").run(graph)
with freeze_rng_state():
out_eager = normal()
with freeze_rng_state():
out_script = fn()
self.assertEqual(out_eager, out_script)
def test_lists_append(self):
def successful_remove():
return [i for i in range(5)] # noqa: C416
fn = torch.jit.script(successful_remove)
graph = fn.graph
self.run_pass('loop_unrolling', graph)
self.run_pass('remove_mutation', graph)
self.run_pass('constant_propagation', graph)
FileCheck().check("graph").check_next("Constant").check_next("return").run(graph)
self.assertEqual(successful_remove(), fn())
def intermediary_use():
a = [1, 2]
b = len(a)
a.append(3)
return a
fn = torch.jit.script(intermediary_use)
graph = fn.graph
FileCheck().check("append").run(graph)
self.run_pass('remove_mutation', graph)
# it is possible to remove the append here, but the pass doesn't currently have the logic for it
FileCheck().check("append").run(graph)
self.assertEqual(intermediary_use(), fn())
def test_lists_insert(self):
def successful_remove():
a : List[int] = []
a.insert(0, 1)
a.insert(0, 2)
a.insert(-10, 3)
a.insert(-9, 4)
a.insert(10, 5)
return a
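# Out-of-range insert indices clamp to the ends of the list (matching Python's
# list.insert semantics), so the whole list should still fold to a constant
# after remove_mutation and constant propagation.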
fn = torch.jit.script(successful_remove)
graph = fn.graph
torch._C._jit_pass_remove_mutation(graph)
torch._C._jit_pass_constant_propagation(graph)
FileCheck().check("graph").check_next("Constant").check_next("return").run(graph)
self.assertEqual(successful_remove(), fn())
def test_list_indexing_removal(self):
@torch.jit.script
def out_of_bounds():
x = [1, 2]
x[4] = 3
return x
torch._C._jit_pass_remove_mutation(out_of_bounds.graph)
FileCheck().check("set_item").run(out_of_bounds.graph)
@torch.jit.script
def unknown(y: int):
x = [1, 2]
x[y] = 3
return x
torch._C._jit_pass_remove_mutation(unknown.graph)
FileCheck().check("set_item").run(unknown.graph)
def successful():
x = [1, 2, 3]
x[0] = 4
x[-1] = 0
return x
scripted_fn = torch.jit.script(successful)
torch._C._jit_pass_remove_mutation(scripted_fn.graph)
FileCheck().check_not("set_item").run(scripted_fn.graph)
self.checkScript(successful, ())
def successful():
x = [1]
x[-1] = 3
return x
scripted_fn = torch.jit.script(successful)
torch._C._jit_pass_remove_mutation(scripted_fn.graph)
FileCheck().check_not("set_item").run(scripted_fn.graph)
self.checkScript(successful, ())
def test_common_pytorch_list_ops(self):
for op in ["cat", "stack", "vstack", "hstack", "dstack"]:
class OpMod(torch.nn.Module):
def __init__(self, op):
super(OpMod, self).__init__()
self.op = torch_op
def forward(self):
x = torch.tensor([1, 2, 3, 4])
x.add_(3)
y = [x, x]
return self.op(y) + 3
torch_op = getattr(torch, op)
mod = OpMod(torch_op)
mod_script = torch.jit.script(mod)
self.run_pass('remove_mutation', mod_script.forward.graph)
FileCheck().check_not("aten::add_").run(mod_script.forward.graph)
self.assertEqual(mod(), mod_script())
# test that the output doesn't alias the input
for inputs in [torch.rand(2, 2)], [torch.rand(2, 2) for _ in range(2)]:
result = torch_op(inputs)
sums = [ten.sum() for ten in result]
for inp in inputs:
inp.fill_(10)
self.assertEqual(sums, [ten.sum() for ten in result])
@torch.jit.script
def test_multiple_uses():
x = torch.tensor([1, 2, 3, 4])
x.add_(3)
y = [x, x]
return torch.cat(y), y
self.run_pass('remove_mutation', test_multiple_uses.graph)
FileCheck().check("aten::add_").run(test_multiple_uses.graph)
|
pytorch-master
|
test/jit/test_remove_mutation.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestOpDecompositions(JitTestCase):
def test_op_decomposition(self):
def foo(x):
return torch.var(x, unbiased=True)
# TODO: more robust testing
foo_s = torch.jit.script(foo)
FileCheck().check("aten::var").run(foo_s.graph)
torch._C._jit_pass_run_decompositions(foo_s.graph)
inp = torch.rand([10, 10])
self.assertEqual(foo(inp), foo_s(inp))
FileCheck().check_not("aten::var").run(foo_s.graph)
def test_registered_decomposition(self):
@torch.jit.script
def foo(x):
return torch.square(x)
@torch.jit.script
def square_decomp(x):
return torch.pow(x, 2)
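# Register the scripted graph as the decomposition for aten::square and re-run
# the decomposition pass; call sites of square should now lower to pow.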
torch.jit._register_decomposition(torch.ops.aten.square.default, square_decomp.graph)
torch._C._jit_pass_run_decompositions(foo.graph)
FileCheck().check_not("aten::square").check("aten::pow").run(foo.graph)
x = torch.rand([4])
self.assertEqual(foo(x), torch.square(x))
|
pytorch-master
|
test/jit/test_op_decompositions.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import gc
import unittest
import torch
from typing import NamedTuple
from torch.testing import FileCheck
from torch.testing._internal.jit_utils import JitTestCase
from torch.testing._internal.common_utils import skipIfRocm, skipCUDANonDefaultStreamIf
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
# Check if GPU is available
TEST_CUDA = torch.cuda.is_available()
# Check if multiple GPU's are available
TEST_MULTIGPU = TEST_CUDA and torch.cuda.device_count() >= 2
# If GPU is not available, then do not run the tests
if not TEST_CUDA:
print('CUDA not available, skipping tests', file=sys.stderr)
JitTestCase = object # noqa: F811
TEST_LARGE_TENSOR = TEST_CUDA
# If GPU is available, then initialize the cuda context and check
# if there is memory available to allocate for LARGE Tensors.
if TEST_CUDA:
torch.ones(1).cuda() # initialize cuda context
TEST_LARGE_TENSOR = torch.cuda.get_device_properties(0).total_memory >= 5e9
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
class TestCUDA(JitTestCase):
"""
A suite of tests for the CUDA API in TorchScript.
"""
def setUp(self):
super(TestCUDA, self).setUp()
def tearDown(self):
gc.collect()
torch.cuda.empty_cache()
super(TestCUDA, self).tearDown()
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_cuda_synchronize(self):
# Test device synchronization.
@torch.jit.script
def test_device_synchronize():
prev_current_device_index = torch.cuda.current_device()
torch.cuda.synchronize()
torch.cuda.synchronize('cuda')
torch.cuda.synchronize('cuda:0')
torch.cuda.synchronize(0)
torch.cuda.synchronize(torch.device('cuda:1'))
after_current_device_index = torch.cuda.current_device()
# Check if the current device index is the same as the device index before
# synchronizing the device.
return prev_current_device_index == after_current_device_index
@torch.jit.script
def test_multi_device_synchronize():
torch.cuda.synchronize(torch.device('cuda:0'))
prev_current_device_index = torch.cuda.current_device()
torch.cuda.synchronize(1)
after_current_device_index = torch.cuda.current_device()
# Check if the current device index is the same as the device index before
# synchronizing the device.
return prev_current_device_index == after_current_device_index
self.assertTrue(test_device_synchronize)
FileCheck().check("cuda::synchronize(") \
.run(test_device_synchronize.graph)
self.assertTrue(test_multi_device_synchronize)
FileCheck().check("cuda::synchronize(") \
.run(test_multi_device_synchronize.graph)
def test_stream_args(self):
# Test stream creation with default arguments
@torch.jit.script
def stream_default_args() -> bool:
s = torch.cuda.Stream()
return s.device_index() == torch.cuda.current_device()
@torch.jit.script
def stream_default_args_for_device() -> bool:
s = torch.cuda.Stream(priority=0)
return s.device_index() == torch.cuda.current_device()
@torch.jit.script
def stream_default_args_for_priority() -> bool:
d = torch.device("cuda:1")
s = torch.cuda.Stream(d)
return s.device_index() == 1
@torch.jit.script
def stream_args_all() -> bool:
d = torch.device("cuda:0")
s = torch.cuda.Stream(d, 0)
return s.device_index() == 0
self.assertTrue(stream_default_args)
self.assertTrue(stream_default_args_for_device)
self.assertTrue(stream_default_args_for_priority)
self.assertTrue(stream_args_all)
def test_event_args(self):
# Test Event creation with default arguments
@torch.jit.script
def event_default_args() -> bool:
e = torch.cuda.Event()
return e is not None
self.assertTrue(event_default_args)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
def test_current_stream(self):
# Test current stream on the device and check if the stream device index
# matches the device ID
@torch.jit.script
def fn():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
s0 = torch.cuda.current_stream(device)
s1 = torch.cuda.current_stream(torch.device("cuda:1"))
s2 = torch.cuda.current_stream(torch.device("cuda:0"))
return s0.device_index(), s1.device_index(), s2.device_index()
d0, d1, d2 = fn()
# By default, the current device ID is 0.
self.assertEqual(0, d0)
self.assertEqual(1, d1)
self.assertEqual(0, d2)
self.assertEqual(d0, d2)
# Test current_stream API by passing device ID as an argument and
# check if the stream device index matches the device ID
@torch.jit.script
def fn_with_device_index_args():
device_index = torch.cuda.current_device()
s0 = torch.cuda.current_stream(device_index)
s1 = torch.cuda.current_stream(1)
s2 = torch.cuda.current_stream(0)
return s0.device_index(), s1.device_index(), s2.device_index()
d0, d1, d2 = fn_with_device_index_args()
# By default, the current device ID is 0.
self.assertEqual(0, d0)
self.assertEqual(1, d1)
self.assertEqual(0, d2)
self.assertEqual(d0, d2)
@skipIfRocm
@unittest.skipIf(not TEST_MULTIGPU, "detected only one GPU")
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@skipCUDANonDefaultStreamIf(True)
def test_streams_and_events(self):
# Test default_stream API by passing device ID as an argument and
# check if the stream device index matches the device ID
@torch.jit.script
def test_default_streams_with_device_index_args():
s0 = torch.cuda.default_stream(0)
s1 = torch.cuda.default_stream(1)
return s0.device_index(), s1.device_index()
d0, d1 = test_default_streams_with_device_index_args()
self.assertEqual(d0, 0)
self.assertEqual(d1, 1)
# This test checks that the default stream ID is set to 0 on the device
@torch.jit.script
def test_default_streams():
s0 = torch.cuda.default_stream(torch.device('cuda:0'))
s1 = torch.cuda.default_stream(torch.device('cuda:1'))
d = torch.device('cuda:1')
# Check that the current stream id and default stream id are the same
# on the current device. The current device id by default is 0
s2 = torch.cuda.current_stream(torch.device('cuda:0'))
check_s2 = s2.id() == s0.id()
check_d0 = torch.cuda.current_device() == s2.device_index()
# Set the current device to d1 and check if the stream
# has been set to the default stream on d1
with torch.cuda.device(d):
s3 = torch.cuda.current_stream(d)
check_s3 = s3.id() == s1.id()
check_d1 = torch.cuda.current_device() == s3.device_index()
# Check if the current device was reset to 0
is_device_d0 = torch.cuda.current_device() == s2.device_index()
return s0.device_index(), s1.device_index(), check_s2, check_s3, check_d0, check_d1, is_device_d0
d0, d1, check_s2, check_s3, check_d0, check_d1, is_device_d0 = test_default_streams()
self.assertEqual(d0, 0)
self.assertEqual(d1, 1)
self.assertTrue(check_s2)
self.assertTrue(check_s3)
self.assertTrue(check_d0)
self.assertTrue(check_d1)
self.assertTrue(is_device_d0)
# This test checks that the stream context manager is a no-op
# when the stream is None for `with torch.cuda.stream`
@torch.jit.script
def test_set_none_stream():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
current_stream = torch.cuda.current_stream(device)
default_stream = torch.cuda.default_stream(device)
# When stream is none, check if this operation is a no-op
with torch.cuda.stream(None):
cur_device_index = torch.cuda.current_device()
is_device_index_same = cur_device_index == device_index
is_current_stream_same = torch.cuda.current_stream(device).id() == current_stream.id()
is_default_stream_same = torch.cuda.default_stream(device).id() == default_stream.id()
# Check if the device index, current stream and default streams have not changed
are_streams_same = is_device_index_same and is_current_stream_same and is_default_stream_same
return are_streams_same
self.assertTrue(test_set_none_stream())
# This test checks that the device context manager is a no-op
# when the device is None for `with torch.cuda.device`
@torch.jit.script
def test_set_device_none():
device_index = torch.cuda.current_device()
# When device is none, check if this operation is a no-op
with torch.cuda.device(None):
# Check if the current device is the same
is_device_same = torch.cuda.current_device() == device_index
return is_device_same
self.assertTrue(test_set_device_none())
# Check if a CUDA JIT stream is created
# on the current_device
@torch.jit.script
def test_simple_stream():
device_index = torch.cuda.current_device()
s = torch.cuda.Stream()
return device_index == s.device_index()
self.assertTrue(test_simple_stream(), "Could not create Stream!")
# Class used to store results for the test: test_get_stream.
class Result(NamedTuple):
t1 : torch.Tensor
t2 : torch.Tensor
is_current_and_default_stream_same : bool
is_default_and_user_stream_not_same : bool
is_stream_set : bool
is_stream_reset : bool
default_stream_query : bool
default_stream_id : int
user_stream_id : int
# The test aims at checking different stream properties.
@torch.jit.script
def test_get_stream():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
current_stream = torch.cuda.current_stream(device)
default_stream = torch.cuda.default_stream(device)
user_stream = torch.cuda.Stream()
# Check if the current and default streams are the same on the device
is_current_and_default_stream_same = current_stream.id() == default_stream.id()
# Check if user stream and default stream are not the same on the device
is_default_and_user_stream_not_same = default_stream.id() != user_stream.id()
with torch.cuda.stream(user_stream):
is_stream_set = torch.cuda.current_stream(device).id() == user_stream.id()
# Check if the stream was reset to current_stream
is_stream_reset = torch.cuda.current_stream(device).id() == current_stream.id()
tensor1 = torch.rand(10000, 10000, device="cuda")
tensor2 = torch.mm(tensor1, tensor1).to("cuda")
default_stream.synchronize()
default_stream_query = default_stream.query()
# Capture all the results in the class Result
res = Result(
tensor1, tensor2, is_current_and_default_stream_same,
is_default_and_user_stream_not_same, is_stream_set,
is_stream_reset, default_stream_query, default_stream.id(), user_stream.id())
return res
result = test_get_stream()
self.assertEqual(torch.matmul(result.t1, result.t1), result.t2)
self.assertTrue(result.is_current_and_default_stream_same)
self.assertTrue(result.is_default_and_user_stream_not_same)
self.assertTrue(result.is_stream_set)
self.assertTrue(result.is_stream_reset)
self.assertTrue(result.default_stream_query)
self.assertEqual(result.default_stream_id, 0) # Check if the default stream ID is always 0
self.assertNotEqual(result.user_stream_id, 0) # Check if the user stream is always non zero
# Test the stream context manager. This test checks if the stream is switched
# to the user stream while inside the stream context manager.
@torch.jit.script
def test_stream_context():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
current_stream = torch.cuda.current_stream(device)
user_stream = torch.cuda.Stream()
A = torch.rand(1000, 1000, device="cuda")
with torch.cuda.stream(user_stream):
check = torch.cuda.current_stream(device).id() == user_stream.id()
B = torch.mm(A, A).to("cuda")
# Wait for B to be computed
user_stream.synchronize()
# Check if the stream has been reset on the current device
is_stream_reset = torch.cuda.current_stream(device).id() == current_stream.id()
return A, B, check, is_stream_reset
A, B, is_stream_set, is_stream_reset = test_stream_context()
self.assertEqual(torch.matmul(A, A), B)
self.assertTrue(is_stream_set, "Error: Current stream was not set to user stream!")
self.assertTrue(is_stream_reset, "Error: The stream was not restored to previous stream!")
# Test multiple nested streams. Check if the operations are computed as expected on the streams
# This test has been adapted from the eager mode tests available at test/test_cuda.py
@torch.jit.script
def test_multiple_stream():
prev_device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(prev_device_index))
prev_current_stream = torch.cuda.current_stream(device)
d1 = torch.device("cuda:0")
d2 = torch.device("cuda:1")
s1 = torch.cuda.Stream(d1, 0)
s2 = torch.cuda.Stream(d2, 0)
A = torch.rand(1000, 1000, device="cuda")
B = torch.rand(1000, 1000, device="cuda")
with torch.cuda.stream(s1):
C = torch.mm(A, A).to("cuda")
# Check if the stream and device have been set to s1
is_stream_s1 = torch.cuda.current_stream(d1).id() == s1.id()
is_device_s1 = torch.cuda.current_device() == s1.device_index()
with torch.cuda.stream(s2):
# Check if the stream and device have been set to s2
is_stream_s2 = torch.cuda.current_stream(d2).id() == s2.id()
is_device_s2 = torch.cuda.current_device() == s2.device_index()
D = torch.mm(B, B).to("cuda")
# Check if the stream and device have been set to s1
is_stream_s1_after = torch.cuda.current_stream(d1).id() == s1.id()
is_device_s1_after = torch.cuda.current_device() == s1.device_index()
# Wait for D to be computed
s2.synchronize()
# Wait for C to be computed on S1
s1.synchronize()
# Check if the stream and device have been restored to the previous stream and device
is_device_current = torch.cuda.current_device() == prev_device_index
is_stream_current = torch.cuda.current_stream(device).id() == prev_current_stream.id()
check_stream = is_stream_s1 and is_stream_s2 and is_stream_s1_after and is_stream_current
check_device = is_device_s1 and is_device_s2 and is_device_s1_after and is_device_current
return A, B, C, D, check_stream, check_device
A, B, C, D, check_stream, check_device = test_multiple_stream()
self.assertEqual(torch.matmul(A, A), C)
self.assertEqual(torch.matmul(B, B), D)
self.assertTrue(check_stream)
self.assertTrue(check_device)
# Test multiple streams waiting on each other for the operations to be completed.
@torch.jit.script
def test_data_dependency_between_streams():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
prev_current_stream = torch.cuda.current_stream(device)
d = torch.device("cuda:0")
s1 = torch.cuda.Stream(d, 0)
s2 = torch.cuda.Stream(d, 0)
event = torch.cuda.Event(False, False, False)
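# The three positional flags are (enable_timing, blocking, interprocess),
# mirroring the eager torch.cuda.Event signature.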
A = torch.rand(1000, 1000, device="cuda")
with torch.cuda.stream(s1):
is_stream_s1 = torch.cuda.current_stream(device).id() == s1.id()
B = torch.mm(A, A).to("cuda")
s1.record_event(event)
# Check if the current_stream is reset
is_current_stream_1 = torch.cuda.current_stream(device).id() == prev_current_stream.id()
# Wait for ops on s1 to be computed
s2.wait_event(event)
with torch.cuda.stream(s2):
is_stream_s2 = torch.cuda.current_stream(device).id() == s2.id()
C = torch.mm(B, B).to("cuda")
# Wait for C to be computed
s2.synchronize()
# Check if the current_stream is reset
is_current_stream_2 = torch.cuda.current_stream(device).id() == prev_current_stream.id()
check_stream = is_current_stream_1 and is_current_stream_2 and is_stream_s1 and is_stream_s2
return A, B, C, check_stream
A, B, C, check_stream = test_data_dependency_between_streams()
self.assertEqual(torch.matmul(A, A), B)
self.assertEqual(torch.matmul(B, B), C)
self.assertTrue(check_stream)
# Test a simple CUDA event. Test if the CUDA event was created successfully
@torch.jit.script
def test_simple_event():
e = torch.cuda.Event(True, False, False)
return e is not None
self.assertTrue(test_simple_event(), "Could not create CUDA Event!")
# Record the CUDA event for operation torch.mm on the current stream
# and then test if the elapsed time is greater than 0. This test is also
# an adaptation of the eager mode CUDA tests available at test/test_cuda.py
@torch.jit.script
def test_event():
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
stream = torch.cuda.current_stream(device)
event = torch.cuda.Event(True, False, False)
is_true_event_query = event.query()
start_event = torch.cuda.Event(True, False, False)
stream.record_event(start_event)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
tensor2 = torch.mm(tensor1, tensor1).to("cuda")
stream.record_event(event)
event.synchronize()
is_again_true_event_query = event.query()
if not (is_true_event_query and is_again_true_event_query):
return -1.0
return start_event.elapsed_time(event)
self.assertGreater(test_event(), 0)
# Check for stream synchronization when a large tensor multiplication is
# computed on the stream. stream.query() should be true once the synchronization is done.
@torch.jit.script
def test_stream_synchronize() -> float:
device_index = torch.cuda.current_device()
s = torch.cuda.Stream()
e_tik = torch.cuda.Event(True, False, False)
e_tok = torch.cuda.Event(True, False, False)
e_tik.record(s)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
with torch.cuda.stream(s):
tensor2 = torch.mm(tensor1, tensor1).to("cuda")
s.synchronize()
e_tok.record(s)
e_tok.synchronize()
if not s.query():
return -1.0
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
self.assertGreater(test_stream_synchronize(), 0)
# Test event synchronization for the event that records a stream doing
# a large tensor multiplication. Check if the elapsed time is greater than 0
# and the stream.query evaluates to true.
@torch.jit.script
def test_event_synchronize() -> float:
s = torch.cuda.Stream()
e_tik = torch.cuda.Event(True, False, False)
e_tok = torch.cuda.Event(True, False, False)
e_tik.record(s)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
with torch.cuda.stream(s):
tensor = torch.mm(tensor1, tensor1).to("cuda")
s.record_event(e_tok)
e_tok.synchronize()
s.synchronize()
if not s.query():
return -1.0
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
self.assertGreater(test_event_synchronize(), 0)
# Test for event wait. Check if event waits for the all the operations on
# the stream to be done. Check for synchronizations and query on the streams
# and events. This test is adapted from the eager mode tests for CUDA; please refer
# to test/test_cuda.py
@torch.jit.script
def test_event_wait() -> float:
device_index = torch.cuda.current_device()
device = torch.device("cuda:" + str(device_index))
s0 = torch.cuda.current_stream(device)
s1 = torch.cuda.Stream()
e_tik = torch.cuda.Event(True, True, False)
e_tok = torch.cuda.Event(True, True, False)
e_tik.record(s0)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
with torch.cuda.stream(s0):
tensor2 = torch.mm(tensor1, tensor1).cuda()
e_sync = torch.cuda.Event(True, False, False)
e_sync.record(torch.cuda.current_stream(device))
e_sync.wait(s1)
with torch.cuda.stream(s1):
tensor3 = torch.rand(1000000000, 1000000000, device="cuda")
tensor4 = torch.mm(tensor3, tensor3).cuda()
s1.synchronize()
e_tok.record(torch.cuda.current_stream(device))
e_tok.synchronize()
s0.synchronize()
if not s0.query() or not s1.query() or not e_sync.query():
return -1.0
# not necessary to check e_tik and e_tok, as elapsed_time would throw
# exception if otherwise.
return e_tik.elapsed_time(e_tok)
self.assertGreater(test_event_wait(), 0)
# Test for stream wait_event. Checks if the stream waits on the event
@torch.jit.script
def test_wait_event():
d1 = torch.device('cuda:1')
with torch.cuda.device(d1):
s0 = torch.cuda.current_stream(d1)
tensor1 = torch.rand(1000000000, 1000000000, device="cuda")
tensor2 = torch.mm(tensor1, tensor1).to("cuda")
e0 = torch.cuda.Event(False, False, False)
s0.record_event(e0)
s1 = torch.cuda.current_stream(torch.device('cuda:0'))
s1.wait_event(e0)
s1.synchronize()
return e0.query() and s0.query() and s1.query()
self.assertTrue(test_wait_event())
# Test if a scripted module with cuda streams can be saved, loaded and executed
def test_save_load(self):
class Model(torch.nn.Module):
def forward(self):
s = torch.cuda.Stream()
a = torch.rand(3, 4, device="cuda")
b = torch.rand(3, 4, device="cuda")
with torch.cuda.stream(s):
is_stream_s = torch.cuda.current_stream(s.device).id() == s.id()
c = torch.cat((a, b), 0).cuda()
s.synchronize()
return is_stream_s, a, b, c
model = Model()
# Script the model and save
script_model = torch.jit.script(model)
is_stream_s, a, b, c = script_model()
# Verify if the output is correct
self.assertTrue(is_stream_s)
self.assertEqual(torch.cat((a, b), 0), c)
# Save and load scripted model
load_model = self.getExportImportCopy(script_model)
is_stream_s, a_load, b_load, c_load = load_model()
self.assertTrue(is_stream_s)
self.assertEqual(torch.cat((a_load, b_load), 0), c_load)
|
pytorch-master
|
test/jit/test_cuda.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.testing._internal.jit_utils import JitTestCase
class TestFuserCommon(JitTestCase):
def test_autodiff_fallback(self):
for rq in [True, False]:
@torch.jit.script
def fn(x):
return torch.max(x**2.0, x**3.0)
x = torch.randn(5, requires_grad=not rq)
# cause optimization to be created
for i in range(5):
fn(x)
# test fallback when optimization is not applicable
y = fn(torch.randn(5, requires_grad=rq))
self.assertEqual(y.requires_grad, rq)
|
pytorch-master
|
test/jit/test_fuser_common.py
|
# Owner(s): ["oncall: jit"]
from torch.testing._internal.common_utils import TestCase
import torch
from torch import nn
r"""
Test TorchScript exception handling.
"""
class TestException(TestCase):
def test_pyop_exception_message(self):
class Foo(torch.jit.ScriptModule):
def __init__(self):
super(Foo, self).__init__()
self.conv = nn.Conv2d(1, 10, kernel_size=5)
@torch.jit.script_method
def forward(self, x):
return self.conv(x)
foo = Foo()
# testing that the correct error message propagates
with self.assertRaisesRegex(RuntimeError, r"Expected 3D \(unbatched\) or 4D \(batched\) input to conv2d"):
foo(torch.ones([123])) # wrong size
def test_builtin_error_message(self):
with self.assertRaisesRegex(RuntimeError, "Arguments for call are not valid"):
@torch.jit.script
def close_match(x):
return x.masked_fill(True)
with self.assertRaisesRegex(RuntimeError, "This op may not exist or may not be currently "
"supported in TorchScript"):
@torch.jit.script
def unknown_op(x):
torch.set_anomaly_enabled(True)
return x
def test_exceptions(self):
cu = torch.jit.CompilationUnit('''
def foo(cond):
if bool(cond):
raise ValueError(3)
return 1
''')
cu.foo(torch.tensor(0))
with self.assertRaisesRegex(torch.jit.Error, "3"):
cu.foo(torch.tensor(1))
def foo(cond):
a = 3
if bool(cond):
raise ArbitraryError(a, "hi")
if 1 == 2:
raise ArbitraryError
return a
with self.assertRaisesRegex(RuntimeError, "undefined value ArbitraryError"):
torch.jit.script(foo)
def exception_as_value():
a = Exception()
print(a)
with self.assertRaisesRegex(RuntimeError, "cannot be used as a value"):
torch.jit.script(exception_as_value)
@torch.jit.script
def foo_no_decl_always_throws():
raise RuntimeError("Hi")
# a function with no declared return type that always throws has its output type set to NoneType
output_type = next(foo_no_decl_always_throws.graph.outputs()).type()
self.assertTrue(str(output_type) == "NoneType")
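# with a declared return type, the annotation is kept even though the body always throws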
@torch.jit.script
def foo_decl_always_throws():
# type: () -> Tensor
raise Exception("Hi")
output_type = next(foo_decl_always_throws.graph.outputs()).type()
self.assertTrue(str(output_type) == "Tensor")
def foo():
raise 3 + 4
with self.assertRaisesRegex(RuntimeError, "must derive from BaseException"):
torch.jit.script(foo)
# `a` escapes the if/else scope (both else paths always raise, so `a` is defined afterwards)
@torch.jit.script
def foo():
if 1 == 1:
a = 1
else:
if 1 == 1:
raise Exception("Hi")
else:
raise Exception("Hi")
return a
self.assertEqual(foo(), 1)
@torch.jit.script
def tuple_fn():
raise RuntimeError("hello", "goodbye")
with self.assertRaisesRegex(torch.jit.Error, "hello, goodbye"):
tuple_fn()
@torch.jit.script
def no_message():
raise RuntimeError
with self.assertRaisesRegex(torch.jit.Error, "RuntimeError"):
no_message()
def test_assertions(self):
cu = torch.jit.CompilationUnit('''
def foo(cond):
assert bool(cond), "hi"
return 0
''')
cu.foo(torch.tensor(1))
with self.assertRaisesRegex(torch.jit.Error, "AssertionError: hi"):
cu.foo(torch.tensor(0))
@torch.jit.script
def foo(cond):
assert bool(cond), "hi"
foo(torch.tensor(1))
# we don't currently validate the name of the exception
with self.assertRaisesRegex(torch.jit.Error, "AssertionError: hi"):
foo(torch.tensor(0))
def test_python_op_exception(self):
@torch.jit.ignore
def python_op(x):
raise Exception("bad!")
@torch.jit.script
def fn(x):
return python_op(x)
with self.assertRaisesRegex(RuntimeError, "operation failed in the TorchScript interpreter"):
fn(torch.tensor(4))
def test_dict_expansion_raises_error(self):
def fn(self):
d = {"foo": 1, "bar": 2, "baz": 3}
return {**d}
with self.assertRaisesRegex(torch.jit.frontend.NotSupportedError,
"Dict expansion "):
torch.jit.script(fn)
def test_custom_python_exception(self):
class MyValueError(ValueError):
def __init__(self, msg):
super(MyValueError, self).__init__(msg)
@torch.jit.script
def fn():
raise MyValueError("test custom exception")
with self.assertRaisesRegex(torch.jit.Error, "jit.test_exception.MyValueError: test custom exception"):
fn()
def test_custom_python_exception_defined_elsewhere(self):
from jit.myexception import MyKeyError
@torch.jit.script
def fn():
raise MyKeyError("This is a user defined key error")
with self.assertRaisesRegex(torch.jit.Error, "jit.myexception.MyKeyError: This is a user defined key error"):
fn()
|
pytorch-master
|
test/jit/test_exception.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from typing import Tuple, List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == "__main__":
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestHash(JitTestCase):
def test_hash_tuple(self):
def fn(t1: Tuple[int, int], t2: Tuple[int, int]) -> bool:
return hash(t1) == hash(t2)
self.checkScript(fn, ((1, 2), (1, 2)))
self.checkScript(fn, ((1, 2), (3, 4)))
self.checkScript(fn, ((1, 2), (2, 1)))
def test_hash_tuple_nested_unhashable_type(self):
# Tuples may contain unhashable types like `list`, check that we error
# properly in that case.
@torch.jit.script
def fn_unhashable(t1: Tuple[int, List[int]]):
return hash(t1)
with self.assertRaisesRegexWithHighlight(RuntimeError, "unhashable", "hash"):
fn_unhashable((1, [1]))
def test_hash_tensor(self):
"""Tensors should hash by identity"""
def fn(t1, t2):
return hash(t1) == hash(t2)
tensor1 = torch.tensor(1)
tensor1_clone = torch.tensor(1)
tensor2 = torch.tensor(2)
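# tensor1 and tensor1_clone hold equal values but are distinct objects; with
# identity hashing their hashes need not match, and checkScript only verifies
# that the scripted result agrees with eager.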
self.checkScript(fn, (tensor1, tensor1))
self.checkScript(fn, (tensor1, tensor1_clone))
self.checkScript(fn, (tensor1, tensor2))
def test_hash_none(self):
def fn():
n1 = None
n2 = None
return hash(n1) == hash(n2)
self.checkScript(fn, ())
def test_hash_bool(self):
def fn(b1: bool, b2: bool):
return hash(b1) == hash(b2)
self.checkScript(fn, (True, False))
self.checkScript(fn, (True, True))
self.checkScript(fn, (False, True))
self.checkScript(fn, (False, False))
def test_hash_float(self):
def fn(f1: float, f2: float):
return hash(f1) == hash(f2)
self.checkScript(fn, (1.2345, 1.2345))
self.checkScript(fn, (1.2345, 6.789))
self.checkScript(fn, (1.2345, float("inf")))
self.checkScript(fn, (float("inf"), float("inf")))
self.checkScript(fn, (1.2345, float('nan')))
if sys.version_info < (3, 10):
# Hash of two nans are not guaranteed to be equal. From https://docs.python.org/3/whatsnew/3.10.html :
# Hashes of NaN values of both float type and decimal.Decimal type now depend on object identity.
self.checkScript(fn, (float("nan"), float("nan")))
self.checkScript(fn, (float("nan"), float("inf")))
def test_hash_int(self):
def fn(i1: int, i2: int):
return hash(i1) == hash(i2)
self.checkScript(fn, (123, 456))
self.checkScript(fn, (123, 123))
self.checkScript(fn, (123, -123))
self.checkScript(fn, (-123, -123))
self.checkScript(fn, (123, 0))
def test_hash_string(self):
def fn(s1: str, s2: str):
return hash(s1) == hash(s2)
self.checkScript(fn, ("foo", "foo"))
self.checkScript(fn, ("foo", "bar"))
self.checkScript(fn, ("foo", ""))
def test_hash_device(self):
def fn(d1: torch.device, d2: torch.device):
return hash(d1) == hash(d2)
gpu0 = torch.device('cuda:0')
gpu1 = torch.device('cuda:1')
cpu = torch.device('cpu')
self.checkScript(fn, (gpu0, gpu0))
self.checkScript(fn, (gpu0, gpu1))
self.checkScript(fn, (gpu0, cpu))
self.checkScript(fn, (cpu, cpu))
|
pytorch-master
|
test/jit/test_hash.py
|
# Owner(s): ["oncall: jit"]
import torch
from torch.testing._internal.jit_utils import JitTestCase
from typing import List
class TestAutodiffJit(JitTestCase):
def test_undefined_tensor_lists(self):
def fn(tensor_list: List[torch.Tensor], add_tensor):
cat = torch.cat(tensor_list, dim=1)
r = torch.sin(cat + add_tensor)
return r
fn_s = torch.jit.script(fn)
a = torch.rand((3, 6), requires_grad=True)
b = torch.rand((3, 10), requires_grad=True)
x = [a, b]
y = torch.rand((3, 16), requires_grad=True)
ret = fn_s(x, y)
ret.sum().backward()
ret = fn_s(x, y)
ret.sum().backward()
ret = fn_s(x, y)
s = ret.sum()
# backward_fn expects 2 inputs: (grad_output, current_grad_r)
# current_grad_r is provided because we need to add this contribution
# to grad_r when we return it.
backward_fn = s.grad_fn.next_functions[0][0]
# check behavior with defined tensor
grad_out = torch.rand((3, 16))
grad_inputs = backward_fn(grad_out, None)
# expect 3 tensors: grad_y, grad_a, grad_b
self.assertEqual(3, len(grad_inputs))
for x in grad_inputs:
self.assertTrue(isinstance(x, torch.Tensor))
# now test with undefined grad_out
grad_inputs = backward_fn(None, None)
# expect all of them to be None
self.assertEqual(3, len(grad_inputs))
for x in grad_inputs:
if x is not None:
self.assertEqual(0, torch.max(torch.abs(x)).item())
def test_requires_grad_outputs(self):
# outputs should require_grad only if eager outputs would require_grad.
def fn(a, b, c):
return a.relu() + b.relu(), c.relu()
a = torch.rand((10, 10), requires_grad=False)
b = torch.rand((10, 10), requires_grad=False)
c = torch.rand((10, 10), requires_grad=True)
fn_s = torch.jit.script(fn)
for i in range(4):
x, y = fn_s(a, b, c)
self.assertFalse(x.requires_grad)
self.assertTrue(y.requires_grad)
def test_requires_grad_outputs_profiled_twice(self):
# the value "r" is used twice, by gammaln and by entr, so it is profiled twice.
# So during autodiff graph formation the profile nodes are unmerged because
# they are aliasing. Then the DifferentiableGraph doesn't have a profile
# node on the output. The requires_grad info should then be added onto the
# output value (otherwise autodiff will make the output require_grad).
# Note: this relies on gammaln and entr not having autodiff implementations.
def fn(a, b, c):
r = a.relu().relu()
return torch.special.gammaln(r), torch.special.entr(r), c.cos().relu()
fn_s = torch.jit.script(fn)
a = torch.rand((10, 10), requires_grad=False)
b = torch.rand((10, 10), requires_grad=False)
c = torch.rand((10, 10), requires_grad=True)
for i in range(4):
x_s, y_s, z_s = fn_s(a, b, c)
x, y, z = fn(a, b, c)
self.assertEqual(x_s.requires_grad, x.requires_grad)
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad)
def test_requires_grad_outputs_side_effects(self):
# same as above, but also add a CallFunction in between.
@torch.jit.ignore
def python_fn(x):
return x.relu()
def fn(a, b, c):
r = a.relu().relu()
z = python_fn(r)
return torch.relu(r), torch.nn.functional.gelu(r), c.cos().relu()
fn_s = torch.jit.script(fn)
a = torch.rand((10, 10), requires_grad=False)
b = torch.rand((10, 10), requires_grad=False)
c = torch.rand((10, 10), requires_grad=True)
for i in range(4):
x_s, y_s, z_s = fn_s(a, b, c)
x, y, z = fn(a, b, c)
self.assertEqual(x_s.requires_grad, x.requires_grad)
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad)
def test_autodiff_requires_grad_nograd(self):
@torch.jit.ignore
def python_fn(x):
return x.relu()
def fn(a, b, c):
x = a.sin().relu()
y = python_fn(b)
with torch.no_grad():
z = x + c
return x, y, z
fn_s = torch.jit.script(fn)
a = torch.rand((10, 10), requires_grad=True)
b = torch.rand((10, 10), requires_grad=True)
c = torch.rand((10, 10), requires_grad=True)
for i in range(4):
x_s, y_s, z_s = fn_s(a, b, c)
x, y, z = fn(a, b, c)
self.assertEqual(x_s.requires_grad, x.requires_grad)
self.assertEqual(y_s.requires_grad, y.requires_grad)
self.assertEqual(z_s.requires_grad, z.requires_grad)
|
pytorch-master
|
test/jit/test_autodiff.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
from typing import Any, List, Tuple
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.testing._internal.jit_utils import JitTestCase
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
class TestModuleContainers(JitTestCase):
def test_sequential_intermediary_types(self):
class A(torch.nn.Module):
def __init__(self):
super(A, self).__init__()
def forward(self, x):
return x + 3
class B(torch.nn.Module):
def __init__(self):
super(B, self).__init__()
def forward(self, x):
return {"1": x}
class C(torch.nn.Module):
def __init__(self):
super(C, self).__init__()
self.foo = torch.nn.Sequential(A(), B())
def forward(self, x):
return self.foo(x)
self.checkModule(C(), (torch.tensor(1),))
def test_moduledict(self):
class Inner(torch.nn.Module):
def forward(self, x):
return x + 10
class Inner2(torch.nn.Module):
def forward(self, x):
return x * 2
class Inner3(torch.nn.Module):
def forward(self, x):
return (x - 4) * 3
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
modules = OrderedDict([
('one', Inner()),
('two', Inner2()),
('three', Inner3()),
])
self.moduledict = nn.ModuleDict(modules)
def forward(self, x, skip_name):
# type: (Tensor, str)
names = torch.jit.annotate(List[str], [])
values = []
for name in self.moduledict:
names.append(name)
for name, mod in self.moduledict.items():
if name != skip_name:
names.append(name)
x = mod(x)
values.append(x)
for mod in self.moduledict.values():
x = mod(x)
values.append(x)
for key in self.moduledict.keys():
names.append(key)
return x, names
class M2(M):
def __init__(self):
super(M2, self).__init__()
def forward(self, x, skip_name):
# type: (Tensor, str)
names = torch.jit.annotate(List[str], [])
values = []
x2 = x
iter = 0
for name in self.moduledict:
names.append(name)
for i, (name, mod) in enumerate(self.moduledict.items()):
iter += i
if name != skip_name:
names.append(name)
x = mod(x)
values.append(x)
for i, mod in enumerate(self.moduledict.values()):
iter += i
x = mod(x)
values.append(x)
for i, key in enumerate(self.moduledict.keys()):
iter += i
names.append(key)
for mod, mod in zip(self.moduledict.values(), self.moduledict.values()):
iter += i
x2 = mod(mod(x2))
return x, x2, names, iter
for name in ["", "one", "two", "three"]:
inp = torch.tensor(1)
self.checkModule(M(), (inp, name))
self.checkModule(M2(), (inp, name))
def test_custom_container_forward(self):
class Inner(torch.nn.Module):
def forward(self, x):
return x + 10
class CustomSequential(nn.Sequential):
def __init__(self):
super(CustomSequential, self).__init__(
nn.ReLU(), Inner())
def forward(self, x):
x = x + 3
for mod in self:
x = mod(x)
return x - 5
self.checkModule(CustomSequential(), (torch.tensor(.5),))
class CustomModuleList(nn.ModuleList):
def __init__(self):
super(CustomModuleList, self).__init__(
[nn.ReLU(), Inner()])
def forward(self, x):
x = x + 3
for mod in self:
x = mod(x)
return x - 5
self.checkModule(CustomModuleList(), (torch.tensor(.5),))
class CustomModuleDict(nn.ModuleDict):
def __init__(self):
super(CustomModuleDict, self).__init__(
OrderedDict([
('one', Inner()),
('two', nn.ReLU()),
('three', Inner()),
]))
def forward(self, x):
x = x + 3
names = torch.jit.annotate(List[str], [])
for name, mod in self.items():
x = mod(x)
names.append(name)
return names, x - 5
self.checkModule(CustomModuleDict(), (torch.tensor(.5),))
def test_script_module_list_sequential(self):
class M(torch.jit.ScriptModule):
def __init__(self, mod_list):
super(M, self).__init__()
self.mods = mod_list
@torch.jit.script_method
def forward(self, v):
for m in self.mods:
v = m(v)
return v
with torch.jit.optimized_execution(False):
m = M(nn.Sequential(nn.ReLU()))
self.assertExportImportModule(m, (torch.randn(2, 2),))
def test_script_modulelist_index(self):
class Sub(torch.nn.Module):
def __init__(self, i):
super(Sub, self).__init__()
self.i = i
def forward(self, thing):
return thing - self.i
class M(torch.nn.Module):
def __init__(self):
super(M, self).__init__()
self.mods = nn.ModuleList([Sub(i) for i in range(10)])
def forward(self, v):
v = self.mods[4].forward(v)
v = self.mods[-1].forward(v)
v = self.mods[-9].forward(v)
return v
x = torch.tensor(1)
self.checkModule(M(), (x,))
class MForward(torch.nn.Module):
def __init__(self):
super(MForward, self).__init__()
self.mods = nn.ModuleList([Sub(i) for i in range(10)])
def forward(self, v):
v = self.mods[4](v)
v = self.mods[-1](v)
v = self.mods[-9](v)
return v
self.checkModule(MForward(), (torch.tensor(1),))
class M2(M):
def __init__(self):
super(M2, self).__init__()
def forward(self, v):
return self.mods[-11].forward(v)
with self.assertRaisesRegexWithHighlight(Exception, "Index -11 out of range", "self.mods[-11]"):
torch.jit.script(M2())
class M3(M):
def __init__(self):
super(M3, self).__init__()
def forward(self, v):
i = 3
return self.mods[i].forward(v)
with self.assertRaisesRegexWithHighlight(Exception, "Enumeration is supported", "self.mods[i]"):
torch.jit.script(M3())
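# Note: only integer-literal (possibly negative) indices are supported here; for a
# dynamically computed index, iterate with enumerate() or annotate the element type
# with a module interface (see test_typed_module_list below).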
def test_module_interface_special_methods(self):
class CustomModuleInterface(torch.nn.Module):
def __init__(self):
super(CustomModuleInterface, self).__init__()
class CustomModuleList(CustomModuleInterface, torch.nn.ModuleList):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.ModuleList.__init__(self, modules)
class CustomSequential(CustomModuleInterface, torch.nn.Sequential):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.Sequential.__init__(self, modules)
class CustomModuleDict(CustomModuleInterface, torch.nn.ModuleDict):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.ModuleDict.__init__(self, modules)
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
# work around aliasing issue for 'is' operator by scripting ReLU up front
self.submod = torch.jit.script(torch.nn.ReLU())
self.modulelist = CustomModuleList([self.submod])
self.sequential = CustomSequential(self.submod)
self.moduledict = CustomModuleDict({"submod": self.submod})
def forward(self, inputs):
assert self.modulelist[0] is self.submod, "__getitem__ failing for ModuleList"
assert len(self.modulelist) == 1, "__len__ failing for ModuleList"
for module in self.modulelist:
assert module is self.submod, "__iter__ failing for ModuleList"
assert self.sequential[0] is self.submod, "__getitem__ failing for Sequential"
assert len(self.sequential) == 1, "__len__ failing for Sequential"
for module in self.sequential:
assert module is self.submod, "__iter__ failing for Sequential"
assert self.moduledict["submod"] is self.submod, "__getitem__ failing for ModuleDict"
assert len(self.moduledict) == 1, "__len__ failing for ModuleDict"
# note: unable to index moduledict with a string variable currently
i = 0
for key in self.moduledict:
i += 1
assert i == len(self.moduledict), "iteration failing for ModuleDict"
assert "submod" in self.moduledict, "__contains__ fails for ModuleDict"
for key in self.moduledict.keys():
assert key == "submod", "keys() fails for ModuleDict"
for item in self.moduledict.items():
assert item[0] == "submod", "items() fails for ModuleDict"
assert item[1] is self.submod, "items() fails for ModuleDict"
for value in self.moduledict.values():
assert value is self.submod, "values() fails for ModuleDict"
return inputs
m = MyModule()
self.checkModule(m, [torch.randn(2, 2)])
def test_special_method_with_override(self):
class CustomModuleInterface(torch.nn.Module):
def __init__(self):
super(CustomModuleInterface, self).__init__()
class CustomModuleList(CustomModuleInterface, torch.nn.ModuleList):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.ModuleList.__init__(self, modules)
def __len__(self):
# this is arbitrary, just to check that the overridden py __len__ from
# CustomModuleList takes precedence over the automatically generated
# __len__ added by the jit compiler
return 2
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
# work around aliasing issue for 'is' operator by scripting ReLU up front
self.submod = torch.jit.script(torch.nn.ReLU())
self.modulelist = CustomModuleList([self.submod])
def forward(self, inputs):
assert len(self.modulelist) == 2, "__len__ failing for ModuleList"
return inputs
m = MyModule()
self.checkModule(m, [torch.randn(2, 2)])
mm = torch.jit.script(m)
def test_moduledict_getitem(self):
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
self.relu = torch.jit.script(torch.nn.ReLU())
self.tanh = torch.jit.script(torch.nn.Tanh())
self.moduledict = torch.nn.ModuleDict({"relu": self.relu,
"tanh": self.tanh})
def forward(self, input):
assert self.moduledict['relu'] is self.relu
assert self.moduledict['tanh'] is self.tanh
return input
m = MyModule()
self.checkModule(m, [torch.randn(2, 2)])
def test_moduledict_keyerror(self):
class BadModule(torch.nn.Module):
def __init__(self):
super(BadModule, self).__init__()
self.moduledict = torch.nn.ModuleDict({"foo": None,
"bar": None})
def forward(self, input):
assert self.moduledict['blah'] == "blah", "this is a keyerror"
with self.assertRaisesRegexWithHighlight(RuntimeError, "Key Error, blah", "self.moduledict['blah'"):
b = BadModule()
torch.jit.script(b)
class AnotherBadModule(torch.nn.Module):
def __init__(self):
super(AnotherBadModule, self).__init__()
self.moduledict = torch.nn.ModuleDict({"foo": None,
"bar": None})
def forward(self, input):
idx = 'blah'
assert self.moduledict[idx] == "blah", "this is a string literal error"
with self.assertRaisesRegexWithHighlight(RuntimeError, "Unable to extract string literal index. "
"ModuleDict indexing is only supported with string literals.",
"self.moduledict[idx]"):
b = AnotherBadModule()
torch.jit.script(b)
def test_normal_list_attribute_with_modules_error(self):
"""
Test that an attempt to script a module with a regular list attribute
containing other modules fails with a relevant error message.
"""
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
self.a = [torch.nn.ReLU(), torch.nn.ReLU()]
def forward(self):
return len(self.a)
error_msg = "Could not infer type of list element: Cannot infer concrete type of torch.nn.Module"
with self.assertRaisesRegexWithHighlight(RuntimeError, error_msg, "self.a"):
torch.jit.script(Mod())
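# A minimal sketch (hypothetical, not exercised by this suite) of the supported
# pattern: wrapping the submodules in nn.ModuleList gives the compiler a concrete
# container type, so an equivalent model scripts cleanly.
class _ModuleListSketch(torch.nn.Module):
    def __init__(self):
        super().__init__()
        # nn.ModuleList registers its entries as submodules, unlike a plain Python list.
        self.a = torch.nn.ModuleList([torch.nn.ReLU(), torch.nn.ReLU()])
    def forward(self):
        return len(self.a)
# torch.jit.script(_ModuleListSketch()) succeeds where torch.jit.script(Mod()) above raises.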
def test_empty_dict_override_contains(self):
class CustomModuleInterface(torch.nn.Module):
def __init__(self):
super(CustomModuleInterface, self).__init__()
class CustomModuleDict(CustomModuleInterface, torch.nn.ModuleDict):
def __init__(self, modules=None):
CustomModuleInterface.__init__(self)
torch.nn.ModuleDict.__init__(self, modules)
class MyModule(torch.nn.Module):
def __init__(self):
super(MyModule, self).__init__()
# work around aliasing issue for 'is' operator by scripting ReLU up front
self.submod = torch.jit.script(torch.nn.ReLU())
self.moduledict = CustomModuleDict()
def forward(self, inputs):
assert "submod" not in self.moduledict, "__contains__ fails for ModuleDict"
return inputs
m = MyModule()
self.checkModule(m, [torch.randn(2, 2)])
def test_typed_module_dict(self):
"""
Test that a type annotation can be provided for a ModuleDict that allows
non-static indexing.
"""
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
pass
class ImplementsInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
if isinstance(inp, torch.Tensor):
return torch.max(inp, dim=0)
return inp
class DoesNotImplementInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.max(inp, dim=0)
# Test annotation of submodule.
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
self.d = torch.nn.ModuleDict({"module": ImplementsInterface()})
def forward(self, x: torch.Tensor, key: str) -> Any:
value: ModuleInterface = self.d[key]
return value.forward(x)
m = Mod()
self.checkModule(m, (torch.randn(2, 2), "module"))
# Test annotation of self.
class ModDict(torch.nn.ModuleDict):
def __init__(self):
super().__init__({"module": ImplementsInterface()})
def forward(self, x: torch.Tensor, key: str) -> Any:
submodule: ModuleInterface = self[key]
return submodule.forward(x)
m = ModDict()
self.checkModule(m, (torch.randn(2, 2), "module"))
# Test error message thrown when annotated attribute does not comply with the
# annotation.
class ModWithWrongAnnotation(torch.nn.ModuleDict):
def __init__(self):
super().__init__()
self.d = torch.nn.ModuleDict({"module": DoesNotImplementInterface()})
def forward(self, x: torch.Tensor, key: str) -> Any:
submodule: ModuleInterface = self.d[key]
return submodule.forward(x)
with self.assertRaisesRegexWithHighlight(RuntimeError, r"Attribute module is not of annotated type", "self.d[key]"):
torch.jit.script(ModWithWrongAnnotation())
def test_typed_module_list(self):
"""
Test that a type annotation can be provided for a ModuleList that allows
non-static indexing.
"""
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
pass
class ImplementsInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
if isinstance(inp, torch.Tensor):
return torch.max(inp, dim=0)
return inp
class DoesNotImplementInterface(torch.nn.Module):
def forward(self, inp: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
return torch.max(inp, dim=0)
# Test annotation of submodule.
class Mod(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = torch.nn.ModuleList([ImplementsInterface()])
def forward(self, x: torch.Tensor, idx: int) -> Any:
value: ModuleInterface = self.l[idx]
return value.forward(x)
m = Mod()
self.checkModule(m, (torch.randn(2, 2), 0))
# Test annotation of self.
class ModList(torch.nn.ModuleList):
def __init__(self):
super().__init__([ImplementsInterface()])
def forward(self, x: torch.Tensor, idx: int) -> Any:
submodule: ModuleInterface = self[idx]
return submodule.forward(x)
m = ModList()
self.checkModule(m, (torch.randn(2, 2), 0))
# Test error message thrown when annotated attribute does not comply with the
# annotation.
class ModWithWrongAnnotation(torch.nn.ModuleList):
def __init__(self):
super().__init__()
self.l = torch.nn.ModuleList([DoesNotImplementInterface()])
def forward(self, x: torch.Tensor, idx: int) -> Any:
submodule: ModuleInterface = self.l[idx]
return submodule.forward(x)
with self.assertRaisesRegexWithHighlight(RuntimeError, r"Attribute 0 is not of annotated type", "self.l[idx]"):
torch.jit.script(ModWithWrongAnnotation())
def test_module_properties(self):
class ModuleWithProperties(torch.nn.Module):
__jit_unused_properties__ = ["ignored_attr"]
def __init__(self, a: int):
super().__init__()
self.a = a
def forward(self, a: int, b: int):
self.attr = a + b
return self.attr
@property
def attr(self):
return self.a
@property
def ignored_attr(self):
return sum([self.a])
@torch.jit.unused
@property
def ignored_attr_2(self):
return sum([self.a])
@ignored_attr_2.setter
def ignored_attr_2(self, value):
self.a = sum([self.a])
@attr.setter
def attr(self, a: int):
if a > 0:
self.a = a
else:
self.a = 0
class ModuleWithNoSetter(torch.nn.Module):
def __init__(self, a: int):
super().__init__()
self.a = a
def forward(self, a: int, b: int):
self.attr + a + b
@property
def attr(self):
return self.a + 1
self.checkModule(ModuleWithProperties(5), (5, 6,))
self.checkModule(ModuleWithProperties(5), (-5, -6,))
self.checkModule(ModuleWithNoSetter(5), (5, 6,))
self.checkModule(ModuleWithNoSetter(5), (-5, -6,))
mod = ModuleWithProperties(3)
scripted_mod = torch.jit.script(mod)
with self.assertRaisesRegex(AttributeError, "has no attribute"):
scripted_mod.ignored_attr
def test_module_inplace_construct(self):
class M(nn.Module):
def __init__(self, start: int):
super().__init__()
self.linear = nn.Linear(3, 3)
self.attribute = start
self.parameter = nn.Parameter(torch.tensor(3, dtype=torch.float))
def method(self) -> int:
return self.attribute
@torch.jit.unused
def unused_method(self):
return self.attribute + self.attribute
def forward(self, x):
return self.linear(self.linear(x))
class N(nn.Module):
def __init__(self):
super().__init__()
self.linear = nn.Linear(4, 4)
@torch.jit.ignore
def ignored_method(self, x):
return x
def forward(self, x):
return self.linear(x)
m = torch.jit.script(M(3))
n = torch.jit.script(N())
n._reconstruct(m._c)
inp = torch.rand((3))
# Check that both modules produce the same output.
with torch.no_grad():
m_out = m(inp)
n_out = n(inp)
self.assertEqual(m_out, n_out)
# Check that ignored method is still intact.
self.assertEqual(inp, n.ignored_method(inp))
def test_parameterlist_script_getitem(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.module_list = nn.ModuleList([nn.Linear(1, 1) for _ in range(10)])
self.parameter_list = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(10)])
def forward(self, x):
self.module_list[0]
self.parameter_list[0]
return x
self.checkModule(MyModule(), (torch.zeros(1),))
def test_parameterlist_script_iter(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.module_list = nn.ModuleList([nn.Linear(1, 1) for _ in range(10)])
self.parameter_list = nn.ParameterList([nn.Parameter(torch.zeros(1)) for _ in range(10)])
def forward(self, x):
r = x
for i, p in enumerate(self.parameter_list):
r = r + p + i
return r
self.checkModule(MyModule(), (torch.zeros(1),))
def test_parameterdict_script_getitem(self):
class MyModule(nn.Module):
def __init__(self):
super().__init__()
self.parameter_dict = nn.ParameterDict({k: nn.Parameter(torch.zeros(1)) for k in ['a', 'b', 'c']})
def forward(self, x):
return self.parameter_dict['a'] * x + self.parameter_dict['b'] * self.parameter_dict['c']
self.checkModule(MyModule(), (torch.ones(1),))
|
pytorch-master
|
test/jit/test_module_containers.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import unittest
from torch.testing._internal.common_utils import enable_profiling_mode_for_profiling_tests, GRAPH_EXECUTOR, ProfilingMode
import torch
import torch.nn as nn
import torch.nn.functional as F
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase, RUN_CUDA
from torch.testing._internal.common_utils import slowTest, suppress_warnings
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
except RuntimeError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
class MnistNet(nn.Module):
def __init__(self):
super(MnistNet, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.reshape(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
class TestModels(JitTestCase):
@staticmethod
def _test_dcgan_models(self, device, check_export_import=True):
class DCGANGenerator(nn.Module):
def __init__(self, nz, ngf, nc):
super(DCGANGenerator, self).__init__()
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
return self.main(input)
class DCGANDiscriminator(nn.Module):
def __init__(self, nc, ndf):
super(DCGANDiscriminator, self).__init__()
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
return self.main(input).view(-1, 1).squeeze(1)
bs, nz, ngf, nc, ndf = 5, 6, 9, 3, 10
self.checkTrace(DCGANGenerator(nz, ngf, nc).to(device),
(torch.rand(bs, nz, 1, 1, device=device),),
export_import=check_export_import)
example_input = DCGANGenerator(nz, ngf, nc).to(device)(torch.rand(bs, nz, 1, 1, device=device))
self.checkTrace(DCGANDiscriminator(nc, ndf).to(device), (example_input,),
export_import=check_export_import)
def test_dcgan_models(self):
self._test_dcgan_models(self, device='cpu')
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_dcgan_models_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self._test_dcgan_models(self, device='cuda', check_export_import=False)
@staticmethod
def _test_neural_style(self, device, check_export_import=True):
class TransformerNet(torch.nn.Module):
def __init__(self):
super(TransformerNet, self).__init__()
# Initial convolution layers
self.conv1 = ConvLayer(3, 32, kernel_size=9, stride=1)
self.in1 = torch.nn.InstanceNorm2d(32, affine=True)
self.conv2 = ConvLayer(32, 64, kernel_size=3, stride=2)
self.in2 = torch.nn.InstanceNorm2d(64, affine=True)
self.conv3 = ConvLayer(64, 128, kernel_size=3, stride=2)
self.in3 = torch.nn.InstanceNorm2d(128, affine=True)
# Residual layers
self.res1 = ResidualBlock(128)
self.res2 = ResidualBlock(128)
self.res3 = ResidualBlock(128)
self.res4 = ResidualBlock(128)
self.res5 = ResidualBlock(128)
# Upsampling Layers
self.deconv1 = UpsampleConvLayer(128, 64, kernel_size=3, stride=1, upsample=2)
self.in4 = torch.nn.InstanceNorm2d(64, affine=True)
self.deconv2 = UpsampleConvLayer(64, 32, kernel_size=3, stride=1, upsample=2)
self.in5 = torch.nn.InstanceNorm2d(32, affine=True)
self.deconv3 = ConvLayer(32, 3, kernel_size=9, stride=1)
# Non-linearities
self.relu = torch.nn.ReLU()
def forward(self, X):
y = self.relu(self.in1(self.conv1(X)))
y = self.relu(self.in2(self.conv2(y)))
y = self.relu(self.in3(self.conv3(y)))
y = self.res1(y)
y = self.res2(y)
y = self.res3(y)
y = self.res4(y)
y = self.res5(y)
y = self.relu(self.in4(self.deconv1(y)))
y = self.relu(self.in5(self.deconv2(y)))
y = self.deconv3(y)
return y
class ConvLayer(torch.nn.Module):
def __init__(self, in_channels, out_channels, kernel_size, stride):
super(ConvLayer, self).__init__()
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
out = self.reflection_pad(x)
out = self.conv2d(out)
return out
class ResidualBlock(torch.nn.Module):
"""ResidualBlock
introduced in: https://arxiv.org/abs/1512.03385
recommended architecture: http://torch.ch/blog/2016/02/04/resnets.html
"""
def __init__(self, channels):
super(ResidualBlock, self).__init__()
self.conv1 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in1 = torch.nn.InstanceNorm2d(channels, affine=True)
self.conv2 = ConvLayer(channels, channels, kernel_size=3, stride=1)
self.in2 = torch.nn.InstanceNorm2d(channels, affine=True)
self.relu = torch.nn.ReLU()
def forward(self, x):
residual = x
out = self.relu(self.in1(self.conv1(x)))
out = self.in2(self.conv2(out))
out = out + residual
return out
class UpsampleConvLayer(torch.nn.Module):
"""UpsampleConvLayer
Upsamples the input and then does a convolution. This method gives better results
compared to ConvTranspose2d.
ref: http://distill.pub/2016/deconv-checkerboard/
"""
def __init__(self, in_channels, out_channels, kernel_size, stride, upsample=None):
super(UpsampleConvLayer, self).__init__()
self.upsample = upsample
if upsample:
self.upsample_layer = torch.nn.Upsample(mode='nearest', scale_factor=upsample)
reflection_padding = kernel_size // 2
self.reflection_pad = torch.nn.ReflectionPad2d(reflection_padding)
self.conv2d = torch.nn.Conv2d(in_channels, out_channels, kernel_size, stride)
def forward(self, x):
x_in = x
if self.upsample:
x_in = self.upsample_layer(x_in)
out = self.reflection_pad(x_in)
out = self.conv2d(out)
return out
self.checkTrace(TransformerNet(), (torch.rand(5, 3, 16, 16),), export_import=check_export_import)
@slowTest
def test_neural_style(self):
self._test_neural_style(self, device='cpu')
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_neural_style_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self._test_neural_style(self, device='cuda', check_export_import=False)
@unittest.skipIf(GRAPH_EXECUTOR == ProfilingMode.LEGACY, "Bug found in deprecated executor")
@staticmethod
def _test_mnist(self, device, check_export_import=True):
# eval() is present because dropout makes this nondeterministic
with enable_profiling_mode_for_profiling_tests():
self.checkTrace(MnistNet().to(device).eval(), (torch.rand(5, 1, 28, 28, device=device),),
export_import=check_export_import)
def test_mnist(self):
self._test_mnist(self, device='cpu')
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_mnist_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self._test_mnist(self, device='cuda', check_export_import=False)
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_mnist_training_leaks_no_memory_cuda(self):
net = MnistNet().cuda()
# MnistNet uses dropout, don't check its trace
traced_net = torch.jit.trace(net, [torch.randn(5, 1, 28, 28, device='cuda')],
check_trace=False)
def train(iters):
for _ in range(iters):
# Get some fake data
inp = torch.randn(5, 1, 28, 28, device='cuda')
out = traced_net(inp)
# Here's some fake loss
out.sum().backward()
# Zero out grads
traced_net.zero_grad()
# Set it up so the params have .grad fields so they are not reported as leaks
train(1)
with self.assertLeaksNoCudaTensors():
train(5)
@staticmethod
def _test_reinforcement_learning(self, device, test_export_import=True):
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.affine1 = nn.Linear(4, 128)
self.affine2 = nn.Linear(128, 2)
def forward(self, x):
x = F.relu(self.affine1(x))
action_scores = self.affine2(x)
return F.softmax(action_scores, dim=1)
with enable_profiling_mode_for_profiling_tests():
self.checkTrace(Policy().to(device), (torch.rand(1, 4, device=device),),
export_import=test_export_import)
def test_reinforcement_learning(self):
self._test_reinforcement_learning(self, device='cpu')
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_reinforcement_learning_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self._test_reinforcement_learning(self, device='cuda', test_export_import=False)
@staticmethod
def _test_snli(self, device, check_export_import=True, quantized=False):
class Bottle(nn.Module):
def forward(self, input):
if len(input.size()) <= 2:
return super(Bottle, self).forward(input)
size = input.size()[:2]
out = super(Bottle, self).forward(input.view(size[0] * size[1], -1))
return out.view(size[0], size[1], -1)
class Linear(Bottle, nn.Linear):
pass
class Encoder(nn.Module):
def __init__(self, config):
super(Encoder, self).__init__()
self.config = config
input_size = config.d_proj if config.projection else config.d_embed
dropout = 0 if config.n_layers == 1 else config.dp_ratio
self.rnn = nn.LSTM(input_size=input_size, hidden_size=config.d_hidden,
num_layers=config.n_layers, dropout=dropout,
bidirectional=config.birnn)
def forward(self, inputs):
batch_size = inputs.size()[1]
state_shape = self.config.n_cells, batch_size, self.config.d_hidden
h0 = c0 = inputs.new_zeros(state_shape)
outputs, (ht, ct) = self.rnn(inputs, (h0, c0))
return ht[-1] if not self.config.birnn else ht[-2:].transpose(0, 1).contiguous().view(batch_size, -1)
class SNLIClassifier(nn.Module):
def __init__(self, config):
super(SNLIClassifier, self).__init__()
self.config = config
self.embed = nn.Embedding(config.n_embed, config.d_embed)
self.projection = Linear(config.d_embed, config.d_proj)
self.encoder = Encoder(config)
self.dropout = nn.Dropout(p=config.dp_ratio)
self.relu = nn.ReLU()
seq_in_size = 2 * config.d_hidden
if self.config.birnn:
seq_in_size *= 2
lin_config = [seq_in_size] * 2
self.out = nn.Sequential(
Linear(*lin_config),
self.relu,
self.dropout,
Linear(*lin_config),
self.relu,
self.dropout,
Linear(*lin_config),
self.relu,
self.dropout,
Linear(seq_in_size, config.d_out))
def forward(self, premise, hypothesis):
prem_embed = self.embed(premise)
hypo_embed = self.embed(hypothesis)
if self.config.fix_emb:
prem_embed = prem_embed.detach()
hypo_embed = hypo_embed.detach()
if self.config.projection:
prem_embed = self.relu(self.projection(prem_embed))
hypo_embed = self.relu(self.projection(hypo_embed))
premise = self.encoder(prem_embed)
hypothesis = self.encoder(hypo_embed)
scores = self.out(torch.cat([premise, hypothesis], 1))
return scores
class Config:
n_embed = 100
d_embed = 100
d_proj = 300
dp_ratio = 0.0 # For deterministic testing TODO: change by fixing seed in checkTrace?
d_hidden = 30
birnn = True
d_out = 300
fix_emb = True
projection = True
n_layers = 2
n_cells = 4 # 2 * n_layers because birnn = True
premise = torch.LongTensor(48, 64).random_(0, 100).to(device)
hypothesis = torch.LongTensor(24, 64).random_(0, 100).to(device)
if quantized:
snli = SNLIClassifier(Config()).cpu()
torch.jit.quantized.quantize_linear_modules(snli)
# we don't do export/import checks because we would need to call
# _pack/_unpack
self.checkTrace(snli, (premise, hypothesis), inputs_require_grads=False,
export_import=False)
else:
self.checkTrace(SNLIClassifier(Config()).to(device), (premise, hypothesis),
inputs_require_grads=False, export_import=check_export_import)
@slowTest
def test_snli(self):
self._test_snli(self, device='cpu')
@skipIfNoFBGEMM
# Suppression: this exercises a deprecated API
@suppress_warnings
def test_snli_quantized(self):
self._test_snli(self, device='cpu', quantized=True)
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_snli_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self._test_snli(self, device='cuda', check_export_import=False)
@staticmethod
def _test_super_resolution(self, device, check_export_import=True):
class Net(nn.Module):
def __init__(self, upscale_factor):
super(Net, self).__init__()
self.relu = nn.ReLU()
self.conv1 = nn.Conv2d(1, 64, (5, 5), (1, 1), (2, 2))
self.conv2 = nn.Conv2d(64, 64, (3, 3), (1, 1), (1, 1))
self.conv3 = nn.Conv2d(64, 32, (3, 3), (1, 1), (1, 1))
self.conv4 = nn.Conv2d(32, upscale_factor ** 2, (3, 3), (1, 1), (1, 1))
self.pixel_shuffle = nn.PixelShuffle(upscale_factor)
def forward(self, x):
x = self.relu(self.conv1(x))
x = self.relu(self.conv2(x))
x = self.relu(self.conv3(x))
x = self.pixel_shuffle(self.conv4(x))
return x
net = Net(upscale_factor=4).to(device)
self.checkTrace(net, (torch.rand(5, 1, 32, 32, device=device),),
export_import=check_export_import)
@slowTest
def test_super_resolution(self):
self._test_super_resolution(self, device='cpu')
@unittest.skipIf(not RUN_CUDA, 'no CUDA')
def test_super_resolution_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self._test_super_resolution(self, device='cuda', check_export_import=False)
@suppress_warnings
def test_time_sequence_prediction(self):
class Sequence(torch.jit.ScriptModule):
def __init__(self):
super(Sequence, self).__init__()
self.lstm1 = nn.LSTMCell(1, 51)
self.lstm2 = nn.LSTMCell(51, 51)
self.linear = nn.Linear(51, 1)
@torch.jit.script_method
def forward(self, input):
# TODO: add future as input with default val
# see https://github.com/pytorch/pytorch/issues/8724
outputs = torch.empty((3, 0))
h_t = torch.zeros((3, 51))
c_t = torch.zeros((3, 51))
h_t2 = torch.zeros((3, 51))
c_t2 = torch.zeros((3, 51))
output = torch.zeros([3, 51])
future = 2
# TODO: chunk call should appear as the for loop iterable
# We hard-code it to 4 for now.
a, b, c, d = input.chunk(input.size(1), dim=1)
for input_t in (a, b, c, d):
h_t, c_t = self.lstm1(input_t, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
output = self.linear(h_t2)
outputs = torch.cat((outputs, output), 1)
for _ in range(future): # if we should predict the future
h_t, c_t = self.lstm1(output, (h_t, c_t))
h_t2, c_t2 = self.lstm2(h_t, (h_t2, c_t2))
output = self.linear(h_t2)
outputs = torch.cat((outputs, output), 1)
return outputs
class Traced(nn.Module):
def __init__(self):
super(Traced, self).__init__()
self.seq = Sequence()
def forward(self, input):
return self.seq.forward(input)
# disabled due to a jitter issue that will be fixed by using load/store in the compiler
with torch._jit_internal._disable_emit_hooks():
# TODO: toggle export_import once above issues are fixed
self.checkTrace(Traced(), (torch.rand(3, 4),),
export_import=False)
@staticmethod
def _test_vae(self, device, check_export_import=True, quantized=False):
class VAE(nn.Module):
def __init__(self):
super(VAE, self).__init__()
self.fc1 = nn.Linear(784, 400)
self.fc21 = nn.Linear(400, 20)
self.fc22 = nn.Linear(400, 20)
self.fc3 = nn.Linear(20, 400)
self.fc4 = nn.Linear(400, 784)
def encode(self, x):
h1 = F.relu(self.fc1(x))
return self.fc21(h1), self.fc22(h1)
def reparameterize(self, mu, logvar):
if self.training:
std = torch.exp(0.5 * logvar)
eps = torch.randn_like(std)
return eps.mul(std).add_(mu)
else:
return mu
def decode(self, z):
h3 = F.relu(self.fc3(z))
return torch.sigmoid(self.fc4(h3))
def forward(self, x):
mu, logvar = self.encode(x.view(-1, 784))
z = self.reparameterize(mu, logvar)
return self.decode(z), mu, logvar
if quantized:
vae = VAE().to(device).eval()
torch.jit.quantized.quantize_linear_modules(vae)
# We don't do export/import checks because we would need to call
# _unpack and _pack
self.checkTrace(vae, (torch.rand(128, 1, 28, 28, device=device),),
export_import=False, allow_unused=True,
inputs_require_grads=False)
else:
with enable_profiling_mode_for_profiling_tests():
# eval() is present because randn_like makes this nondeterministic
self.checkTrace(VAE().to(device).eval(), (torch.rand(128, 1, 28, 28, device=device),),
export_import=check_export_import)
def test_vae(self):
self._test_vae(self, device='cpu')
@skipIfNoFBGEMM
# Suppression: this exercises a deprecated API
@suppress_warnings
def test_vae_quantized(self):
self._test_vae(self, device='cpu', quantized=True)
@unittest.skipIf(not RUN_CUDA, "no CUDA")
def test_vae_cuda(self):
# XXX: export_import on CUDA modules doesn't work (#11480)
self._test_vae(self, device='cuda', check_export_import=False)
@slowTest
@skipIfNoTorchVision
def test_script_module_trace_resnet18(self):
x = torch.ones(1, 3, 224, 224)
m_orig = torch.jit.trace(torchvision.models.resnet18(), torch.ones(1, 3, 224, 224))
m_import = self.getExportImportCopy(m_orig)
input = torch.randn(1, 3, 224, 224, requires_grad=True)
output_orig = m_orig(input)
output_orig.sum().backward()
grad_orig = input.grad.clone()
input.grad.zero_()
output_import = m_import(input)
output_import.sum().backward()
grad_import = input.grad.clone()
self.assertEqual(output_orig, output_import)
self.assertEqual(grad_orig, grad_import)
@slowTest
@skipIfNoTorchVision
def test_script_module_script_resnet(self):
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(torch.jit.ScriptModule):
expansion = 1
__constants__ = ['downsample']
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
self.downsample = downsample
self.stride = stride
@torch.jit.script_method
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(torch.jit.ScriptModule):
__constants__ = ['layer1', 'layer2', 'layer3', 'layer4']
def __init__(self, block, layers, num_classes=1000):
super(ResNet, self).__init__()
self.inplanes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
@torch.jit.script_method
def forward(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
resnet18 = ResNet(BasicBlock, [2, 2, 2, 2])
resnet18_imported = self.getExportImportCopy(resnet18)
input = torch.randn(1, 3, 224, 224, requires_grad=True)
output_orig = resnet18(input)
output_orig.sum().backward()
grad_orig = input.grad.clone()
input.grad.zero_()
output_import = resnet18_imported(input)
output_import.sum().backward()
grad_import = input.grad.clone()
self.assertEqual(output_orig, output_import)
self.assertEqual(grad_orig, grad_import)
@skipIfNoTorchVision
def test_alexnet(self):
x = torch.ones(1, 3, 224, 224)
model = torchvision.models.AlexNet()
with torch.random.fork_rng(devices=[]):
g, outputs, inputs = torch.jit._get_trace_graph(model, x, return_inputs=True)
self.run_pass('cse', g)
m = self.createFunctionFromGraph(g)
with torch.random.fork_rng(devices=[]):
self.assertEqual(outputs, m(*inputs))
|
pytorch-master
|
test/jit/test_models.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from torch._C import parse_ir
from torch.testing import FileCheck
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
# Tests that Python slice class is supported in TorchScript
class TestIgnorableArgs(JitTestCase):
def test_slice_ignorable_args_for_slice(self):
graph_str = """graph():
%13 : int = prim::Constant[value=0]()
%10 : bool = prim::Constant[value=0]()
%8 : NoneType = prim::Constant()
%0 : int = prim::Constant[value=1]()
%1 : int = prim::Constant[value=2]()
%2 : int = prim::Constant[value=3]()
%3 : int = prim::Constant[value=4]()
%4 : int = prim::Constant[value=9]()
%5 : int[] = prim::ListConstruct(%0, %1, %2, %3, %4, %4)
%6 : int[] = prim::ListConstruct(%0, %1, %2, %3, %4, %4)
%7 : int[][] = prim::ListConstruct(%5, %6)
%val.1 : Tensor = aten::tensor(%7, %8, %8, %10)
%16 : Tensor = aten::slice(%val.1, %13, %1, %8, %0)
%20 : Tensor = aten::slice(%16, %0, %8, %0, %0)
return (%20)"""
graph = parse_ir(graph_str)
function = self.createFunctionFromGraph(graph)
function_copy = self.getExportImportCopy(function)
src = str(function.code)
# For a signature:
# aten::slice(Tensor self, int dim, int start, int end, int step) -> Tensor
# We ignore trailing arguments after start=2 for dim 0
# and after end=1 for dim 1
# because the remaining arguments in %16 and %20 hold the schema's default values.
FileCheck().check("torch.slice(torch.slice(torch.tensor(_0), 0, 2), 1, None, 1)").run(src)
self.assertEqual(function(), function_copy())
def test_add_out_ignorable_args(self):
@torch.jit.script
def fn(x: torch.Tensor, y: torch.Tensor):
torch.add(x, y, out=y)
FileCheck().check("torch.add(x, y, out=y)").run(fn.code)
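# A minimal sketch (hypothetical, not part of the original tests) of where such
# defaulted arguments come from: an ordinary Python slice lowers to aten::slice,
# and trailing arguments that keep their schema defaults may be dropped when the
# scripted code is serialized.
def _slice_sketch(x: torch.Tensor) -> torch.Tensor:
    return x[2:]
# Inspecting torch.jit.script(_slice_sketch).code shows a slice call whose omitted
# trailing arguments are exactly the schema defaults discussed above.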
|
pytorch-master
|
test/jit/test_ignorable_args.py
|
# Owner(s): ["oncall: jit"]
import io
import unittest
from itertools import product
from typing import Any
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.jit._recursive import wrap_cpp_module
from torch.testing import FileCheck
from torch.testing._internal.common_quantization import skipIfNoFBGEMM
from torch.testing._internal.common_quantized import override_quantized_engine
from torch.testing._internal.common_utils import set_default_dtype, skipCUDAMemoryLeakCheckIf
from torch.testing._internal.jit_utils import JitTestCase
from torch.utils import mkldnn as mkldnn_utils
try:
import torchvision
HAS_TORCHVISION = True
except ImportError:
HAS_TORCHVISION = False
skipIfNoTorchVision = unittest.skipIf(not HAS_TORCHVISION, "no torchvision")
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
TEST_CUDA = torch.cuda.is_available()
TEST_ROCM = torch.cuda.is_available() and torch.version.hip is not None
TEST_CUDNN = False
if TEST_CUDA and not TEST_ROCM: # Skip ROCM
torch.ones(1).cuda() # initialize cuda context
TEST_CUDNN = TEST_CUDA and torch.backends.cudnn.is_acceptable(torch.tensor(1., device=torch.device('cuda:0')))
def removeExceptions(graph):
for n in graph.findAllNodes('prim::RaiseException'):
n.destroy()
class TestFreezing(JitTestCase):
def test_freeze_module(self):
class M(nn.Module):
def __init__(self):
super(M, self).__init__()
self.a = 1 # folded
self.b = 1.2 # folded
self.c = "hello" # folded
self.c2 = "hi\xA1" # not folded
self.d = [1, 1] # folded
self.e = [1.0, 1.1] # folded
self.f = ["hello", "world"] # folded
self.f2 = [(1, "Over \u0e55\u0e57 57")]
self.g = ([1, 2], 3.2, "4.4", torch.tensor([5.5], requires_grad=True)) # folded
self.h = {"layer" : [torch.tensor([7.7], requires_grad=True)]}
self.h2 = {"layer\xB1" : [torch.tensor([8.8], requires_grad=True)]}
self.t = torch.tensor([1.2, 2.4], requires_grad=True) # folded
self.ts = [torch.tensor([1.0, 2.0], requires_grad=True), torch.tensor([3.0, 4.0], requires_grad=True)] # folded
self.tt = [[torch.tensor([3.3, 2.3], requires_grad=True), None]]
def forward(self, x):
return str(self.a) + str(self.b) + self.c + self.c2 + str(self.d) + \
str(self.e) + str(self.f) + str(self.f2) + str(self.g) + \
str(self.h) + str(self.h2) + str(self.t) + str(self.ts) + str(self.tt)
m = torch.jit.script(M())
m.eval()
input = torch.randn(2, 2)
output_s = m.forward(input)
m._c = torch._C._freeze_module(m._c)
buffer = io.BytesIO()
torch.jit.save(m._c, buffer)
buffer.seek(0)
m2 = torch.jit.load(buffer)
# Check if frozen module looks as below:
# module m {
# attributes {
# tt = ...
# }
# ...
# }
self.assertFalse(m2._c.hasattr('a'))
self.assertFalse(m2._c.hasattr('b'))
self.assertFalse(m2._c.hasattr('c'))
self.assertFalse(m2._c.hasattr('c2'))
self.assertFalse(m2._c.hasattr('d'))
self.assertFalse(m2._c.hasattr('e'))
self.assertFalse(m2._c.hasattr('f'))
self.assertFalse(m2._c.hasattr('f2'))
self.assertFalse(m2._c.hasattr('g'))
self.assertFalse(m2._c.hasattr('h'))
self.assertFalse(m2._c.hasattr('h2'))
self.assertFalse(m2._c.hasattr('t'))
self.assertFalse(m2._c.hasattr('ts'))
self.assertFalse(m2._c.hasattr('tt'))
output_f = m2.forward(input)
self.assertEqual(output_s, output_f)
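# A hypothetical helper (not used by these tests): the public torch.jit.freeze
# entry point performs the same attribute folding as the private
# torch._C._freeze_module call above, given an already-scripted module in eval mode.
def _freeze_via_public_api(mod):
    scripted = torch.jit.script(mod).eval()
    return torch.jit.freeze(scripted)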
def test_freeze_module_with_submodule(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = 11
self.b = 2
def forward(self, x):
return self.a + self.b
class SubModule2(nn.Module):
def __init__(self):
super(SubModule2, self).__init__()
self.a = 12
self.b = 2
def forward(self, x):
self.b = 30
return self.a + self.b
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub1 = SubModule()
self.sub2 = SubModule2()
self.a = 3
self.b = 4
def forward(self, x):
self.b = 20
return self.sub1(x) + self.a + self.b + self.sub2(x)
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(2, 2)
output_s = m.forward(input)
mf = torch.jit.freeze(m)
# Check if frozen module looks as below:
# module m {
# attributes {
# sub2 = ...
# b =
# }
# ...
# submodule {
# module m {
# attributes {
# sub2 = ...
# b =
# }
# ...
# }
# }
# }
mf = mf._c
self.assertFalse(mf.hasattr('sub1'))
self.assertFalse(mf.hasattr('a'))
self.assertTrue(mf.hasattr('b'))
self.assertTrue(mf.hasattr('sub2'))
self.assertTrue(mf.sub2.hasattr('b')) # verify b is preserved in sub2
self.assertFalse(mf.sub2.hasattr('a')) # verify a is removed in sub2
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
def test_freeze_module_with_fork(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = torch.ones(20, 20)
self.b = torch.ones(20, 20)
def forward(self, x):
return self.a * self.b + x
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub = SubModule()
def forward(self, x):
fut = torch.jit._fork(self.sub.forward, x)
y_hat = self.sub(x)
y = torch.jit._wait(fut)
return y_hat + y
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(20, 20)
output_s = m.forward(input)
mf = torch._C._freeze_module(m._c)
# Check if frozen module looks as below:
# module m {
# attributes {
# }
# ...
# submodule {
# }
# }
self.assertFalse(mf.hasattr('a'))
self.assertFalse(mf.hasattr('b'))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
def test_freeze_module_with_nested_fork(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = torch.ones(20, 20)
self.b = torch.ones(20, 20)
def forward(self, x):
return self.a * self.b + x
class SubModule2(nn.Module):
def __init__(self):
super(SubModule2, self).__init__()
self.sub = SubModule()
self.c = torch.ones(20, 20)
def forward(self, x):
fut = torch.jit._fork(self.sub.forward, x)
y_hat = self.sub(x)
y = torch.jit._wait(fut)
return y_hat + y + self.c
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub = SubModule2()
self.d = 1
def forward(self, x):
fut = torch.jit._fork(self.sub.forward, x)
y_hat = self.sub(x)
y = torch.jit._wait(fut)
self.d = 2
return y_hat * y + self.d
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(20, 20)
output_s = m.forward(input)
mf = torch._C._freeze_module(m._c)
# Check if frozen module looks as below:
# module m {
# attributes {
# }
# ...
# submodule {
# }
# }
self.assertFalse(mf.hasattr('a'))
self.assertFalse(mf.hasattr('b'))
self.assertFalse(mf.hasattr('c'))
self.assertTrue(mf.hasattr('d'))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
def test_freeze_module_with_fork2(self):
@torch.jit.script
def foo(x):
return x * 2
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.a = torch.ones(20, 20)
self.b = torch.ones(20, 20)
def forward(self, x):
fut = torch.jit._fork(foo, self.a)
y_hat = foo(self.b)
y = torch.jit._wait(fut)
return y_hat + y
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(2, 2)
output_s = m.forward(input)
mf = torch._C._freeze_module(m._c)
# Check if frozen module looks as below:
# module m {
# attributes {
# self.a = ...
# self.b = ..
# }
# ...
# submodule {
# }
# }
# TODO: Although there is no mutation, the alias analysis
# conservatively assumes there is one because attributes are
# passed to the fork subgraph, so 'a' is preserved while 'b' is folded.
self.assertTrue(mf.hasattr('a'))
self.assertFalse(mf.hasattr('b'))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
def test_freeze_module_with_fork_calling_module_method(self):
@torch.jit.script
def foo(x, y):
return x * y
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.a = torch.ones(20, 20)
self.b = torch.ones(20, 20)
@torch.jit.export
def foo(self, x):
return x * self.a
@torch.jit.export
def bar(self, x):
return x * self.b
def forward(self, x):
fut = torch.jit._fork(self.foo, self.b)
y_hat = self.bar(self.a)
y = torch.jit._wait(fut)
return y_hat + y
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(2, 2)
output_s = m.forward(input)
mf = torch._C._freeze_module(m._c)
# Check if frozen module looks as below:
# module m {
# attributes {
# self.b = ..
# }
# ...
# TODO: Although there is no mutation, the alias analysis
# conservatively assumes there is one because attributes are
# passed to the fork subgraph. 'b' is preserved.
self.assertFalse(mf.hasattr('a'))
self.assertTrue(mf.hasattr('b'))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
def test_freeze_module_with_sharedclasstype(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = torch.tensor([1.1])
self.b = torch.tensor([2.2])
def forward(self, x):
return self.a + self.b
@torch.jit.export
def modify_a(self, x):
self.a[0] += 10
return self.b
@torch.jit.export
def modify_b(self, x):
self.b[0] += 20
return self.a
class SubModule2(nn.Module):
def __init__(self):
super(SubModule2, self).__init__()
self.sub = SubModule()
self.b = torch.tensor([3.3])
def forward(self, x):
y = self.sub.modify_b(x)
return y + self.b
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub1 = SubModule() # sub1 and sub2.sub shared same class type.
self.sub2 = SubModule2()
self.a = torch.tensor([4.4])
def forward(self, x):
z = self.sub1.modify_a(x)
return self.sub2(x) + z + self.a
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(2, 2)
output_s = m.forward(input)
mf = torch._C._freeze_module(m._c)
# Checking if Frozen module looks as below
# module mf {
# attributes {
# sub1 = ...
# sub2 = ...
# }
# ...
# submodules {
# module sub1 {
# attributes {
# a = ...
# b = ...
# }
# ...
# }
# module sub2 {
# attributes {
# sub = ...
# }
# ...
# submodule {
# module sub {
# attributes {
# a = ...
# b = ...
# }
# ...
# }
# }
# }
# }
# }
self.assertTrue(mf.hasattr('sub1'))
self.assertTrue(mf.sub1.hasattr('a'))
self.assertTrue(mf.sub1.hasattr('b'))
self.assertFalse(mf.hasattr('a'))
self.assertTrue(mf.hasattr('sub2'))
self.assertTrue(mf.sub2.hasattr('sub'))
self.assertFalse(mf.sub2.hasattr('b'))
self.assertTrue(mf.sub2.sub.hasattr('a'))
self.assertTrue(mf.sub2.sub.hasattr('b'))
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
def test_freeze_module_with_nestedaliasing(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = torch.tensor([1.1])
self.b = torch.tensor([2.2])
def forward(self, x):
return self.a + self.b
@torch.jit.export
def modify_a(self, x):
self.a[0] = 10
return self.b
@torch.jit.export
def modify_b(self, x):
self.b[0] = 20
return self.a
Sub = SubModule()
class SubModule2(nn.Module):
def __init__(self):
super(SubModule2, self).__init__()
self.sub = Sub # aliasing
def forward(self, x):
return self.sub.a
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub1 = Sub # aliasing
self.sub2 = SubModule2()
def forward(self, x):
z = self.sub1.modify_a(x)
return self.sub2(x) + z
m = torch.jit.script(TestModule())
m.eval()
mf = torch._C._freeze_module(m._c)
self.assertTrue(mf.hasattr('sub1'))
self.assertTrue(mf.sub1.hasattr('a'))
self.assertFalse(mf.sub1.hasattr('b'))
self.assertTrue(mf.hasattr('sub2'))
self.assertTrue(mf.sub2.hasattr('sub'))
self.assertTrue(mf.sub2.sub.hasattr('a')) # Freezing detects that self.sub2.sub.a and self.sub1.a are alias
self.assertFalse(mf.sub2.sub.hasattr('b'))
input = torch.randn(2, 2)
output_s = m.forward(input)
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
# FIXME: JIT is not honoring aliasing. 'Sub' module is copied. As a result
# Eager and Script modules produce different output.
def test_freeze_module_with_nestedaliasingscalar(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = 1.1
self.b = 2.2
def forward(self, x):
return self.a + self.b
@torch.jit.export
def modify_a(self, x):
self.a = 10.0
return self.b
@torch.jit.export
def modify_b(self, x):
self.b = 20.0
return self.a
Sub = SubModule()
class SubModule2(nn.Module):
def __init__(self):
super(SubModule2, self).__init__()
self.sub = Sub # aliasing
def forward(self, x):
return self.sub.a
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub1 = Sub # aliasing
self.sub2 = SubModule2()
def forward(self, x):
z = self.sub1.modify_a(x)
return self.sub2(x) + z
m = TestModule()
ms = torch.jit.script(m)
ms.eval()
mf = torch._C._freeze_module(ms._c)
self.assertTrue(mf.hasattr('sub1'))
self.assertTrue(mf.sub1.hasattr('a'))
self.assertFalse(mf.sub1.hasattr('b'))
# sub2 is fully folded because self.sub1 and self.sub2.sub are not aliases (scripting bug)
self.assertFalse(mf.hasattr('sub2'))
input = torch.randn(2, 2)
output = m.forward(input)
output_s = ms.forward(input)
output_f = mf.forward(input)
# These should be equal; the scripting bug noted above makes the eager output differ.
self.assertNotEqual(output, output_s)
self.assertEqual(output_s, output_f)
def test_freeze_module_with_preserve_sub_module(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = torch.tensor([1.1])
self.b = 2.2
def forward(self, x):
return self.a
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub1 = SubModule() # aliasing
self.sub2 = SubModule()
def forward(self, x):
return self.sub2(x) + self.sub1(x)
m = TestModule()
ms = torch.jit.script(m)
ms.eval()
mf = torch._C._freeze_module(ms._c, ["sub1"])
# Test that 'sub1' is preserved entirely and 'sub2' is completely folded
self.assertTrue(mf.hasattr('sub1'))
self.assertTrue(mf.sub1.hasattr('a'))
self.assertTrue(mf.sub1.hasattr('b'))
self.assertFalse(mf.hasattr('sub2'))
input = torch.randn(2, 2)
output_s = ms.forward(input)
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
def test_freeze_module_with_preserve_sub_module_and_mutation(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = torch.tensor([1.1])
self.b = 2.2
def forward(self, x):
self.a[0] = 3.3
return self.a
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub1 = SubModule() # aliasing
self.sub2 = SubModule()
def forward(self, x):
return self.sub2(x) + self.sub1(x)
m = TestModule()
ms = torch.jit.script(m)
ms.eval()
mf = torch._C._freeze_module(ms._c, ["sub1"])
# Test that both sub1 and sub2 are preserved, and that 'b' is preserved
# even though it is not used, to fulfill the user request to preserve 'sub1'.
self.assertTrue(mf.hasattr('sub1'))
self.assertTrue(mf.sub1.hasattr('a'))
self.assertTrue(mf.sub1.hasattr('b'))
self.assertTrue(mf.hasattr('sub2'))
self.assertTrue(mf.sub2.hasattr('a'))
self.assertTrue(mf.sub2.hasattr('b'))
input = torch.randn(2, 2)
output_s = ms.forward(input)
output_f = mf.forward(input)
self.assertEqual(output_s, output_f)
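# The public wrapper exposes the same request through its preserved_attrs argument,
# e.g. torch.jit.freeze(ms, preserved_attrs=["sub1"]) -- a sketch, assuming the
# wrapper forwards the list to the private call used above.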
def test_freeze_module_with_helperfunction(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = 11
self.b = 2
def forward(self, x):
return self.a + self.b
class TestModule(nn.Module):
def __init__(self):
super(TestModule, self).__init__()
self.sub = SubModule()
self.a = 3
self.b = 4
def forward(self, x):
self.b = 20
return self._forward(x) + self.a + self.b
def _forward(self, x):
return self.sub(x)
m = torch.jit.script(TestModule())
m.eval()
input = torch.randn(2, 2)
mf = torch._C._freeze_module(m._c)
self.assertFalse(mf.hasattr('sub'))
self.assertFalse(mf.hasattr('a'))
self.assertTrue(mf.hasattr('b'))
with self.assertRaisesRegex(AttributeError, "TestModule \(.*\) does not have a field with name '_forward'"): # noqa: W605
mf._forward(x)
def test_freeze_module_with_inplace_mutable(self):
class FreezeMe(torch.jit.ScriptModule):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = [11, 22]
@torch.jit.script_method
def forward(self, x):
for i in range(3):
self.a.append(i)
return self.a
m = FreezeMe()
m.eval()
m_f = torch._C._freeze_module(m._c)
self.assertTrue(m_f.hasattr('a'))
m.forward(torch.tensor([3]))
out = m_f.forward(torch.tensor([5]))
expected = [11, 22, 0, 1, 2, 0, 1, 2]
self.assertEqual(out, expected)
# Mutable attributes
def test_freeze_module_with_mutable_list(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = [1, 2]
def forward(self, x):
return self.a
m = FreezeMe()
m.eval()
m.a.append(3)
m_s = torch.jit.script(m)
v = m_s.a
v.append(4)
m_s.a = v
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
# Post-freezing mutating m_s.a does not affect m_f (m_f has its own copy).
v = m_s.a
v.append(5)
m_s.a = v
self.assertFalse(m_f.hasattr('a'))
out = m_f.forward(torch.tensor([5]))
expected = [1, 2, 3, 4]
self.assertEqual(out, expected)
def test_freeze_module_with_mutable_dict(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = {"layer" : "4"}
def forward(self, x):
return self.a
@torch.jit.export
def modify_a(self, x):
self.a["layer"] = self.a["layer"] + "1"
return self.a
m = FreezeMe()
m.eval()
m.a["layer2"] = "3"
m_s = torch.jit.script(m)
t = torch.tensor(5)
m_s.modify_a(t)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
m.a["layer2"] += "2"
m_s.modify_a(t)
self.assertFalse(m_f.hasattr('a'))
out = m_f.forward(t)
expected = {"layer" : "411", "layer2" : "3"}
self.assertEqual(out, expected)
def test_freeze_module_with_mutable_tensor(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = torch.tensor([1., 2., 3.])
def forward(self, x):
return self.a
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.a[1] += 3.0
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
# Post-freezing tensor attribute mutations affect m_f.
# FIXME: deep copy all folded attributes so that m_f has full ownership.
m_s.a[0] += 5.0
self.assertFalse(m_f.hasattr('a'))
out = m_f.forward(torch.tensor([5]))
expected = [6., 5., 3.]
self.assertEqual(out, expected)
def test_freeze_module_with_tuple(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = (torch.tensor([1, 2, 3, 4, 5, 6]), "hi")
def forward(self, x):
if (x[0] == 2.0):
self.a[0][0] = 10
return self.a[0].sum()
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
inp = torch.tensor([2.0])
expected = m_s.forward(inp)
m_s.a[0][0] = 1
m_f = torch._C._freeze_module(m_s._c)
self.assertFalse(m_f.hasattr('a'))
out = m_f.forward(inp)
self.assertEqual(out, expected)
def test_freeze_module_with_tensor(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = torch.tensor([1, 2, 3, 4, 5, 6])
def forward(self, x):
x = self.a.view(2, 3)
x[0][0] += 10
return self.a.sum()
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
inp = torch.tensor([5])
expected = m_s.forward(inp)
m_f = torch._C._freeze_module(m_s._c)
self.assertTrue(m_f.hasattr('a'))
m_f.a[0] -= 10
out = m_f.forward(inp)
self.assertEqual(out, expected)
def test_freeze_module_with_list(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = [torch.tensor([1, 2, 3, 4, 5, 6])]
def forward(self, x):
self.a[0][1] += 10
return self.a[0].sum()
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
inp = torch.tensor([5])
expected = m_s.forward(inp)
m_s.a[0][1] -= 10
m_f = torch._C._freeze_module(m_s._c)
self.assertFalse(m_f.hasattr('a'))
out = m_f.forward(inp)
self.assertEqual(out, expected)
def test_freeze_module_with_aliased_tensor_attr(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = torch.tensor([1, 2, 3, 4, 5, 6])
self.b = self.a.view(2, 3)
def forward(self, x):
self.b[1] += 10
return self.a.sum()
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
self.assertTrue(m_f.hasattr('a'))
inp = torch.tensor([5])
out = m_f.forward(inp)
expected = torch.tensor(51) # 1+2+3+14+15+16
self.assertEqual(out, expected)
def test_freeze_module_with_aliased_tensor_attr2(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = torch.tensor([1, 2, 3, 4, 5, 6])
self.b = {"layer" : ([self.a.view(2, 3), torch.tensor([10])], 20)}
self.c = ([self.a.view(2, 3), torch.tensor([10])], 20)
self.d = (self.a.view(2, 3), 20)
def forward(self, x):
self.d[0][0] += 10
return self.a.sum()
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
inp = torch.tensor([5])
expected = m_s.forward(inp)
with self.assertRaisesRegex(RuntimeError, "module contains attributes values that overlaps"):
m_f = torch._C._freeze_module(m_s._c)
def test_freeze_module_with_aliased_tensor_attr3(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = torch.tensor([1, 2, 3, 4, 5, 6])
self.b = [self.a, torch.tensor([10])]
def forward(self, x):
self.a[1] += 10
return self.b[0].sum()
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
inp = torch.tensor([5])
expected = m_s.forward(inp)
m_f = torch._C._freeze_module(m_s._c)
self.assertTrue(m_f.hasattr('a'))
self.assertTrue(m_f.hasattr('b'))
out = m_f.forward(inp)
expected += 10 # account for self.a += 10.
self.assertEqual(out, expected)
def test_freeze_module_with_aliased_tensor_attr4(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = torch.tensor([1, 2, 3, 4, 5, 6])
self.b = [self.a, torch.tensor([10])]
def forward(self, x):
self.b[0][0] += 10
return self.a.sum()
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
inp = torch.tensor([5])
expected = m_s.forward(inp)
m_s.a[0] -= 10
with self.assertRaisesRegex(RuntimeError, "module contains attributes values that overlaps"):
m_f = torch._C._freeze_module(m_s._c)
def test_freeze_module_with_overlapping_attrs(self):
a = torch.tensor([1, 2, 3, 4, 5, 6])
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.b = [a.view(3, 2), torch.tensor([10])]
self.c = (20, a.view(2, 3))
def forward(self, x):
self.b[0][0] += 10
return self.c[1].sum()
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
inp = torch.tensor([5])
expected = m_s.forward(inp)
a[0] -= 10
with self.assertRaisesRegex(RuntimeError, "module contains attributes values that overlaps"):
m_f = torch._C._freeze_module(m_s._c)
def test_freeze_module_with_aliased_attr(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = [1, 2, 3, 4, 5, 6]
self.b = self.a
self.c = (self.a, 10)
def forward(self, x):
self.b[1] += 10
return str(self.a) + str(self.c)
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
# FIXME: It should be assertTrue. Currently scripting is making a copy for setting self.b (see #33034)
self.assertFalse(m_f.hasattr('a'))
self.assertFalse(m_f.hasattr('c'))
inp = torch.tensor([5])
out = m_f.forward(inp)
expected = m_s.forward(inp)
self.assertEqual(out, expected)
# Check attribute 'a' is preserved. Alias analysis detects that 'a' has output writers.
# In this example, 'a' is not mutated. However, we do not track which sub-values
# of a composite ivalue are mutated.
def test_freeze_module_with_aliased_attr2(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = [1, 2, 3, 4, 5, 6]
self.b = ([11], [10])
def forward(self, x):
v = self.a
self.b = (v, [12])
v2 = self.b[1]
v2.append(7)
return str(v) + str(v2)
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
self.assertTrue(m_f.hasattr('a'))
inp = torch.tensor([5])
out = m_f.forward(inp)
expected = m.forward(inp)
self.assertEqual(out, expected)
def test_freeze_module_with_aliased_attr3(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = [1, 2, 3, 4, 5, 6]
self.b = ([11], [10])
def forward(self, x):
v = self.a
v2 = (v, [12])
v3 = v2[0]
v3.append(7)
return str(self.a)
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
self.assertTrue(m_f.hasattr('a'))
inp = torch.tensor([5])
out = m_f.forward(inp)
expected = m.forward(inp)
self.assertEqual(out, expected)
def test_freeze_module_return_self(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.a = torch.tensor([1., 2., 3.])
def forward(self, x):
return self
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
with self.assertRaisesRegex(RuntimeError, "attempted to freeze a module that return itself"):
m_f = torch._C._freeze_module(m_s._c)
def test_freeze_module_inlining(self):
@torch.jit.script # noqa: B903
class Obj(object): # noqa: B903
def __init__(self, x: int, y: int):
self.x = x
self.y = y
class Mod(nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.obj = Obj(2, 3)
def forward(self, i: int):
print(self.obj)
return i
mod = torch.jit.freeze(torch.jit.script(Mod().eval()))
obj = mod.graph.findNode("prim::Constant")
self.assertTrue(torch._C._jit_object_is_non_holding(obj))
buffer = io.BytesIO()
torch.jit.save(mod, buffer)
buffer.seek(0)
loaded = torch.jit.load(buffer)
obj = loaded.graph.findNode("prim::Constant")
self.assertTrue(torch._C._jit_object_is_non_holding(obj))
def test_freeze_module_return_sub_module(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
def forward(self, x):
return self.conv1
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c)
self.assertTrue(m_f.hasattr('conv1'))
def test_freeze_module_no_forward(self):
class FreezeMe(nn.Module):
def __init__(self):
super(FreezeMe, self).__init__()
self.lin = nn.Linear(10, 1)
@torch.jit.export
def foo(self, x):
return self.lin(x)
m = FreezeMe()
m_s = torch.jit.script(m)
m_s.eval()
m_f = torch._C._freeze_module(m_s._c, preservedAttrs=['foo'])
input = torch.ones(10)
self.assertEqual(m_s.foo(input), m_f.foo(input))
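# Note: torch._C._freeze_module is the internal entry point exercised by these tests; the
# public wrapper is torch.jit.freeze (used further below), which spells the argument
# preserved_attrs. A rough equivalent of the call above, shown for illustration only:
#   m_f = torch.jit.freeze(m_s, preserved_attrs=["foo"])
#   m_f.foo(torch.ones(10))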
def test_freeze_module_in_training_mode(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 32, 3, 1)
self.conv2 = nn.Conv2d(32, 64, 3, 1)
self.dropout1 = nn.Dropout2d(0.25)
self.dropout2 = nn.Dropout2d(0.5)
self.fc1 = nn.Linear(9216, 128)
self.fc2 = nn.Linear(128, 10)
def forward(self, x):
x = self.conv1(x)
x = nn.functional.relu(x)
x = self.conv2(x)
x = nn.functional.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = nn.functional.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
output = nn.functional.log_softmax(x, dim=1)
return output
model = torch.jit.script(Net())
model.train()
mTrain_freezed = torch._C._freeze_module(model._c)
# verify mTrain_freezed looks exactly like:
# module {
# attributes {
# conv1 = ...
# conv2 = ...
# dropout1 = ...
# dropout2 = ...
# fc1 = ...
# fc2 = ...
# }
# ...
# submodules {
# module conv1 {
# attributes {
# weight = ...
# bias = ...
# }
# ...
# }
# module conv2 {
# attributes {
# weight = ...
# bias = ...
# }
# ...
# }
# module dropout1 {
# attributes {
# training = ...
# }
# ...
# }
# module dropout2 {
# attributes {
# training = ...
# }
# ...
# }
# module fc1 {
# attributes {
# weight = ...
# bias = ...
# }
# ...
# }
# module fc2 {
# attributes {
# weight = ...
# bias = ...
# }
# ...
# }
self.assertFalse(mTrain_freezed.hasattr('training'))
self.assertTrue(mTrain_freezed.hasattr('conv1'))
self.assertFalse(mTrain_freezed.conv1.hasattr('training'))
self.assertTrue(mTrain_freezed.conv1.hasattr('weight'))
self.assertTrue(mTrain_freezed.conv1.hasattr('bias'))
self.assertTrue(mTrain_freezed.hasattr('conv2'))
self.assertFalse(mTrain_freezed.conv2.hasattr('training'))
self.assertTrue(mTrain_freezed.conv2.hasattr('weight'))
self.assertTrue(mTrain_freezed.conv2.hasattr('bias'))
self.assertTrue(mTrain_freezed.hasattr('dropout1'))
self.assertTrue(mTrain_freezed.dropout1.hasattr('training'))
self.assertTrue(mTrain_freezed.hasattr('dropout2'))
self.assertTrue(mTrain_freezed.dropout2.hasattr('training'))
self.assertTrue(mTrain_freezed.hasattr('fc1'))
self.assertTrue(mTrain_freezed.fc1.hasattr('weight'))
self.assertTrue(mTrain_freezed.fc1.hasattr('bias'))
self.assertTrue(mTrain_freezed.hasattr('fc2'))
self.assertTrue(mTrain_freezed.fc2.hasattr('weight'))
self.assertTrue(mTrain_freezed.fc2.hasattr('bias'))
model.eval()
mEval_freezed = torch._C._freeze_module(model._c)
self.assertFalse(mEval_freezed.hasattr('conv1'))
self.assertFalse(mEval_freezed.hasattr('conv2'))
self.assertFalse(mEval_freezed.hasattr('dropout1'))
self.assertFalse(mEval_freezed.hasattr('training'))
self.assertFalse(mEval_freezed.hasattr('fc1'))
self.assertFalse(mEval_freezed.hasattr('dropout2'))
self.assertFalse(mEval_freezed.hasattr('fc2'))
with self.assertRaisesRegex(AttributeError, "does not have a field with name 'state_dict'"):
print(mEval_freezed.state_dict())
buffer = io.BytesIO()
torch.jit.save(mEval_freezed, buffer)
buffer.seek(0)
m = torch.jit.load(buffer)
FileCheck().check_not('GetAttr[name=') \
.run(m._c._get_method('forward').graph)
m2 = torch._C._freeze_module(model._c, preserveParameters=True)
self.assertTrue(m2.hasattr('conv1'))
self.assertTrue(m2.hasattr('conv2'))
self.assertFalse(m2.hasattr('dropout1'))
self.assertFalse(m2.hasattr('training'))
self.assertTrue(m2.hasattr('fc1'))
self.assertFalse(m2.hasattr('dropout2'))
self.assertTrue(m2.hasattr('fc2'))
def test_freeze_module_detach_gradient(self):
mod = nn.Conv2d(8, 3, 4, 2, 1)
self.assertTrue(mod.weight.requires_grad)
smod = torch.jit.script(mod)
smod.eval()
fmod = torch._C._freeze_module(smod._c)
self.assertTrue(mod.weight.requires_grad)
self.assertTrue(smod.weight.requires_grad)
self.assertFalse(fmod.hasattr('weight'))
inp = torch.ones(1, 8, 32, 32)
out1 = fmod.forward(inp)
# FIXME: frozen module mutated from outside (original module).
with torch.no_grad():
smod.weight[0, 0, 0, 0] += 100.0
out2 = fmod.forward(inp)
out3 = smod(inp)
self.assertNotEqual(out1, out2)
self.assertEqual(out2, out3)
def test_freeze_module_with_user_preserved_attr(self):
class Module(nn.Module):
def __init__(self):
super(Module, self).__init__()
self.a = torch.tensor([1.1])
self.b = torch.tensor([2.2])
def forward(self, x):
return self.a + self.b
m = torch.jit.script(Module())
m.eval()
fm = torch._C._freeze_module(m._c, ["a"])
# Attribute "a" is preserved
self.assertTrue(fm.hasattr("a"))
self.assertFalse(fm.hasattr("b"))
def test_freeze_module_with_user_preserved_method(self):
class Module(nn.Module):
def __init__(self):
super(Module, self).__init__()
self.a = torch.tensor([1.1])
self.b = torch.tensor([2.2])
def forward(self, x):
return self.a + self.b
@torch.jit.export
def modify_a(self, x):
self.a[0] += 10
return self.b
@torch.jit.export
def modify_b(self, x):
self.b[0] += 20
return self.a
m = torch.jit.script(Module())
m.eval()
fm = torch._C._freeze_module(m._c, ["modify_a"])
# Both attribute "a" and method "modify_a" are preserved
self.assertTrue(fm.hasattr("a"))
self.assertFalse(fm.hasattr("b"))
input = torch.randn(2, 2)
expected = m.forward(input)
out = fm.forward(input)
self.assertEqual(out, expected)
def test_freeze_module_with_user_preserved_method2(self):
class Module(nn.Module):
def __init__(self):
super(Module, self).__init__()
self.a = torch.tensor([1.1])
self.b = torch.tensor([2.2])
def forward(self, x):
self.b += 10
return self.a + self.b
@torch.jit.export
def modify_a(self, x):
self.a[0] += 10
return self.b + self.a
m = torch.jit.script(Module())
m.eval()
fm = torch._C._freeze_module(m._c, ["modify_a"])
FileCheck().check('prim::GetAttr[name="a"]').run(fm.forward.graph)
FileCheck().check('prim::GetAttr[name="b"]').run(fm.modify_a.graph)
def test_freeze_module_with_user_preserved_attribute_on_submodule(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = 1
self.b = 2
def forward(self):
return self.a + self.b
class Module(nn.Module):
def __init__(self):
super(Module, self).__init__()
self.sub1 = SubModule()
self.sub2 = SubModule()
def forward(self):
return self.sub1() + self.sub2()
m = torch.jit.script(Module())
m.eval()
m = torch.jit.freeze(m, preserved_attrs=['sub1.a', 'sub2.a'])
fm = m._c
self.assertTrue(fm.hasattr('sub1'))
self.assertTrue(fm.sub1.hasattr('a'))
self.assertFalse(fm.sub1.hasattr('b'))
self.assertTrue(fm.hasattr('sub2'))
self.assertTrue(fm.sub2.hasattr('a'))
self.assertFalse(fm.sub2.hasattr('b'))
self.assertEqual(m(), 6)
m.sub1.a += 1
self.assertEqual(m(), 7)
def test_freeze_module_with_user_preserved_attribute_on_unused_submodule(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
self.a = 1
self.b = 2
def forward(self):
return self.a + self.b
@torch.jit.export
def method_a(self):
return 42
class Module(nn.Module):
def __init__(self):
super(Module, self).__init__()
self.sub = SubModule()
def forward(self):
return 1
m = torch.jit.script(Module())
m.eval()
fm = torch.jit.freeze(m, preserved_attrs=['sub.a', 'sub.method_a'])._c
self.assertTrue(fm.hasattr('sub'))
self.assertTrue(fm.sub.hasattr('a'))
self.assertFalse(fm.sub.hasattr('b'))
self.assertTrue(fm.sub._has_method('method_a'))
def test_freeze_module_with_user_preserved_method_on_submodule(self):
class SubModule(nn.Module):
def __init__(self):
super(SubModule, self).__init__()
def forward(self, x):
return self.method_a(x) + self.method_b(x)
def method_a(self, x):
return x * x
def method_b(self, x):
return x + x
class Module(nn.Module):
def __init__(self):
super(Module, self).__init__()
self.sub = SubModule()
def forward(self, x):
return self.sub(x)
m = torch.jit.script(Module())
m.eval()
fm = torch.jit.freeze(m, preserved_attrs=['sub.method_a'])._c
self.assertTrue(fm.hasattr('sub'))
self.assertTrue(fm.sub._has_method('method_a'))
self.assertFalse(fm.sub._has_method('method_b'))
@skipIfNoFBGEMM
def test_module_with_shared_type_instances(self):
class Child(nn.Module):
def __init__(self):
super(Child, self).__init__()
self.conv1 = nn.Conv2d(1, 1, 1).to(dtype=torch.float32)
def forward(self, x):
x = self.conv1(x)
return x
class Parent(nn.Module):
def __init__(self):
super(Parent, self).__init__()
self.quant = torch.ao.quantization.QuantStub()
self.conv1 = nn.Conv2d(1, 1, 1).to(dtype=torch.float32)
self.child = Child()
self.child2 = Child()
self.dequant = torch.ao.quantization.DeQuantStub()
def forward(self, x):
x = self.quant(x)
x = self.conv1(x)
x = self.child(x)
x = self.child2(x)
x = self.dequant(x)
return x
def _static_quant(model):
qModel = torch.ao.quantization.QuantWrapper(model)
qModel.qconfig = torch.ao.quantization.default_qconfig
torch.ao.quantization.prepare(qModel, inplace=True)
qModel(torch.rand(4, 1, 4, 4, dtype=torch.float32))
torch.ao.quantization.convert(qModel, inplace=True)
return model
with override_quantized_engine('fbgemm'):
data = torch.randn(4, 1, 4, 4, dtype=torch.float32)
m = Parent().to(torch.float32)
m = _static_quant(m)
m = torch.jit.script(m)
m.eval()
torch._C._jit_pass_inline(m.graph)
m_frozen = wrap_cpp_module(torch._C._freeze_module(m._c))
# Earlier bug resulted in _packed_params set to false.
FileCheck().check_not('_packed_params = False').run(m_frozen._c.dump_to_str(True, True, False))
m_res = m(data)
# It used to segfault while running the frozen module.
m_frozen_res = m_frozen(data)
self.assertEqual(m_res, m_frozen_res)
def test_module_getattr_indirection(self):
@torch.jit.script
class ValHolder(object):
def __init__(self, val: int):
self.val: int = val
class Mod(nn.Module):
def __init__(self):
super(Mod, self).__init__()
self.mod1 = ValHolder(1)
self.mod2 = ValHolder(2)
def forward(self, cond: bool):
if cond:
mod = self.mod1
else:
mod = self.mod2
return mod.val
mod = Mod()
mod.eval()
frozen_mod = torch.jit.freeze(torch.jit.script(mod))
mod_eager = Mod()
self.assertEqual(mod_eager(True), frozen_mod(True))
self.assertEqual(mod_eager(False), frozen_mod(False))
def test_freeze_module_with_non_static_module_container_index(self):
"""
Test that Modules containing non-static ModuleDict or ModuleList
indexing cannot be frozen.
"""
@torch.jit.interface
class ModuleInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
pass
class ImplementsInterface(torch.nn.Module):
def forward(self, inp: Any) -> Any:
if isinstance(inp, torch.Tensor):
return torch.max(inp, dim=0)
return inp
class ModWithDict(torch.nn.Module):
def __init__(self):
super().__init__()
self.d = torch.nn.ModuleDict({"module": ImplementsInterface()})
def forward(self, x: torch.Tensor, key: str) -> Any:
value: ModuleInterface = self.d[key]
return value.forward(x)
m = torch.jit.script(ModWithDict())
m.eval()
with self.assertRaisesRegex(RuntimeError, "Freezing modules containing prim::ModuleContainerIndex is not supported"):
mf = torch._C._freeze_module(m._c)
class ModWithList(torch.nn.Module):
def __init__(self):
super().__init__()
self.l = torch.nn.ModuleList([ImplementsInterface()])
def forward(self, x: torch.Tensor, idx: int) -> Any:
value: ModuleInterface = self.l[idx]
return value.forward(x)
m = torch.jit.script(ModWithList())
m.eval()
with self.assertRaisesRegex(RuntimeError, "Freezing modules containing prim::ModuleContainerIndex is not supported"):
mf = torch._C._freeze_module(m._c)
def test_freeze_non_module_class_getattr(self):
class BoxCoder(object):
def __init__(self, bbox_xform_clip):
# type: (float) -> None
self.bbox_xform_clip = bbox_xform_clip
def decode(self, input):
return input * self.bbox_xform_clip
class MyModule(torch.nn.Module):
__annotations__ = {
'box_coder': BoxCoder,
}
def __init__(self):
super(MyModule, self).__init__()
self.box_coder = BoxCoder(50.)
def forward(self, input):
return self.box_coder.decode(input)
model = MyModule()
model.eval()
script_model = torch.jit.freeze(torch.jit.script(model))
inp = torch.randn([4, 4])
output_eager = model(inp)
self.assertEqual(model(inp), script_model(inp))
FileCheck().check_not("GetAttr").run(script_model.graph)
def test_freeze_module_with_tupleoutput_submodule(self):
class SubModule(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
return (x + 1, x + 2)
class TestModule(nn.Module):
def __init__(self):
super().__init__()
self.sub = SubModule()
def forward(self, x):
y1, y2 = self.sub(x)
return y1 + y2
m = torch.jit.script(TestModule())
m = m.eval()
mf = torch.jit.freeze(m)
inp = torch.randn(2, 2)
expected = m.forward(inp)
output = mf.forward(inp)
# Check that prim::TupleConstruct and prim::TupleUnpack
# don't exist in the frozen graph
FileCheck().check_not("prim::TupleConstruct").run(mf.graph)
FileCheck().check_not("prim::TupleUnpack").run(mf.graph)
self.assertEqual(output, expected)
def test_freeze_module_with_call_method(self):
class Mod(nn.Module):
def __init__(self, val):
super().__init__()
self.param = nn.Parameter(val)
def forward(self, x):
# this method will change during freezing
return x + self.param
@torch.jit.export
def make_prediction(self, x):
y = x + x
return self.forward(y)
param = torch.rand([2, 2])
x = torch.rand([2, 2])
unscripted_mod = Mod(param)
mod = torch.jit.script(unscripted_mod)
mod.eval()
mod = torch.jit.freeze(mod, preserved_attrs=["make_prediction"])
self.assertEqual(
mod.forward(x), unscripted_mod.forward(x), atol=1e-5, rtol=1e-5
)
class TestFrozenOptimizations(JitTestCase):
def setUp(self):
self.default_dtype = torch.get_default_dtype()
torch.set_default_dtype(torch.double)
def tearDown(self):
torch.set_default_dtype(self.default_dtype)
def test_conv_bn_folding(self):
conv_bias = [True, False]
module_pairs = [(nn.Conv1d, nn.BatchNorm1d), (nn.Conv2d, nn.BatchNorm2d), (nn.Conv3d, nn.BatchNorm3d)]
use_tracing = [True, False]
bn_running_stats = [True, False]
for use_bias, modules, tracing, track_stats in product(conv_bias, module_pairs, use_tracing, bn_running_stats):
class ConvBN(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(ConvBN, self).__init__()
self.conv = modules[0](in_channels, out_channels, bias=use_bias, **kwargs)
self.bn = modules[1](out_channels, eps=0.001, track_running_stats=track_stats)
def forward(self, x):
x = self.conv(x)
return self.bn(x)
mod_eager = ConvBN(3, 32, kernel_size=3, stride=2).eval()
inps = [4, 3, 4]
if modules[0] == nn.Conv2d:
inps.append(inps[-1])
if modules[0] == nn.Conv3d:
inps.append(inps[-1])
inps.append(inps[-1])
inp = torch.rand(inps)
if tracing:
scripted_mod = torch.jit.trace(mod_eager, (inp))
else:
scripted_mod = torch.jit.script(mod_eager)
self.run_pass("inline", scripted_mod.graph)
self.run_pass("peephole", scripted_mod.graph)
self.run_pass("constant_propagation", scripted_mod.graph)
FileCheck().check("conv").check("batch").run(scripted_mod.graph)
# successfully no-ops with non-const inputs
self.run_pass("fold_frozen_conv_bn", scripted_mod.graph)
FileCheck().check("conv").check("aten::batch_norm").run(scripted_mod.graph)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("fold_frozen_conv_bn", scripted_mod.graph)
if track_stats:
FileCheck().check("conv").check_not("aten::batch_norm").run(scripted_mod.graph)
else:
FileCheck().check("conv").check("aten::batch_norm").run(scripted_mod.graph)
self.assertEqual(mod_eager(inp), scripted_mod(inp))
self.assertEqual(mod_eager(inp), scripted_mod(inp))
def test_conv_bn_folding_not_forward(self):
class ConvBN(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(ConvBN, self).__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=True, **kwargs)
self.bn = torch.nn.BatchNorm2d(out_channels, eps=0.001)
self.amt = 3.2
def forward(self, x):
x = self.conv(x)
return self.bn(x)
@torch.jit.export
def make_prediction(self, x):
return self.forward(x) + self.amt
mod_eager = ConvBN(3, 32, kernel_size=3, stride=2).eval()
scripted_mod = torch.jit.script(mod_eager)
torch._C._jit_pass_inline(scripted_mod.make_prediction.graph)
FileCheck().check("conv").check("aten::batch_norm").run(scripted_mod.make_prediction.graph)
# _jit_pass_optimize_frozen_graph should not be called on non-method attributes (e.g. "amt")
scripted_mod = torch.jit.freeze(scripted_mod, preserved_attrs=["make_prediction", "amt"])
FileCheck().check("conv").check_not("aten::batch_norm").run(scripted_mod.make_prediction.graph)
# During freezing this creates tensor constants that are attached to the frozen graph,
# which are then kept alive by the compilation unit (which causes a leak)
@skipCUDAMemoryLeakCheckIf(True)
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_conv_bn_folding_autocast_scenario_cuda(self):
# CUDA conv takes input tensors which must all be the same dtype,
# which can cause issues if folding produces inputs of different dtypes.
class ConvBN(torch.nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(ConvBN, self).__init__()
self.conv = torch.nn.Conv2d(in_channels, out_channels, bias=False, dtype=torch.half, **kwargs)
self.bn = torch.nn.BatchNorm2d(out_channels, eps=0.001, dtype=torch.float)
def forward(self, x):
return self.bn(self.conv(x))
mod_eager = ConvBN(3, 32, kernel_size=3, stride=2).cuda().eval()
scripted_mod = torch.jit.script(mod_eager)
scripted_mod = torch.jit.freeze(scripted_mod)
FileCheck().check("conv").check_not("aten::batch_norm").run(scripted_mod.graph)
conv_node = scripted_mod.graph.findNode("aten::conv2d", True)
self.assertTrue(conv_node is not None)
bias_input = conv_node.namedInput("bias")
self.assertTrue(bias_input is not None)
self.assertTrue(bias_input.type().dtype() == torch.half)
x = torch.rand((3, 3, 32, 32), dtype=torch.half).cuda()
self.assertEqual(mod_eager(x), scripted_mod(x), atol=1e-2, rtol=1e-2)
self.assertEqual(mod_eager(x), scripted_mod(x), atol=1e-2, rtol=1e-2)
def test_conv_add_folding(self):
@torch.no_grad()
def test_conv_fusion(use_bias, module, tracing, op, scalar, add_tensor, expect_success):
class ConvOp(torch.nn.Module):
__constants__ = ['use_scalar']
def __init__(self, in_channels, out_channels, tensor=None, **kwargs):
super(ConvOp, self).__init__()
self.conv = module(in_channels, out_channels, bias=use_bias, **kwargs)
self.conv2 = module(in_channels, out_channels, bias=use_bias, **kwargs)
self.use_scalar = scalar
tensor_size = [1 for _ in range(self.conv.weight.ndim)]
tensor_size[1] = self.conv.weight.size(0)
self.tensor = add_tensor if add_tensor is not None else torch.rand(tensor_size)
self.op = op
def forward(self, x):
x = self.conv(x)
if self.use_scalar:
return self.op(x, 2.)
else:
return self.op(x, self.tensor)
mod_eager = ConvOp(3, 32, kernel_size=3, stride=2).eval()
inps = [4, 3, 4]
if module == nn.Conv2d:
inps.append(inps[-1])
if module == nn.Conv3d:
inps.append(inps[-1])
inps.append(inps[-1])
inp = torch.rand(inps)
if tracing:
scripted_mod = torch.jit.trace(mod_eager, (inp,))
else:
scripted_mod = torch.jit.script(mod_eager)
self.run_pass("inline", scripted_mod.graph)
op_str = "aten::" + op.__name__
FileCheck().check("conv").check(op_str).run(scripted_mod.graph)
# successfully no-ops with non-const inputs
self.run_pass("fold_frozen_conv_mul_or_div", scripted_mod.graph)
self.run_pass("fold_frozen_conv_add_or_sub", scripted_mod.graph)
FileCheck().check("conv").check(op_str).run(scripted_mod.graph)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("fold_frozen_conv_mul_or_div", scripted_mod.graph)
self.run_pass("fold_frozen_conv_add_or_sub", scripted_mod.graph)
if expect_success:
FileCheck().check("conv").check_not(op_str).run(scripted_mod.graph)
else:
FileCheck().check("conv").check(op_str).run(scripted_mod.graph)
self.assertEqual(mod_eager(inp), scripted_mod(inp))
self.assertEqual(mod_eager(inp), scripted_mod(inp))
conv_bias = [True, False]
modules = [nn.Conv1d, nn.Conv2d, nn.Conv3d]
use_tracing = [False, True]
use_scalar = [False, True]
ops = [torch.add, torch.sub, torch.mul, torch.div]
for use_bias, module, tracing, pytorch_op, scalar in product(conv_bias, modules, use_tracing, ops, use_scalar):
test_conv_fusion(use_bias, module, tracing, pytorch_op, scalar, add_tensor=None, expect_success=True)
for use_bias, pytorch_op in product(conv_bias, ops):
# broadcasting add
test_conv_fusion(use_bias, nn.Conv2d, False, pytorch_op, False,
add_tensor=torch.rand(32, 1, 32), expect_success=False)
# broadcasting add
test_conv_fusion(use_bias, nn.Conv2d, False, pytorch_op, False, add_tensor=torch.rand(1, 1), expect_success=True)
# add with different dtype
test_conv_fusion(use_bias, nn.Conv2d, False, pytorch_op, False,
add_tensor=torch.tensor([2]).to(torch.int), expect_success=True)
def test_conv_mul_add_bn(self):
class Conv_Mul_Add_Bn(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(Conv_Mul_Add_Bn, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
self.bn = nn.BatchNorm2d(out_channels, eps=0.001)
self.tensor1 = torch.tensor(2.2)
self.tensor2 = torch.tensor(2)
def forward(self, x):
return self.bn(torch.add(torch.mul(self.conv(x), self.tensor1), self.tensor2))
input = torch.randn(8, 3, 64, 64)
model = Conv_Mul_Add_Bn(3, 32, kernel_size=3, stride=1).eval()
with torch.no_grad():
result = model(input)
traced_model = torch.jit.trace(model, input).eval()
traced_model = torch.jit.freeze(traced_model)
tresult = traced_model(input)
self.assertEqual(result, tresult)
FileCheck().check("conv").check_not("aten::batch_norm").run(traced_model.graph)
FileCheck().check("conv").check_not("aten::add").run(traced_model.graph)
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_linear_concat(self):
out_dimms = [[5, 10], [1, 5]]
for w1_dim, w2_dim in out_dimms:
class ModMultLinear(nn.Module):
def __init__(self, w1_dim, w2_dim):
super(ModMultLinear, self).__init__()
self.w1 = nn.Parameter(torch.rand([w1_dim, 5]))
self.b1 = nn.Parameter(torch.rand([w1_dim]))
self.w2 = nn.Parameter(torch.rand([w2_dim, 5]))
self.b2 = nn.Parameter(torch.rand([w2_dim]))
def forward(self, in_tensor1):
res1 = torch._C._nn.linear(in_tensor1, self.w1, self.b1)
res2 = torch._C._nn.linear(in_tensor1, self.w2, self.b2)
return res1, res2
mod_eager = ModMultLinear(w1_dim, w2_dim).eval()
test_val1 = torch.rand([50, 5])
self.check_linear_optimizations(mod_eager, 2, 1, (test_val1, ))
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_linear_concat_complex(self):
"""
Test that the interleaving of multiple optimizations does not
cause errors and that the graph gets optimized as expected.
"""
class ModMultLinear(nn.Module):
def __init__(self):
super(ModMultLinear, self).__init__()
w1_dim = 5
w2_dim = 10
self.w1 = nn.Parameter(torch.rand([w1_dim, 5]))
self.b1 = nn.Parameter(torch.rand([w1_dim]))
self.w2 = nn.Parameter(torch.rand([w2_dim, 5]))
self.b2 = nn.Parameter(torch.rand([w2_dim]))
def forward(self, in_tensor1):
res1 = torch._C._nn.linear(in_tensor1, self.w1, self.b1)
res3 = torch._C._nn.linear(res1, self.w2, self.b2)
res2 = torch._C._nn.linear(in_tensor1, self.w2, self.b2)
res4 = torch._C._nn.linear(res1, self.w1, self.b1)
return res2, res3, res4
mod_eager = ModMultLinear().eval()
test_val1 = torch.rand([50, 5])
self.check_linear_optimizations(mod_eager, 4, 2, (test_val1, ))
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_linear_concat_different_input(self):
"""
There should be no change to the graph from the optimization pass,
because the two input tensors are different.
"""
# Freezing requires that the graph be a module
class ModMultLinear(nn.Module):
def __init__(self, w1_dim, w2_dim):
super(ModMultLinear, self).__init__()
self.w1 = nn.Parameter(torch.rand([w1_dim, 5]))
self.b1 = nn.Parameter(torch.rand([w1_dim]))
self.w2 = nn.Parameter(torch.rand([w2_dim, 5]))
self.b2 = nn.Parameter(torch.rand([w2_dim]))
def forward(self, in_tensor1, in_tensor2):
res1 = torch._C._nn.linear(in_tensor1, self.w1, self.b1)
res2 = torch._C._nn.linear(in_tensor2, self.w2, self.b2)
return res1, res2
mod_eager = ModMultLinear(5, 5).eval()
test_val1 = torch.rand([50, 5])
test_val2 = torch.rand([50, 5])
self.check_linear_optimizations(mod_eager, 2, 2, (test_val1, test_val2))
@unittest.skipIf(not TEST_CUDA, "Optimization currently only run for GPU")
def test_linear_multiple_blocks(self):
class ModMultLinear(nn.Module):
def __init__(self, w1_dim, w2_dim):
super(ModMultLinear, self).__init__()
self.w1 = nn.Parameter(torch.rand([w1_dim, 5]))
self.b1 = nn.Parameter(torch.rand([w1_dim]))
self.w2 = nn.Parameter(torch.rand([w2_dim, 5]))
self.b2 = nn.Parameter(torch.rand([w2_dim]))
def forward(self, in_tensor1, in_tensor2, cond: bool):
res1 = torch._C._nn.linear(in_tensor1, self.w1, self.b1)
if cond:
res3 = torch._C._nn.linear(in_tensor2, self.w2, self.b2)
res4 = torch._C._nn.linear(in_tensor1, self.w2, self.b1)
else:
raise AssertionError()
res2 = torch._C._nn.linear(in_tensor1, self.w2, self.b1)
return res1, res2, res3, res4
mod_eager = ModMultLinear(5, 5).eval()
test_val1 = torch.rand([50, 5])
test_val2 = torch.rand([50, 5])
self.check_linear_optimizations(mod_eager, 4, 3, (test_val1, test_val2, True))
def check_linear_optimizations(self, eager_mod, orig_linears, new_linears, test_vals):
for is_cuda in [False, True]:
if is_cuda:
mod_to_device = eager_mod.cuda()
test_vals_to_device = [t.cuda() if isinstance(t, torch.Tensor) else t for t in test_vals]
else:
mod_to_device = eager_mod
test_vals_to_device = test_vals
script_mod = torch.jit.script(mod_to_device)
op_graph = script_mod.graph
FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
# successfully no-ops with non-const inputs
self.run_pass("concat_frozen_linear", op_graph)
FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
script_mod = torch.jit.freeze(script_mod)
op_graph = script_mod.graph
self.run_pass("concat_frozen_linear", op_graph)
if is_cuda:
FileCheck().check_count("aten::linear", new_linears, exactly=True).run(op_graph)
else:
FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
self.assertEqual(mod_to_device(*test_vals_to_device), script_mod(*test_vals_to_device))
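# Note: the concat_frozen_linear pass currently only rewrites the graph when the module
# runs on CUDA (the tests above are skipped without CUDA), which is why this helper checks
# both paths and only expects the reduced aten::linear count in the is_cuda case.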
def test_optimize_freeze_module(self):
in_channels, out_channels = 3, 32
conv = torch.nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=True)
bn = torch.nn.BatchNorm2d(out_channels, eps=.001)
mod = torch.nn.Sequential(conv, bn)
# set optimize_numerics to False here; by default freezing runs run_frozen_optimizations
frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()), optimize_numerics=False)
# inspect frozen mod
FileCheck().check("batch_norm").run(frozen_mod.graph)
torch.jit.run_frozen_optimizations(frozen_mod)
FileCheck().check_not("batch_norm").run(frozen_mod.graph)
# run_frozen_optimizations should be run
frozen_mod = torch.jit.freeze(torch.jit.script(mod.eval()))
FileCheck().check_not("batch_norm").run(frozen_mod.graph)
def test_freeze_remove_dropout(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.dropout = nn.Dropout(0.5)
def forward(self, x):
return self.dropout(x)
mod = torch.jit.script(Net())
# inspect mod
torch._C._jit_pass_inline(mod.graph)
FileCheck().check("aten::dropout").run(mod.graph)
frozen_mod = torch.jit.freeze(mod.eval())
FileCheck().check_not("aten::dropout").run(frozen_mod.graph)
input = torch.randn(2)
output_s = mod.forward(input)
output_f = frozen_mod.forward(input)
self.assertEqual(output_s, output_f)
def test_freeze_remove_feature_dropout(self):
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.dropout = nn.Dropout2d(0.5)
def forward(self, x):
return self.dropout(x)
mod = torch.jit.script(Net().eval())
# inspect mod
torch._C._jit_pass_inline(mod.graph)
FileCheck().check("aten::feature_dropout").run(mod.graph)
frozen_mod = torch.jit.freeze(mod)
FileCheck().check_not("aten::feature_dropout").run(frozen_mod.graph)
input = torch.randn(2, 2, 1, 1)
output_s = mod.forward(input)
output_f = frozen_mod.forward(input)
self.assertEqual(output_s, output_f)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_freeze_mkdlnn(self):
conv = torch.nn.Conv2d(3, 32, kernel_size=3, stride=2).eval().float()
convmkl = mkldnn_utils.to_mkldnn(conv)
out = torch.jit.freeze(torch.jit.script(convmkl.eval()))
inp = torch.rand([4, 3, 4, 4]).float()
self.assertEqual(out(inp.to_mkldnn()).to_dense(), conv(inp))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_conv_to_mkldnn(self):
with set_default_dtype(torch.float):
for module, trace in product([nn.Conv2d, nn.Conv3d], [False, True]):
mod = module(3, 32, kernel_size=3, stride=2).eval()
inps = [4, 3, 4]
if module == nn.Conv2d:
inps.append(inps[-1])
if module == nn.Conv3d:
inps.append(inps[-1])
inps.append(inps[-1])
inp = torch.rand(inps)
if trace:
scripted_mod = torch.jit.trace(mod, (inp,))
else:
scripted_mod = torch.jit.script(mod)
self.run_pass("inline", scripted_mod.graph)
FileCheck().check("conv").run(scripted_mod.graph)
# successfully no-ops with non-const inputs
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
FileCheck().check_not("to_mkldnn").run(scripted_mod.graph)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
FileCheck().check("to_mkldnn").check("prim::mkldnn_convolution").check("to_dense").run(scripted_mod.graph)
self.assertEqual(mod(inp), scripted_mod(inp))
self.assertEqual(mod(inp), scripted_mod(inp))
def test_linear_transpose(self):
class ModLinear(torch.nn.Module):
def __init__(self):
super(ModLinear, self).__init__()
self.bias = torch.nn.Parameter(torch.rand(30))
self.weight = torch.nn.Parameter(torch.rand([30, 20]))
def forward(self, x):
return torch._C._nn.linear(x, self.weight, self.bias)
mod_eager = ModLinear().eval()
test_val = torch.rand([50, 20])
self.check_linear_optimizations_2(mod_eager, 1, 0, "transpose_frozen_linear", (test_val,))
def test_linear_non_constant_weight(self):
class ModLinear(torch.nn.Module):
def __init__(self):
super(ModLinear, self).__init__()
self.bias = torch.nn.Parameter(torch.rand(30))
def forward(self, x, weight):
return torch._C._nn.linear(x, weight, self.bias)
mod_eager = ModLinear().eval()
test_val = torch.rand([50, 20])
test_weight = torch.rand([30, 20])
self.check_linear_optimizations_2(mod_eager, 1, 1, "transpose_frozen_linear", (test_val, test_weight))
def check_linear_optimizations_2(self, eager_mod, orig_linears, new_linears, opt_pass, test_vals):
# TODO: merge with check_linear_optimizations once both diffs land
mod_to_device = eager_mod
test_vals_to_device = test_vals
script_mod = torch.jit.script(mod_to_device)
op_graph = script_mod.graph
FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
# successfully no-ops with non-const inputs
self.run_pass(opt_pass, op_graph)
FileCheck().check_count("aten::linear", orig_linears, exactly=True).run(op_graph)
script_mod = torch.jit.freeze(script_mod)
op_graph = script_mod.graph
self.run_pass(opt_pass, op_graph)
FileCheck().check_count("aten::linear", new_linears, exactly=True).run(op_graph)
self.assertEqual(mod_to_device(*test_vals_to_device), script_mod(*test_vals_to_device))
@staticmethod
def conv():
# Generic composable conv for testing purposes
return nn.Conv2d(8, 8, 1)
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_collapse_adjacent_conversions(self):
with set_default_dtype(torch.float):
mod = nn.Sequential(self.conv(), self.conv()).eval()
scripted_mod = torch.jit.script(mod)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
FileCheck().check("to_mkldnn") \
.check("prim::mkldnn_convolution") \
.check("prim::mkldnn_convolution") \
.check("to_dense") \
.run(scripted_mod.graph)
FileCheck().check_count("to_mkldnn", 1, exactly=True).run(scripted_mod.graph)
inp = torch.rand([1, 8, 8, 8])
self.assertEqual(scripted_mod(inp), mod(inp))
self.assertEqual(scripted_mod(inp), mod(inp))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_mkldnn_fuser_broadcasting(self):
class Add(nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
return x + self.tensor
with set_default_dtype(torch.float):
for add_inp in [8], [8, 8, 1]:
mod = nn.Sequential(self.conv(), Add(torch.rand(add_inp))).eval()
scripted_mod = torch.jit.script(mod)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
FileCheck().check("prim::BroadcastMKLDNNTensors").run(scripted_mod.graph)
inp = torch.rand([1, 8, 8, 8])
self.assertEqual(scripted_mod(inp), mod(inp))
self.assertEqual(scripted_mod(inp), mod(inp))
# for good measure, check that broadcasting does not work without this op
# so we can remove the op if it ever gets supported
with self.assertRaisesRegex(RuntimeError, ""):
torch.rand([1, 8, 8, 8]).to_mkldnn() + torch.rand(add_inp).to_mkldnn()
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_mkldnn_inplace_removal(self):
class AddMul(nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
return x.add_(self.tensor).div_(self.tensor) - 4
with set_default_dtype(torch.float):
mod = nn.Sequential(self.conv(), AddMul(torch.rand([8]))).eval()
scripted_mod = torch.jit.script(mod)
scripted_mod = torch.jit.freeze(scripted_mod)
self.run_pass("convert_frozen_ops_to_mkldnn", scripted_mod.graph)
# add gets uninplaced and reinplaced
FileCheck().check("aten::to_mkldnn").check("aten::add_").check("aten::div_").run(scripted_mod.graph)
inp = torch.rand([1, 8, 8, 8])
self.assertEqual(scripted_mod(inp), mod(inp))
self.assertEqual(scripted_mod(inp), mod(inp))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
@skipIfNoTorchVision
def test_maxpool_mkldnn(self):
with set_default_dtype(torch.float):
model = torchvision.models.resnet18()
sub_model = torch.nn.Sequential(model.conv1, model.bn1, model.relu, model.maxpool)
mod = torch.jit.freeze(torch.jit.script(sub_model.eval()))
N, C, H, W, = 10, 3, 224, 224
inp = torch.randn(N, C, H, W)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check("max_pool").check("to_dense").run(mod.graph)
FileCheck().check_count("to_dense", 1, exactly=True).run(mod.graph)
self.assertEqual(mod(inp), sub_model(inp))
@unittest.skipIf(torch._C.has_mkldnn, "Testing no mkldnn")
def test_conv_to_mkldnn_no_mkldnn(self):
# test no error when mkldnn not available
with set_default_dtype(torch.float):
mod = torch.jit.script(nn.Conv2d(3, 32, kernel_size=3, stride=2).eval())
frozen = torch.jit.freeze(mod)
self.run_pass("convert_frozen_ops_to_mkldnn", frozen.graph)
inp = torch.rand([4, 3, 4, 4])
self.assertEqual(frozen(inp), mod(inp))
@unittest.skipIf(not TEST_CUDNN, "requires CUDNN")
def test_freeze_conv_relu_fusion(self):
conv_bias = [True, False]
conv_ops = [nn.Conv2d, nn.Conv3d]
add_z = [True, False]
use_tracing = [True, False]
for use_bias, conv, add_z, tracing in product(conv_bias, conv_ops, add_z, use_tracing):
class Net(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(Net, self).__init__()
self.conv = conv(in_channels, out_channels, bias=use_bias, **kwargs)
self.relu = nn.ReLU(inplace=True)
self.add_z = add_z
def forward(self, x):
z = self.conv(x)
out = self.conv(x)
if self.add_z:
out += z
out = self.relu(out)
return out
mod_eager = Net(3, 6, kernel_size=3, stride=2).eval().cuda()
inps = [5, 3, 4, 4]
if conv == nn.Conv3d:
inps.append(inps[-1])
inp = torch.rand(inps).cuda()
if tracing:
scripted_mod = torch.jit.trace(mod_eager, (inp))
else:
scripted_mod = torch.jit.script(mod_eager)
frozen_mod = torch.jit.optimize_for_inference(scripted_mod)
if add_z:
FileCheck().check("aten::cudnn_convolution_add_relu").run(frozen_mod.graph)
else:
FileCheck().check("aten::cudnn_convolution_relu").run(frozen_mod.graph)
self.assertEqual(mod_eager(inp), frozen_mod(inp))
@unittest.skipIf(not TEST_CUDNN, "requires CUDNN")
def test_freeze_conv_relu_fusion_not_forward(self):
class Net(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super(Net, self).__init__()
self.conv = nn.Conv2d(in_channels, out_channels, bias=None, **kwargs)
self.relu = nn.ReLU(inplace=True)
def forward(self, x):
z = self.conv(x)
out = self.conv(x)
out = self.relu(out)
return out
@torch.jit.export
def make_prediction(self, x):
return self.forward(x)
mod_eager = Net(3, 6, kernel_size=3, stride=2).eval().cuda()
inps = [5, 3, 4, 4]
inp = torch.rand(inps).cuda()
scripted_mod = torch.jit.script(mod_eager)
frozen_mod = torch.jit.freeze(scripted_mod, preserved_attrs=['make_prediction'])
optimized_mod = torch.jit.optimize_for_inference(frozen_mod, other_methods=['make_prediction'])
FileCheck().check("aten::cudnn_convolution_relu").run(optimized_mod.make_prediction.graph)
self.assertEqual(mod_eager.make_prediction(inp), optimized_mod.make_prediction(inp))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_incompatible_perf_formats(self):
with set_default_dtype(torch.float):
class Mod(nn.Module):
def __init__(self):
super().__init__()
self.conv = torch.nn.Conv2d(3, 64, 3, 2)
self.max_pool = torch.nn.MaxPool2d(111, 111)
def forward(self, x):
a = self.conv(x)
b = self.max_pool(a)
return a + b
model = Mod()
model.eval()
mod = torch.jit.freeze(torch.jit.script(model))
N, C, H, W, = 10, 3, 224, 224
inp = torch.randn(N, C, H, W)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
self.assertEqual(model(inp), mod(inp))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_pool2d_batchnorm(self):
with set_default_dtype(torch.float):
pooling_layers = [torch.nn.AdaptiveAvgPool2d(4),
# torch.nn.AdaptiveMaxPool2d(4), # return tuples
torch.nn.MaxPool2d(4),
torch.nn.AvgPool2d(4),
torch.nn.BatchNorm2d(64).eval()]
for pl in pooling_layers:
sub_model = torch.nn.Sequential(torch.nn.Conv2d(3, 64, 2, 2), torch.nn.ReLU(), pl, torch.nn.Hardswish())
sub_model.eval()
mod = torch.jit.freeze(torch.jit.script(sub_model))
N, C, H, W, = 10, 3, 224, 224
inp = torch.randn(N, C, H, W)
# these two passes are needed to remove
# a size check in BatchNorm2d
removeExceptions(mod.graph)
self.run_pass('dce', mod.graph)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check("aten::to_dense").check_next("return").run(mod.graph)
self.assertEqual(sub_model(inp), mod(inp))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_pool3d_batchnorm(self):
with set_default_dtype(torch.float):
pooling_layers = [torch.nn.MaxPool3d(4),
# torch.nn.AdaptiveAvgPool3d(4), # no ideep bindings
# torch.nn.AdaptiveMaxPool3d(4), # return tuples
torch.nn.AvgPool3d(4),
torch.nn.BatchNorm3d(64).eval()]
for pl in pooling_layers:
sub_model = torch.nn.Sequential(torch.nn.Conv3d(3, 64, 2, 2), torch.nn.ReLU(), pl, torch.nn.Hardswish())
sub_model.eval()
mod = torch.jit.freeze(torch.jit.script(sub_model))
N, C, H, W, D = 10, 3, 64, 64, 64
inp = torch.randn(N, C, D, H, W)
# these two passes are needed to remove
# a size check in BatchNorm3d
removeExceptions(mod.graph)
self.run_pass('dce', mod.graph)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check("aten::to_dense").check_next("return").run(mod.graph)
self.assertEqual(sub_model(inp), mod(inp))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
@skipIfNoTorchVision
def test_conv_hardswish(self):
with set_default_dtype(torch.float):
class Clamp(torch.nn.Module):
def __init__(self, min_val, max_val, **kwargs):
super(Clamp, self).__init__()
self.min_val = min_val
self.max_val = max_val
def forward(self, x):
return torch.clamp(x, self.min_val, self.max_val)
N, C, H, W, = 10, 3, 224, 224
activations = [
torch.nn.Hardswish(),
torch.nn.Hardsigmoid(),
torch.nn.ReLU6(),
torch.nn.Tanh(),
torch.nn.Hardtanh(0., 6.),
torch.nn.Hardtanh(1., 100.),
torch.nn.Hardtanh(-100., -1.),
torch.nn.GELU(),
Clamp(-100., -1.),
Clamp(1., 100.),
Clamp(0., 6.),
Clamp(-1., 0.),
]
model = torchvision.models.resnet18()
for activation in activations:
sub_model = torch.nn.Sequential(model.conv1, activation)
sub_model.eval()
mod = torch.jit.freeze(torch.jit.script(sub_model))
inp = torch.randn(N, C, H, W)
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
FileCheck().check_count("aten::to_dense", 1, exactly=True).run(mod.graph)
self.assertEqual(sub_model(inp), mod(inp))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_hardswish_hardsigmoid(self):
with set_default_dtype(torch.float):
op_map = {
'prim::MKLDNNHardSwish' : F.hardswish,
'prim::MKLDNNHardSigmoid' : F.hardsigmoid,
}
input_sizes = ([0], [1], [3], [1, 3, 8, 8])
for (mkldnn_opname, aten_op) in op_map.items():
for size in input_sizes:
for inplace in (True, False):
inplace_str = "_" if inplace else ""
inplace_tgt = "%34" if inplace else "%35"
graph_str = f"""graph(%input.1 : Tensor):
%33 : None = prim::Constant()
%34 : Tensor = aten::to_mkldnn(%input.1, %33)
%35 : Tensor = {mkldnn_opname}{inplace_str}(%34)
return ({inplace_tgt})
"""
g = torch._C.parse_ir(graph_str)
m = self.createFunctionFromGraph(g)
x = torch.rand(size)
# `inplace=False` is intentional, otherwise we modify the input
# and we aren't testing aten impls anyways
self.assertEqual(aten_op(x, inplace=False), m(x).to_dense())
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
def test_scalar_mul(self):
with set_default_dtype(torch.float):
class Mod(nn.Module):
def __init__(self):
super().__init__()
self.mod = nn.Conv2d(8, 8, 1, padding=1)
def forward(self, x):
a1 = self.mod(x) * 4
return a1 * 4 + a1 * 5.
mod = Mod().eval()
scripted = torch.jit.freeze(torch.jit.script(mod))
optimized = torch.jit.optimize_for_inference(scripted)
inp = torch.rand([1, 8, 8, 8])
# a1 can't be inplaced for its first use, but can be for the second
FileCheck().check("ScalarMul(").check("ScalarMul_").run(optimized.graph)
self.assertEqual(optimized(inp), mod(inp))
def test_remove_detach(self):
class Mod(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
y = x.detach()
return y * y
mod = Mod().eval()
frozen_mod = torch.jit.freeze(torch.jit.script(mod))
inp = torch.randn((2, 2))
FileCheck().check_not("aten::detach").run(frozen_mod.graph)
self.assertEqual(frozen_mod(inp), mod(inp))
def test_remove_detach_not_applied(self):
class Mod(nn.Module):
def __init__(self):
super().__init__()
def forward(self, x):
y = x.detach()
return x is y
mod = Mod().eval()
frozen_mod = torch.jit.freeze(torch.jit.script(mod))
inp = torch.randn((2, 2))
FileCheck().check("aten::detach").run(frozen_mod.graph)
self.assertEqual(frozen_mod(inp), mod(inp))
@unittest.skipIf(not torch._C.has_mkldnn, "MKL-DNN build is disabled")
class TestMKLDNNReinplacing(JitTestCase):
def setUp(self):
self.default_dtype = torch.get_default_dtype()
torch.set_default_dtype(torch.float)
def tearDown(self):
torch.set_default_dtype(self.default_dtype)
def getConv(self):
return nn.Conv2d(3, 32, kernel_size=3, stride=2).eval()
def getInput(self):
return torch.rand([4, 3, 4, 4])
def freezeAndConvert(self, mod):
mod = torch.jit.freeze(torch.jit.script(mod.eval()))
self.run_pass("convert_frozen_ops_to_mkldnn", mod.graph)
return mod
def checkResults(self, mod1, mod2):
inp = self.getInput()
self.assertEqual(mod1(inp), mod2(inp))
def test_successful(self):
# simple conv-relu
mod_eager = nn.Sequential(self.getConv(), nn.Hardswish(), nn.ReLU())
mod = self.freezeAndConvert(mod_eager)
FileCheck().check("mkldnn_convolution").check_next("prim::MKLDNNHardSwish_").check_next("aten::relu_").run(mod.graph)
self.checkResults(mod_eager, mod)
def test_merge_liveness(self):
class Mod(nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
# this mul can be inplaced since x is dead after this use
temporary = x * self.tensor
# temporary's lifespan extends to the return node,
# so the add cannot be inplaced
return temporary + temporary, temporary
mod_eager = nn.Sequential(self.getConv(), Mod(torch.rand([4, 32, 1, 1])))
mod = self.freezeAndConvert(mod_eager)
FileCheck().check("aten::mul_").check_not("aten::add_").run(mod.graph)
self.checkResults(mod_eager, mod)
def test_always_alive_values(self):
class Mod(nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
# x can't be inplaced because it's a return value;
# check that the inplacing pass doesn't try to inplace
# self.tensor because it's always alive
return x * self.tensor, x
mod_eager = nn.Sequential(self.getConv(), Mod(torch.rand([4, 32, 1, 1])))
mod = self.freezeAndConvert(mod_eager)
FileCheck().check_not("aten::mul_").run(mod.graph)
self.checkResults(mod_eager, mod)
conv = self.getConv()
class Mod(nn.Module):
def __init__(self):
super().__init__()
self.tensor = torch.rand([4, 32, 1, 1])
self.conv = conv
def forward(self, x):
# the shapes don't add up here; we are just testing a particular pattern
conv_output = self.conv(x)
return conv_output, self.conv(torch.add(x, x))
mod = self.freezeAndConvert(Mod())
# x is an input to the graph, and so it should not be inplaced
# in the torch.add(x, x) call
FileCheck().check_not("aten::add_").run(mod.graph)
def test_switch_inputs_to_inplace(self):
class Mod(nn.Module):
def __init__(self, tensor):
super().__init__()
self.tensor = tensor
def forward(self, x):
# self.tensor cannot be inplaced, but x can,
# and because add is commutative we can reverse the inputs to add_
return self.tensor + x
mod_eager = nn.Sequential(self.getConv(), Mod(torch.rand([4, 32, 1, 1])))
mod = self.freezeAndConvert(mod_eager)
FileCheck().check("aten::add_").run(mod.graph)
self.checkResults(mod_eager, mod)
|
pytorch-master
|
test/jit/test_freezing.py
|
# Owner(s): ["oncall: jit"]
import os
import sys
import torch
from typing import List
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
from torch.testing._internal.jit_utils import JitTestCase
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
# Tests that Python slice class is supported in TorchScript
class TestSlice(JitTestCase):
def test_slice_kwarg(self):
def slice_kwarg(x: List[int]):
return x[slice(1, stop=2)]
with self.assertRaisesRegex(RuntimeError, "Slice does not accept any keyword arguments"):
torch.jit.script(slice_kwarg)
def test_slice_three_nones(self):
def three_nones(x: List[int]):
return x[slice(None, None, None)]
self.checkScript(three_nones, (range(10),))
def test_slice_two_nones(self):
def two_nones(x: List[int]):
return x[slice(None, None)]
self.checkScript(two_nones, (range(10),))
def test_slice_one_none(self):
def one_none(x: List[int]):
return x[slice(None)]
self.checkScript(one_none, (range(10),))
def test_slice_stop_only(self):
def fn(x: List[int]):
return x[slice(5)]
self.checkScript(fn, (range(10),))
def test_slice_stop_only_with_nones(self):
def fn(x: List[int]):
return x[slice(None, 5, None)]
self.checkScript(fn, (range(10),))
def test_slice_start_stop(self):
def fn(x: List[int]):
return x[slice(1, 5)]
self.checkScript(fn, (range(10),))
def test_slice_start_stop_with_none(self):
def fn(x: List[int]):
return x[slice(1, 5, None)]
self.checkScript(fn, (range(10),))
def test_slice_start_stop_step(self):
def fn(x: List[int]):
return x[slice(0, 6, 2)]
self.checkScript(fn, (range(10),))
def test_slice_string(self):
def fn(x: str):
return x[slice(None, 3, 1)]
self.checkScript(fn, ("foo_bar",))
def test_slice_tensor(self):
def fn(x: torch.Tensor):
return x[slice(None, 3, 1)]
self.checkScript(fn, (torch.ones(10),))
def test_slice_tensor_multidim(self):
def fn(x: torch.Tensor):
return x[slice(None, 3, 1), 0]
self.checkScript(fn, (torch.ones((10, 10)),))
def test_slice_tensor_multidim_with_dots(self):
def fn(x: torch.Tensor):
return x[slice(None, 3, 1), ...]
self.checkScript(fn, (torch.ones((10, 10)),))
def test_slice_as_variable(self):
def fn(x: List[int]):
a = slice(1)
return x[a]
self.checkScript(fn, (range(10),))
def test_slice_stop_clipped(self):
def fn(x: List[int]):
return x[slice(1000)]
self.checkScript(fn, (range(10),))
def test_slice_dynamic_index(self):
def t(x):
slice1 = x[0:1]
zero = 0
one = zero + 1
slice2 = x[zero:one]
return slice1 + slice2
self.checkScript(t, (torch.zeros(3, 2, 3),))
def test_tuple_slicing(self):
def tuple_slice(a):
if bool(a):
b = (1, 2, 3, 4)
else:
b = (4, 3, 2, 1)
c = b[-4:4]
e = c[1:-1]
return e
self.checkScript(tuple_slice, (torch.tensor([1]),), optimize=True)
scripted_fn = torch.jit.script(tuple_slice)
self.assertEqual(scripted_fn(torch.tensor(1)), (2, 3))
tuple_graph = scripted_fn.graph
slices = tuple_graph.findAllNodes("prim::TupleConstruct")
num_outputs = set(len(x.output().type().elements()) for x in slices)
# there should be only one TupleSlice, with a length of 2
self.assertTrue(num_outputs == {2})
self.run_pass('lower_all_tuples', tuple_graph)
self.assertTrue('Tuple' not in str(tuple_graph))
def test_module_list_slicing(self):
class Bar(torch.nn.Module):
def __init__(self, identifier: str):
super().__init__()
self.identifier = identifier
def forward(self):
return 0
class Foo(torch.nn.Module):
def __init__(self):
super().__init__()
module_list = [Bar("A"), Bar("B"), Bar("C"), Bar("D"), Bar("E")]
self.test = torch.nn.ModuleList(module_list)
def forward(self):
return self.test[::-2], self.test[1:4:]
scripted_foo = torch.jit.script(Foo())
result1, result2 = scripted_foo()
self.assertEqual(len(result1), 3)
self.assertEqual(result1[0].identifier, "E")
self.assertEqual(result1[1].identifier, "C")
self.assertEqual(result1[2].identifier, "A")
self.assertEqual(len(result2), 3)
self.assertEqual(result2[0].identifier, "B")
self.assertEqual(result2[1].identifier, "C")
self.assertEqual(result2[2].identifier, "D")
|
pytorch-master
|
test/jit/test_slice.py
|
# Owner(s): ["oncall: jit"]
import operator
import unittest
from textwrap import dedent
import torch
from torch import nn
from torch.testing import FileCheck
from torch.testing._internal.common_methods_invocations import sample_inputs_cat_concat
from torch.testing._internal.common_utils import make_tensor
from torch.testing._internal.jit_utils import JitTestCase, execWrapper
from typing import List, Any
if __name__ == '__main__':
raise RuntimeError("This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead.")
# XXX: still in prototype
class TestSymbolicShapeAnalysis(JitTestCase):
def setUp(self):
self.prev_symbolic_shapes_test_enabled = torch._C._jit_symbolic_shapes_test_mode_enabled()
torch._C._jit_set_symbolic_shapes_test_mode(True)
def tearDown(self):
torch._C._jit_set_symbolic_shapes_test_mode(self.prev_symbolic_shapes_test_enabled)
def test_shape_analysis(self):
@torch.jit.script
def foo(x, y):
return x * y
inputs = list(foo.graph.inputs())
def prop_shapes_on_graph(inp0, inp1):
inputs[0].setType(inputs[0].type().with_sizes(inp0))
inputs[1].setType(inputs[1].type().with_sizes(inp1))
torch._C._jit_pass_propagate_shapes_on_graph(foo.graph)
prop_shapes_on_graph([1, 6, 5], [1, 7, 1, 5])
FileCheck().check("1, 7, 6, 5").run(foo.graph)
# None implicitly creates a new symbolic symbol
prop_shapes_on_graph([None, None], [None, None, None])
output_shape = foo.graph.findNode("aten::mul").output().type().symbolic_sizes()
inp0_shape = inputs[0].type().symbolic_sizes()
inp1_shape = inputs[1].type().symbolic_sizes()
# output shape dim 0 should be taken from the second inp dim0
# other two dims we cannot infer and are given a new symbolic shape
self.assertEqual(output_shape[0], inp1_shape[0])
self.assertFalse(output_shape[1] in inp0_shape + inp1_shape)
self.assertFalse(output_shape[2] in inp0_shape + inp1_shape)
# XXX: symbolic shapes are represented with an increasing counter of unique
# values, use `_new_symbolic_shape_symbol` api instead of specifying negative
# dimensions directly so there is no chance of collision between manual number
# and current counter value.
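# For illustration: per the note above, each _new_symbolic_shape_symbol() call hands back a
# fresh negative integer, while concrete dimensions in symbolic_sizes() are non-negative,
# which is how the assertions in these tests tell the two apart.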
sym1 = torch._C._new_symbolic_shape_symbol()
sym2 = torch._C._new_symbolic_shape_symbol()
sym3 = torch._C._new_symbolic_shape_symbol()
prop_shapes_on_graph([sym1, 1, sym3], [1, sym2, sym3])
output_shape = foo.graph.findNode("aten::mul").output().type().symbolic_sizes()
self.assertEqual(output_shape[0], sym1)
self.assertEqual(output_shape[1], sym2)
self.assertEqual(output_shape[2], sym3)
def test_shared_shape_graph(self):
@torch.jit.script
def foo(x, y):
return x * y, x / y
mul_node = foo.graph.findNode("aten::mul")
div_node = foo.graph.findNode("aten::div")
mul_graph = torch._C._jit_shape_compute_graph_for_node(mul_node)
div_graph = torch._C._jit_shape_compute_graph_for_node(div_node)
self.assertIsNotNone(mul_graph)
self.assertIs(mul_graph, div_graph)
def test_write(self):
@torch.jit.script
def foo(a, b):
return a * b
# broadcast appends can't be removed, so we bail on propagation
torch._C._jit_pass_propagate_shapes_on_graph(foo.graph)
FileCheck().check("Tensor = aten::mul").run(foo.graph)
@torch.jit.script
def foo(y):
x = [1, 2, 3, 4]
x[0] = 5
return y.view(x)
torch._C._jit_pass_propagate_shapes_on_graph(foo.graph)
FileCheck().check("Tensor = aten::view").run(foo.graph)
def test_if_propagation(self):
@torch.jit.script
def foo(i: int, z):
x = torch.ones([2, 3, 4, 5])
y = z.view([z.size(i), 3, 2, z.size(i)])
if i == 4:
return x
else:
return y
torch._C._jit_pass_constant_propagation(foo.graph)
torch._C._jit_pass_propagate_shapes_on_graph(foo.graph)
view = foo.graph.findNode("aten::view")
def neg_to_one(li):
return [elem if elem >= 0 else -1 for elem in li]
self.assertEqual(neg_to_one(view.output().type().symbolic_sizes()), [-1, 3, 2, -1])
if_out = next(foo.graph.findNode("prim::If").outputs())
self.assertEqual(neg_to_one(if_out.type().symbolic_sizes()), [-1, 3, -1, -1])
def test_unary_shape_functions(self):
unary_ops = [
torch.nn.functional.hardtanh,
]
for fn in unary_ops:
t = torch.jit.trace(fn, (torch.rand([4, 4])))
ten_input = next(t.graph.inputs())
ten_input.setType(ten_input.type().with_sizes([2, 2]))
torch._C._jit_pass_propagate_shapes_on_graph(t.graph)
self.assertEqual(next(t.graph.outputs()).type().symbolic_sizes(), [2, 2])
def test_unary_shape_fns_inplace(self):
def mul_inplace(x: torch.Tensor):
y = x.mul_(2)
return y
unary_ops = [
mul_inplace
]
for fn in unary_ops:
# t = torch.jit.trace(fn, torch.rand([4, 4])) # For some reason tracing is erroring out.
t = torch.jit.script(fn)
ten_input = next(t.graph.inputs())
ten_input.setType(ten_input.type().with_sizes([2, 2]))
torch._C._jit_pass_propagate_shapes_on_graph(t.graph)
self.assertEqual(next(t.graph.outputs()).type().symbolic_sizes(), [2, 2])
def test_binary_shape_functions(self):
binary_ops = [
operator.__mul__,
operator.__truediv__,
operator.__gt__,
operator.__add__,
]
for fn in binary_ops:
size_1 = [1, 4, 8]
size_2 = [4, 1, 8]
t = torch.jit.trace(fn, (torch.rand([4]), torch.rand([4])))
inputs = list(t.graph.inputs())
inputs[0].setType(inputs[0].type().with_sizes(size_1))
inputs[1].setType(inputs[1].type().with_sizes(size_2))
torch._C._jit_pass_propagate_shapes_on_graph(t.graph)
self.assertEqual(next(t.graph.outputs()).type().symbolic_sizes(), [4, 4, 8])
break
def test_binary_shape_fns_inplace(self):
def div_inplace_tensor(x: torch.Tensor, y: torch.Tensor):
z = x.div_(y)
return z
def add_inplace_tensor(x: torch.Tensor, y: torch.Tensor):
z = x.add_(y)
return z
binary_ops = [
div_inplace_tensor,
add_inplace_tensor,
]
for fn in binary_ops:
size_1 = [4, 4, 8] # x (can't broadcast because it's an inplace op)
t = torch.jit.script(fn)
inputs = list(t.graph.inputs())
inputs[0].setType(inputs[0].type().with_sizes(size_1))
# Intentionally do not populate the type of inputs[1]
torch._C._jit_pass_propagate_shapes_on_graph(t.graph)
self.assertEqual(next(t.graph.outputs()).type().symbolic_sizes(), [4, 4, 8])
def test_size_and_sizes(self):
@torch.jit.script
def foo(x, y):
return x.view(y.size(0), 8, y.size(-1))
@torch.jit.script
def foo2(x, y):
return x.view(y.size())
for graph in [foo.graph, foo2.graph]:
inputs = list(graph.inputs())
sym1 = torch._C._new_symbolic_shape_symbol()
inputs[1].setType(inputs[1].type().with_sizes([5, 8, sym1]))
torch._C._jit_pass_propagate_shapes_on_graph(graph)
self.assertEqual(next(graph.outputs()).type().symbolic_sizes(), [5, 8, sym1])
def test_adaptive_avg_pool2d(self):
inps = [
[(1, 64, 8, 9), (5, 7)],
[(1, 64, 10, 9), (7)],
[(1, 64, 10, 9), (5, None)],
[(1, 8, 4, 3), (None, None)],
[(1, 8, 4, 3), (None, 5)],
]
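# Note: a None entry in the requested output size tells adaptive_avg_pool2d to keep the
# corresponding input dimension unchanged, so the cases above also exercise the
# partially-specified output-size path of the shape function.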
for inp in inps:
t = torch.randn(*inp[0])
out_size = torch.nn.functional.adaptive_avg_pool2d(t, inp[1]).size()
def foo(x):
return torch.nn.functional.adaptive_avg_pool2d(x, inp[1])
fn = torch.jit.trace(foo, (t,))
torch._C._jit_erase_non_input_shape_information(fn.graph)
torch._C._jit_pass_peephole(fn.graph)
torch._C._jit_pass_constant_propagation(fn.graph)
self.checkShapeAnalysis(out_size, fn.graph, assert_propagation=True)
def test_arange_shape(self):
# no opinfo for tensor constructors
inps = [
(10,),
(10, 10),
(0, 10),
(0, 1000),
(1, -1, -1),
(1, 0, -1),
(1, 2, 1),
(0.6, 0.89, 0.1),
(1, 10, 0.3),
(1, 10, 4),
(0.6, 0.7, 0.8),
(1, 10, 0.3),
# (True,), TODO: https://github.com/pytorch/pytorch/issues/63405
# (False,), TODO: https://github.com/pytorch/pytorch/issues/63405
(0, 5),
(0, 5, 2),
(0, 5 + 1e-6),
(0, 5 - 1e-6),
(10, -1 + 1e-6, -1),
(10, -1, -1),
(10, -1 - 1e-6, -1),
]
for inp in inps:
funcs_template = dedent('''
def func():
return torch.arange({args})
''')
inp_s = str(inp)[1:-1] # remove tuple parens
funcs_str = funcs_template.format(args=inp_s)
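# For example, with inp == (0, 5, 2) the generated source is roughly:
#     def func():
#         return torch.arange(0, 5, 2)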
scope = {}
execWrapper(funcs_str, globals(), scope)
cu = torch.jit.CompilationUnit(funcs_str)
self.checkShapeAnalysis(list(cu.func().size()), cu.func.graph, assert_propagation=True, constant_prop=False)
def test_shape_embedding_bag(self):
# TODO: merge into opinfos, having difficulties there
with torch.no_grad():
def make_arg(shape, low=None, high=None):
return make_tensor(shape, device='cpu', dtype=torch.int64,
low=low, high=high, requires_grad=False)
nn_inps = (
(make_arg((40,), 0, 9), torch.nn.Embedding(20, embedding_dim=64, max_norm=1.0)),
(make_arg((2, 4), 0, 9), torch.nn.Embedding(10, 20, sparse=True)),
(make_arg((0,)), torch.nn.Embedding(0, 0, sparse=True)),
(make_arg((2, 4), 0, 9), torch.nn.Embedding(10, 0, sparse=True)),
(make_arg((4,), 0, 21), torch.nn.Embedding(22, 5, max_norm=1.0)),
(make_arg((2,), 0, 1), torch.nn.Embedding.from_pretrained(torch.arange(6.).view(2, 3), max_norm=2.,
norm_type=.5, scale_grad_by_freq=False, sparse=True)),
)
for inp, module in nn_inps:
kwargs = {
"weight": module.weight.detach(),
"padding_idx": module.padding_idx,
"max_norm": module.max_norm,
"norm_type": module.norm_type,
"scale_grad_by_freq": module.scale_grad_by_freq,
"sparse": module.sparse,
}
out_size = torch.nn.functional.embedding(inp, **kwargs).size()
def foo(x):
return torch.nn.functional.embedding(inp, **kwargs)
fn = torch.jit.trace(foo, (inp.detach(),), check_trace=False)
self.checkShapeAnalysis(out_size, fn.graph, assert_propagation=True, constant_prop=False)
def test_shape_concat(self):
# TODO: unify with opinfo tests, traces of lists dont preserve sizes in IR
sample_inputs = sample_inputs_cat_concat(None, "cpu", torch.float, False)
class CatMod(nn.Module):
__constants__ = ['dim']
def __init__(self, dim=0):
super(CatMod, self).__init__()
self.dim = dim
def forward(self, x, y):
return torch.cat([x, y], dim=self.dim)
for inp in sample_inputs:
mod = torch.jit.script(CatMod(**inp.kwargs).eval())
args = inp.input
self.assertTrue(len(args) == 2)
out_size = mod(*args).size()
inps = list(mod.graph.inputs())
inps[1].setType(inps[1].type().with_sizes(args[0].size()))
inps[2].setType(inps[2].type().with_sizes(args[1].size()))
self.checkShapeAnalysis(out_size, mod.graph, assert_propagation=True)
def assert_shape_equal_scripted(self, script_fn, given_ins):
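# Helper: copy the concrete sizes of the eager inputs onto the scripted graph's inputs,
# then check that shape propagation reproduces the output shapes computed eagerly.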
expected_res = script_fn(*given_ins)
g = script_fn.graph
graph_ins = list(g.inputs())
self.assertEqual(len(given_ins), len(graph_ins))
for inp, graph_in in zip(given_ins, graph_ins):
graph_in.setType(graph_in.type().with_sizes(inp.size()))
out_sizes = [out.size() for out in expected_res]
self.checkShapeAnalysis(out_sizes, g, assert_propagation=True)
def test_convolution_backward(self):
# No opinfos for ops that are not part of the Python API
# Also, as the return shapes are the input, weight, and bias shape, there is no point
# in a really complicated test
input = torch.randn((16, 16, 8, 8), dtype=torch.float32, device="cpu", requires_grad=True)
weight = torch.randn((8, 4, 3, 3), dtype=torch.float32, device="cpu", requires_grad=True)
out_grad = torch.randn((16, 8, 8, 8), dtype=torch.float32, device="cpu")
@torch.jit.script
def conv_bwd(input, weight, grad):
bias_sizes = [8, ]
args = ([1, 1], [1, 1], [1, 1], False, [0, 0], 4, [True, True, True])
return torch.ops.aten.convolution_backward(grad, input, weight, bias_sizes, *args)
self.assert_shape_equal_scripted(conv_bwd, (input, weight, out_grad))
@torch.jit.script
def conv_bwd_2(input, weight, grad):
bias_sizes = None
args = ([1, 1], [1, 1], [1, 1], False, [0, 0], 4, [True, True, True])
return torch.ops.aten.convolution_backward(grad, input, weight, bias_sizes, *args)
self.assert_shape_equal_scripted(conv_bwd_2, (input, weight, out_grad))
def test_returning_input_symbolic_shapes(self):
mm = torch.jit.freeze(torch.jit.script(nn.Conv2d(16, 33, 3, stride=2).eval()))
inps = list(mm.graph.inputs())
inps[1].setType(inps[1].type().with_sizes([None, None, None, None]))
shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mm.graph)
g = shape_compute_graph.partial_eval_shape_graph()
# to make this into a jit function, the graph can't have multiple outputs
g.makeMultiOutputIntoTuple()
func = torch._C._create_function_from_graph("partial_eval_graph", g)
out = func([20, 16, 5, 10])
# first four outputs should be unknown symbolic shapes from input
self.assertEqual(out[0:4], [20, 16, 5, 10])
# last two are two new symbolic dims - height and width
self.assertEqual(out[4:], list(mm(torch.rand([20, 16, 5, 10])).size()[2:]))
def test_partial_eval_graph_conv(self):
mm = torch.jit.freeze(torch.jit.script(nn.Conv2d(16, 33, 3, stride=2).eval()))
shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mm.graph)
output_sizes = mm.graph.findNode("aten::conv2d").output().type().symbolic_sizes()
# dims 0, 2 and 3 are computed symbolically; dim 1 (out channels) is statically known
for i in [0, 2, 3]:
self.assertTrue(output_sizes[i] < 0)
self.assertTrue(output_sizes[1] >= 0)
g = shape_compute_graph.partial_eval_shape_graph()
# to make this into a jit function, the graph can't have multiple outputs
g.makeMultiOutputIntoTuple()
func = torch._C._create_function_from_graph("partial_eval_graph", g)
inp = torch.randn(20, 16, 5, 10)
output = func([20, 16, 5, 10])
output_eager = list(mm(inp).size())
for o, oe in zip(output, output_eager[0:1] + output_eager[2:]):
self.assertEqual(o, oe)
def checkSymShapeCompute(self, shape_compute_graph, nodes, node_output_sizes, shape_inputs):
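# Helper: run the stitched partial-eval shape graph on concrete input shapes and check that
# every symbolic dimension recorded on the given nodes maps to the expected concrete size.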
g = shape_compute_graph.partial_eval_shape_graph()
self.assertTrue(len(list(g.inputs())) == len(shape_inputs))
output_sym_map = shape_compute_graph.graph_output_to_symbolic_shape_dim()
# map from sym shape -> index
sym_shape_to_index = {}
for index, output in enumerate(g.outputs()):
sym_shape_to_index[output_sym_map[output]] = index
g.makeMultiOutputIntoTuple()
func = torch._C._create_function_from_graph("partial_eval_graph", g)
sym_outputs = func(*shape_inputs)
for node, output_shape in zip(nodes, node_output_sizes):
output_type_sizes = node.output().type().symbolic_sizes()
for i, sym_shape in enumerate(output_type_sizes):
if sym_shape >= 0:
self.assertEqual(sym_shape, output_shape[i])
else:
sym_shape_index = sym_shape_to_index[sym_shape]
self.assertEqual(sym_outputs[sym_shape_index], output_shape[i])
def test_partial_eval_stitching(self):
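# "Stitching" here means composing the per-node shape compute graphs of the whole
# conv -> max_pool -> conv pipeline into a single graph that maps the input shape to
# every symbolic output dimension, verified via checkSymShapeCompute below.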
conv1 = torch.nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
max_pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
conv2 = nn.Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
mod = torch.jit.freeze(torch.jit.script(nn.Sequential(conv1, max_pool, conv2).eval()))
conv1_output = conv1(torch.rand(1, 3, 224, 224))
max_pool_output = max_pool(conv1_output)
conv2_output = conv2(max_pool_output)
shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mod.graph)
nodes = [mod.graph.findNode("aten::max_pool2d")] + list(mod.graph.findAllNodes("aten::conv2d"))
output_shapes = [max_pool_output.size(), conv1_output.size(), conv2_output.size()]
self.checkSymShapeCompute(shape_compute_graph, nodes, output_shapes, ([1, 3, 224, 224],))
def test_refinement_through_graph_stitching(self):
class TwoConvs(torch.nn.Module):
def __init__(self):
super(TwoConvs, self).__init__()
self.conv1 = torch.nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
self.conv2 = torch.nn.Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
def forward(self, x):
a = self.conv1(x)
b = self.conv2(x)
return a + b
mod = torch.jit.freeze(torch.jit.script(TwoConvs()).eval())
inp_tensor = list(mod.graph.inputs())[1]
inp_tensor.setType(inp_tensor.type().with_sizes([None, None, None, None]))
torch._C._jit_pass_propagate_shapes_on_graph(mod.graph)
outs = list(next(mod.graph.outputs()).node().inputs())
out1 = outs[0].type().symbolic_sizes()
out2 = outs[1].type().symbolic_sizes()
self.assertTrue(out1[2] != out2[2])
self.assertTrue(out1[3] != out2[3])
# by joining partial eval graphs of both convs we are able to recognize the output shapes
# are equivalent
torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mod.graph)
out1 = outs[0].type().symbolic_sizes()
out2 = outs[1].type().symbolic_sizes()
self.assertEqual(out1, out2)
def test_stitching_multi_output(self):
max_pool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False, return_indices=True)
tensor = torch.rand(1, 3, 224, 224)
mod = torch.jit.trace(max_pool, (tensor,))
mod = torch.jit.freeze(mod.eval())
inp = list(mod.graph.inputs())[1]
inp.setType(inp.type().with_sizes([None, None, None, None]))
output_tensor = list(mod(tensor)[0].size())
self.run_pass('lower_all_tuples', mod.graph)
shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(mod.graph)
max_pool_node = mod.graph.findNode("aten::max_pool2d_with_indices")
outs = list(max_pool_node.outputs())
self.assertEqual(outs[0].type().symbolic_sizes(), outs[1].type().symbolic_sizes())
g = shape_compute_graph.partial_eval_shape_graph()
# to make this into a jit function, the graph can't have multiple outputs
g.makeMultiOutputIntoTuple()
func = torch._C._create_function_from_graph("partial_eval_graph", g)
mapping = shape_compute_graph.graph_output_to_symbolic_shape_dim()
output_shape = func(tensor.size())
# the first 4 outputs are the input's symbolic dimensions, then come the new output height and width dims
self.assertEqual(list(output_shape[0:4]), list(tensor.size()))
self.assertEqual(list(output_shape[4:]), output_tensor[2:])
def test_sym_ir_parsing(self):
graph_str1 = """graph(%x.1 : Float(SS(-2), SS(-3))):
%3 : int = prim::Constant[value=1]()
%4 : Tensor = aten::add(%x.1, %x.1, %3)
return (%4)"""
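# SS(-2) / SS(-3) in the IR text denote symbolic sizes; parsing should round-trip them
# back into the negative symbolic dimension ids asserted below.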
g = torch._C.parse_ir(graph_str1)
inp = next(g.inputs())
out = inp.type().symbolic_sizes()
self.assertEqual(out, [-2, -3])
def test_stitching_concat(self):
@torch.jit.script
def foo1(a, b, x, y):
return (a / b) + torch.cat([x, y])
@torch.jit.script
def foo2(a, b, x, y):
return (a / b) + torch.cat([x, y], dim=-2)
for foo in [foo1, foo2]:
g = foo.graph
for inp in foo.graph.inputs():
inp.setType(inp.type().with_sizes([None, None]))
shape_compute_graph = torch._C._jit_pass_propagate_shapes_on_graph_and_build_compute(foo.graph)
nodes = [g.findNode("aten::div")] + [g.findNode("aten::add")] + [g.findNode("aten::cat")]
inps = [1, 10], [20, 10], [15, 1], [5, 1]
output_shapes = [[20, 10], [20, 10], [20, 1]]
self.checkSymShapeCompute(shape_compute_graph, nodes, output_shapes, inps)
@unittest.skipIf(not hasattr(torch.jit, "_shapes"), "shape functions not loaded in python")
def test_shape_function_includes(self):
inp_shape = [1, 16, 5, 10]
weight_shape = [33, 16, 3, 3]
bias = None
stride = [2, 2]
padding = [0, 0]
dilation = [1, 1]
groups = 1
res = torch.jit._shapes.conv2d(inp_shape, weight_shape, bias, stride, padding, dilation, groups)
self.assertEqual(res, [1, 33, 2, 4])
m1_shape = [10, 20]
m2_shape = [20, 10]
res = torch.jit._shapes.matmul(m1_shape, m2_shape)
self.assertEqual(res, [10, 10])
def test_register_function_error_checking(self):
# this will error before registering on global map, so
# no issue in overwriting schema mappings
@torch.jit.script
def foo(x, y):
return x + y
node = foo.graph.findNode("aten::add")
@torch.jit.script
def wrong_input_types(x, y):
x: List[int] = []
return x
with self.assertRaisesRegex(RuntimeError, "Expected supertype of int"):
torch._C._jit_register_shape_compute_graph_for_node(node, wrong_input_types.graph)
@torch.jit.script
def wrong_output_types(x: List[int], y: List[int]):
x: List[Tensor] = []
return x
with self.assertRaisesRegex(RuntimeError, "but got graph_type"):
torch._C._jit_register_shape_compute_graph_for_node(node, wrong_output_types.graph)
@torch.jit.script
def too_many_inputs(x: List[int], y: List[int], z: Any, z2: Any):
x: List[int] = []
return x
with self.assertRaises(RuntimeError) as error:
torch._C._jit_register_shape_compute_graph_for_node(node, too_many_inputs.graph)
self.assertTrue("fewer arguments than schema" in str(error.exception))
|
pytorch-master
|
test/jit/test_symbolic_shape_analysis.py
|
# Owner(s): ["oncall: jit"]
from torch.testing._internal.jit_utils import JitTestCase
import io
import os
import sys
import unittest
import torch
import torch._C
from torch.testing import FileCheck
from torch.jit.mobile import _load_for_lite_interpreter
import torch.utils.bundled_inputs
from torch.testing._internal.common_utils import (
IS_FBCODE,
IS_MACOS,
IS_SANDCASTLE,
IS_WINDOWS,
TEST_WITH_ROCM,
skipIfRocm,
find_library_location,
)
# Make the helper files in test/ importable
pytorch_test_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
sys.path.append(pytorch_test_dir)
if __name__ == "__main__":
raise RuntimeError(
"This test file is not meant to be run directly, use:\n\n"
"\tpython test/test_jit.py TESTNAME\n\n"
"instead."
)
def to_test_backend(module, method_compile_spec):
return torch._C._jit_to_backend("test_backend", module, {"forward": method_compile_spec})
def to_test_backend_multi(module, method_compile_spec):
return torch._C._jit_to_backend("test_backend", module, method_compile_spec)
def to_test_backend_selective(module, method_compile_spec, submodules):
def _to_test_backend(module):
return to_test_backend(module, method_compile_spec)
return torch._C._jit_to_backend_selective(module, _to_test_backend, submodules)
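# In these helpers, method_compile_spec maps method names to backend-specific option dicts.
# The test backend does not appear to use the options, so the tests below pass empty
# placeholders such as {"forward": {"": ""}}.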
class BasicModule(torch.nn.Module):
"""
A simple Module used to test to_backend lowering machinery.
"""
def __init__(self):
super().__init__()
def forward(self, x, h):
return self.accum(x, h), self.sub_accum(x, h)
def accum(self, x, h):
return x + h
def sub_accum(self, x, h):
return x - h
# This is ignored in IS_WINDOWS or IS_MACOS cases. Hence we need the one in TestBackends.
@unittest.skipIf(TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
"Non-portable load_library call used in test")
class JitBackendTestCase(JitTestCase):
"""
A common base class for JIT backend tests that contains common utility
functions for output comparison and serialization/deserialization.
"""
def setUp(self):
super().setUp()
lib_file_path = find_library_location('libjitbackend_test.so')
torch.ops.load_library(str(lib_file_path))
# Subclasses are expected to set up three variables in their setUp methods:
# module - a regular, Python version of the module being tested
# scripted_module - a scripted version of module
# lowered_module - a version of module lowered to a backend
def check_function(self, function_name, input):
"""
Check that the function named 'function_name' produces the same output using
Python, regular JIT and the backend for the given 'input'.
"""
# Get handles for Python, JIT and backend methods.
python_method = self.module.__getattribute__(function_name)
jit_method = self.scripted_module.__getattr__(function_name)
backend_method = self.lowered_module.__getattr__(function_name)
# Run methods.
python_output = python_method(*input)
jit_output = jit_method(*input)
backend_output = backend_method(*input)
# The answers returned by Python, JIT and to_backend should all match.
self.assertEqual(python_output, backend_output)
self.assertEqual(jit_output, backend_output)
def save_load(self):
"""
Save and load the lowered module.
"""
self.lowered_module = self.getExportImportCopy(self.lowered_module)
def test_execution(self):
"""
Stub for correctness tests.
"""
pass
def test_save_load(self):
"""
Stub for serialization tests.
"""
pass
def test_errors(self):
"""
Stub for testing error checking.
"""
pass
class BasicModuleTest(JitBackendTestCase):
"""
Tests for BasicModule.
"""
def setUp(self):
super().setUp()
# Create Python, JIT and backend versions of BasicModule.
self.module = BasicModule()
self.scripted_module = torch.jit.script(BasicModule())
self.lowered_module = to_test_backend_multi(
self.scripted_module,
{"accum": {"": ""}, "sub_accum": {"": ""}, "forward": {"": ""}},
)
def test_execution(self):
# Test execution with backend against Python and JIT.
input = torch.randn(5)
# Test all three module methods.
self.check_function("accum", (input, input))
self.check_function("sub_accum", (input, input))
self.check_function("forward", (input, input))
@skipIfRocm
def test_save_load(self):
# Lowered module should produce the same outputs.
self.test_execution()
# Save the compile spec to compare against the version retrieved after loading.
pre_compile_spec = self.lowered_module.__getattr__("__loweredModule__").__getattr__("__method_compile_spec")
# Save and load the lowered module.
self.save_load()
# Get the compile spec after loading.
post_compile_spec = self.lowered_module.__getattr__("__loweredModule__").__getattr__("__method_compile_spec")
# Compile specs should match.
self.assertEqual(pre_compile_spec, post_compile_spec)
# Loaded module should produce the same outputs.
self.test_execution()
class BasicModuleUnavailableTest(JitBackendTestCase):
"""
Tests for BasicModule with a backend that is not available.
Fundamentally:
* _jit_to_backend is successful.
* Execution fails with an exception.
* Saving is successful.
* Loading fails with an exception.
"""
def setUp(self):
super().setUp()
# Create Python, JIT and backend versions of BasicModule.
self.module = BasicModule()
self.scripted_module = torch.jit.script(BasicModule())
self.lowered_module = torch._C._jit_to_backend(
"test_backend_unavailable",
self.scripted_module,
{"forward": {"": ""}},
)
def test_execution(self):
# Test execution with backend fails because the backend is not available.
input = torch.randn(5)
# Test exception is thrown.
with self.assertRaisesRegexWithHighlight(Exception,
r"Backend is not available.",
"raise Exception(\"Backend is not available.\""):
backend_method = self.lowered_module.__getattr__("forward")
backend_output = backend_method(*(input, input))
@skipIfRocm
def test_save_load(self):
# Test that saving the lowered module is OK but loading fails because the backend is not available.
buffer = io.BytesIO()
torch.jit.save(self.lowered_module, buffer)
buffer.seek(0)
with self.assertRaisesRegexWithHighlight(Exception,
r"Backend is not available.",
"raise Exception(\"Backend is not available.\""):
imported = torch.jit.load(buffer)
class NestedModuleTest(JitBackendTestCase):
"""
Tests for NestedModule that check that a module lowered to a backend can be used
as a submodule.
"""
class NestedModule(torch.nn.Module):
"""
A Module with one submodule that is used to test that lowered Modules
can be used as submodules.
"""
def __init__(self, submodule):
super().__init__()
self.submodule = submodule
def forward(self, x, h):
return self.submodule.forward(x, h)
def setUp(self):
super().setUp()
# Create Python, JIT and backend versions of NestedModule.
# Both modules in self.module are regular Python modules.
self.module = NestedModuleTest.NestedModule(BasicModule())
# Both modules in self.scripted_module are ScriptModules.
self.scripted_module = torch.jit.script(NestedModuleTest.NestedModule(BasicModule()))
# First, script another instance of NestedModule with share_types=False so that it can be
# selectively lowered without modifying the type of self.scripted_module.
lowered_module = to_test_backend_multi(
torch.jit.script(BasicModule()),
{"accum": {"": ""}, "sub_accum": {"": ""}, "forward": {"": ""}},
)
# self.lowered_module is a ScriptModule, but its submodule is a lowered module.
self.lowered_module = torch.jit.script(NestedModuleTest.NestedModule(lowered_module))
def test_execution(self):
# Test execution with backend against Python and JIT.
input = torch.randn(5)
# Test forward.
self.check_function("forward", (input, input))
def test_save_load(self):
# Lowered module should produce the same outputs.
self.test_execution()
# Save and load the lowered module.
self.save_load()
# Loaded module should produce the same outputs.
self.test_execution()
class SelectiveLoweringTest(JitBackendTestCase):
"""
Tests for the selective lowering API.
"""
class OuterModule(torch.nn.Module):
def __init__(self, sub1, sub2, other):
super().__init__()
self.sub1 = sub1
self.sub2 = sub2
self.other = other
def forward(self, x, y):
# Call the module that will be lowered directly to test
# type remapping in modules that are not its parent.
a, b = self.sub1.submodule.forward(x, y)
c, d = self.sub2.forward(x, y)
e, f = self.other.forward(x, y)
return a + c + e, b + d + f
class MiddleModule(torch.nn.Module):
def __init__(self, submodule):
super().__init__()
self.submodule = submodule
def forward(self, x, y):
return self.submodule.forward(x, y)
def setUp(self):
super().setUp()
OuterModule = SelectiveLoweringTest.OuterModule
MiddleModule = SelectiveLoweringTest.MiddleModule
def script_without_type_sharing(mod):
return torch.jit._recursive.create_script_module(mod, torch.jit._recursive.infer_methods_to_compile, share_types=False)
# Create Python, JIT and backend versions of a hierarchy that looks like this:
# --------- OuterModule --------
# | | |
# MiddleModule MiddleModule MiddleModule
# | | |
# BasicModule BasicModule BasicModule
#
# Two BasicModules will be lowered and the third will not.
self.module = OuterModule(MiddleModule(BasicModule()), MiddleModule(BasicModule()), MiddleModule(BasicModule()))
self.scripted_module = script_without_type_sharing(OuterModule(MiddleModule(
BasicModule()), MiddleModule(BasicModule()), MiddleModule(BasicModule())))
self.lowered_module = script_without_type_sharing(OuterModule(MiddleModule(
BasicModule()), MiddleModule(BasicModule()), MiddleModule(BasicModule())))
self.lowered_module = to_test_backend_selective(self.lowered_module, {"forward": ""}, [
"sub1.submodule", "sub2.submodule"])
def test_execution(self):
input = torch.randn(5)
self.check_function("forward", (input, input))
self.test_selective_lowering_type_remap()
def test_save_load(self):
self.test_execution()
self.save_load()
self.test_execution()
self.test_selective_lowering_type_remap()
def test_selective_lowering_type_remap(self):
"""
Check that type remapping and replacement occurred during selective lowering.
"""
# Check that self.lowered_module was not lowered, but that it does contain test_backendLoweredModule due to it
# calling the lowered module directly.
FileCheck() \
.check("OuterModule") \
.check("BasicModule") \
.run(self.scripted_module.graph)
FileCheck() \
.check("OuterModule") \
.check_not("__torch__.torch.classes.__backends__.test_backend") \
.check("LoweredWrapper.test_backend") \
.run(self.lowered_module.graph)
# Check that self.lowered_module.sub1/sub2 were not lowered but that BasicModule has been replaced in their graphs.
FileCheck() \
.check("MiddleModule") \
.check("BasicModule") \
.check_not("LoweredWrapper.test_backend") \
.run(self.scripted_module.sub1.graph)
FileCheck() \
.check("MiddleModule") \
.check_not("__torch__.torch.classes.__backends__.test_backend") \
.check("LoweredWrapper.test_backend") \
.run(self.lowered_module.sub1.graph)
FileCheck() \
.check("MiddleModule") \
.check("BasicModule") \
.check_not("LoweredWrapper.test_backend") \
.run(self.scripted_module.sub2.graph)
FileCheck() \
.check("MiddleModule") \
.check_not("__torch__.torch.classes.__backends__.test_backend") \
.check("LoweredWrapper.test_backend") \
.run(self.lowered_module.sub2.graph)
# Check that self.lowered_module.sub1/sub2.submodule were lowered. They should have a new attribute
# __loweredModule__ whose graph should mention __torch__.torch.classes.__backends__.test_backend,
# the TorchBind class for executing functions on the test JIT backend.
FileCheck() \
.check("LoweredModule.test_backend") \
.check("__torch__.torch.classes.__backends__.test_backend") \
.run(self.lowered_module.sub1.submodule.__loweredModule__.graph)
FileCheck() \
.check("LoweredModule.test_backend") \
.check("__torch__.torch.classes.__backends__.test_backend") \
.run(self.lowered_module.sub2.submodule.__loweredModule__.graph)
# Check that self.other and self.other.submodule have been left untouched by the selective lowering process.
FileCheck() \
.check("MiddleModule") \
.check("BasicModule") \
.check_not("__torch__.torch.classes.__backends__.test_backend") \
.check_not("LoweredWrapper.test_backend") \
.run(self.scripted_module.other.graph)
FileCheck() \
.check("BasicModule") \
.check_not("__torch__.torch.classes.__backends__.test_backend") \
.check_not("LoweredModule.test_backend") \
.run(self.scripted_module.other.submodule.graph)
def test_errors(self):
"""
Check errors associated with selective lowering.
"""
# Check error messages thrown when attempting to lower something that is not a ScriptModule.
with self.assertRaisesRegexWithHighlight(RuntimeError, r"Object .* is not a ScriptModule", ""):
to_test_backend_selective(torch.nn.ReLU(), {"forward": ""}, ["submodule"])
MiddleModule = SelectiveLoweringTest.MiddleModule
mod = MiddleModule(BasicModule())
mod.new_attr = 3
with self.assertRaisesRegexWithHighlight(RuntimeError, r"Attribute named new_attr is not a Module", ""):
to_test_backend_selective(torch.jit.script(mod), {"forward": ""}, ["new_attr"])
# Check error message thrown when module hierarchy doesn't have unique types.
OuterModule = SelectiveLoweringTest.OuterModule
mod = OuterModule(MiddleModule(BasicModule()), MiddleModule(BasicModule()), MiddleModule(BasicModule()))
with self.assertRaisesRegexWithHighlight(RuntimeError,
r"Selective lowering is only supported for module hierarchies with unique types",
""):
to_test_backend_selective(torch.jit.script(mod), {"forward": ""}, ["sub1.submodule"])
# This is needed for IS_WINDOWS or IS_MACOS to skip the tests.
@unittest.skipIf(TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
"Non-portable load_library call used in test")
class TestBackends(JitTestCase):
"""
This class wraps and invokes all subclasses of JitBackendTestCase so that each one
does not have to be individually imported in test_jit.py.
"""
def __init__(self, name):
super().__init__(name)
self.basic_module_test = BasicModuleTest(name)
self.basic_module_unavailable_test = BasicModuleUnavailableTest(name)
self.nested_module_test = NestedModuleTest(name)
self.selective_lowering_test = SelectiveLoweringTest(name)
def setUp(self):
super().setUp()
if not TEST_WITH_ROCM:
self.basic_module_test.setUp()
self.basic_module_unavailable_test.setUp()
self.nested_module_test.setUp()
self.selective_lowering_test.setUp()
@skipIfRocm
def test_execution(self):
self.basic_module_test.test_execution()
self.basic_module_unavailable_test.test_execution()
self.nested_module_test.test_execution()
self.selective_lowering_test.test_execution()
@skipIfRocm
def test_save_load(self):
self.basic_module_test.test_save_load()
self.basic_module_unavailable_test.test_save_load()
self.nested_module_test.test_save_load()
self.selective_lowering_test.test_save_load()
@skipIfRocm
def test_errors(self):
self.selective_lowering_test.test_errors()
"""
Unit Tests for backend with compiler
This test case and the existing TestBackends are separate because they cover different aspects.
The actual backend implementation in this test is different.
It has a simple demo compiler to test the end-to-end flow in mobile.
However, this test cannot cover the selective_lowering for now, which is covered in TestBackends.
"""
class BasicModuleAdd(torch.nn.Module):
"""
A simple add Module used to test to_backend lowering machinery.
"""
def __init__(self):
super().__init__()
def forward(self, x, h):
return x + h
# This is ignored in IS_WINDOWS or IS_MACOS cases. Hence we need the one in TestBackends.
@unittest.skipIf(TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
"Non-portable load_library call used in test")
class JitBackendTestCaseWithCompiler(JitTestCase):
"""
A common base class for JIT backend tests with compilers that contains common utility
functions for output comparison.
"""
def setUp(self):
super().setUp()
lib_file_path = find_library_location('libbackend_with_compiler.so')
torch.ops.load_library(str(lib_file_path))
# Subclasses are expected to set up four variables in their setUp methods:
# module - a regular, Python version of the module being tested
# scripted_module - a scripted version of module
# lowered_module - a version of module lowered to a backend
# mobile_module - a module with a format that Pytorch Mobile can execute
def check_forward(self, input):
"""
Check that the forward function produces the same output using
Python, regular JIT, the backend, and mobile for the given 'input'.
"""
# Get outputs from forward.
python_output = self.module.forward(*input)
jit_output = self.scripted_module.forward(*input)
backend_output = self.lowered_module(*input)
mobile_output = self.mobile_module(*input)
# The answers returned by Python, JIT, to_backend, and mobile should all match.
self.assertEqual(python_output, backend_output)
self.assertEqual(jit_output, backend_output)
self.assertEqual(mobile_output, backend_output)
def test_execution(self):
"""
Stub for correctness tests.
"""
pass
def test_errors(self):
"""
Stub for testing error checking.
"""
pass
class BasicModuleTestWithCompiler(JitBackendTestCaseWithCompiler):
"""
Tests for BasicModuleAdd.
"""
def setUp(self):
super().setUp()
# Create Python, JIT and backend versions of BasicModuleAdd.
self.module = BasicModuleAdd()
self.scripted_module = torch.jit.script(BasicModuleAdd())
compile_spec = {
"forward": {
"input_shapes": "((1, 1, 320, 240), (1, 3))",
"some_other_option": "True",
},
}
self.lowered_module = torch._C._jit_to_backend(
"backend_with_compiler_demo", self.scripted_module, compile_spec)
# Create mobile version of BasicModuleAdd
buffer = io.BytesIO(self.lowered_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
self.mobile_module = _load_for_lite_interpreter(buffer)
def test_execution(self):
# Test execution with backend against Python and JIT.
input = torch.ones(1, dtype=torch.float)
self.check_forward((input, input))
class ErrorMessagesWithCompiler(JitBackendTestCase):
"""
Tests for errors that occur with compiler, specifically:
* an operator is not supported by the backend
"""
class ModuleNotSupported(torch.nn.Module):
"""
A module with an operator that is not supported.
"""
def __init__(self):
super().__init__()
def forward(self, x, h):
return x * h
self._loweredmodule.forward()
def setUp(self):
super().setUp()
def test_errors(self):
scripted_module_n = torch.jit.script(ErrorMessagesWithCompiler.ModuleNotSupported())
# Test exception is thrown when lowering a module with an unsupported operator
with self.assertRaisesRegexWithHighlight(RuntimeError,
# Special escape characters are replaced with '.'
r"""The node of aten::mul is not supported in this compiler. .*
def forward.self, x, h.:
return x . h
~~~~~ <--- HERE
self._loweredmodule.forward..
""", ""):
lowered_module_n = torch._C._jit_to_backend("backend_with_compiler_demo", scripted_module_n, {"forward": {"": ""}})
class CompModuleTestWithCompiler(JitBackendTestCase):
"""
Tests for CompModule, which is a module with two lowered submodules
"""
class BasicModuleSub(torch.nn.Module):
"""
A simple subtraction Module to be used in CompModule.
"""
def __init__(self):
super().__init__()
def forward(self, x, h):
return x - h
class CompModule(torch.nn.Module):
"""
A module with two lowered submodules.
"""
def __init__(self, addmodule, submodule):
super().__init__()
self.lowered_add = addmodule
self.lowered_sub = submodule
def forward(self, a, b, s):
c = self.lowered_add.forward(a, b)
d = self.lowered_sub.forward(a, b)
y = s * (c * d)
return y
def setUp(self):
super().setUp()
# Create Python and JIT versions of CompModule with lowered submodules.
compile_spec = {
"forward": {
"input_shapes": "((1, 1, 320, 240), (1, 3))",
"some_other_option": "True",
},
}
lowered_add = torch._C._jit_to_backend(
"backend_with_compiler_demo", torch.jit.script(BasicModuleAdd()), compile_spec)
lowered_sub = torch._C._jit_to_backend(
"backend_with_compiler_demo",
torch.jit.script(CompModuleTestWithCompiler.BasicModuleSub()),
{"forward": {"": ""}}
)
self.module = CompModuleTestWithCompiler.CompModule(lowered_add, lowered_sub)
self.scripted_module = torch.jit.script(CompModuleTestWithCompiler.CompModule(lowered_add, lowered_sub))
# No backend version of CompModule currently, so this is filler.
self.lowered_module = self.scripted_module
# Create a mobile version of CompModule from JIT version
buffer = io.BytesIO(self.scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
self.mobile_module = _load_for_lite_interpreter(buffer)
def test_execution(self):
# Test execution with backend against Python and JIT.
input1 = torch.ones(1, dtype=torch.float)
input2 = torch.ones(1, dtype=torch.float)
# Test forward.
self.check_function("forward", (input1, input2, input2))
# This is needed for IS_WINDOWS or IS_MACOS to skip the tests.
@unittest.skipIf(TEST_WITH_ROCM or IS_SANDCASTLE or IS_WINDOWS or IS_MACOS or IS_FBCODE,
"Non-portable load_library call used in test")
class TestBackendsWithCompiler(JitTestCase):
"""
This class wraps and invokes all subclasses of JitBackendTestCaseWithCompiler
so that each one does not have to be individually imported in test_jit.py.
"""
def __init__(self, name):
super().__init__(name)
self.basic_module_compiler_test = BasicModuleTestWithCompiler(name)
self.error_module_compiler_test = ErrorMessagesWithCompiler(name)
self.comp_module_compiler_test = CompModuleTestWithCompiler(name)
def setUp(self):
super().setUp()
if not TEST_WITH_ROCM:
self.basic_module_compiler_test.setUp()
self.error_module_compiler_test.setUp()
self.comp_module_compiler_test.setUp()
@skipIfRocm
def test_execution(self):
self.basic_module_compiler_test.test_execution()
self.comp_module_compiler_test.test_execution()
@skipIfRocm
def test_errors(self):
self.error_module_compiler_test.test_errors()
class CompModuleTestSameNameWithCompiler(JitBackendTestCase):
"""
Tests for CompModule, which is a module with two lowered submodules with the same module name
"""
class ModuleAdd(torch.nn.Module):
"""
A simple Module used to test to_backend lowering machinery.
"""
def __init__(self):
super().__init__()
def forward(self, x, h):
return x + h
class CompModule(torch.nn.Module):
"""
A module with two lowered submodules.
"""
def __init__(self):
super().__init__()
compile_spec = {
"forward": {
"some_other_option": "True",
},
}
self.add = torch._C._jit_to_backend(
"backend_with_compiler_demo",
torch.jit.script(CompModuleTestSameNameWithCompiler.ModuleAdd()),
compile_spec,
)
self.sub = torch._C._jit_to_backend(
"backend_with_compiler_demo",
torch.jit.script(CompModuleTestSameNameWithCompiler.ModuleAdd()),
compile_spec,
)
def forward(self, a, b, s: int):
c = self.add.forward(a, b)
d = self.sub.forward(a, b)
y = s * (c * d)
return y
def setUp(self):
super().setUp()
self.module = CompModuleTestSameNameWithCompiler.CompModule()
self.scripted_module = torch.jit.script(self.module)
buffer = io.BytesIO(self.scripted_module._save_to_buffer_for_lite_interpreter())
buffer.seek(0)
self.mobile_module = _load_for_lite_interpreter(buffer)
def test_execution(self):
a = torch.ones(1)
b = 3 * torch.ones(1)
s = 3
# Test forward.
self.check_function("forward", (a, b, s))
class AddedAttributesTest(JitBackendTestCase):
"""
Tests for adding attributes to a model after lowering.
"""
def setUp(self):
super().setUp()
# Create Python, JIT and backend versions of BasicModule.
self.module = BasicModule()
self.scripted_module = torch.jit.script(BasicModule())
self.lowered_module = to_test_backend_multi(
self.scripted_module,
{"accum": {"": ""}, "sub_accum": {"": ""}, "forward": {"": ""}},
)
def test_attribute(self):
input = [(torch.ones(5),)]
pre_bundled = self.lowered_module(*input[0])
# Attach bundled inputs which adds several attributes and functions to the model
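# (augment_model_with_bundled_inputs stores the example inputs on the module and generates
# accessors such as get_all_bundled_inputs(), which is used below.)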
self.lowered_module = torch.utils.bundled_inputs.augment_model_with_bundled_inputs(self.lowered_module, input)
post_bundled = self.lowered_module(*self.lowered_module.get_all_bundled_inputs()[0])
# Save and load the lowered module.
self.save_load()
# Use bundled after save and load to prove its preserved
post_load = self.lowered_module(*self.lowered_module.get_all_bundled_inputs()[0])
self.assertEqual(pre_bundled, post_bundled)
self.assertEqual(post_bundled, post_load)
|
pytorch-master
|
test/jit/test_backends.py
|